/*
 * S/390 memory access helper routines
 *
 * Copyright (c) 2009 Ulrich Hecht
 * Copyright (c) 2009 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "s390x-internal.h"
#include "tcg_s390x.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "hw/core/tcg-cpu-ops.h"
#include "qemu/int128.h"
#include "qemu/atomic128.h"
#include "trace.h"

#if !defined(CONFIG_USER_ONLY)
#include "hw/s390x/storage-keys.h"
#include "hw/boards.h"
#endif

#ifdef CONFIG_USER_ONLY
# define user_or_likely(X)    true
#else
# define user_or_likely(X)    likely(X)
#endif

/*****************************************************************************/
/* Softmmu support */

/* #define DEBUG_HELPER */
#ifdef DEBUG_HELPER
#define HELPER_LOG(x...) qemu_log(x)
#else
#define HELPER_LOG(x...)
#endif

static inline bool psw_key_valid(CPUS390XState *env, uint8_t psw_key)
{
    uint16_t pkm = env->cregs[3] >> 16;

    if (env->psw.mask & PSW_MASK_PSTATE) {
        /* PSW key has range 0..15, it is valid if the bit is 1 in the PKM */
        return pkm & (0x8000 >> psw_key);
    }
    return true;
}

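/*
 * Example: in problem state with psw_key == 2, the check above evaluates
 * pkm & (0x8000 >> 2), i.e. PKM bit 2. In supervisor state
 * (PSW_MASK_PSTATE clear), every key is accepted.
 */
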
static bool is_destructive_overlap(CPUS390XState *env, uint64_t dest,
                                   uint64_t src, uint32_t len)
{
    if (!len || src == dest) {
        return false;
    }
    /* Take care of wrapping at the end of address space. */
    if (unlikely(wrap_address(env, src + len - 1) < src)) {
        return dest > src || dest <= wrap_address(env, src + len - 1);
    }
    return dest > src && dest <= src + len - 1;
}

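/*
 * Worked example for is_destructive_overlap(): dest = 0x1002, src = 0x1000,
 * len = 4 is destructive (dest lies within [src + 1, src + len - 1]),
 * while dest = 0x0ffe, src = 0x1000, len = 4 is not, because an ascending
 * byte-by-byte copy never reads data it has already overwritten.
 */
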
/* Trigger a SPECIFICATION exception if an address or a length is not
   naturally aligned.  */
static inline void check_alignment(CPUS390XState *env, uint64_t v,
                                   int wordsize, uintptr_t ra)
{
    if (v % wordsize) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
    }
}

/* Load a value from memory according to its size.  */
static inline uint64_t cpu_ldusize_data_ra(CPUS390XState *env, uint64_t addr,
                                           int wordsize, uintptr_t ra)
{
    switch (wordsize) {
    case 1:
        return cpu_ldub_data_ra(env, addr, ra);
    case 2:
        return cpu_lduw_data_ra(env, addr, ra);
    default:
        abort();
    }
}

/* Store a value to memory according to its size.  */
static inline void cpu_stsize_data_ra(CPUS390XState *env, uint64_t addr,
                                      uint64_t value, int wordsize,
                                      uintptr_t ra)
{
    switch (wordsize) {
    case 1:
        cpu_stb_data_ra(env, addr, value, ra);
        break;
    case 2:
        cpu_stw_data_ra(env, addr, value, ra);
        break;
    default:
        abort();
    }
}

/* An access covers at most 4096 bytes and therefore at most two pages. */
typedef struct S390Access {
    target_ulong vaddr1;
    target_ulong vaddr2;
    void *haddr1;
    void *haddr2;
    uint16_t size1;
    uint16_t size2;
    /*
     * If we can't access the host page directly, we'll have to do I/O access
     * via ld/st helpers. These are internal details, so we store the
     * mmu idx to do the access here instead of passing it around in the
     * helpers.
     */
    int mmu_idx;
} S390Access;

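/*
 * Example: a 256-byte access starting at vaddr 0xfc0 with 4k pages yields
 * size1 = 0x40 (up to the page end at 0x1000) and size2 = 0xc0 on the
 * following page; haddr1/haddr2 are the corresponding host pointers, or
 * NULL when the page must be accessed via the ld/st helpers instead.
 */
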
/*
 * With nonfault=1, return the PGM_ exception that would have been injected
 * into the guest; return 0 if no exception was detected.
 *
 * For !CONFIG_USER_ONLY, the TEC is stored to env->tlb_fill_tec.
 * For CONFIG_USER_ONLY, the faulting address is stored to env->__excp_addr.
 */
static inline int s390_probe_access(CPUArchState *env, target_ulong addr,
                                    int size, MMUAccessType access_type,
                                    int mmu_idx, bool nonfault,
                                    void **phost, uintptr_t ra)
{
    int flags = probe_access_flags(env, addr, 0, access_type, mmu_idx,
                                   nonfault, phost, ra);

    if (unlikely(flags & TLB_INVALID_MASK)) {
#ifdef CONFIG_USER_ONLY
        /* Address is in TEC in system mode; see s390_cpu_record_sigsegv. */
        env->__excp_addr = addr & TARGET_PAGE_MASK;
        return (page_get_flags(addr) & PAGE_VALID
                ? PGM_PROTECTION : PGM_ADDRESSING);
#else
        return env->tlb_fill_exc;
#endif
    }

#ifndef CONFIG_USER_ONLY
    if (unlikely(flags & TLB_WATCHPOINT)) {
        /* S390 does not presently use transaction attributes. */
        cpu_check_watchpoint(env_cpu(env), addr, size,
                             MEMTXATTRS_UNSPECIFIED,
                             (access_type == MMU_DATA_STORE
                              ? BP_MEM_WRITE : BP_MEM_READ), ra);
    }
#endif

    return 0;
}

static int access_prepare_nf(S390Access *access, CPUS390XState *env,
                             bool nonfault, vaddr vaddr1, int size,
                             MMUAccessType access_type,
                             int mmu_idx, uintptr_t ra)
{
    int size1, size2, exc;

    assert(size > 0 && size <= 4096);

    size1 = MIN(size, -(vaddr1 | TARGET_PAGE_MASK));
    size2 = size - size1;

    memset(access, 0, sizeof(*access));
    access->vaddr1 = vaddr1;
    access->size1 = size1;
    access->size2 = size2;
    access->mmu_idx = mmu_idx;

    exc = s390_probe_access(env, vaddr1, size1, access_type, mmu_idx, nonfault,
                            &access->haddr1, ra);
    if (unlikely(exc)) {
        return exc;
    }
    if (unlikely(size2)) {
        /* The access crosses page boundaries. */
        vaddr vaddr2 = wrap_address(env, vaddr1 + size1);

        access->vaddr2 = vaddr2;
        exc = s390_probe_access(env, vaddr2, size2, access_type, mmu_idx,
                                nonfault, &access->haddr2, ra);
        if (unlikely(exc)) {
            return exc;
        }
    }
    return 0;
}

static inline void access_prepare(S390Access *ret, CPUS390XState *env,
                                  vaddr vaddr, int size,
                                  MMUAccessType access_type, int mmu_idx,
                                  uintptr_t ra)
{
    int exc = access_prepare_nf(ret, env, false, vaddr, size,
                                access_type, mmu_idx, ra);
    assert(!exc);
}

/* Helper to handle memset on a single page. */
static void do_access_memset(CPUS390XState *env, vaddr vaddr, char *haddr,
                             uint8_t byte, uint16_t size, int mmu_idx,
                             uintptr_t ra)
{
#ifdef CONFIG_USER_ONLY
    memset(haddr, byte, size);
#else
    if (likely(haddr)) {
        memset(haddr, byte, size);
    } else {
        MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
        for (int i = 0; i < size; i++) {
            cpu_stb_mmu(env, vaddr + i, byte, oi, ra);
        }
    }
#endif
}

static void access_memset(CPUS390XState *env, S390Access *desta,
                          uint8_t byte, uintptr_t ra)
{
    do_access_memset(env, desta->vaddr1, desta->haddr1, byte, desta->size1,
                     desta->mmu_idx, ra);
    if (likely(!desta->size2)) {
        return;
    }
    do_access_memset(env, desta->vaddr2, desta->haddr2, byte, desta->size2,
                     desta->mmu_idx, ra);
}

static uint8_t access_get_byte(CPUS390XState *env, S390Access *access,
                               int offset, uintptr_t ra)
{
    target_ulong vaddr = access->vaddr1;
    void *haddr = access->haddr1;

    if (unlikely(offset >= access->size1)) {
        offset -= access->size1;
        vaddr = access->vaddr2;
        haddr = access->haddr2;
    }

    if (user_or_likely(haddr)) {
        return ldub_p(haddr + offset);
    } else {
        MemOpIdx oi = make_memop_idx(MO_UB, access->mmu_idx);
        return cpu_ldb_mmu(env, vaddr + offset, oi, ra);
    }
}

static void access_set_byte(CPUS390XState *env, S390Access *access,
                            int offset, uint8_t byte, uintptr_t ra)
{
    target_ulong vaddr = access->vaddr1;
    void *haddr = access->haddr1;

    if (unlikely(offset >= access->size1)) {
        offset -= access->size1;
        vaddr = access->vaddr2;
        haddr = access->haddr2;
    }

    if (user_or_likely(haddr)) {
        stb_p(haddr + offset, byte);
    } else {
        MemOpIdx oi = make_memop_idx(MO_UB, access->mmu_idx);
        cpu_stb_mmu(env, vaddr + offset, byte, oi, ra);
    }
}

/*
 * Move data with the same semantics as memmove() in case ranges don't overlap
 * or src > dest. Undefined behavior on destructive overlaps.
 */
static void access_memmove(CPUS390XState *env, S390Access *desta,
                           S390Access *srca, uintptr_t ra)
{
    int len = desta->size1 + desta->size2;
    int diff;

    assert(len == srca->size1 + srca->size2);

    /* Fallback to slow access in case we don't have access to all host pages */
    if (unlikely(!desta->haddr1 || (desta->size2 && !desta->haddr2) ||
                 !srca->haddr1 || (srca->size2 && !srca->haddr2))) {
        int i;

        for (i = 0; i < len; i++) {
            uint8_t byte = access_get_byte(env, srca, i, ra);

            access_set_byte(env, desta, i, byte, ra);
        }
        return;
    }

    diff = desta->size1 - srca->size1;
    if (likely(diff == 0)) {
        memmove(desta->haddr1, srca->haddr1, srca->size1);
        if (unlikely(srca->size2)) {
            memmove(desta->haddr2, srca->haddr2, srca->size2);
        }
    } else if (diff > 0) {
        memmove(desta->haddr1, srca->haddr1, srca->size1);
        memmove(desta->haddr1 + srca->size1, srca->haddr2, diff);
        if (likely(desta->size2)) {
            memmove(desta->haddr2, srca->haddr2 + diff, desta->size2);
        }
    } else {
        diff = -diff;
        memmove(desta->haddr1, srca->haddr1, desta->size1);
        memmove(desta->haddr2, srca->haddr1 + desta->size1, diff);
        if (likely(srca->size2)) {
            memmove(desta->haddr2 + diff, srca->haddr2, srca->size2);
        }
    }
}

static int mmu_idx_from_as(uint8_t as)
{
    switch (as) {
    case AS_PRIMARY:
        return MMU_PRIMARY_IDX;
    case AS_SECONDARY:
        return MMU_SECONDARY_IDX;
    case AS_HOME:
        return MMU_HOME_IDX;
    default:
        /* FIXME AS_ACCREG */
        g_assert_not_reached();
    }
}

/* and on array */
static uint32_t do_helper_nc(CPUS390XState *env, uint32_t l, uint64_t dest,
                             uint64_t src, uintptr_t ra)
{
    const int mmu_idx = cpu_mmu_index(env, false);
    S390Access srca1, srca2, desta;
    uint32_t i;
    uint8_t c = 0;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __func__, l, dest, src);

    /* NC always processes one more byte than specified - maximum is 256 */
    l++;

    access_prepare(&srca1, env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
    access_prepare(&srca2, env, dest, l, MMU_DATA_LOAD, mmu_idx, ra);
    access_prepare(&desta, env, dest, l, MMU_DATA_STORE, mmu_idx, ra);
    for (i = 0; i < l; i++) {
        const uint8_t x = access_get_byte(env, &srca1, i, ra) &
                          access_get_byte(env, &srca2, i, ra);

        c |= x;
        access_set_byte(env, &desta, i, x, ra);
    }
    return c != 0;
}

uint32_t HELPER(nc)(CPUS390XState *env, uint32_t l, uint64_t dest,
                    uint64_t src)
{
    return do_helper_nc(env, l, dest, src, GETPC());
}

/* xor on array */
static uint32_t do_helper_xc(CPUS390XState *env, uint32_t l, uint64_t dest,
                             uint64_t src, uintptr_t ra)
{
    const int mmu_idx = cpu_mmu_index(env, false);
    S390Access srca1, srca2, desta;
    uint32_t i;
    uint8_t c = 0;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __func__, l, dest, src);

    /* XC always processes one more byte than specified - maximum is 256 */
    l++;

    access_prepare(&srca1, env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
    access_prepare(&srca2, env, dest, l, MMU_DATA_LOAD, mmu_idx, ra);
    access_prepare(&desta, env, dest, l, MMU_DATA_STORE, mmu_idx, ra);

    /* xor with itself is the same as memset(0) */
    if (src == dest) {
        access_memset(env, &desta, 0, ra);
        return 0;
    }

    for (i = 0; i < l; i++) {
        const uint8_t x = access_get_byte(env, &srca1, i, ra) ^
                          access_get_byte(env, &srca2, i, ra);

        c |= x;
        access_set_byte(env, &desta, i, x, ra);
    }
    return c != 0;
}

uint32_t HELPER(xc)(CPUS390XState *env, uint32_t l, uint64_t dest,
                    uint64_t src)
{
    return do_helper_xc(env, l, dest, src, GETPC());
}

/* or on array */
static uint32_t do_helper_oc(CPUS390XState *env, uint32_t l, uint64_t dest,
                             uint64_t src, uintptr_t ra)
{
    const int mmu_idx = cpu_mmu_index(env, false);
    S390Access srca1, srca2, desta;
    uint32_t i;
    uint8_t c = 0;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __func__, l, dest, src);

    /* OC always processes one more byte than specified - maximum is 256 */
    l++;

    access_prepare(&srca1, env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
    access_prepare(&srca2, env, dest, l, MMU_DATA_LOAD, mmu_idx, ra);
    access_prepare(&desta, env, dest, l, MMU_DATA_STORE, mmu_idx, ra);
    for (i = 0; i < l; i++) {
        const uint8_t x = access_get_byte(env, &srca1, i, ra) |
                          access_get_byte(env, &srca2, i, ra);

        c |= x;
        access_set_byte(env, &desta, i, x, ra);
    }
    return c != 0;
}

uint32_t HELPER(oc)(CPUS390XState *env, uint32_t l, uint64_t dest,
                    uint64_t src)
{
    return do_helper_oc(env, l, dest, src, GETPC());
}

/* memmove */
static uint32_t do_helper_mvc(CPUS390XState *env, uint32_t l, uint64_t dest,
                              uint64_t src, uintptr_t ra)
{
    const int mmu_idx = cpu_mmu_index(env, false);
    S390Access srca, desta;
    uint32_t i;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __func__, l, dest, src);

    /* MVC always copies one more byte than specified - maximum is 256 */
    l++;

    access_prepare(&srca, env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
    access_prepare(&desta, env, dest, l, MMU_DATA_STORE, mmu_idx, ra);

    /*
     * "When the operands overlap, the result is obtained as if the operands
     * were processed one byte at a time". Only non-destructive overlaps
     * behave like memmove().
     */
    if (dest == src + 1) {
        access_memset(env, &desta, access_get_byte(env, &srca, 0, ra), ra);
    } else if (!is_destructive_overlap(env, dest, src, l)) {
        access_memmove(env, &desta, &srca, ra);
    } else {
        for (i = 0; i < l; i++) {
            uint8_t byte = access_get_byte(env, &srca, i, ra);

            access_set_byte(env, &desta, i, byte, ra);
        }
    }

    return env->cc_op;
}

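/*
 * Note on the dest == src + 1 special case above: a byte-at-a-time copy
 * with a one-byte destructive overlap propagates the first source byte
 * across the entire destination, which is exactly access_memset() with
 * that byte - the classic MVC idiom for filling storage.
 */
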
void HELPER(mvc)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
{
    do_helper_mvc(env, l, dest, src, GETPC());
}

/* move right to left */
void HELPER(mvcrl)(CPUS390XState *env, uint64_t l, uint64_t dest, uint64_t src)
{
    const int mmu_idx = cpu_mmu_index(env, false);
    const uint64_t ra = GETPC();
    S390Access srca, desta;
    int32_t i;

    /* MVCRL always copies one more byte than specified - maximum is 256 */
    l++;

    access_prepare(&srca, env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
    access_prepare(&desta, env, dest, l, MMU_DATA_STORE, mmu_idx, ra);

    for (i = l - 1; i >= 0; i--) {
        uint8_t byte = access_get_byte(env, &srca, i, ra);
        access_set_byte(env, &desta, i, byte, ra);
    }
}

/* move inverse */
void HELPER(mvcin)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
{
    const int mmu_idx = cpu_mmu_index(env, false);
    S390Access srca, desta;
    uintptr_t ra = GETPC();
    int i;

    /* MVCIN always copies one more byte than specified - maximum is 256 */
    l++;

    src = wrap_address(env, src - l + 1);
    access_prepare(&srca, env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
    access_prepare(&desta, env, dest, l, MMU_DATA_STORE, mmu_idx, ra);
    for (i = 0; i < l; i++) {
        const uint8_t x = access_get_byte(env, &srca, l - i - 1, ra);

        access_set_byte(env, &desta, i, x, ra);
    }
}

/* move numerics */
void HELPER(mvn)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
{
    const int mmu_idx = cpu_mmu_index(env, false);
    S390Access srca1, srca2, desta;
    uintptr_t ra = GETPC();
    int i;

    /* MVN always copies one more byte than specified - maximum is 256 */
    l++;

    access_prepare(&srca1, env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
    access_prepare(&srca2, env, dest, l, MMU_DATA_LOAD, mmu_idx, ra);
    access_prepare(&desta, env, dest, l, MMU_DATA_STORE, mmu_idx, ra);
    for (i = 0; i < l; i++) {
        const uint8_t x = (access_get_byte(env, &srca1, i, ra) & 0x0f) |
                          (access_get_byte(env, &srca2, i, ra) & 0xf0);

        access_set_byte(env, &desta, i, x, ra);
    }
}

/* move with offset */
void HELPER(mvo)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
{
    const int mmu_idx = cpu_mmu_index(env, false);
    /* MVO always processes one more byte than specified - maximum is 16 */
    const int len_dest = (l >> 4) + 1;
    const int len_src = (l & 0xf) + 1;
    uintptr_t ra = GETPC();
    uint8_t byte_dest, byte_src;
    S390Access srca, desta;
    int i, j;

    access_prepare(&srca, env, src, len_src, MMU_DATA_LOAD, mmu_idx, ra);
    access_prepare(&desta, env, dest, len_dest, MMU_DATA_STORE, mmu_idx, ra);

    /* Handle rightmost byte */
    byte_dest = cpu_ldub_data_ra(env, dest + len_dest - 1, ra);
    byte_src = access_get_byte(env, &srca, len_src - 1, ra);
    byte_dest = (byte_dest & 0x0f) | (byte_src << 4);
    access_set_byte(env, &desta, len_dest - 1, byte_dest, ra);

    /* Process remaining bytes from right to left */
    for (i = len_dest - 2, j = len_src - 2; i >= 0; i--, j--) {
        byte_dest = byte_src >> 4;
        if (j >= 0) {
            byte_src = access_get_byte(env, &srca, j, ra);
        } else {
            byte_src = 0;
        }
        byte_dest |= byte_src << 4;
        access_set_byte(env, &desta, i, byte_dest, ra);
    }
}

/* move zones */
void HELPER(mvz)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
{
    const int mmu_idx = cpu_mmu_index(env, false);
    S390Access srca1, srca2, desta;
    uintptr_t ra = GETPC();
    int i;

    /* MVZ always copies one more byte than specified - maximum is 256 */
    l++;

    access_prepare(&srca1, env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
    access_prepare(&srca2, env, dest, l, MMU_DATA_LOAD, mmu_idx, ra);
    access_prepare(&desta, env, dest, l, MMU_DATA_STORE, mmu_idx, ra);
    for (i = 0; i < l; i++) {
        const uint8_t x = (access_get_byte(env, &srca1, i, ra) & 0xf0) |
                          (access_get_byte(env, &srca2, i, ra) & 0x0f);

        access_set_byte(env, &desta, i, x, ra);
    }
}

/* compare unsigned byte arrays */
static uint32_t do_helper_clc(CPUS390XState *env, uint32_t l, uint64_t s1,
                              uint64_t s2, uintptr_t ra)
{
    uint32_t i;
    uint32_t cc = 0;

    HELPER_LOG("%s l %d s1 %" PRIx64 " s2 %" PRIx64 "\n",
               __func__, l, s1, s2);

    for (i = 0; i <= l; i++) {
        uint8_t x = cpu_ldub_data_ra(env, s1 + i, ra);
        uint8_t y = cpu_ldub_data_ra(env, s2 + i, ra);
        HELPER_LOG("%02x (%c)/%02x (%c) ", x, x, y, y);
        if (x < y) {
            cc = 1;
            break;
        } else if (x > y) {
            cc = 2;
            break;
        }
    }

    HELPER_LOG("\n");
    return cc;
}

uint32_t HELPER(clc)(CPUS390XState *env, uint32_t l, uint64_t s1, uint64_t s2)
{
    return do_helper_clc(env, l, s1, s2, GETPC());
}

/* compare logical under mask */
uint32_t HELPER(clm)(CPUS390XState *env, uint32_t r1, uint32_t mask,
                     uint64_t addr)
{
    uintptr_t ra = GETPC();
    uint32_t cc = 0;

    HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%" PRIx64 "\n", __func__, r1,
               mask, addr);

    while (mask) {
        if (mask & 8) {
            uint8_t d = cpu_ldub_data_ra(env, addr, ra);
            uint8_t r = extract32(r1, 24, 8);
            HELPER_LOG("mask 0x%x %02x/%02x (0x%" PRIx64 ") ", mask, r, d,
                       addr);
            if (r < d) {
                cc = 1;
                break;
            } else if (r > d) {
                cc = 2;
                break;
            }
            addr++;
        }
        mask = (mask << 1) & 0xf;
        r1 <<= 8;
    }

    HELPER_LOG("\n");
    return cc;
}

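/*
 * Example for CLM: with mask = 0b1010 (0xa), the first and third bytes of
 * r1 (scanned from the most significant byte) are compared against two
 * successive bytes at addr; register bytes whose mask bit is zero are
 * skipped and consume no storage operand byte.
 */
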
static inline uint64_t get_address(CPUS390XState *env, int reg)
{
    return wrap_address(env, env->regs[reg]);
}

/*
 * Store the address to the given register, zeroing out unused leftmost
 * bits in bit positions 32-63 (24-bit and 31-bit mode only).
 */
static inline void set_address_zero(CPUS390XState *env, int reg,
                                    uint64_t address)
{
    if (env->psw.mask & PSW_MASK_64) {
        env->regs[reg] = address;
    } else {
        if (!(env->psw.mask & PSW_MASK_32)) {
            address &= 0x00ffffff;
        } else {
            address &= 0x7fffffff;
        }
        env->regs[reg] = deposit64(env->regs[reg], 0, 32, address);
    }
}

static inline void set_address(CPUS390XState *env, int reg, uint64_t address)
{
    if (env->psw.mask & PSW_MASK_64) {
        /* 64-Bit mode */
        env->regs[reg] = address;
    } else {
        if (!(env->psw.mask & PSW_MASK_32)) {
            /* 24-Bit mode. According to the PoO it is implementation
               dependent if bits 32-39 remain unchanged or are set to
               zeros. Choose the former so that the function can also be
               used for TRT. */
            env->regs[reg] = deposit64(env->regs[reg], 0, 24, address);
        } else {
            /* 31-Bit mode. According to the PoO it is implementation
               dependent if bit 32 remains unchanged or is set to zero.
               Choose the latter so that the function can also be used for
               TRT. */
            address &= 0x7fffffff;
            env->regs[reg] = deposit64(env->regs[reg], 0, 32, address);
        }
    }
}

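/*
 * Example: in 24-bit mode, set_address(env, r, 0x12345678) deposits only
 * the low 24 bits (0x345678) and leaves PoO bits 32-39 of the register
 * unchanged, matching the TRT-compatible choice documented above.
 */
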
static inline uint64_t wrap_length32(CPUS390XState *env, uint64_t length)
{
    if (!(env->psw.mask & PSW_MASK_64)) {
        return (uint32_t)length;
    }
    return length;
}

static inline uint64_t wrap_length31(CPUS390XState *env, uint64_t length)
{
    if (!(env->psw.mask & PSW_MASK_64)) {
        /* 24-Bit and 31-Bit mode */
        length &= 0x7fffffff;
    }
    return length;
}

static inline uint64_t get_length(CPUS390XState *env, int reg)
{
    return wrap_length31(env, env->regs[reg]);
}

static inline void set_length(CPUS390XState *env, int reg, uint64_t length)
{
    if (env->psw.mask & PSW_MASK_64) {
        /* 64-Bit mode */
        env->regs[reg] = length;
    } else {
        /* 24-Bit and 31-Bit mode */
        env->regs[reg] = deposit64(env->regs[reg], 0, 32, length);
    }
}

/* search string (c is byte to search, r2 is string, r1 end of string) */
void HELPER(srst)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    uintptr_t ra = GETPC();
    uint64_t end, str;
    uint32_t len;
    uint8_t v, c = env->regs[0];

    /* Bits 32-55 must contain all 0. */
    if (env->regs[0] & 0xffffff00u) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
    }

    str = get_address(env, r2);
    end = get_address(env, r1);

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do. For now, let's cap at 8k. */
    for (len = 0; len < 0x2000; ++len) {
        if (str + len == end) {
            /* Character not found. R1 & R2 are unmodified. */
            env->cc_op = 2;
            return;
        }
        v = cpu_ldub_data_ra(env, str + len, ra);
        if (v == c) {
            /* Character found. Set R1 to the location; R2 is unmodified. */
            env->cc_op = 1;
            set_address(env, r1, str + len);
            return;
        }
    }

    /* CPU-determined bytes processed. Advance R2 to next byte to process. */
    env->cc_op = 3;
    set_address(env, r2, str + len);
}

void HELPER(srstu)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    uintptr_t ra = GETPC();
    uint32_t len;
    uint16_t v, c = env->regs[0];
    uint64_t end, str, adj_end;

    /* Bits 32-47 of R0 must be zero. */
    if (env->regs[0] & 0xffff0000u) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
    }

    str = get_address(env, r2);
    end = get_address(env, r1);

    /* If the LSB of the two addresses differ, use one extra byte. */
    adj_end = end + ((str ^ end) & 1);

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do. For now, let's cap at 8k. */
    for (len = 0; len < 0x2000; len += 2) {
        if (str + len == adj_end) {
            /* End of input found. */
            env->cc_op = 2;
            return;
        }
        v = cpu_lduw_data_ra(env, str + len, ra);
        if (v == c) {
            /* Character found. Set R1 to the location; R2 is unmodified. */
            env->cc_op = 1;
            set_address(env, r1, str + len);
            return;
        }
    }

    /* CPU-determined bytes processed. Advance R2 to next byte to process. */
    env->cc_op = 3;
    set_address(env, r2, str + len);
}

/* unsigned string compare (c is string terminator) */
Int128 HELPER(clst)(CPUS390XState *env, uint64_t c, uint64_t s1, uint64_t s2)
{
    uintptr_t ra = GETPC();
    uint32_t len;

    c = c & 0xff;
    s1 = wrap_address(env, s1);
    s2 = wrap_address(env, s2);

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do. For now, let's cap at 8k. */
    for (len = 0; len < 0x2000; ++len) {
        uint8_t v1 = cpu_ldub_data_ra(env, s1 + len, ra);
        uint8_t v2 = cpu_ldub_data_ra(env, s2 + len, ra);
        if (v1 == v2) {
            if (v1 == c) {
                /* Equal. CC=0, and don't advance the registers. */
                env->cc_op = 0;
                return int128_make128(s2, s1);
            }
        } else {
            /* Unequal. CC={1,2}, and advance the registers. Note that
               the terminator need not be zero, but the string that contains
               the terminator is by definition "low". */
            env->cc_op = (v1 == c ? 1 : v2 == c ? 2 : v1 < v2 ? 1 : 2);
            return int128_make128(s2 + len, s1 + len);
        }
    }

    /* CPU-determined bytes equal; advance the registers. */
    env->cc_op = 3;
    return int128_make128(s2 + len, s1 + len);
}

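/*
 * Example for CLST: comparing "abc" with "abd" and terminator c == 0
 * stops at the first unequal pair ('c' vs 'd'); neither byte is the
 * terminator and 'c' < 'd', so cc = 1 (first operand low) and the
 * advanced addresses are returned.
 */
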
/* move page */
uint32_t HELPER(mvpg)(CPUS390XState *env, uint64_t r0, uint32_t r1, uint32_t r2)
{
    const uint64_t src = get_address(env, r2) & TARGET_PAGE_MASK;
    const uint64_t dst = get_address(env, r1) & TARGET_PAGE_MASK;
    const int mmu_idx = cpu_mmu_index(env, false);
    const bool f = extract64(r0, 11, 1);
    const bool s = extract64(r0, 10, 1);
    const bool cco = extract64(r0, 8, 1);
    uintptr_t ra = GETPC();
    S390Access srca, desta;
    int exc;

    if ((f && s) || extract64(r0, 12, 4)) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC());
    }

    /*
     * We always manually handle exceptions such that we can properly store
     * r1/r2 to the lowcore on page-translation exceptions.
     *
     * TODO: Access key handling
     */
    exc = access_prepare_nf(&srca, env, true, src, TARGET_PAGE_SIZE,
                            MMU_DATA_LOAD, mmu_idx, ra);
    if (exc) {
        if (cco) {
            return 2;
        }
        goto inject_exc;
    }
    exc = access_prepare_nf(&desta, env, true, dst, TARGET_PAGE_SIZE,
                            MMU_DATA_STORE, mmu_idx, ra);
    if (exc) {
        if (cco && exc != PGM_PROTECTION) {
            return 1;
        }
        goto inject_exc;
    }
    access_memmove(env, &desta, &srca, ra);
    return 0; /* data moved */
 inject_exc:
#if !defined(CONFIG_USER_ONLY)
    if (exc != PGM_ADDRESSING) {
        stq_phys(env_cpu(env)->as, env->psa + offsetof(LowCore, trans_exc_code),
                 env->tlb_fill_tec);
    }
    if (exc == PGM_PAGE_TRANS) {
        stb_phys(env_cpu(env)->as, env->psa + offsetof(LowCore, op_access_id),
                 r1 << 4 | r2);
    }
#endif
    tcg_s390_program_interrupt(env, exc, ra);
}

/* string copy */
uint32_t HELPER(mvst)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    const int mmu_idx = cpu_mmu_index(env, false);
    const uint64_t d = get_address(env, r1);
    const uint64_t s = get_address(env, r2);
    const uint8_t c = env->regs[0];
    const int len = MIN(-(d | TARGET_PAGE_MASK), -(s | TARGET_PAGE_MASK));
    S390Access srca, desta;
    uintptr_t ra = GETPC();
    int i;

    if (env->regs[0] & 0xffffff00ull) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
    }

    /*
     * Our access should not exceed single pages, as we must not report access
     * exceptions exceeding the actually copied range (which we don't know at
     * this point). We might over-indicate watchpoints within the pages
     * (if we ever care, we have to limit processing to a single byte).
     */
    access_prepare(&srca, env, s, len, MMU_DATA_LOAD, mmu_idx, ra);
    access_prepare(&desta, env, d, len, MMU_DATA_STORE, mmu_idx, ra);
    for (i = 0; i < len; i++) {
        const uint8_t v = access_get_byte(env, &srca, i, ra);

        access_set_byte(env, &desta, i, v, ra);
        if (v == c) {
            set_address_zero(env, r1, d + i);
            return 1;
        }
    }
    set_address_zero(env, r1, d + len);
    set_address_zero(env, r2, s + len);
    return 3;
}

/* load access registers r1 to r3 from memory at a2 */
void HELPER(lam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    uintptr_t ra = GETPC();
    int i;

    if (a2 & 0x3) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
    }

    for (i = r1;; i = (i + 1) % 16) {
        env->aregs[i] = cpu_ldl_data_ra(env, a2, ra);
        a2 += 4;

        if (i == r3) {
            break;
        }
    }
}

/* store access registers r1 to r3 in memory at a2 */
void HELPER(stam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    uintptr_t ra = GETPC();
    int i;

    if (a2 & 0x3) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
    }

    for (i = r1;; i = (i + 1) % 16) {
        cpu_stl_data_ra(env, a2, env->aregs[i], ra);
        a2 += 4;

        if (i == r3) {
            break;
        }
    }
}

/* move long helper */
static inline uint32_t do_mvcl(CPUS390XState *env,
                               uint64_t *dest, uint64_t *destlen,
                               uint64_t *src, uint64_t *srclen,
                               uint16_t pad, int wordsize, uintptr_t ra)
{
    const int mmu_idx = cpu_mmu_index(env, false);
    int len = MIN(*destlen, -(*dest | TARGET_PAGE_MASK));
    S390Access srca, desta;
    int i, cc;

    if (*destlen == *srclen) {
        cc = 0;
    } else if (*destlen < *srclen) {
        cc = 1;
    } else {
        cc = 2;
    }

    if (!*destlen) {
        return cc;
    }

    /*
     * Only perform one type of operation (move/pad) at a time.
     * Stay within single pages.
     */
    if (*srclen) {
        /* Copy the src array */
        len = MIN(MIN(*srclen, -(*src | TARGET_PAGE_MASK)), len);
        *destlen -= len;
        *srclen -= len;
        access_prepare(&srca, env, *src, len, MMU_DATA_LOAD, mmu_idx, ra);
        access_prepare(&desta, env, *dest, len, MMU_DATA_STORE, mmu_idx, ra);
        access_memmove(env, &desta, &srca, ra);
        *src = wrap_address(env, *src + len);
        *dest = wrap_address(env, *dest + len);
    } else if (wordsize == 1) {
        /* Pad the remaining area */
        *destlen -= len;
        access_prepare(&desta, env, *dest, len, MMU_DATA_STORE, mmu_idx, ra);
        access_memset(env, &desta, pad, ra);
        *dest = wrap_address(env, *dest + len);
    } else {
        access_prepare(&desta, env, *dest, len, MMU_DATA_STORE, mmu_idx, ra);

        /* The remaining length selects the padding byte. */
        for (i = 0; i < len; (*destlen)--, i++) {
            if (*destlen & 1) {
                access_set_byte(env, &desta, i, pad, ra);
            } else {
                access_set_byte(env, &desta, i, pad >> 8, ra);
            }
        }
        *dest = wrap_address(env, *dest + len);
    }

    return *destlen ? 3 : cc;
}

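/*
 * Padding example for the wordsize == 2 case above: the parity of the
 * remaining destination length selects which half of the 16-bit pad is
 * stored, so a UTF-16 pad such as 0x0020 is written as the byte sequence
 * 0x00, 0x20 repeated, no matter where a given pass starts.
 */
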
/* move long */
uint32_t HELPER(mvcl)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    const int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();
    uint64_t destlen = env->regs[r1 + 1] & 0xffffff;
    uint64_t dest = get_address(env, r1);
    uint64_t srclen = env->regs[r2 + 1] & 0xffffff;
    uint64_t src = get_address(env, r2);
    uint8_t pad = env->regs[r2 + 1] >> 24;
    CPUState *cs = env_cpu(env);
    S390Access srca, desta;
    uint32_t cc, cur_len;

    if (is_destructive_overlap(env, dest, src, MIN(srclen, destlen))) {
        cc = 3;
    } else if (srclen == destlen) {
        cc = 0;
    } else if (destlen < srclen) {
        cc = 1;
    } else {
        cc = 2;
    }

    /* We might have to zero-out some bits even if there was no action. */
    if (unlikely(!destlen || cc == 3)) {
        set_address_zero(env, r2, src);
        set_address_zero(env, r1, dest);
        return cc;
    } else if (!srclen) {
        set_address_zero(env, r2, src);
    }

    /*
     * Only perform one type of operation (move/pad) in one step.
     * Stay within single pages.
     */
    while (destlen) {
        cur_len = MIN(destlen, -(dest | TARGET_PAGE_MASK));
        if (!srclen) {
            access_prepare(&desta, env, dest, cur_len,
                           MMU_DATA_STORE, mmu_idx, ra);
            access_memset(env, &desta, pad, ra);
        } else {
            cur_len = MIN(MIN(srclen, -(src | TARGET_PAGE_MASK)), cur_len);

            access_prepare(&srca, env, src, cur_len,
                           MMU_DATA_LOAD, mmu_idx, ra);
            access_prepare(&desta, env, dest, cur_len,
                           MMU_DATA_STORE, mmu_idx, ra);
            access_memmove(env, &desta, &srca, ra);
            src = wrap_address(env, src + cur_len);
            srclen -= cur_len;
            env->regs[r2 + 1] = deposit64(env->regs[r2 + 1], 0, 24, srclen);
            set_address_zero(env, r2, src);
        }
        dest = wrap_address(env, dest + cur_len);
        destlen -= cur_len;
        env->regs[r1 + 1] = deposit64(env->regs[r1 + 1], 0, 24, destlen);
        set_address_zero(env, r1, dest);

        /*
         * MVCL is interruptible. Return to the main loop if requested after
         * writing back all state to registers. If no interrupt will get
         * injected, we'll end up back in this handler and continue processing
         * the remaining parts.
         */
        if (destlen && unlikely(cpu_loop_exit_requested(cs))) {
            cpu_loop_exit_restore(cs, ra);
        }
    }

    return cc;
}

/* move long extended */
uint32_t HELPER(mvcle)(CPUS390XState *env, uint32_t r1, uint64_t a2,
                       uint32_t r3)
{
    uintptr_t ra = GETPC();
    uint64_t destlen = get_length(env, r1 + 1);
    uint64_t dest = get_address(env, r1);
    uint64_t srclen = get_length(env, r3 + 1);
    uint64_t src = get_address(env, r3);
    uint8_t pad = a2;
    uint32_t cc;

    cc = do_mvcl(env, &dest, &destlen, &src, &srclen, pad, 1, ra);

    set_length(env, r1 + 1, destlen);
    set_length(env, r3 + 1, srclen);
    set_address(env, r1, dest);
    set_address(env, r3, src);

    return cc;
}

/* move long unicode */
uint32_t HELPER(mvclu)(CPUS390XState *env, uint32_t r1, uint64_t a2,
                       uint32_t r3)
{
    uintptr_t ra = GETPC();
    uint64_t destlen = get_length(env, r1 + 1);
    uint64_t dest = get_address(env, r1);
    uint64_t srclen = get_length(env, r3 + 1);
    uint64_t src = get_address(env, r3);
    uint16_t pad = a2;
    uint32_t cc;

    cc = do_mvcl(env, &dest, &destlen, &src, &srclen, pad, 2, ra);

    set_length(env, r1 + 1, destlen);
    set_length(env, r3 + 1, srclen);
    set_address(env, r1, dest);
    set_address(env, r3, src);

    return cc;
}

/* compare logical long helper */
static inline uint32_t do_clcl(CPUS390XState *env,
                               uint64_t *src1, uint64_t *src1len,
                               uint64_t *src3, uint64_t *src3len,
                               uint16_t pad, uint64_t limit,
                               int wordsize, uintptr_t ra)
{
    uint64_t len = MAX(*src1len, *src3len);
    uint32_t cc = 0;

    check_alignment(env, *src1len | *src3len, wordsize, ra);

    if (!len) {
        return cc;
    }

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do. */
    if (len > limit) {
        len = limit;
        cc = 3;
    }

    for (; len; len -= wordsize) {
        uint16_t v1 = pad;
        uint16_t v3 = pad;

        if (*src1len) {
            v1 = cpu_ldusize_data_ra(env, *src1, wordsize, ra);
        }
        if (*src3len) {
            v3 = cpu_ldusize_data_ra(env, *src3, wordsize, ra);
        }

        if (v1 != v3) {
            cc = (v1 < v3) ? 1 : 2;
            break;
        }

        if (*src1len) {
            *src1 += wordsize;
            *src1len -= wordsize;
        }
        if (*src3len) {
            *src3 += wordsize;
            *src3len -= wordsize;
        }
    }

    return cc;
}

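/*
 * Padding example for do_clcl() above: comparing "ab" (length 2) with
 * "ab  " (length 4) and pad = ' ' compares equal, because the exhausted
 * first operand keeps supplying the pad byte 0x20 for the remaining
 * iterations.
 */
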
/* compare logical long */
uint32_t HELPER(clcl)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    uintptr_t ra = GETPC();
    uint64_t src1len = extract64(env->regs[r1 + 1], 0, 24);
    uint64_t src1 = get_address(env, r1);
    uint64_t src3len = extract64(env->regs[r2 + 1], 0, 24);
    uint64_t src3 = get_address(env, r2);
    uint8_t pad = env->regs[r2 + 1] >> 24;
    uint32_t cc;

    cc = do_clcl(env, &src1, &src1len, &src3, &src3len, pad, -1, 1, ra);

    env->regs[r1 + 1] = deposit64(env->regs[r1 + 1], 0, 24, src1len);
    env->regs[r2 + 1] = deposit64(env->regs[r2 + 1], 0, 24, src3len);
    set_address(env, r1, src1);
    set_address(env, r2, src3);

    return cc;
}

/* compare logical long extended memcompare insn with padding */
uint32_t HELPER(clcle)(CPUS390XState *env, uint32_t r1, uint64_t a2,
                       uint32_t r3)
{
    uintptr_t ra = GETPC();
    uint64_t src1len = get_length(env, r1 + 1);
    uint64_t src1 = get_address(env, r1);
    uint64_t src3len = get_length(env, r3 + 1);
    uint64_t src3 = get_address(env, r3);
    uint8_t pad = a2;
    uint32_t cc;

    cc = do_clcl(env, &src1, &src1len, &src3, &src3len, pad, 0x2000, 1, ra);

    set_length(env, r1 + 1, src1len);
    set_length(env, r3 + 1, src3len);
    set_address(env, r1, src1);
    set_address(env, r3, src3);

    return cc;
}

/* compare logical long unicode memcompare insn with padding */
uint32_t HELPER(clclu)(CPUS390XState *env, uint32_t r1, uint64_t a2,
                       uint32_t r3)
{
    uintptr_t ra = GETPC();
    uint64_t src1len = get_length(env, r1 + 1);
    uint64_t src1 = get_address(env, r1);
    uint64_t src3len = get_length(env, r3 + 1);
    uint64_t src3 = get_address(env, r3);
    uint16_t pad = a2;
    uint32_t cc = 0;

    cc = do_clcl(env, &src1, &src1len, &src3, &src3len, pad, 0x1000, 2, ra);

    set_length(env, r1 + 1, src1len);
    set_length(env, r3 + 1, src3len);
    set_address(env, r1, src1);
    set_address(env, r3, src3);

    return cc;
}

/* checksum */
Int128 HELPER(cksm)(CPUS390XState *env, uint64_t r1,
                    uint64_t src, uint64_t src_len)
{
    uintptr_t ra = GETPC();
    uint64_t max_len, len;
    uint64_t cksm = (uint32_t)r1;

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do. For now, let's cap at 8k. */
    max_len = (src_len > 0x2000 ? 0x2000 : src_len);

    /* Process full words as available. */
    for (len = 0; len + 4 <= max_len; len += 4, src += 4) {
        cksm += (uint32_t)cpu_ldl_data_ra(env, src, ra);
    }

    switch (max_len - len) {
    case 1:
        cksm += cpu_ldub_data_ra(env, src, ra) << 24;
        len += 1;
        break;
    case 2:
        cksm += cpu_lduw_data_ra(env, src, ra) << 16;
        len += 2;
        break;
    case 3:
        cksm += cpu_lduw_data_ra(env, src, ra) << 16;
        cksm += cpu_ldub_data_ra(env, src + 2, ra) << 8;
        len += 3;
        break;
    }

    /* Fold the carry from the checksum. Note that we can see carry-out
       during folding more than once (but probably not more than twice). */
    while (cksm > 0xffffffffull) {
        cksm = (uint32_t)cksm + (cksm >> 32);
    }

    /* Indicate whether or not we've processed everything. */
    env->cc_op = (len == src_len ? 0 : 3);

    /* Return both cksm and processed length. */
    return int128_make128(cksm, len);
}

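/*
 * Carry-fold example for CKSM above: if the 64-bit sum is 0x1'2345'6789,
 * one folding step yields 0x23456789 + 1 = 0x2345678a, which fits in
 * 32 bits and terminates the loop; the fold itself can carry out once
 * more, hence the while loop rather than a single pass.
 */
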
void HELPER(pack)(CPUS390XState *env, uint32_t len, uint64_t dest, uint64_t src)
{
    uintptr_t ra = GETPC();
    int len_dest = len >> 4;
    int len_src = len & 0xf;
    uint8_t b;

    dest += len_dest;
    src += len_src;

    /* last byte is special, it only flips the nibbles */
    b = cpu_ldub_data_ra(env, src, ra);
    cpu_stb_data_ra(env, dest, (b << 4) | (b >> 4), ra);
    src--;
    len_src--;

    /* now pack every value */
    while (len_dest > 0) {
        b = 0;

        if (len_src >= 0) {
            b = cpu_ldub_data_ra(env, src, ra) & 0x0f;
            src--;
            len_src--;
        }
        if (len_src >= 0) {
            b |= cpu_ldub_data_ra(env, src, ra) << 4;
            src--;
            len_src--;
        }

        len_dest--;
        dest--;
        cpu_stb_data_ra(env, dest, b, ra);
    }
}

static inline void do_pkau(CPUS390XState *env, uint64_t dest, uint64_t src,
                           uint32_t srclen, int ssize, uintptr_t ra)
{
    int i;
    /* The destination operand is always 16 bytes long. */
    const int destlen = 16;

    /* The operands are processed from right to left. */
    src += srclen - 1;
    dest += destlen - 1;

    for (i = 0; i < destlen; i++) {
        uint8_t b = 0;

        /* Start with a positive sign */
        if (i == 0) {
            b = 0xc;
        } else if (srclen > ssize) {
            b = cpu_ldub_data_ra(env, src, ra) & 0x0f;
            src -= ssize;
            srclen -= ssize;
        }

        if (srclen > ssize) {
            b |= cpu_ldub_data_ra(env, src, ra) << 4;
            src -= ssize;
            srclen -= ssize;
        }

        cpu_stb_data_ra(env, dest, b, ra);
        dest--;
    }
}

void HELPER(pka)(CPUS390XState *env, uint64_t dest, uint64_t src,
                 uint32_t srclen)
{
    do_pkau(env, dest, src, srclen, 1, GETPC());
}

void HELPER(pku)(CPUS390XState *env, uint64_t dest, uint64_t src,
                 uint32_t srclen)
{
    do_pkau(env, dest, src, srclen, 2, GETPC());
}

void HELPER(unpk)(CPUS390XState *env, uint32_t len, uint64_t dest,
                  uint64_t src)
{
    uintptr_t ra = GETPC();
    int len_dest = len >> 4;
    int len_src = len & 0xf;
    uint8_t b;
    int second_nibble = 0;

    dest += len_dest;
    src += len_src;

    /* last byte is special, it only flips the nibbles */
    b = cpu_ldub_data_ra(env, src, ra);
    cpu_stb_data_ra(env, dest, (b << 4) | (b >> 4), ra);
    src--;
    len_src--;

    /* now pad every nibble with 0xf0 */

    while (len_dest > 0) {
        uint8_t cur_byte = 0;

        if (len_src > 0) {
            cur_byte = cpu_ldub_data_ra(env, src, ra);
        }

        len_dest--;
        dest--;

        /* only advance one nibble at a time */
        if (second_nibble) {
            cur_byte >>= 4;
            len_src--;
            src--;
        }
        second_nibble = !second_nibble;

        /* digit */
        cur_byte = (cur_byte & 0xf);
        /* zone bits */
        cur_byte |= 0xf0;

        cpu_stb_data_ra(env, dest, cur_byte, ra);
    }
}

static inline uint32_t do_unpkau(CPUS390XState *env, uint64_t dest,
                                 uint32_t destlen, int dsize, uint64_t src,
                                 uintptr_t ra)
{
    int i;
    uint32_t cc;
    uint8_t b;
    /* The source operand is always 16 bytes long. */
    const int srclen = 16;

    /* The operands are processed from right to left. */
    src += srclen - 1;
    dest += destlen - dsize;

    /* Check for the sign. */
    b = cpu_ldub_data_ra(env, src, ra);
    src--;
    switch (b & 0xf) {
    case 0xa:
    case 0xc:
    case 0xe ... 0xf:
        cc = 0;  /* plus */
        break;
    case 0xb:
    case 0xd:
        cc = 1;  /* minus */
        break;
    default:
    case 0x0 ... 0x9:
        cc = 3;  /* invalid */
        break;
    }

    /* Now pad every nibble with 0x30, advancing one nibble at a time. */
    for (i = 0; i < destlen; i += dsize) {
        if (i == (31 * dsize)) {
            /* If length is 32/64 bytes, the leftmost byte is 0. */
            b = 0;
        } else if (i % (2 * dsize)) {
            b = cpu_ldub_data_ra(env, src, ra);
            src--;
        } else {
            b >>= 4;
        }
        cpu_stsize_data_ra(env, dest, 0x30 + (b & 0xf), dsize, ra);
        dest -= dsize;
    }

    return cc;
}

uint32_t HELPER(unpka)(CPUS390XState *env, uint64_t dest, uint32_t destlen,
                       uint64_t src)
{
    return do_unpkau(env, dest, destlen, 1, src, GETPC());
}

uint32_t HELPER(unpku)(CPUS390XState *env, uint64_t dest, uint32_t destlen,
                       uint64_t src)
{
    return do_unpkau(env, dest, destlen, 2, src, GETPC());
}

uint32_t HELPER(tp)(CPUS390XState *env, uint64_t dest, uint32_t destlen)
{
    uintptr_t ra = GETPC();
    uint32_t cc = 0;
    int i;

    for (i = 0; i < destlen; i++) {
        uint8_t b = cpu_ldub_data_ra(env, dest + i, ra);
        /* digit */
        cc |= (b & 0xf0) > 0x90 ? 2 : 0;

        if (i == (destlen - 1)) {
            /* sign */
            cc |= (b & 0xf) < 0xa ? 1 : 0;
        } else {
            /* digit */
            cc |= (b & 0xf) > 0x9 ? 2 : 0;
        }
    }

    return cc;
}

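/*
 * Example for TP above: the 3-byte packed field 0x12 0x34 0x5c is valid
 * (digits 1-5 plus sign nibble 0xc), so cc = 0; 0x12 0x3f 0x5c would set
 * cc = 2, because 0xf is not a valid digit in a non-sign position.
 */
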
static uint32_t do_helper_tr(CPUS390XState *env, uint32_t len, uint64_t array,
                             uint64_t trans, uintptr_t ra)
{
    uint32_t i;

    for (i = 0; i <= len; i++) {
        uint8_t byte = cpu_ldub_data_ra(env, array + i, ra);
        uint8_t new_byte = cpu_ldub_data_ra(env, trans + byte, ra);
        cpu_stb_data_ra(env, array + i, new_byte, ra);
    }

    return env->cc_op;
}

void HELPER(tr)(CPUS390XState *env, uint32_t len, uint64_t array,
                uint64_t trans)
{
    do_helper_tr(env, len, array, trans, GETPC());
}

Int128 HELPER(tre)(CPUS390XState *env, uint64_t array,
                   uint64_t len, uint64_t trans)
{
    uintptr_t ra = GETPC();
    uint8_t end = env->regs[0] & 0xff;
    uint64_t l = len;
    uint64_t i;
    uint32_t cc = 0;

    if (!(env->psw.mask & PSW_MASK_64)) {
        array &= 0x7fffffff;
        l = (uint32_t)l;
    }

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do. For now, let's cap at 8k. */
    if (l > 0x2000) {
        l = 0x2000;
        cc = 3;
    }

    for (i = 0; i < l; i++) {
        uint8_t byte, new_byte;

        byte = cpu_ldub_data_ra(env, array + i, ra);

        if (byte == end) {
            cc = 1;
            break;
        }

        new_byte = cpu_ldub_data_ra(env, trans + byte, ra);
        cpu_stb_data_ra(env, array + i, new_byte, ra);
    }

    env->cc_op = cc;
    return int128_make128(len - i, array + i);
}

static inline uint32_t do_helper_trt(CPUS390XState *env, int len,
                                     uint64_t array, uint64_t trans,
                                     int inc, uintptr_t ra)
{
    int i;

    for (i = 0; i <= len; i++) {
        uint8_t byte = cpu_ldub_data_ra(env, array + i * inc, ra);
        uint8_t sbyte = cpu_ldub_data_ra(env, trans + byte, ra);

        if (sbyte != 0) {
            set_address(env, 1, array + i * inc);
            env->regs[2] = deposit64(env->regs[2], 0, 8, sbyte);
            return (i == len) ? 2 : 1;
        }
    }

    return 0;
}

static uint32_t do_helper_trt_fwd(CPUS390XState *env, uint32_t len,
                                  uint64_t array, uint64_t trans,
                                  uintptr_t ra)
{
    return do_helper_trt(env, len, array, trans, 1, ra);
}

uint32_t HELPER(trt)(CPUS390XState *env, uint32_t len, uint64_t array,
                     uint64_t trans)
{
    return do_helper_trt(env, len, array, trans, 1, GETPC());
}

static uint32_t do_helper_trt_bkwd(CPUS390XState *env, uint32_t len,
                                   uint64_t array, uint64_t trans,
                                   uintptr_t ra)
{
    return do_helper_trt(env, len, array, trans, -1, ra);
}

uint32_t HELPER(trtr)(CPUS390XState *env, uint32_t len, uint64_t array,
                      uint64_t trans)
{
    return do_helper_trt(env, len, array, trans, -1, GETPC());
}

/* Translate one/two to one/two */
uint32_t HELPER(trXX)(CPUS390XState *env, uint32_t r1, uint32_t r2,
                      uint32_t tst, uint32_t sizes)
{
    uintptr_t ra = GETPC();
    int dsize = (sizes & 1) ? 1 : 2;
    int ssize = (sizes & 2) ? 1 : 2;
    uint64_t tbl = get_address(env, 1);
    uint64_t dst = get_address(env, r1);
    uint64_t len = get_length(env, r1 + 1);
    uint64_t src = get_address(env, r2);
    uint32_t cc = 3;
    int i;

    /* The lower address bits of TBL are ignored. For TROO, TROT, it's
       the low 3 bits (double-word aligned). For TRTO, TRTT, it's either
       the low 12 bits (4K, without ETF2-ENH) or 3 bits (with ETF2-ENH). */
    if (ssize == 2 && !s390_has_feat(S390_FEAT_ETF2_ENH)) {
        tbl &= -4096;
    } else {
        tbl &= -8;
    }

    check_alignment(env, len, ssize, ra);

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do. */
    for (i = 0; i < 0x2000; i++) {
        uint16_t sval = cpu_ldusize_data_ra(env, src, ssize, ra);
        uint64_t tble = tbl + (sval * dsize);
        uint16_t dval = cpu_ldusize_data_ra(env, tble, dsize, ra);
        if (dval == tst) {
            cc = 1;
            break;
        }
        cpu_stsize_data_ra(env, dst, dval, dsize, ra);

        len -= ssize;
        src += ssize;
        dst += dsize;

        if (len == 0) {
            cc = 0;
            break;
        }
    }

    set_address(env, r1, dst);
    set_length(env, r1 + 1, len);
    set_address(env, r2, src);

    return cc;
}

static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1,
                        uint64_t a2, bool parallel)
{
    uint32_t mem_idx = cpu_mmu_index(env, false);
    MemOpIdx oi16 = make_memop_idx(MO_TE | MO_128, mem_idx);
    MemOpIdx oi8 = make_memop_idx(MO_TE | MO_64, mem_idx);
    MemOpIdx oi4 = make_memop_idx(MO_TE | MO_32, mem_idx);
    MemOpIdx oi2 = make_memop_idx(MO_TE | MO_16, mem_idx);
    MemOpIdx oi1 = make_memop_idx(MO_8, mem_idx);
    uintptr_t ra = GETPC();
    uint32_t fc = extract32(env->regs[0], 0, 8);
    uint32_t sc = extract32(env->regs[0], 8, 8);
    uint64_t pl = get_address(env, 1) & -16;
    uint64_t svh, svl;
    uint32_t cc;

    /* Sanity check the function code and storage characteristic. */
    if (fc > 1 || sc > 3) {
        if (!s390_has_feat(S390_FEAT_COMPARE_AND_SWAP_AND_STORE_2)) {
            goto spec_exception;
        }
        if (fc > 2 || sc > 4 || (fc == 2 && (r3 & 1))) {
            goto spec_exception;
        }
    }

    /* Sanity check the alignments. */
    if (extract32(a1, 0, fc + 2) || extract32(a2, 0, sc)) {
        goto spec_exception;
    }

    /* Sanity check writability of the store address. */
    probe_write(env, a2, 1 << sc, mem_idx, ra);

    /*
     * Note that the compare-and-swap is atomic, and the store is atomic,
     * but the complete operation is not. Therefore we do not need to
     * assert serial context in order to implement this. That said,
     * restart early if we can't support either operation that is supposed
     * to be atomic.
     */
    if (parallel) {
        uint32_t max = 2;
#ifdef CONFIG_ATOMIC64
        max = 3;
#endif
        if ((HAVE_CMPXCHG128 ? 0 : fc + 2 > max) ||
            (HAVE_ATOMIC128_RW ? 0 : sc > max)) {
            cpu_loop_exit_atomic(env_cpu(env), ra);
        }
    }

    /*
     * All loads happen before all stores. For simplicity, load the entire
     * store value area from the parameter list.
     */
    svh = cpu_ldq_mmu(env, pl + 16, oi8, ra);
    svl = cpu_ldq_mmu(env, pl + 24, oi8, ra);

    switch (fc) {
    case 0:
        {
            uint32_t nv = cpu_ldl_mmu(env, pl, oi4, ra);
            uint32_t cv = env->regs[r3];
            uint32_t ov;

            if (parallel) {
                ov = cpu_atomic_cmpxchgl_be_mmu(env, a1, cv, nv, oi4, ra);
            } else {
                ov = cpu_ldl_mmu(env, a1, oi4, ra);
                cpu_stl_mmu(env, a1, (ov == cv ? nv : ov), oi4, ra);
            }
            cc = (ov != cv);
            env->regs[r3] = deposit64(env->regs[r3], 32, 32, ov);
        }
        break;

    case 1:
        {
            uint64_t nv = cpu_ldq_mmu(env, pl, oi8, ra);
            uint64_t cv = env->regs[r3];
            uint64_t ov;

            if (parallel) {
#ifdef CONFIG_ATOMIC64
                ov = cpu_atomic_cmpxchgq_be_mmu(env, a1, cv, nv, oi8, ra);
#else
                /* Note that we asserted !parallel above. */
                g_assert_not_reached();
#endif
            } else {
                ov = cpu_ldq_mmu(env, a1, oi8, ra);
                cpu_stq_mmu(env, a1, (ov == cv ? nv : ov), oi8, ra);
            }
            cc = (ov != cv);
            env->regs[r3] = ov;
        }
        break;

    case 2:
        {
            Int128 nv = cpu_ld16_mmu(env, pl, oi16, ra);
            Int128 cv = int128_make128(env->regs[r3 + 1], env->regs[r3]);
            Int128 ov;

            if (!parallel) {
                ov = cpu_ld16_mmu(env, a1, oi16, ra);
                cc = !int128_eq(ov, cv);
                if (cc) {
                    nv = ov;
                }
                cpu_st16_mmu(env, a1, nv, oi16, ra);
            } else if (HAVE_CMPXCHG128) {
                ov = cpu_atomic_cmpxchgo_be_mmu(env, a1, cv, nv, oi16, ra);
                cc = !int128_eq(ov, cv);
            } else {
                /* Note that we asserted !parallel above. */
                g_assert_not_reached();
            }
            env->regs[r3 + 0] = int128_gethi(ov);
            env->regs[r3 + 1] = int128_getlo(ov);
        }
        break;

    default:
        g_assert_not_reached();
    }

    /* Store only if the comparison succeeded. Note that above we use a pair
       of 64-bit big-endian loads, so for sc < 3 we must extract the value
       from the most-significant bits of svh. */
    if (cc == 0) {
        switch (sc) {
        case 0:
            cpu_stb_mmu(env, a2, svh >> 56, oi1, ra);
            break;
        case 1:
            cpu_stw_mmu(env, a2, svh >> 48, oi2, ra);
            break;
        case 2:
            cpu_stl_mmu(env, a2, svh >> 32, oi4, ra);
            break;
        case 3:
            cpu_stq_mmu(env, a2, svh, oi8, ra);
            break;
        case 4:
            cpu_st16_mmu(env, a2, int128_make128(svl, svh), oi16, ra);
            break;
        default:
            g_assert_not_reached();
        }
    }

    return cc;

 spec_exception:
    tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
}

uint32_t HELPER(csst)(CPUS390XState *env, uint32_t r3, uint64_t a1, uint64_t a2)
{
    return do_csst(env, r3, a1, a2, false);
}

uint32_t HELPER(csst_parallel)(CPUS390XState *env, uint32_t r3, uint64_t a1,
                               uint64_t a2)
{
    return do_csst(env, r3, a1, a2, true);
}

#if !defined(CONFIG_USER_ONLY)
void HELPER(lctlg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    uintptr_t ra = GETPC();
    bool PERchanged = false;
    uint64_t src = a2;
    uint32_t i;

    if (src & 0x7) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
    }

    for (i = r1;; i = (i + 1) % 16) {
        uint64_t val = cpu_ldq_data_ra(env, src, ra);
        if (env->cregs[i] != val && i >= 9 && i <= 11) {
            PERchanged = true;
        }
        env->cregs[i] = val;
        HELPER_LOG("load ctl %d from 0x%" PRIx64 " == 0x%" PRIx64 "\n",
                   i, src, val);
        src += sizeof(uint64_t);

        if (i == r3) {
            break;
        }
    }

    if (PERchanged && env->psw.mask & PSW_MASK_PER) {
        s390_cpu_recompute_watchpoints(env_cpu(env));
    }

    tlb_flush(env_cpu(env));
}

void HELPER(lctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    uintptr_t ra = GETPC();
    bool PERchanged = false;
    uint64_t src = a2;
    uint32_t i;

    if (src & 0x3) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
    }

    for (i = r1;; i = (i + 1) % 16) {
        uint32_t val = cpu_ldl_data_ra(env, src, ra);
        if ((uint32_t)env->cregs[i] != val && i >= 9 && i <= 11) {
            PERchanged = true;
        }
        env->cregs[i] = deposit64(env->cregs[i], 0, 32, val);
        HELPER_LOG("load ctl %d from 0x%" PRIx64 " == 0x%x\n", i, src, val);
        src += sizeof(uint32_t);

        if (i == r3) {
            break;
        }
    }

    if (PERchanged && env->psw.mask & PSW_MASK_PER) {
        s390_cpu_recompute_watchpoints(env_cpu(env));
    }

    tlb_flush(env_cpu(env));
}

void HELPER(stctg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    uintptr_t ra = GETPC();
    uint64_t dest = a2;
    uint32_t i;

    if (dest & 0x7) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
    }

    for (i = r1;; i = (i + 1) % 16) {
        cpu_stq_data_ra(env, dest, env->cregs[i], ra);
        dest += sizeof(uint64_t);

        if (i == r3) {
            break;
        }
    }
}

void HELPER(stctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    uintptr_t ra = GETPC();
    uint64_t dest = a2;
    uint32_t i;

    if (dest & 0x3) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
    }

    for (i = r1;; i = (i + 1) % 16) {
        cpu_stl_data_ra(env, dest, env->cregs[i], ra);
        dest += sizeof(uint32_t);

        if (i == r3) {
            break;
        }
    }
}

uint32_t HELPER(testblock)(CPUS390XState *env, uint64_t real_addr)
{
    uintptr_t ra = GETPC();
    int i;

    real_addr = wrap_address(env, real_addr) & TARGET_PAGE_MASK;

    for (i = 0; i < TARGET_PAGE_SIZE; i += 8) {
        cpu_stq_mmuidx_ra(env, real_addr + i, 0, MMU_REAL_IDX, ra);
    }

    return 0;
}

uint32_t HELPER(tprot)(CPUS390XState *env, uint64_t a1, uint64_t a2)
{
    S390CPU *cpu = env_archcpu(env);
    CPUState *cs = env_cpu(env);

    /*
     * TODO: we currently don't handle all access protection types
     * (including access-list and key-controlled) as well as AR mode.
     */
    if (!s390_cpu_virt_mem_check_write(cpu, a1, 0, 1)) {
        /* Fetching permitted; storing permitted */
        return 0;
    }

    if (env->int_pgm_code == PGM_PROTECTION) {
        /* retry if reading is possible */
        cs->exception_index = -1;
        if (!s390_cpu_virt_mem_check_read(cpu, a1, 0, 1)) {
            /* Fetching permitted; storing not permitted */
            return 1;
        }
    }

    switch (env->int_pgm_code) {
    case PGM_PROTECTION:
        /* Fetching not permitted; storing not permitted */
        cs->exception_index = -1;
        return 2;
    case PGM_ADDRESSING:
    case PGM_TRANS_SPEC:
        /* exceptions forwarded to the guest */
        s390_cpu_virt_mem_handle_exc(cpu, GETPC());
        return 0;
    }

    /* Translation not available */
    cs->exception_index = -1;
    return 3;
}

/* insert storage key extended */
uint64_t HELPER(iske)(CPUS390XState *env, uint64_t r2)
{
    static S390SKeysState *ss;
    static S390SKeysClass *skeyclass;
    uint64_t addr = wrap_address(env, r2);
    uint8_t key;
    int rc;

    addr = mmu_real2abs(env, addr);
    if (!mmu_absolute_addr_valid(addr, false)) {
        tcg_s390_program_interrupt(env, PGM_ADDRESSING, GETPC());
    }

    if (unlikely(!ss)) {
        ss = s390_get_skeys_device();
        skeyclass = S390_SKEYS_GET_CLASS(ss);
        if (skeyclass->enable_skeys && !skeyclass->enable_skeys(ss)) {
            tlb_flush_all_cpus_synced(env_cpu(env));
        }
    }

    rc = skeyclass->get_skeys(ss, addr / TARGET_PAGE_SIZE, 1, &key);
    if (rc) {
        trace_get_skeys_nonzero(rc);
        return 0;
    }
    return key;
}

/* set storage key extended */
void HELPER(sske)(CPUS390XState *env, uint64_t r1, uint64_t r2)
{
    static S390SKeysState *ss;
    static S390SKeysClass *skeyclass;
    uint64_t addr = wrap_address(env, r2);
    uint8_t key;
    int rc;

    addr = mmu_real2abs(env, addr);
    if (!mmu_absolute_addr_valid(addr, false)) {
        tcg_s390_program_interrupt(env, PGM_ADDRESSING, GETPC());
    }

    if (unlikely(!ss)) {
        ss = s390_get_skeys_device();
        skeyclass = S390_SKEYS_GET_CLASS(ss);
        if (skeyclass->enable_skeys && !skeyclass->enable_skeys(ss)) {
            tlb_flush_all_cpus_synced(env_cpu(env));
        }
    }

    key = r1 & 0xfe;
    rc = skeyclass->set_skeys(ss, addr / TARGET_PAGE_SIZE, 1, &key);
    if (rc) {
        trace_set_skeys_nonzero(rc);
    }
    /*
     * As we can only flush by virtual address and not all the entries
     * that point to a physical address we have to flush the whole TLB.
     */
    tlb_flush_all_cpus_synced(env_cpu(env));
}

/* reset reference bit extended */
uint32_t HELPER(rrbe)(CPUS390XState *env, uint64_t r2)
{
    uint64_t addr = wrap_address(env, r2);
    static S390SKeysState *ss;
    static S390SKeysClass *skeyclass;
    uint8_t re, key;
    int rc;

    addr = mmu_real2abs(env, addr);
    if (!mmu_absolute_addr_valid(addr, false)) {
        tcg_s390_program_interrupt(env, PGM_ADDRESSING, GETPC());
    }

    if (unlikely(!ss)) {
        ss = s390_get_skeys_device();
        skeyclass = S390_SKEYS_GET_CLASS(ss);
        if (skeyclass->enable_skeys && !skeyclass->enable_skeys(ss)) {
            tlb_flush_all_cpus_synced(env_cpu(env));
        }
    }

    rc = skeyclass->get_skeys(ss, addr / TARGET_PAGE_SIZE, 1, &key);
    if (rc) {
        trace_get_skeys_nonzero(rc);
        return 0;
    }

    re = key & (SK_R | SK_C);
    key &= ~SK_R;

    rc = skeyclass->set_skeys(ss, addr / TARGET_PAGE_SIZE, 1, &key);
    if (rc) {
        trace_set_skeys_nonzero(rc);
        return 0;
    }
    /*
     * As we can only flush by virtual address and not all the entries
     * that point to a physical address we have to flush the whole TLB.
     */
    tlb_flush_all_cpus_synced(env_cpu(env));

    /*
     * cc
     *
     * 0  Reference bit zero; change bit zero
     * 1  Reference bit zero; change bit one
     * 2  Reference bit one; change bit zero
     * 3  Reference bit one; change bit one
     */

    return re >> 1;
}

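/*
 * MOVE TO SECONDARY: copy at most 256 bytes from the primary address
 * space (a2) to the secondary address space (a1), setting cc 3 when the
 * requested length had to be truncated to 256 and cc 0 otherwise.
 */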
uint32_t HELPER(mvcs)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2,
                      uint64_t key)
{
    const uint8_t psw_as = (env->psw.mask & PSW_MASK_ASC) >> PSW_SHIFT_ASC;
    S390Access srca, desta;
    uintptr_t ra = GETPC();
    int cc = 0;

    HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
               __func__, l, a1, a2);

    if (!(env->psw.mask & PSW_MASK_DAT) || !(env->cregs[0] & CR0_SECONDARY) ||
        psw_as == AS_HOME || psw_as == AS_ACCREG) {
        s390_program_interrupt(env, PGM_SPECIAL_OP, ra);
    }

    if (!psw_key_valid(env, (key >> 4) & 0xf)) {
        s390_program_interrupt(env, PGM_PRIVILEGED, ra);
    }

    l = wrap_length32(env, l);
    if (l > 256) {
        /* max 256 */
        l = 256;
        cc = 3;
    } else if (!l) {
        return cc;
    }

    access_prepare(&srca, env, a2, l, MMU_DATA_LOAD, MMU_PRIMARY_IDX, ra);
    access_prepare(&desta, env, a1, l, MMU_DATA_STORE, MMU_SECONDARY_IDX, ra);
    access_memmove(env, &desta, &srca, ra);
    return cc;
}

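/*
 * MOVE TO PRIMARY: the mirror image of MVCS above; the source is read
 * from the secondary address space and stored into the primary one.
 */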
uint32_t HELPER(mvcp)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2,
                      uint64_t key)
{
    const uint8_t psw_as = (env->psw.mask & PSW_MASK_ASC) >> PSW_SHIFT_ASC;
    S390Access srca, desta;
    uintptr_t ra = GETPC();
    int cc = 0;

    HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
               __func__, l, a1, a2);

    if (!(env->psw.mask & PSW_MASK_DAT) || !(env->cregs[0] & CR0_SECONDARY) ||
        psw_as == AS_HOME || psw_as == AS_ACCREG) {
        s390_program_interrupt(env, PGM_SPECIAL_OP, ra);
    }

    if (!psw_key_valid(env, (key >> 4) & 0xf)) {
        s390_program_interrupt(env, PGM_PRIVILEGED, ra);
    }

    l = wrap_length32(env, l);
    if (l > 256) {
        /* max 256 */
        l = 256;
        cc = 3;
    } else if (!l) {
        return cc;
    }
    access_prepare(&srca, env, a2, l, MMU_DATA_LOAD, MMU_SECONDARY_IDX, ra);
    access_prepare(&desta, env, a1, l, MMU_DATA_STORE, MMU_PRIMARY_IDX, ra);
    access_memmove(env, &desta, &srca, ra);
    return cc;
}

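/*
 * INVALIDATE DAT TABLE ENTRY: for the invalidation-and-clearing
 * operation, mark up to 2048 region- or segment-table entries invalid;
 * the shift chosen below (53/42/31/20) extracts the 11-bit table index
 * that matches the table type named by the ASCE in r1.
 */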
void HELPER(idte)(CPUS390XState *env, uint64_t r1, uint64_t r2, uint32_t m4)
{
    CPUState *cs = env_cpu(env);
    const uintptr_t ra = GETPC();
    uint64_t table, entry, raddr;
    uint16_t entries, i, index = 0;

    if (r2 & 0xff000) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
    }

    if (!(r2 & 0x800)) {
        /* invalidation-and-clearing operation */
        table = r1 & ASCE_ORIGIN;
        entries = (r2 & 0x7ff) + 1;

        switch (r1 & ASCE_TYPE_MASK) {
        case ASCE_TYPE_REGION1:
            index = (r2 >> 53) & 0x7ff;
            break;
        case ASCE_TYPE_REGION2:
            index = (r2 >> 42) & 0x7ff;
            break;
        case ASCE_TYPE_REGION3:
            index = (r2 >> 31) & 0x7ff;
            break;
        case ASCE_TYPE_SEGMENT:
            index = (r2 >> 20) & 0x7ff;
            break;
        }
        for (i = 0; i < entries; i++) {
            /* addresses are not wrapped in 24/31bit mode but table index is */
            raddr = table + ((index + i) & 0x7ff) * sizeof(entry);
            entry = cpu_ldq_mmuidx_ra(env, raddr, MMU_REAL_IDX, ra);
            if (!(entry & REGION_ENTRY_I)) {
                /* we are allowed to not store if already invalid */
                entry |= REGION_ENTRY_I;
                cpu_stq_mmuidx_ra(env, raddr, entry, MMU_REAL_IDX, ra);
            }
        }
    }

    /* We simply flush the complete tlb, therefore we can ignore r3. */
    if (m4 & 1) {
        tlb_flush(cs);
    } else {
        tlb_flush_all_cpus_synced(cs);
    }
}

/* invalidate pte */
void HELPER(ipte)(CPUS390XState *env, uint64_t pto, uint64_t vaddr,
                  uint32_t m4)
{
    CPUState *cs = env_cpu(env);
    const uintptr_t ra = GETPC();
    uint64_t page = vaddr & TARGET_PAGE_MASK;
    uint64_t pte_addr, pte;

    /* Compute the page table entry address */
    pte_addr = (pto & SEGMENT_ENTRY_ORIGIN);
    pte_addr += VADDR_PAGE_TX(vaddr) * 8;

    /* Mark the page table entry as invalid */
    pte = cpu_ldq_mmuidx_ra(env, pte_addr, MMU_REAL_IDX, ra);
    pte |= PAGE_ENTRY_I;
    cpu_stq_mmuidx_ra(env, pte_addr, pte, MMU_REAL_IDX, ra);

    /* XXX we exploit the fact that Linux passes the exact virtual
       address here - it's not obliged to! */
    if (m4 & 1) {
        if (vaddr & ~VADDR_PAGE_TX_MASK) {
            tlb_flush_page(cs, page);
            /* XXX 31-bit hack */
            tlb_flush_page(cs, page ^ 0x80000000);
        } else {
            /* looks like we don't have a valid virtual address */
            tlb_flush(cs);
        }
    } else {
        if (vaddr & ~VADDR_PAGE_TX_MASK) {
            tlb_flush_page_all_cpus_synced(cs, page);
            /* XXX 31-bit hack */
            tlb_flush_page_all_cpus_synced(cs, page ^ 0x80000000);
        } else {
            /* looks like we don't have a valid virtual address */
            tlb_flush_all_cpus_synced(cs);
        }
    }
}

/* flush local tlb */
void HELPER(ptlb)(CPUS390XState *env)
{
    tlb_flush(env_cpu(env));
}

/* flush global tlb */
void HELPER(purge)(CPUS390XState *env)
{
    tlb_flush_all_cpus_synced(env_cpu(env));
}

/* load real address */
uint64_t HELPER(lra)(CPUS390XState *env, uint64_t addr)
{
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;
    uint64_t ret, tec;
    int flags, exc, cc;

    /* XXX incomplete - has more corner cases */
    if (!(env->psw.mask & PSW_MASK_64) && (addr >> 32)) {
        tcg_s390_program_interrupt(env, PGM_SPECIAL_OP, GETPC());
    }

    exc = mmu_translate(env, addr, MMU_S390_LRA, asc, &ret, &flags, &tec);
    if (exc) {
        cc = 3;
        ret = exc | 0x80000000;
    } else {
        cc = 0;
        ret |= addr & ~TARGET_PAGE_MASK;
    }

    env->cc_op = cc;
    return ret;
}
#endif

/* Execute instruction. This instruction executes an insn modified with
   the contents of r1. It does not change the executed instruction in memory;
   it does not change the program counter.

   Perform this by recording the modified instruction in env->ex_value.
   This will be noticed by cpu_get_tb_cpu_state and thus tb translation.
*/
void HELPER(ex)(CPUS390XState *env, uint32_t ilen, uint64_t r1, uint64_t addr)
{
    uint64_t insn;
    uint8_t opc;

    /* EXECUTE targets must be at even addresses. */
    if (addr & 1) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC());
    }

    insn = cpu_lduw_code(env, addr);
    opc = insn >> 8;

    /* Or in the contents of R1[56:63]. */
    insn |= r1 & 0xff;

    /* Load the rest of the instruction. */
    insn <<= 48;
    switch (get_ilen(opc)) {
    case 2:
        break;
    case 4:
        insn |= (uint64_t)cpu_lduw_code(env, addr + 2) << 32;
        break;
    case 6:
        insn |= (uint64_t)(uint32_t)cpu_ldl_code(env, addr + 2) << 16;
        break;
    default:
        g_assert_not_reached();
    }

    /* The very most common cases can be sped up by avoiding a new TB. */
    if ((opc & 0xf0) == 0xd0) {
        typedef uint32_t (*dx_helper)(CPUS390XState *, uint32_t, uint64_t,
                                      uint64_t, uintptr_t);
        static const dx_helper dx[16] = {
            [0x0] = do_helper_trt_bkwd,
            [0x2] = do_helper_mvc,
            [0x4] = do_helper_nc,
            [0x5] = do_helper_clc,
            [0x6] = do_helper_oc,
            [0x7] = do_helper_xc,
            [0xc] = do_helper_tr,
            [0xd] = do_helper_trt_fwd,
        };
        dx_helper helper = dx[opc & 0xf];

        if (helper) {
            uint32_t l = extract64(insn, 48, 8);
            uint32_t b1 = extract64(insn, 44, 4);
            uint32_t d1 = extract64(insn, 32, 12);
            uint32_t b2 = extract64(insn, 28, 4);
            uint32_t d2 = extract64(insn, 16, 12);
            uint64_t a1 = wrap_address(env, (b1 ? env->regs[b1] : 0) + d1);
            uint64_t a2 = wrap_address(env, (b2 ? env->regs[b2] : 0) + d2);

            env->cc_op = helper(env, l, a1, a2, 0);
            env->psw.addr += ilen;
            return;
        }
    } else if (opc == 0x0a) {
        env->int_svc_code = extract64(insn, 48, 8);
        env->int_svc_ilen = ilen;
        helper_exception(env, EXCP_SVC);
        g_assert_not_reached();
    }

    /* Record the insn we want to execute as well as the ilen to use
       during the execution of the target insn. This will also ensure
       that ex_value is non-zero, which flags that we are in a state
       that requires such execution. */
    env->ex_value = insn | ilen;
    env->ex_target = addr;
}

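/*
 * MOVE WITH OPTIONAL SPECIFICATIONS: register 0 carries one 16-bit OAC
 * (operand access control) per operand, the destination OAC in the
 * upper and the source OAC in the lower halfword of its low 32 bits.
 * As parsed below, each OAC holds an access key (bits 12-15, counting
 * from the LSB), an address-space control (bits 6-7), and two validity
 * flags (bits 1 and 0); fields whose flag is clear fall back to the
 * corresponding PSW values.
 */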
uint32_t HELPER(mvcos)(CPUS390XState *env, uint64_t dest, uint64_t src,
                       uint64_t len)
{
    const uint8_t psw_key = (env->psw.mask & PSW_MASK_KEY) >> PSW_SHIFT_KEY;
    const uint8_t psw_as = (env->psw.mask & PSW_MASK_ASC) >> PSW_SHIFT_ASC;
    const uint64_t r0 = env->regs[0];
    const uintptr_t ra = GETPC();
    uint8_t dest_key, dest_as, dest_k, dest_a;
    uint8_t src_key, src_as, src_k, src_a;
    uint64_t val;
    int cc = 0;

    HELPER_LOG("%s dest %" PRIx64 ", src %" PRIx64 ", len %" PRIx64 "\n",
               __func__, dest, src, len);

    if (!(env->psw.mask & PSW_MASK_DAT)) {
        tcg_s390_program_interrupt(env, PGM_SPECIAL_OP, ra);
    }

    /* OAC (operand access control) for the first operand -> dest */
    val = (r0 & 0xffff0000ULL) >> 16;
    dest_key = (val >> 12) & 0xf;
    dest_as = (val >> 6) & 0x3;
    dest_k = (val >> 1) & 0x1;
    dest_a = val & 0x1;

    /* OAC (operand access control) for the second operand -> src */
    val = (r0 & 0x0000ffffULL);
    src_key = (val >> 12) & 0xf;
    src_as = (val >> 6) & 0x3;
    src_k = (val >> 1) & 0x1;
    src_a = val & 0x1;

    if (!dest_k) {
        dest_key = psw_key;
    }
    if (!src_k) {
        src_key = psw_key;
    }
    if (!dest_a) {
        dest_as = psw_as;
    }
    if (!src_a) {
        src_as = psw_as;
    }

    if (dest_a && dest_as == AS_HOME && (env->psw.mask & PSW_MASK_PSTATE)) {
        tcg_s390_program_interrupt(env, PGM_SPECIAL_OP, ra);
    }
    if (!(env->cregs[0] & CR0_SECONDARY) &&
        (dest_as == AS_SECONDARY || src_as == AS_SECONDARY)) {
        tcg_s390_program_interrupt(env, PGM_SPECIAL_OP, ra);
    }
    if (!psw_key_valid(env, dest_key) || !psw_key_valid(env, src_key)) {
        tcg_s390_program_interrupt(env, PGM_PRIVILEGED, ra);
    }

    len = wrap_length32(env, len);
    if (len > 4096) {
        cc = 3;
        len = 4096;
    }

    /* FIXME: AR-mode and proper problem state mode (using PSW keys) missing */
    if (src_as == AS_ACCREG || dest_as == AS_ACCREG ||
        (env->psw.mask & PSW_MASK_PSTATE)) {
        qemu_log_mask(LOG_UNIMP, "%s: AR-mode and PSTATE support missing\n",
                      __func__);
        tcg_s390_program_interrupt(env, PGM_ADDRESSING, ra);
    }

    /* FIXME: Access using correct keys and AR-mode */
    if (len) {
        S390Access srca, desta;

        access_prepare(&srca, env, src, len, MMU_DATA_LOAD,
                       mmu_idx_from_as(src_as), ra);
        access_prepare(&desta, env, dest, len, MMU_DATA_STORE,
                       mmu_idx_from_as(dest_as), ra);

        access_memmove(env, &desta, &srca, ra);
    }

    return cc;
}

/* Decode a Unicode character. A return value < 0 indicates success, storing
   the UTF-32 result into OCHAR and the input length into OLEN. A return
   value >= 0 indicates failure, and the CC value to be returned. */
typedef int (*decode_unicode_fn)(CPUS390XState *env, uint64_t addr,
                                 uint64_t ilen, bool enh_check, uintptr_t ra,
                                 uint32_t *ochar, uint32_t *olen);

/* Encode a Unicode character. A return value < 0 indicates success, storing
   the bytes into ADDR and the output length into OLEN. A return value >= 0
   indicates failure, and the CC value to be returned. */
typedef int (*encode_unicode_fn)(CPUS390XState *env, uint64_t addr,
                                 uint64_t ilen, uintptr_t ra, uint32_t c,
                                 uint32_t *olen);

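/*
 * Worked example for the decoder below: the two-byte sequence c3 a9
 * (U+00E9) takes the "s0 <= 0xdf" branch and yields
 * c = ((0xc3 & 0x1f) << 6) | (0xa9 & 0x3f) = 0xe9 with an input length
 * of 2 bytes.
 */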
static int decode_utf8(CPUS390XState *env, uint64_t addr, uint64_t ilen,
                       bool enh_check, uintptr_t ra,
                       uint32_t *ochar, uint32_t *olen)
{
    uint8_t s0, s1, s2, s3;
    uint32_t c, l;

    if (ilen < 1) {
        return 0;
    }
    s0 = cpu_ldub_data_ra(env, addr, ra);
    if (s0 <= 0x7f) {
        /* one byte character */
        l = 1;
        c = s0;
    } else if (s0 <= (enh_check ? 0xc1 : 0xbf)) {
        /* invalid character */
        return 2;
    } else if (s0 <= 0xdf) {
        /* two byte character */
        l = 2;
        if (ilen < 2) {
            return 0;
        }
        s1 = cpu_ldub_data_ra(env, addr + 1, ra);
        c = s0 & 0x1f;
        c = (c << 6) | (s1 & 0x3f);
        if (enh_check && (s1 & 0xc0) != 0x80) {
            return 2;
        }
    } else if (s0 <= 0xef) {
        /* three byte character */
        l = 3;
        if (ilen < 3) {
            return 0;
        }
        s1 = cpu_ldub_data_ra(env, addr + 1, ra);
        s2 = cpu_ldub_data_ra(env, addr + 2, ra);
        c = s0 & 0x0f;
        c = (c << 6) | (s1 & 0x3f);
        c = (c << 6) | (s2 & 0x3f);
        /* Fold the byte-by-byte range descriptions in the PoO into
           tests against the complete value. It disallows encodings
           that could be smaller, and the UTF-16 surrogates. */
        if (enh_check
            && ((s1 & 0xc0) != 0x80
                || (s2 & 0xc0) != 0x80
                || c < 0x1000
                || (c >= 0xd800 && c <= 0xdfff))) {
            return 2;
        }
    } else if (s0 <= (enh_check ? 0xf4 : 0xf7)) {
        /* four byte character */
        l = 4;
        if (ilen < 4) {
            return 0;
        }
        s1 = cpu_ldub_data_ra(env, addr + 1, ra);
        s2 = cpu_ldub_data_ra(env, addr + 2, ra);
        s3 = cpu_ldub_data_ra(env, addr + 3, ra);
        c = s0 & 0x07;
        c = (c << 6) | (s1 & 0x3f);
        c = (c << 6) | (s2 & 0x3f);
        c = (c << 6) | (s3 & 0x3f);
        /* See above. */
        if (enh_check
            && ((s1 & 0xc0) != 0x80
                || (s2 & 0xc0) != 0x80
                || (s3 & 0xc0) != 0x80
                || c < 0x010000
                || c > 0x10ffff)) {
            return 2;
        }
    } else {
        /* invalid character */
        return 2;
    }

    *ochar = c;
    *olen = l;
    return -1;
}

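/*
 * The surrogate arithmetic below rebuilds the code point by adding 1 to
 * the four plane bits of the high surrogate. For example, for the pair
 * d834 dd1e (U+1D11E): c = ((0 + 1) << 6) | 0x34 = 0x74, then
 * c = (0x74 << 10) | 0x11e = 0x1d11e.
 */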
static int decode_utf16(CPUS390XState *env, uint64_t addr, uint64_t ilen,
                        bool enh_check, uintptr_t ra,
                        uint32_t *ochar, uint32_t *olen)
{
    uint16_t s0, s1;
    uint32_t c, l;

    if (ilen < 2) {
        return 0;
    }
    s0 = cpu_lduw_data_ra(env, addr, ra);
    if ((s0 & 0xfc00) != 0xd800) {
        /* one word character */
        l = 2;
        c = s0;
    } else {
        /* two word character */
        l = 4;
        if (ilen < 4) {
            return 0;
        }
        s1 = cpu_lduw_data_ra(env, addr + 2, ra);
        c = extract32(s0, 6, 4) + 1;
        c = (c << 6) | (s0 & 0x3f);
        c = (c << 10) | (s1 & 0x3ff);
        if (enh_check && (s1 & 0xfc00) != 0xdc00) {
            /* invalid surrogate character */
            return 2;
        }
    }

    *ochar = c;
    *olen = l;
    return -1;
}

static int decode_utf32(CPUS390XState *env, uint64_t addr, uint64_t ilen,
                        bool enh_check, uintptr_t ra,
                        uint32_t *ochar, uint32_t *olen)
{
    uint32_t c;

    if (ilen < 4) {
        return 0;
    }
    c = cpu_ldl_data_ra(env, addr, ra);
    if ((c >= 0xd800 && c <= 0xdbff) || c > 0x10ffff) {
        /* invalid unicode character */
        return 2;
    }

    *ochar = c;
    *olen = 4;
    return -1;
}

static int encode_utf8(CPUS390XState *env, uint64_t addr, uint64_t ilen,
                       uintptr_t ra, uint32_t c, uint32_t *olen)
{
    uint8_t d[4];
    uint32_t l, i;

    if (c <= 0x7f) {
        /* one byte character */
        l = 1;
        d[0] = c;
    } else if (c <= 0x7ff) {
        /* two byte character */
        l = 2;
        d[1] = 0x80 | extract32(c, 0, 6);
        d[0] = 0xc0 | extract32(c, 6, 5);
    } else if (c <= 0xffff) {
        /* three byte character */
        l = 3;
        d[2] = 0x80 | extract32(c, 0, 6);
        d[1] = 0x80 | extract32(c, 6, 6);
        d[0] = 0xe0 | extract32(c, 12, 4);
    } else {
        /* four byte character */
        l = 4;
        d[3] = 0x80 | extract32(c, 0, 6);
        d[2] = 0x80 | extract32(c, 6, 6);
        d[1] = 0x80 | extract32(c, 12, 6);
        d[0] = 0xf0 | extract32(c, 18, 3);
    }

    if (ilen < l) {
        return 1;
    }
    for (i = 0; i < l; ++i) {
        cpu_stb_data_ra(env, addr + i, d[i], ra);
    }

    *olen = l;
    return -1;
}

static int encode_utf16(CPUS390XState *env, uint64_t addr, uint64_t ilen,
                        uintptr_t ra, uint32_t c, uint32_t *olen)
{
    uint16_t d0, d1;

    if (c <= 0xffff) {
        /* one word character */
        if (ilen < 2) {
            return 1;
        }
        cpu_stw_data_ra(env, addr, c, ra);
        *olen = 2;
    } else {
        /* two word character */
        if (ilen < 4) {
            return 1;
        }
        d1 = 0xdc00 | extract32(c, 0, 10);
        d0 = 0xd800 | extract32(c, 10, 6);
        d0 = deposit32(d0, 6, 4, extract32(c, 16, 5) - 1);
        cpu_stw_data_ra(env, addr + 0, d0, ra);
        cpu_stw_data_ra(env, addr + 2, d1, ra);
        *olen = 4;
    }

    return -1;
}

static int encode_utf32(CPUS390XState *env, uint64_t addr, uint64_t ilen,
                        uintptr_t ra, uint32_t c, uint32_t *olen)
{
    if (ilen < 4) {
        return 1;
    }
    cpu_stl_data_ra(env, addr, c, ra);
    *olen = 4;
    return -1;
}

static inline uint32_t convert_unicode(CPUS390XState *env, uint32_t r1,
                                       uint32_t r2, uint32_t m3, uintptr_t ra,
                                       decode_unicode_fn decode,
                                       encode_unicode_fn encode)
{
    uint64_t dst = get_address(env, r1);
    uint64_t dlen = get_length(env, r1 + 1);
    uint64_t src = get_address(env, r2);
    uint64_t slen = get_length(env, r2 + 1);
    bool enh_check = m3 & 1;
    int cc, i;

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do. For now, let's cap at 256. */
    for (i = 0; i < 256; ++i) {
        uint32_t c, ilen, olen;

        cc = decode(env, src, slen, enh_check, ra, &c, &ilen);
        if (unlikely(cc >= 0)) {
            break;
        }
        cc = encode(env, dst, dlen, ra, c, &olen);
        if (unlikely(cc >= 0)) {
            break;
        }

        src += ilen;
        slen -= ilen;
        dst += olen;
        dlen -= olen;
        cc = 3;
    }

    set_address(env, r1, dst);
    set_length(env, r1 + 1, dlen);
    set_address(env, r2, src);
    set_length(env, r2 + 1, slen);

    return cc;
}

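/*
 * The six CONVERT UNICODE helpers below just pair a decoder with an
 * encoder: CUxy converts from format x to format y, where 1 stands for
 * UTF-8, 2 for UTF-16 and 4 for UTF-32.
 */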
uint32_t HELPER(cu12)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3)
{
    return convert_unicode(env, r1, r2, m3, GETPC(),
                           decode_utf8, encode_utf16);
}

uint32_t HELPER(cu14)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3)
{
    return convert_unicode(env, r1, r2, m3, GETPC(),
                           decode_utf8, encode_utf32);
}

uint32_t HELPER(cu21)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3)
{
    return convert_unicode(env, r1, r2, m3, GETPC(),
                           decode_utf16, encode_utf8);
}

uint32_t HELPER(cu24)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3)
{
    return convert_unicode(env, r1, r2, m3, GETPC(),
                           decode_utf16, encode_utf32);
}

uint32_t HELPER(cu41)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3)
{
    return convert_unicode(env, r1, r2, m3, GETPC(),
                           decode_utf32, encode_utf8);
}

uint32_t HELPER(cu42)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3)
{
    return convert_unicode(env, r1, r2, m3, GETPC(),
                           decode_utf32, encode_utf16);
}

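/*
 * -(addr | TARGET_PAGE_MASK) is the number of bytes from addr up to the
 * next page boundary, so the loop below probes the range one page at a
 * time; e.g. a 300-byte access that starts 100 bytes before a boundary
 * is probed as 100 bytes on the first page and 200 on the second.
 */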
void probe_write_access(CPUS390XState *env, uint64_t addr, uint64_t len,
                        uintptr_t ra)
{
    /* test the actual access, not just any access to the page due to LAP */
    while (len) {
        const uint64_t pagelen = -(addr | TARGET_PAGE_MASK);
        const uint64_t curlen = MIN(pagelen, len);

        probe_write(env, addr, curlen, cpu_mmu_index(env, false), ra);
        addr = wrap_address(env, addr + curlen);
        len -= curlen;
    }
}

void HELPER(probe_write_access)(CPUS390XState *env, uint64_t addr, uint64_t len)
{
    probe_write_access(env, addr, len, GETPC());
}