s390x/tcg: NC: Fault-safe handling
[qemu/kevin.git] / target/s390x/mem_helper.c
blob a97e4aa5352a2e051206b4a46632b2ca48b99cc3

/*
 *  S/390 memory access helper routines
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2009 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "internal.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "qemu/int128.h"
#include "qemu/atomic128.h"

#if !defined(CONFIG_USER_ONLY)
#include "hw/s390x/storage-keys.h"
#endif

/*****************************************************************************/
/* Softmmu support */

/* #define DEBUG_HELPER */
#ifdef DEBUG_HELPER
#define HELPER_LOG(x...) qemu_log(x)
#else
#define HELPER_LOG(x...)
#endif

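/*
 * Check whether an access with the given access key is permitted: in
 * problem state, the matching bit must be set in the PSW-key mask held
 * in CR3; in supervisor state, every key is valid.
 */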
static inline bool psw_key_valid(CPUS390XState *env, uint8_t psw_key)
{
    uint16_t pkm = env->cregs[3] >> 16;

    if (env->psw.mask & PSW_MASK_PSTATE) {
        /* PSW key has range 0..15, it is valid if the bit is 1 in the PKM */
        return pkm & (0x80 >> psw_key);
    }
    return true;
}

static bool is_destructive_overlap(CPUS390XState *env, uint64_t dest,
                                   uint64_t src, uint32_t len)
{
    if (!len || src == dest) {
        return false;
    }
    /* Take care of wrapping at the end of address space. */
    if (unlikely(wrap_address(env, src + len - 1) < src)) {
        return dest > src || dest <= wrap_address(env, src + len - 1);
    }
    return dest > src && dest <= src + len - 1;
}

/* Trigger a SPECIFICATION exception if an address or a length is not
   naturally aligned. */
static inline void check_alignment(CPUS390XState *env, uint64_t v,
                                   int wordsize, uintptr_t ra)
{
    if (v % wordsize) {
        s390_program_interrupt(env, PGM_SPECIFICATION, 6, ra);
    }
}

/* Load a value from memory according to its size. */
static inline uint64_t cpu_ldusize_data_ra(CPUS390XState *env, uint64_t addr,
                                           int wordsize, uintptr_t ra)
{
    switch (wordsize) {
    case 1:
        return cpu_ldub_data_ra(env, addr, ra);
    case 2:
        return cpu_lduw_data_ra(env, addr, ra);
    default:
        abort();
    }
}

/* Store a value to memory according to its size. */
static inline void cpu_stsize_data_ra(CPUS390XState *env, uint64_t addr,
                                      uint64_t value, int wordsize,
                                      uintptr_t ra)
{
    switch (wordsize) {
    case 1:
        cpu_stb_data_ra(env, addr, value, ra);
        break;
    case 2:
        cpu_stw_data_ra(env, addr, value, ra);
        break;
    default:
        abort();
    }
}

/* An access covers at most 4096 bytes and therefore at most two pages. */
typedef struct S390Access {
    target_ulong vaddr1;
    target_ulong vaddr2;
    char *haddr1;
    char *haddr2;
    uint16_t size1;
    uint16_t size2;
    /*
     * If we can't access the host page directly, we'll have to do I/O access
     * via ld/st helpers. These are internal details, so we store the
     * mmu idx to do the access here instead of passing it around in the
     * helpers. Maybe, one day we can get rid of ld/st access - once we can
     * handle TLB_NOTDIRTY differently. We don't expect these special accesses
     * to trigger exceptions - only if we would have TLB_NOTDIRTY on LAP
     * pages, we might trigger a new MMU translation - it is very unlikely
     * that the mapping changes in between and we would trigger a fault.
     */
    int mmu_idx;
} S390Access;

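/*
 * Probe all pages touched by an access up front. probe_access() raises
 * any access exception before a single byte has been modified, which is
 * what makes the helpers below fault-safe: an instruction either completes
 * or leaves guest memory untouched. Typical use (sketch):
 *
 *     desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra);
 *     ...no access exception can be raised past this point...
 */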
static S390Access access_prepare(CPUS390XState *env, vaddr vaddr, int size,
                                 MMUAccessType access_type, int mmu_idx,
                                 uintptr_t ra)
{
    S390Access access = {
        .vaddr1 = vaddr,
        .size1 = MIN(size, -(vaddr | TARGET_PAGE_MASK)),
        .mmu_idx = mmu_idx,
    };

    g_assert(size > 0 && size <= 4096);
    access.haddr1 = probe_access(env, access.vaddr1, access.size1, access_type,
                                 mmu_idx, ra);

    if (unlikely(access.size1 != size)) {
        /* The access crosses page boundaries. */
        access.vaddr2 = wrap_address(env, vaddr + access.size1);
        access.size2 = size - access.size1;
        access.haddr2 = probe_access(env, access.vaddr2, access.size2,
                                     access_type, mmu_idx, ra);
    }
    return access;
}

/* Helper to handle memset on a single page. */
static void do_access_memset(CPUS390XState *env, vaddr vaddr, char *haddr,
                             uint8_t byte, uint16_t size, int mmu_idx,
                             uintptr_t ra)
{
#ifdef CONFIG_USER_ONLY
    g_assert(haddr);
    memset(haddr, byte, size);
#else
    TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
    int i;

    if (likely(haddr)) {
        memset(haddr, byte, size);
    } else {
        /*
         * Do a single access and test if we can then get access to the
         * page. This is especially relevant to speed up TLB_NOTDIRTY.
         */
        g_assert(size > 0);
        helper_ret_stb_mmu(env, vaddr, byte, oi, ra);
        haddr = tlb_vaddr_to_host(env, vaddr, MMU_DATA_STORE, mmu_idx);
        if (likely(haddr)) {
            memset(haddr + 1, byte, size - 1);
        } else {
            for (i = 1; i < size; i++) {
                helper_ret_stb_mmu(env, vaddr + i, byte, oi, ra);
            }
        }
    }
#endif
}

static void access_memset(CPUS390XState *env, S390Access *desta,
                          uint8_t byte, uintptr_t ra)
{
    do_access_memset(env, desta->vaddr1, desta->haddr1, byte, desta->size1,
                     desta->mmu_idx, ra);
    if (likely(!desta->size2)) {
        return;
    }
    do_access_memset(env, desta->vaddr2, desta->haddr2, byte, desta->size2,
                     desta->mmu_idx, ra);
}

static uint8_t do_access_get_byte(CPUS390XState *env, vaddr vaddr, char **haddr,
                                  int offset, int mmu_idx, uintptr_t ra)
{
#ifdef CONFIG_USER_ONLY
    return ldub_p(*haddr + offset);
#else
    TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
    uint8_t byte;

    if (likely(*haddr)) {
        return ldub_p(*haddr + offset);
    }
    /*
     * Do a single access and test if we can then get access to the
     * page. This is especially relevant to speed up TLB_NOTDIRTY.
     */
    byte = helper_ret_ldub_mmu(env, vaddr + offset, oi, ra);
    *haddr = tlb_vaddr_to_host(env, vaddr, MMU_DATA_LOAD, mmu_idx);
    return byte;
#endif
}

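/* Read one byte, routing the offset to the first or the second page. */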
static uint8_t access_get_byte(CPUS390XState *env, S390Access *access,
                               int offset, uintptr_t ra)
{
    if (offset < access->size1) {
        return do_access_get_byte(env, access->vaddr1, &access->haddr1,
                                  offset, access->mmu_idx, ra);
    }
    return do_access_get_byte(env, access->vaddr2, &access->haddr2,
                              offset - access->size1, access->mmu_idx, ra);
}

static void do_access_set_byte(CPUS390XState *env, vaddr vaddr, char **haddr,
                               int offset, uint8_t byte, int mmu_idx,
                               uintptr_t ra)
{
#ifdef CONFIG_USER_ONLY
    stb_p(*haddr + offset, byte);
#else
    TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);

    if (likely(*haddr)) {
        stb_p(*haddr + offset, byte);
        return;
    }
    /*
     * Do a single access and test if we can then get access to the
     * page. This is especially relevant to speed up TLB_NOTDIRTY.
     */
    helper_ret_stb_mmu(env, vaddr + offset, byte, oi, ra);
    *haddr = tlb_vaddr_to_host(env, vaddr, MMU_DATA_STORE, mmu_idx);
#endif
}

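/* Write one byte, routing the offset to the first or the second page. */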
static void access_set_byte(CPUS390XState *env, S390Access *access,
                            int offset, uint8_t byte, uintptr_t ra)
{
    if (offset < access->size1) {
        do_access_set_byte(env, access->vaddr1, &access->haddr1, offset, byte,
                           access->mmu_idx, ra);
    } else {
        do_access_set_byte(env, access->vaddr2, &access->haddr2,
                           offset - access->size1, byte, access->mmu_idx, ra);
    }
}

/*
 * Move data with the same semantics as memmove() in case ranges don't overlap
 * or src > dest. Undefined behavior on destructive overlaps.
 */
static void access_memmove(CPUS390XState *env, S390Access *desta,
                           S390Access *srca, uintptr_t ra)
{
    int diff;

    g_assert(desta->size1 + desta->size2 == srca->size1 + srca->size2);

    /* Fallback to slow access in case we don't have access to all host pages */
    if (unlikely(!desta->haddr1 || (desta->size2 && !desta->haddr2) ||
                 !srca->haddr1 || (srca->size2 && !srca->haddr2))) {
        int i;

        for (i = 0; i < desta->size1 + desta->size2; i++) {
            uint8_t byte = access_get_byte(env, srca, i, ra);

            access_set_byte(env, desta, i, byte, ra);
        }
        return;
    }

    if (srca->size1 == desta->size1) {
        memmove(desta->haddr1, srca->haddr1, srca->size1);
        if (unlikely(srca->size2)) {
            memmove(desta->haddr2, srca->haddr2, srca->size2);
        }
    } else if (srca->size1 < desta->size1) {
        diff = desta->size1 - srca->size1;
        memmove(desta->haddr1, srca->haddr1, srca->size1);
        memmove(desta->haddr1 + srca->size1, srca->haddr2, diff);
        if (likely(desta->size2)) {
            memmove(desta->haddr2, srca->haddr2 + diff, desta->size2);
        }
    } else {
        diff = srca->size1 - desta->size1;
        memmove(desta->haddr1, srca->haddr1, desta->size1);
        memmove(desta->haddr2, srca->haddr1 + desta->size1, diff);
        if (likely(srca->size2)) {
            memmove(desta->haddr2 + diff, srca->haddr2, srca->size2);
        }
    }
}

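/* Map an address-space indication (PSW ASC bits) to an MMU index. */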
static int mmu_idx_from_as(uint8_t as)
{
    switch (as) {
    case AS_PRIMARY:
        return MMU_PRIMARY_IDX;
    case AS_SECONDARY:
        return MMU_SECONDARY_IDX;
    case AS_HOME:
        return MMU_HOME_IDX;
    default:
        /* FIXME AS_ACCREG */
        g_assert_not_reached();
    }
}

/* and on array */
static uint32_t do_helper_nc(CPUS390XState *env, uint32_t l, uint64_t dest,
                             uint64_t src, uintptr_t ra)
{
    const int mmu_idx = cpu_mmu_index(env, false);
    S390Access srca1, srca2, desta;
    uint32_t i;
    uint8_t c = 0;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __func__, l, dest, src);

    /* NC always processes one more byte than specified - maximum is 256 */
    l++;

    srca1 = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
    srca2 = access_prepare(env, dest, l, MMU_DATA_LOAD, mmu_idx, ra);
    desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra);
    for (i = 0; i < l; i++) {
        const uint8_t x = access_get_byte(env, &srca1, i, ra) &
                          access_get_byte(env, &srca2, i, ra);

        c |= x;
        access_set_byte(env, &desta, i, x, ra);
    }
    return c != 0;
}

uint32_t HELPER(nc)(CPUS390XState *env, uint32_t l, uint64_t dest,
                    uint64_t src)
{
    return do_helper_nc(env, l, dest, src, GETPC());
}

/* xor on array */
static uint32_t do_helper_xc(CPUS390XState *env, uint32_t l, uint64_t dest,
                             uint64_t src, uintptr_t ra)
{
    const int mmu_idx = cpu_mmu_index(env, false);
    S390Access srca1, srca2, desta;
    uint32_t i;
    uint8_t c = 0;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __func__, l, dest, src);

    /* XC always processes one more byte than specified - maximum is 256 */
    l++;

    srca1 = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
    srca2 = access_prepare(env, dest, l, MMU_DATA_LOAD, mmu_idx, ra);
    desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra);

    /* xor with itself is the same as memset(0) */
    if (src == dest) {
        access_memset(env, &desta, 0, ra);
        return 0;
    }

    for (i = 0; i < l; i++) {
        const uint8_t x = access_get_byte(env, &srca1, i, ra) ^
                          access_get_byte(env, &srca2, i, ra);

        c |= x;
        access_set_byte(env, &desta, i, x, ra);
    }
    return c != 0;
}

uint32_t HELPER(xc)(CPUS390XState *env, uint32_t l, uint64_t dest,
                    uint64_t src)
{
    return do_helper_xc(env, l, dest, src, GETPC());
}

/* or on array */
static uint32_t do_helper_oc(CPUS390XState *env, uint32_t l, uint64_t dest,
                             uint64_t src, uintptr_t ra)
{
    const int mmu_idx = cpu_mmu_index(env, false);
    S390Access srca1, srca2, desta;
    uint32_t i;
    uint8_t c = 0;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __func__, l, dest, src);

    /* OC always processes one more byte than specified - maximum is 256 */
    l++;

    srca1 = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
    srca2 = access_prepare(env, dest, l, MMU_DATA_LOAD, mmu_idx, ra);
    desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra);
    for (i = 0; i < l; i++) {
        const uint8_t x = access_get_byte(env, &srca1, i, ra) |
                          access_get_byte(env, &srca2, i, ra);

        c |= x;
        access_set_byte(env, &desta, i, x, ra);
    }
    return c != 0;
}

uint32_t HELPER(oc)(CPUS390XState *env, uint32_t l, uint64_t dest,
                    uint64_t src)
{
    return do_helper_oc(env, l, dest, src, GETPC());
}

/* memmove */
static uint32_t do_helper_mvc(CPUS390XState *env, uint32_t l, uint64_t dest,
                              uint64_t src, uintptr_t ra)
{
    const int mmu_idx = cpu_mmu_index(env, false);
    S390Access srca, desta;
    uint32_t i;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __func__, l, dest, src);

    /* MVC always copies one more byte than specified - maximum is 256 */
    l++;

    srca = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
    desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra);

    /*
     * "When the operands overlap, the result is obtained as if the operands
     * were processed one byte at a time". Only non-destructive overlaps
     * behave like memmove().
     */
    if (dest == src + 1) {
        access_memset(env, &desta, access_get_byte(env, &srca, 0, ra), ra);
    } else if (!is_destructive_overlap(env, dest, src, l)) {
        access_memmove(env, &desta, &srca, ra);
    } else {
        for (i = 0; i < l; i++) {
            uint8_t byte = access_get_byte(env, &srca, i, ra);

            access_set_byte(env, &desta, i, byte, ra);
        }
    }

    return env->cc_op;
}

void HELPER(mvc)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
{
    do_helper_mvc(env, l, dest, src, GETPC());
}

/* move inverse */
void HELPER(mvcin)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
{
    uintptr_t ra = GETPC();
    int i;

    for (i = 0; i <= l; i++) {
        uint8_t v = cpu_ldub_data_ra(env, src - i, ra);
        cpu_stb_data_ra(env, dest + i, v, ra);
    }
}

/* move numerics */
void HELPER(mvn)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
{
    uintptr_t ra = GETPC();
    int i;

    for (i = 0; i <= l; i++) {
        uint8_t v = cpu_ldub_data_ra(env, dest + i, ra) & 0xf0;
        v |= cpu_ldub_data_ra(env, src + i, ra) & 0x0f;
        cpu_stb_data_ra(env, dest + i, v, ra);
    }
}

/* move with offset */
void HELPER(mvo)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
{
    uintptr_t ra = GETPC();
    int len_dest = l >> 4;
    int len_src = l & 0xf;
    uint8_t byte_dest, byte_src;
    int i;

    src += len_src;
    dest += len_dest;

    /* Handle rightmost byte */
    byte_src = cpu_ldub_data_ra(env, src, ra);
    byte_dest = cpu_ldub_data_ra(env, dest, ra);
    byte_dest = (byte_dest & 0x0f) | (byte_src << 4);
    cpu_stb_data_ra(env, dest, byte_dest, ra);

    /* Process remaining bytes from right to left */
    for (i = 1; i <= len_dest; i++) {
        byte_dest = byte_src >> 4;
        if (len_src - i >= 0) {
            byte_src = cpu_ldub_data_ra(env, src - i, ra);
        } else {
            byte_src = 0;
        }
        byte_dest |= byte_src << 4;
        cpu_stb_data_ra(env, dest - i, byte_dest, ra);
    }
}

/* move zones */
void HELPER(mvz)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
{
    uintptr_t ra = GETPC();
    int i;

    for (i = 0; i <= l; i++) {
        uint8_t b = cpu_ldub_data_ra(env, dest + i, ra) & 0x0f;
        b |= cpu_ldub_data_ra(env, src + i, ra) & 0xf0;
        cpu_stb_data_ra(env, dest + i, b, ra);
    }
}

/* compare unsigned byte arrays */
static uint32_t do_helper_clc(CPUS390XState *env, uint32_t l, uint64_t s1,
                              uint64_t s2, uintptr_t ra)
{
    uint32_t i;
    uint32_t cc = 0;

    HELPER_LOG("%s l %d s1 %" PRIx64 " s2 %" PRIx64 "\n",
               __func__, l, s1, s2);

    for (i = 0; i <= l; i++) {
        uint8_t x = cpu_ldub_data_ra(env, s1 + i, ra);
        uint8_t y = cpu_ldub_data_ra(env, s2 + i, ra);
        HELPER_LOG("%02x (%c)/%02x (%c) ", x, x, y, y);
        if (x < y) {
            cc = 1;
            break;
        } else if (x > y) {
            cc = 2;
            break;
        }
    }

    HELPER_LOG("\n");
    return cc;
}

uint32_t HELPER(clc)(CPUS390XState *env, uint32_t l, uint64_t s1, uint64_t s2)
{
    return do_helper_clc(env, l, s1, s2, GETPC());
}

/* compare logical under mask */
uint32_t HELPER(clm)(CPUS390XState *env, uint32_t r1, uint32_t mask,
                     uint64_t addr)
{
    uintptr_t ra = GETPC();
    uint32_t cc = 0;

    HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%" PRIx64 "\n", __func__, r1,
               mask, addr);

    while (mask) {
        if (mask & 8) {
            uint8_t d = cpu_ldub_data_ra(env, addr, ra);
            uint8_t r = extract32(r1, 24, 8);
            HELPER_LOG("mask 0x%x %02x/%02x (0x%" PRIx64 ") ", mask, r, d,
                       addr);
            if (r < d) {
                cc = 1;
                break;
            } else if (r > d) {
                cc = 2;
                break;
            }
            addr++;
        }
        mask = (mask << 1) & 0xf;
        r1 <<= 8;
    }

    HELPER_LOG("\n");
    return cc;
}

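/* Read a register as an address, wrapped for the current addressing mode. */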
static inline uint64_t get_address(CPUS390XState *env, int reg)
{
    return wrap_address(env, env->regs[reg]);
}

/*
 * Store the address to the given register, zeroing out unused leftmost
 * bits in bit positions 32-63 (24-bit and 31-bit mode only).
 */
static inline void set_address_zero(CPUS390XState *env, int reg,
                                    uint64_t address)
{
    if (env->psw.mask & PSW_MASK_64) {
        env->regs[reg] = address;
    } else {
        if (!(env->psw.mask & PSW_MASK_32)) {
            address &= 0x00ffffff;
        } else {
            address &= 0x7fffffff;
        }
        env->regs[reg] = deposit64(env->regs[reg], 0, 32, address);
    }
}

static inline void set_address(CPUS390XState *env, int reg, uint64_t address)
{
    if (env->psw.mask & PSW_MASK_64) {
        /* 64-Bit mode */
        env->regs[reg] = address;
    } else {
        if (!(env->psw.mask & PSW_MASK_32)) {
            /* 24-Bit mode. According to the PoO it is implementation
               dependent if bits 32-39 remain unchanged or are set to
               zeros. Choose the former so that the function can also be
               used for TRT. */
            env->regs[reg] = deposit64(env->regs[reg], 0, 24, address);
        } else {
            /* 31-Bit mode. According to the PoO it is implementation
               dependent if bit 32 remains unchanged or is set to zero.
               Choose the latter so that the function can also be used for
               TRT. */
            address &= 0x7fffffff;
            env->regs[reg] = deposit64(env->regs[reg], 0, 32, address);
        }
    }
}

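/* Truncate a length to 32 bits when not running in 64-bit addressing mode. */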
static inline uint64_t wrap_length32(CPUS390XState *env, uint64_t length)
{
    if (!(env->psw.mask & PSW_MASK_64)) {
        return (uint32_t)length;
    }
    return length;
}

static inline uint64_t wrap_length31(CPUS390XState *env, uint64_t length)
{
    if (!(env->psw.mask & PSW_MASK_64)) {
        /* 24-Bit and 31-Bit mode */
        length &= 0x7fffffff;
    }
    return length;
}

static inline uint64_t get_length(CPUS390XState *env, int reg)
{
    return wrap_length31(env, env->regs[reg]);
}

static inline void set_length(CPUS390XState *env, int reg, uint64_t length)
{
    if (env->psw.mask & PSW_MASK_64) {
        /* 64-Bit mode */
        env->regs[reg] = length;
    } else {
        /* 24-Bit and 31-Bit mode */
        env->regs[reg] = deposit64(env->regs[reg], 0, 32, length);
    }
}

/* search string (c is byte to search, r2 is string, r1 end of string) */
void HELPER(srst)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    uintptr_t ra = GETPC();
    uint64_t end, str;
    uint32_t len;
    uint8_t v, c = env->regs[0];

    /* Bits 32-55 must contain all 0. */
    if (env->regs[0] & 0xffffff00u) {
        s390_program_interrupt(env, PGM_SPECIFICATION, 6, ra);
    }

    str = get_address(env, r2);
    end = get_address(env, r1);

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 8k. */
    for (len = 0; len < 0x2000; ++len) {
        if (str + len == end) {
            /* Character not found.  R1 & R2 are unmodified. */
            env->cc_op = 2;
            return;
        }
        v = cpu_ldub_data_ra(env, str + len, ra);
        if (v == c) {
            /* Character found.  Set R1 to the location; R2 is unmodified. */
            env->cc_op = 1;
            set_address(env, r1, str + len);
            return;
        }
    }

    /* CPU-determined bytes processed.  Advance R2 to next byte to process. */
    env->cc_op = 3;
    set_address(env, r2, str + len);
}

void HELPER(srstu)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    uintptr_t ra = GETPC();
    uint32_t len;
    uint16_t v, c = env->regs[0];
    uint64_t end, str, adj_end;

    /* Bits 32-47 of R0 must be zero. */
    if (env->regs[0] & 0xffff0000u) {
        s390_program_interrupt(env, PGM_SPECIFICATION, 6, ra);
    }

    str = get_address(env, r2);
    end = get_address(env, r1);

    /* If the LSB of the two addresses differ, use one extra byte. */
    adj_end = end + ((str ^ end) & 1);

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 8k. */
    for (len = 0; len < 0x2000; len += 2) {
        if (str + len == adj_end) {
            /* End of input found. */
            env->cc_op = 2;
            return;
        }
        v = cpu_lduw_data_ra(env, str + len, ra);
        if (v == c) {
            /* Character found.  Set R1 to the location; R2 is unmodified. */
            env->cc_op = 1;
            set_address(env, r1, str + len);
            return;
        }
    }

    /* CPU-determined bytes processed.  Advance R2 to next byte to process. */
    env->cc_op = 3;
    set_address(env, r2, str + len);
}

/* unsigned string compare (c is string terminator) */
uint64_t HELPER(clst)(CPUS390XState *env, uint64_t c, uint64_t s1, uint64_t s2)
{
    uintptr_t ra = GETPC();
    uint32_t len;

    c = c & 0xff;
    s1 = wrap_address(env, s1);
    s2 = wrap_address(env, s2);

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 8k. */
    for (len = 0; len < 0x2000; ++len) {
        uint8_t v1 = cpu_ldub_data_ra(env, s1 + len, ra);
        uint8_t v2 = cpu_ldub_data_ra(env, s2 + len, ra);
        if (v1 == v2) {
            if (v1 == c) {
                /* Equal.  CC=0, and don't advance the registers. */
                env->cc_op = 0;
                env->retxl = s2;
                return s1;
            }
        } else {
            /* Unequal.  CC={1,2}, and advance the registers.  Note that
               the terminator need not be zero, but the string that contains
               the terminator is by definition "low". */
            env->cc_op = (v1 == c ? 1 : v2 == c ? 2 : v1 < v2 ? 1 : 2);
            env->retxl = s2 + len;
            return s1 + len;
        }
    }

    /* CPU-determined bytes equal; advance the registers. */
    env->cc_op = 3;
    env->retxl = s2 + len;
    return s1 + len;
}

/* move page */
uint32_t HELPER(mvpg)(CPUS390XState *env, uint64_t r0, uint64_t r1, uint64_t r2)
{
    const int mmu_idx = cpu_mmu_index(env, false);
    const bool f = extract64(r0, 11, 1);
    const bool s = extract64(r0, 10, 1);
    uintptr_t ra = GETPC();
    S390Access srca, desta;

    if ((f && s) || extract64(r0, 12, 4)) {
        s390_program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO, GETPC());
    }

    r1 = wrap_address(env, r1 & TARGET_PAGE_MASK);
    r2 = wrap_address(env, r2 & TARGET_PAGE_MASK);

    /*
     * TODO:
     * - Access key handling
     * - CC-option with suppression of page-translation exceptions
     * - Store r1/r2 register identifiers at real location 162
     */
    srca = access_prepare(env, r2, TARGET_PAGE_SIZE, MMU_DATA_LOAD, mmu_idx,
                          ra);
    desta = access_prepare(env, r1, TARGET_PAGE_SIZE, MMU_DATA_STORE, mmu_idx,
                           ra);
    access_memmove(env, &desta, &srca, ra);
    return 0; /* data moved */
}

/* string copy */
uint32_t HELPER(mvst)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    const uint64_t d = get_address(env, r1);
    const uint64_t s = get_address(env, r2);
    const uint8_t c = env->regs[0];
    uintptr_t ra = GETPC();
    uint32_t len;

    if (env->regs[0] & 0xffffff00ull) {
        s390_program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO, ra);
    }

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 8k. */
    for (len = 0; len < 0x2000; ++len) {
        uint8_t v = cpu_ldub_data_ra(env, s + len, ra);
        cpu_stb_data_ra(env, d + len, v, ra);
        if (v == c) {
            set_address_zero(env, r1, d + len);
            return 1;
        }
    }
    set_address_zero(env, r1, d + len);
    set_address_zero(env, r2, s + len);
    return 3;
}

/* load access registers r1 to r3 from memory at a2 */
void HELPER(lam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    uintptr_t ra = GETPC();
    int i;

    if (a2 & 0x3) {
        /* we either came here by lam or lamy, which have different lengths */
        s390_program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO, ra);
    }

    for (i = r1;; i = (i + 1) % 16) {
        env->aregs[i] = cpu_ldl_data_ra(env, a2, ra);
        a2 += 4;

        if (i == r3) {
            break;
        }
    }
}

/* store access registers r1 to r3 in memory at a2 */
void HELPER(stam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    uintptr_t ra = GETPC();
    int i;

    if (a2 & 0x3) {
        s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
    }

    for (i = r1;; i = (i + 1) % 16) {
        cpu_stl_data_ra(env, a2, env->aregs[i], ra);
        a2 += 4;

        if (i == r3) {
            break;
        }
    }
}

/* move long helper */
static inline uint32_t do_mvcl(CPUS390XState *env,
                               uint64_t *dest, uint64_t *destlen,
                               uint64_t *src, uint64_t *srclen,
                               uint16_t pad, int wordsize, uintptr_t ra)
{
    const int mmu_idx = cpu_mmu_index(env, false);
    int len = MIN(*destlen, -(*dest | TARGET_PAGE_MASK));
    S390Access srca, desta;
    int i, cc;

    if (*destlen == *srclen) {
        cc = 0;
    } else if (*destlen < *srclen) {
        cc = 1;
    } else {
        cc = 2;
    }

    if (!*destlen) {
        return cc;
    }

    /*
     * Only perform one type of operation (move/pad) at a time.
     * Stay within single pages.
     */
    if (*srclen) {
        /* Copy the src array */
        len = MIN(MIN(*srclen, -(*src | TARGET_PAGE_MASK)), len);
        *destlen -= len;
        *srclen -= len;
        srca = access_prepare(env, *src, len, MMU_DATA_LOAD, mmu_idx, ra);
        desta = access_prepare(env, *dest, len, MMU_DATA_STORE, mmu_idx, ra);
        access_memmove(env, &desta, &srca, ra);
        *src = wrap_address(env, *src + len);
        *dest = wrap_address(env, *dest + len);
    } else if (wordsize == 1) {
        /* Pad the remaining area */
        *destlen -= len;
        desta = access_prepare(env, *dest, len, MMU_DATA_STORE, mmu_idx, ra);
        access_memset(env, &desta, pad, ra);
        *dest = wrap_address(env, *dest + len);
    } else {
        desta = access_prepare(env, *dest, len, MMU_DATA_STORE, mmu_idx, ra);

        /* The remaining length selects the padding byte. */
        for (i = 0; i < len; (*destlen)--, i++) {
            if (*destlen & 1) {
                access_set_byte(env, &desta, i, pad, ra);
            } else {
                access_set_byte(env, &desta, i, pad >> 8, ra);
            }
        }
        *dest = wrap_address(env, *dest + len);
    }

    return *destlen ? 3 : cc;
}

/* move long */
uint32_t HELPER(mvcl)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    const int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();
    uint64_t destlen = env->regs[r1 + 1] & 0xffffff;
    uint64_t dest = get_address(env, r1);
    uint64_t srclen = env->regs[r2 + 1] & 0xffffff;
    uint64_t src = get_address(env, r2);
    uint8_t pad = env->regs[r2 + 1] >> 24;
    S390Access srca, desta;
    uint32_t cc, cur_len;

    if (is_destructive_overlap(env, dest, src, MIN(srclen, destlen))) {
        cc = 3;
    } else if (srclen == destlen) {
        cc = 0;
    } else if (destlen < srclen) {
        cc = 1;
    } else {
        cc = 2;
    }

    /* We might have to zero-out some bits even if there was no action. */
    if (unlikely(!destlen || cc == 3)) {
        set_address_zero(env, r2, src);
        set_address_zero(env, r1, dest);
        return cc;
    } else if (!srclen) {
        set_address_zero(env, r2, src);
    }

    /*
     * Only perform one type of operation (move/pad) in one step.
     * Stay within single pages.
     */
    while (destlen) {
        cur_len = MIN(destlen, -(dest | TARGET_PAGE_MASK));
        if (!srclen) {
            desta = access_prepare(env, dest, cur_len, MMU_DATA_STORE, mmu_idx,
                                   ra);
            access_memset(env, &desta, pad, ra);
        } else {
            cur_len = MIN(MIN(srclen, -(src | TARGET_PAGE_MASK)), cur_len);

            srca = access_prepare(env, src, cur_len, MMU_DATA_LOAD, mmu_idx,
                                  ra);
            desta = access_prepare(env, dest, cur_len, MMU_DATA_STORE, mmu_idx,
                                   ra);
            access_memmove(env, &desta, &srca, ra);
            src = wrap_address(env, src + cur_len);
            srclen -= cur_len;
            env->regs[r2 + 1] = deposit64(env->regs[r2 + 1], 0, 24, srclen);
            set_address_zero(env, r2, src);
        }
        dest = wrap_address(env, dest + cur_len);
        destlen -= cur_len;
        env->regs[r1 + 1] = deposit64(env->regs[r1 + 1], 0, 24, destlen);
        set_address_zero(env, r1, dest);

        /* TODO: Deliver interrupts. */
    }
    return cc;
}

/* move long extended */
uint32_t HELPER(mvcle)(CPUS390XState *env, uint32_t r1, uint64_t a2,
                       uint32_t r3)
{
    uintptr_t ra = GETPC();
    uint64_t destlen = get_length(env, r1 + 1);
    uint64_t dest = get_address(env, r1);
    uint64_t srclen = get_length(env, r3 + 1);
    uint64_t src = get_address(env, r3);
    uint8_t pad = a2;
    uint32_t cc;

    cc = do_mvcl(env, &dest, &destlen, &src, &srclen, pad, 1, ra);

    set_length(env, r1 + 1, destlen);
    set_length(env, r3 + 1, srclen);
    set_address(env, r1, dest);
    set_address(env, r3, src);

    return cc;
}

/* move long unicode */
uint32_t HELPER(mvclu)(CPUS390XState *env, uint32_t r1, uint64_t a2,
                       uint32_t r3)
{
    uintptr_t ra = GETPC();
    uint64_t destlen = get_length(env, r1 + 1);
    uint64_t dest = get_address(env, r1);
    uint64_t srclen = get_length(env, r3 + 1);
    uint64_t src = get_address(env, r3);
    uint16_t pad = a2;
    uint32_t cc;

    cc = do_mvcl(env, &dest, &destlen, &src, &srclen, pad, 2, ra);

    set_length(env, r1 + 1, destlen);
    set_length(env, r3 + 1, srclen);
    set_address(env, r1, dest);
    set_address(env, r3, src);

    return cc;
}

/* compare logical long helper */
static inline uint32_t do_clcl(CPUS390XState *env,
                               uint64_t *src1, uint64_t *src1len,
                               uint64_t *src3, uint64_t *src3len,
                               uint16_t pad, uint64_t limit,
                               int wordsize, uintptr_t ra)
{
    uint64_t len = MAX(*src1len, *src3len);
    uint32_t cc = 0;

    check_alignment(env, *src1len | *src3len, wordsize, ra);

    if (!len) {
        return cc;
    }

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do. */
    if (len > limit) {
        len = limit;
        cc = 3;
    }

    for (; len; len -= wordsize) {
        uint16_t v1 = pad;
        uint16_t v3 = pad;

        if (*src1len) {
            v1 = cpu_ldusize_data_ra(env, *src1, wordsize, ra);
        }
        if (*src3len) {
            v3 = cpu_ldusize_data_ra(env, *src3, wordsize, ra);
        }

        if (v1 != v3) {
            cc = (v1 < v3) ? 1 : 2;
            break;
        }

        if (*src1len) {
            *src1 += wordsize;
            *src1len -= wordsize;
        }
        if (*src3len) {
            *src3 += wordsize;
            *src3len -= wordsize;
        }
    }

    return cc;
}

/* compare logical long */
uint32_t HELPER(clcl)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    uintptr_t ra = GETPC();
    uint64_t src1len = extract64(env->regs[r1 + 1], 0, 24);
    uint64_t src1 = get_address(env, r1);
    uint64_t src3len = extract64(env->regs[r2 + 1], 0, 24);
    uint64_t src3 = get_address(env, r2);
    uint8_t pad = env->regs[r2 + 1] >> 24;
    uint32_t cc;

    cc = do_clcl(env, &src1, &src1len, &src3, &src3len, pad, -1, 1, ra);

    env->regs[r1 + 1] = deposit64(env->regs[r1 + 1], 0, 24, src1len);
    env->regs[r2 + 1] = deposit64(env->regs[r2 + 1], 0, 24, src3len);
    set_address(env, r1, src1);
    set_address(env, r2, src3);

    return cc;
}

/* compare logical long extended memcompare insn with padding */
uint32_t HELPER(clcle)(CPUS390XState *env, uint32_t r1, uint64_t a2,
                       uint32_t r3)
{
    uintptr_t ra = GETPC();
    uint64_t src1len = get_length(env, r1 + 1);
    uint64_t src1 = get_address(env, r1);
    uint64_t src3len = get_length(env, r3 + 1);
    uint64_t src3 = get_address(env, r3);
    uint8_t pad = a2;
    uint32_t cc;

    cc = do_clcl(env, &src1, &src1len, &src3, &src3len, pad, 0x2000, 1, ra);

    set_length(env, r1 + 1, src1len);
    set_length(env, r3 + 1, src3len);
    set_address(env, r1, src1);
    set_address(env, r3, src3);

    return cc;
}

/* compare logical long unicode memcompare insn with padding */
uint32_t HELPER(clclu)(CPUS390XState *env, uint32_t r1, uint64_t a2,
                       uint32_t r3)
{
    uintptr_t ra = GETPC();
    uint64_t src1len = get_length(env, r1 + 1);
    uint64_t src1 = get_address(env, r1);
    uint64_t src3len = get_length(env, r3 + 1);
    uint64_t src3 = get_address(env, r3);
    uint16_t pad = a2;
    uint32_t cc = 0;

    cc = do_clcl(env, &src1, &src1len, &src3, &src3len, pad, 0x1000, 2, ra);

    set_length(env, r1 + 1, src1len);
    set_length(env, r3 + 1, src3len);
    set_address(env, r1, src1);
    set_address(env, r3, src3);

    return cc;
}

/* checksum */
uint64_t HELPER(cksm)(CPUS390XState *env, uint64_t r1,
                      uint64_t src, uint64_t src_len)
{
    uintptr_t ra = GETPC();
    uint64_t max_len, len;
    uint64_t cksm = (uint32_t)r1;

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 8k. */
    max_len = (src_len > 0x2000 ? 0x2000 : src_len);

    /* Process full words as available. */
    for (len = 0; len + 4 <= max_len; len += 4, src += 4) {
        cksm += (uint32_t)cpu_ldl_data_ra(env, src, ra);
    }

    switch (max_len - len) {
    case 1:
        cksm += cpu_ldub_data_ra(env, src, ra) << 24;
        len += 1;
        break;
    case 2:
        cksm += cpu_lduw_data_ra(env, src, ra) << 16;
        len += 2;
        break;
    case 3:
        cksm += cpu_lduw_data_ra(env, src, ra) << 16;
        cksm += cpu_ldub_data_ra(env, src + 2, ra) << 8;
        len += 3;
        break;
    }

    /* Fold the carry from the checksum.  Note that we can see carry-out
       during folding more than once (but probably not more than twice). */
    while (cksm > 0xffffffffull) {
        cksm = (uint32_t)cksm + (cksm >> 32);
    }

    /* Indicate whether or not we've processed everything. */
    env->cc_op = (len == src_len ? 0 : 3);

    /* Return both cksm and processed length. */
    env->retxl = cksm;
    return len;
}

void HELPER(pack)(CPUS390XState *env, uint32_t len, uint64_t dest, uint64_t src)
{
    uintptr_t ra = GETPC();
    int len_dest = len >> 4;
    int len_src = len & 0xf;
    uint8_t b;

    dest += len_dest;
    src += len_src;

    /* last byte is special, it only flips the nibbles */
    b = cpu_ldub_data_ra(env, src, ra);
    cpu_stb_data_ra(env, dest, (b << 4) | (b >> 4), ra);
    src--;
    len_src--;

    /* now pack every value */
    while (len_dest > 0) {
        b = 0;

        if (len_src >= 0) {
            b = cpu_ldub_data_ra(env, src, ra) & 0x0f;
            src--;
            len_src--;
        }
        if (len_src >= 0) {
            b |= cpu_ldub_data_ra(env, src, ra) << 4;
            src--;
            len_src--;
        }

        len_dest--;
        dest--;
        cpu_stb_data_ra(env, dest, b, ra);
    }
}

static inline void do_pkau(CPUS390XState *env, uint64_t dest, uint64_t src,
                           uint32_t srclen, int ssize, uintptr_t ra)
{
    int i;
    /* The destination operand is always 16 bytes long. */
    const int destlen = 16;

    /* The operands are processed from right to left. */
    src += srclen - 1;
    dest += destlen - 1;

    for (i = 0; i < destlen; i++) {
        uint8_t b = 0;

        /* Start with a positive sign */
        if (i == 0) {
            b = 0xc;
        } else if (srclen > ssize) {
            b = cpu_ldub_data_ra(env, src, ra) & 0x0f;
            src -= ssize;
            srclen -= ssize;
        }

        if (srclen > ssize) {
            b |= cpu_ldub_data_ra(env, src, ra) << 4;
            src -= ssize;
            srclen -= ssize;
        }

        cpu_stb_data_ra(env, dest, b, ra);
        dest--;
    }
}

void HELPER(pka)(CPUS390XState *env, uint64_t dest, uint64_t src,
                 uint32_t srclen)
{
    do_pkau(env, dest, src, srclen, 1, GETPC());
}

void HELPER(pku)(CPUS390XState *env, uint64_t dest, uint64_t src,
                 uint32_t srclen)
{
    do_pkau(env, dest, src, srclen, 2, GETPC());
}

void HELPER(unpk)(CPUS390XState *env, uint32_t len, uint64_t dest,
                  uint64_t src)
{
    uintptr_t ra = GETPC();
    int len_dest = len >> 4;
    int len_src = len & 0xf;
    uint8_t b;
    int second_nibble = 0;

    dest += len_dest;
    src += len_src;

    /* last byte is special, it only flips the nibbles */
    b = cpu_ldub_data_ra(env, src, ra);
    cpu_stb_data_ra(env, dest, (b << 4) | (b >> 4), ra);
    src--;
    len_src--;

    /* now pad every nibble with 0xf0 */
    while (len_dest > 0) {
        uint8_t cur_byte = 0;

        if (len_src > 0) {
            cur_byte = cpu_ldub_data_ra(env, src, ra);
        }

        len_dest--;
        dest--;

        /* only advance one nibble at a time */
        if (second_nibble) {
            cur_byte >>= 4;
            len_src--;
            src--;
        }
        second_nibble = !second_nibble;

        /* digit */
        cur_byte = (cur_byte & 0xf);
        /* zone bits */
        cur_byte |= 0xf0;

        cpu_stb_data_ra(env, dest, cur_byte, ra);
    }
}

static inline uint32_t do_unpkau(CPUS390XState *env, uint64_t dest,
                                 uint32_t destlen, int dsize, uint64_t src,
                                 uintptr_t ra)
{
    int i;
    uint32_t cc;
    uint8_t b;
    /* The source operand is always 16 bytes long. */
    const int srclen = 16;

    /* The operands are processed from right to left. */
    src += srclen - 1;
    dest += destlen - dsize;

    /* Check for the sign. */
    b = cpu_ldub_data_ra(env, src, ra);
    src--;
    switch (b & 0xf) {
    case 0xa:
    case 0xc:
    case 0xe ... 0xf:
        cc = 0;  /* plus */
        break;
    case 0xb:
    case 0xd:
        cc = 1;  /* minus */
        break;
    default:
    case 0x0 ... 0x9:
        cc = 3;  /* invalid */
        break;
    }

    /* Now pad every nibble with 0x30, advancing one nibble at a time. */
    for (i = 0; i < destlen; i += dsize) {
        if (i == (31 * dsize)) {
            /* If length is 32/64 bytes, the leftmost byte is 0. */
            b = 0;
        } else if (i % (2 * dsize)) {
            b = cpu_ldub_data_ra(env, src, ra);
            src--;
        } else {
            b >>= 4;
        }
        cpu_stsize_data_ra(env, dest, 0x30 + (b & 0xf), dsize, ra);
        dest -= dsize;
    }

    return cc;
}

uint32_t HELPER(unpka)(CPUS390XState *env, uint64_t dest, uint32_t destlen,
                       uint64_t src)
{
    return do_unpkau(env, dest, destlen, 1, src, GETPC());
}

uint32_t HELPER(unpku)(CPUS390XState *env, uint64_t dest, uint32_t destlen,
                       uint64_t src)
{
    return do_unpkau(env, dest, destlen, 2, src, GETPC());
}

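/* test decimal - validate the digits and the sign of a packed decimal */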
uint32_t HELPER(tp)(CPUS390XState *env, uint64_t dest, uint32_t destlen)
{
    uintptr_t ra = GETPC();
    uint32_t cc = 0;
    int i;

    for (i = 0; i < destlen; i++) {
        uint8_t b = cpu_ldub_data_ra(env, dest + i, ra);
        /* digit */
        cc |= (b & 0xf0) > 0x90 ? 2 : 0;

        if (i == (destlen - 1)) {
            /* sign */
            cc |= (b & 0xf) < 0xa ? 1 : 0;
        } else {
            /* digit */
            cc |= (b & 0xf) > 0x9 ? 2 : 0;
        }
    }

    return cc;
}

static uint32_t do_helper_tr(CPUS390XState *env, uint32_t len, uint64_t array,
                             uint64_t trans, uintptr_t ra)
{
    uint32_t i;

    for (i = 0; i <= len; i++) {
        uint8_t byte = cpu_ldub_data_ra(env, array + i, ra);
        uint8_t new_byte = cpu_ldub_data_ra(env, trans + byte, ra);
        cpu_stb_data_ra(env, array + i, new_byte, ra);
    }

    return env->cc_op;
}

void HELPER(tr)(CPUS390XState *env, uint32_t len, uint64_t array,
                uint64_t trans)
{
    do_helper_tr(env, len, array, trans, GETPC());
}

uint64_t HELPER(tre)(CPUS390XState *env, uint64_t array,
                     uint64_t len, uint64_t trans)
{
    uintptr_t ra = GETPC();
    uint8_t end = env->regs[0] & 0xff;
    uint64_t l = len;
    uint64_t i;
    uint32_t cc = 0;

    if (!(env->psw.mask & PSW_MASK_64)) {
        array &= 0x7fffffff;
        l = (uint32_t)l;
    }

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 8k. */
    if (l > 0x2000) {
        l = 0x2000;
        cc = 3;
    }

    for (i = 0; i < l; i++) {
        uint8_t byte, new_byte;

        byte = cpu_ldub_data_ra(env, array + i, ra);

        if (byte == end) {
            cc = 1;
            break;
        }

        new_byte = cpu_ldub_data_ra(env, trans + byte, ra);
        cpu_stb_data_ra(env, array + i, new_byte, ra);
    }

    env->cc_op = cc;
    env->retxl = len - i;
    return array + i;
}

static inline uint32_t do_helper_trt(CPUS390XState *env, int len,
                                     uint64_t array, uint64_t trans,
                                     int inc, uintptr_t ra)
{
    int i;

    for (i = 0; i <= len; i++) {
        uint8_t byte = cpu_ldub_data_ra(env, array + i * inc, ra);
        uint8_t sbyte = cpu_ldub_data_ra(env, trans + byte, ra);

        if (sbyte != 0) {
            set_address(env, 1, array + i * inc);
            env->regs[2] = deposit64(env->regs[2], 0, 8, sbyte);
            return (i == len) ? 2 : 1;
        }
    }

    return 0;
}

static uint32_t do_helper_trt_fwd(CPUS390XState *env, uint32_t len,
                                  uint64_t array, uint64_t trans,
                                  uintptr_t ra)
{
    return do_helper_trt(env, len, array, trans, 1, ra);
}

uint32_t HELPER(trt)(CPUS390XState *env, uint32_t len, uint64_t array,
                     uint64_t trans)
{
    return do_helper_trt(env, len, array, trans, 1, GETPC());
}

static uint32_t do_helper_trt_bkwd(CPUS390XState *env, uint32_t len,
                                   uint64_t array, uint64_t trans,
                                   uintptr_t ra)
{
    return do_helper_trt(env, len, array, trans, -1, ra);
}

uint32_t HELPER(trtr)(CPUS390XState *env, uint32_t len, uint64_t array,
                      uint64_t trans)
{
    return do_helper_trt(env, len, array, trans, -1, GETPC());
}

/* Translate one/two to one/two */
uint32_t HELPER(trXX)(CPUS390XState *env, uint32_t r1, uint32_t r2,
                      uint32_t tst, uint32_t sizes)
{
    uintptr_t ra = GETPC();
    int dsize = (sizes & 1) ? 1 : 2;
    int ssize = (sizes & 2) ? 1 : 2;
    uint64_t tbl = get_address(env, 1);
    uint64_t dst = get_address(env, r1);
    uint64_t len = get_length(env, r1 + 1);
    uint64_t src = get_address(env, r2);
    uint32_t cc = 3;
    int i;

    /* The lower address bits of TBL are ignored.  For TROO, TROT, it's
       the low 3 bits (double-word aligned).  For TRTO, TRTT, it's either
       the low 12 bits (4K, without ETF2-ENH) or 3 bits (with ETF2-ENH). */
    if (ssize == 2 && !s390_has_feat(S390_FEAT_ETF2_ENH)) {
        tbl &= -4096;
    } else {
        tbl &= -8;
    }

    check_alignment(env, len, ssize, ra);

    /* Lest we fail to service interrupts in a timely manner,
       limit the amount of work we're willing to do. */
    for (i = 0; i < 0x2000; i++) {
        uint16_t sval = cpu_ldusize_data_ra(env, src, ssize, ra);
        uint64_t tble = tbl + (sval * dsize);
        uint16_t dval = cpu_ldusize_data_ra(env, tble, dsize, ra);
        if (dval == tst) {
            cc = 1;
            break;
        }
        cpu_stsize_data_ra(env, dst, dval, dsize, ra);

        len -= ssize;
        src += ssize;
        dst += dsize;

        if (len == 0) {
            cc = 0;
            break;
        }
    }

    set_address(env, r1, dst);
    set_length(env, r1 + 1, len);
    set_address(env, r2, src);

    return cc;
}

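/* compare double and swap (128 bit) - serial variant */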
void HELPER(cdsg)(CPUS390XState *env, uint64_t addr,
                  uint32_t r1, uint32_t r3)
{
    uintptr_t ra = GETPC();
    Int128 cmpv = int128_make128(env->regs[r1 + 1], env->regs[r1]);
    Int128 newv = int128_make128(env->regs[r3 + 1], env->regs[r3]);
    Int128 oldv;
    uint64_t oldh, oldl;
    bool fail;

    check_alignment(env, addr, 16, ra);

    oldh = cpu_ldq_data_ra(env, addr + 0, ra);
    oldl = cpu_ldq_data_ra(env, addr + 8, ra);

    oldv = int128_make128(oldl, oldh);
    fail = !int128_eq(oldv, cmpv);
    if (fail) {
        newv = oldv;
    }

    cpu_stq_data_ra(env, addr + 0, int128_gethi(newv), ra);
    cpu_stq_data_ra(env, addr + 8, int128_getlo(newv), ra);

    env->cc_op = fail;
    env->regs[r1] = int128_gethi(oldv);
    env->regs[r1 + 1] = int128_getlo(oldv);
}

void HELPER(cdsg_parallel)(CPUS390XState *env, uint64_t addr,
                           uint32_t r1, uint32_t r3)
{
    uintptr_t ra = GETPC();
    Int128 cmpv = int128_make128(env->regs[r1 + 1], env->regs[r1]);
    Int128 newv = int128_make128(env->regs[r3 + 1], env->regs[r3]);
    int mem_idx;
    TCGMemOpIdx oi;
    Int128 oldv;
    bool fail;

    assert(HAVE_CMPXCHG128);

    mem_idx = cpu_mmu_index(env, false);
    oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
    oldv = helper_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oi, ra);
    fail = !int128_eq(oldv, cmpv);

    env->cc_op = fail;
    env->regs[r1] = int128_gethi(oldv);
    env->regs[r1 + 1] = int128_getlo(oldv);
}

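/*
 * COMPARE AND SWAP AND STORE: compare-and-swap at a1 and, on success,
 * store a second value at a2. fc selects the swap size (4, 8 or 16
 * bytes), sc the store size (1 << sc bytes).
 */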
static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1,
                        uint64_t a2, bool parallel)
{
    uint32_t mem_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();
    uint32_t fc = extract32(env->regs[0], 0, 8);
    uint32_t sc = extract32(env->regs[0], 8, 8);
    uint64_t pl = get_address(env, 1) & -16;
    uint64_t svh, svl;
    uint32_t cc;

    /* Sanity check the function code and storage characteristic. */
    if (fc > 1 || sc > 3) {
        if (!s390_has_feat(S390_FEAT_COMPARE_AND_SWAP_AND_STORE_2)) {
            goto spec_exception;
        }
        if (fc > 2 || sc > 4 || (fc == 2 && (r3 & 1))) {
            goto spec_exception;
        }
    }

    /* Sanity check the alignments. */
    if (extract32(a1, 0, fc + 2) || extract32(a2, 0, sc)) {
        goto spec_exception;
    }

    /* Sanity check writability of the store address. */
    probe_write(env, a2, 1 << sc, mem_idx, ra);

    /*
     * Note that the compare-and-swap is atomic, and the store is atomic,
     * but the complete operation is not.  Therefore we do not need to
     * assert serial context in order to implement this.  That said,
     * restart early if we can't support either operation that is supposed
     * to be atomic.
     */
    if (parallel) {
        uint32_t max = 2;
#ifdef CONFIG_ATOMIC64
        max = 3;
#endif
        if ((HAVE_CMPXCHG128 ? 0 : fc + 2 > max) ||
            (HAVE_ATOMIC128 ? 0 : sc > max)) {
            cpu_loop_exit_atomic(env_cpu(env), ra);
        }
    }

    /* All loads happen before all stores.  For simplicity, load the entire
       store value area from the parameter list. */
    svh = cpu_ldq_data_ra(env, pl + 16, ra);
    svl = cpu_ldq_data_ra(env, pl + 24, ra);

    switch (fc) {
    case 0:
        {
            uint32_t nv = cpu_ldl_data_ra(env, pl, ra);
            uint32_t cv = env->regs[r3];
            uint32_t ov;

            if (parallel) {
#ifdef CONFIG_USER_ONLY
                uint32_t *haddr = g2h(a1);
                ov = atomic_cmpxchg__nocheck(haddr, cv, nv);
#else
                TCGMemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN, mem_idx);
                ov = helper_atomic_cmpxchgl_be_mmu(env, a1, cv, nv, oi, ra);
#endif
            } else {
                ov = cpu_ldl_data_ra(env, a1, ra);
                cpu_stl_data_ra(env, a1, (ov == cv ? nv : ov), ra);
            }
            cc = (ov != cv);
            env->regs[r3] = deposit64(env->regs[r3], 32, 32, ov);
        }
        break;

    case 1:
        {
            uint64_t nv = cpu_ldq_data_ra(env, pl, ra);
            uint64_t cv = env->regs[r3];
            uint64_t ov;

            if (parallel) {
#ifdef CONFIG_ATOMIC64
# ifdef CONFIG_USER_ONLY
                uint64_t *haddr = g2h(a1);
                ov = atomic_cmpxchg__nocheck(haddr, cv, nv);
# else
                TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN, mem_idx);
                ov = helper_atomic_cmpxchgq_be_mmu(env, a1, cv, nv, oi, ra);
# endif
#else
                /* Note that we asserted !parallel above. */
                g_assert_not_reached();
#endif
            } else {
                ov = cpu_ldq_data_ra(env, a1, ra);
                cpu_stq_data_ra(env, a1, (ov == cv ? nv : ov), ra);
            }
            cc = (ov != cv);
            env->regs[r3] = ov;
        }
        break;

    case 2:
        {
            uint64_t nvh = cpu_ldq_data_ra(env, pl, ra);
            uint64_t nvl = cpu_ldq_data_ra(env, pl + 8, ra);
            Int128 nv = int128_make128(nvl, nvh);
            Int128 cv = int128_make128(env->regs[r3 + 1], env->regs[r3]);
            Int128 ov;

            if (!parallel) {
                uint64_t oh = cpu_ldq_data_ra(env, a1 + 0, ra);
                uint64_t ol = cpu_ldq_data_ra(env, a1 + 8, ra);

                ov = int128_make128(ol, oh);
                cc = !int128_eq(ov, cv);
                if (cc) {
                    nv = ov;
                }

                cpu_stq_data_ra(env, a1 + 0, int128_gethi(nv), ra);
                cpu_stq_data_ra(env, a1 + 8, int128_getlo(nv), ra);
            } else if (HAVE_CMPXCHG128) {
                TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
                ov = helper_atomic_cmpxchgo_be_mmu(env, a1, cv, nv, oi, ra);
                cc = !int128_eq(ov, cv);
            } else {
                /* Note that we asserted !parallel above. */
                g_assert_not_reached();
            }

            env->regs[r3 + 0] = int128_gethi(ov);
            env->regs[r3 + 1] = int128_getlo(ov);
        }
        break;

    default:
        g_assert_not_reached();
    }

    /* Store only if the comparison succeeded.  Note that above we use a pair
       of 64-bit big-endian loads, so for sc < 3 we must extract the value
       from the most-significant bits of svh. */
    if (cc == 0) {
        switch (sc) {
        case 0:
            cpu_stb_data_ra(env, a2, svh >> 56, ra);
            break;
        case 1:
            cpu_stw_data_ra(env, a2, svh >> 48, ra);
            break;
        case 2:
            cpu_stl_data_ra(env, a2, svh >> 32, ra);
            break;
        case 3:
            cpu_stq_data_ra(env, a2, svh, ra);
            break;
        case 4:
            if (!parallel) {
                cpu_stq_data_ra(env, a2 + 0, svh, ra);
                cpu_stq_data_ra(env, a2 + 8, svl, ra);
            } else if (HAVE_ATOMIC128) {
                TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
                Int128 sv = int128_make128(svl, svh);
                helper_atomic_sto_be_mmu(env, a2, sv, oi, ra);
            } else {
                /* Note that we asserted !parallel above. */
                g_assert_not_reached();
            }
            break;
        default:
            g_assert_not_reached();
        }
    }

    return cc;

 spec_exception:
    s390_program_interrupt(env, PGM_SPECIFICATION, 6, ra);
    g_assert_not_reached();
}

uint32_t HELPER(csst)(CPUS390XState *env, uint32_t r3, uint64_t a1, uint64_t a2)
{
    return do_csst(env, r3, a1, a2, false);
}

uint32_t HELPER(csst_parallel)(CPUS390XState *env, uint32_t r3, uint64_t a1,
                               uint64_t a2)
{
    return do_csst(env, r3, a1, a2, true);
}

#if !defined(CONFIG_USER_ONLY)
void HELPER(lctlg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    uintptr_t ra = GETPC();
    bool PERchanged = false;
    uint64_t src = a2;
    uint32_t i;

    if (src & 0x7) {
        s390_program_interrupt(env, PGM_SPECIFICATION, 6, ra);
    }

    for (i = r1;; i = (i + 1) % 16) {
        uint64_t val = cpu_ldq_data_ra(env, src, ra);
        if (env->cregs[i] != val && i >= 9 && i <= 11) {
            PERchanged = true;
        }
        env->cregs[i] = val;
        HELPER_LOG("load ctl %d from 0x%" PRIx64 " == 0x%" PRIx64 "\n",
                   i, src, val);
        src += sizeof(uint64_t);

        if (i == r3) {
            break;
        }
    }

    if (PERchanged && env->psw.mask & PSW_MASK_PER) {
        s390_cpu_recompute_watchpoints(env_cpu(env));
    }

    tlb_flush(env_cpu(env));
}

void HELPER(lctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    uintptr_t ra = GETPC();
    bool PERchanged = false;
    uint64_t src = a2;
    uint32_t i;

    if (src & 0x3) {
        s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
    }

    for (i = r1;; i = (i + 1) % 16) {
        uint32_t val = cpu_ldl_data_ra(env, src, ra);
        if ((uint32_t)env->cregs[i] != val && i >= 9 && i <= 11) {
            PERchanged = true;
        }
        env->cregs[i] = deposit64(env->cregs[i], 0, 32, val);
        HELPER_LOG("load ctl %d from 0x%" PRIx64 " == 0x%x\n", i, src, val);
        src += sizeof(uint32_t);

        if (i == r3) {
            break;
        }
    }

    if (PERchanged && env->psw.mask & PSW_MASK_PER) {
        s390_cpu_recompute_watchpoints(env_cpu(env));
    }

    tlb_flush(env_cpu(env));
}

void HELPER(stctg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    uintptr_t ra = GETPC();
    uint64_t dest = a2;
    uint32_t i;

    if (dest & 0x7) {
        s390_program_interrupt(env, PGM_SPECIFICATION, 6, ra);
    }

    for (i = r1;; i = (i + 1) % 16) {
        cpu_stq_data_ra(env, dest, env->cregs[i], ra);
        dest += sizeof(uint64_t);

        if (i == r3) {
            break;
        }
    }
}

void HELPER(stctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    uintptr_t ra = GETPC();
    uint64_t dest = a2;
    uint32_t i;

    if (dest & 0x3) {
        s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
    }

    for (i = r1;; i = (i + 1) % 16) {
        cpu_stl_data_ra(env, dest, env->cregs[i], ra);
        dest += sizeof(uint32_t);

        if (i == r3) {
            break;
        }
    }
}

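/* test block - zero the 4k block at the given real address */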
uint32_t HELPER(testblock)(CPUS390XState *env, uint64_t real_addr)
{
    uintptr_t ra = GETPC();
    int i;

    real_addr = wrap_address(env, real_addr) & TARGET_PAGE_MASK;

    for (i = 0; i < TARGET_PAGE_SIZE; i += 8) {
        cpu_stq_real_ra(env, real_addr + i, 0, ra);
    }

    return 0;
}

uint32_t HELPER(tprot)(CPUS390XState *env, uint64_t a1, uint64_t a2)
{
    S390CPU *cpu = env_archcpu(env);
    CPUState *cs = env_cpu(env);

    /*
     * TODO: we currently don't handle all access protection types
     * (including access-list and key-controlled) as well as AR mode.
     */
    if (!s390_cpu_virt_mem_check_write(cpu, a1, 0, 1)) {
        /* Fetching permitted; storing permitted */
        return 0;
    }

    if (env->int_pgm_code == PGM_PROTECTION) {
        /* retry if reading is possible */
        cs->exception_index = -1;
        if (!s390_cpu_virt_mem_check_read(cpu, a1, 0, 1)) {
            /* Fetching permitted; storing not permitted */
            return 1;
        }
    }

    switch (env->int_pgm_code) {
    case PGM_PROTECTION:
        /* Fetching not permitted; storing not permitted */
        cs->exception_index = -1;
        return 2;
    case PGM_ADDRESSING:
    case PGM_TRANS_SPEC:
        /* exceptions forwarded to the guest */
        s390_cpu_virt_mem_handle_exc(cpu, GETPC());
        return 0;
    }

    /* Translation not available */
    cs->exception_index = -1;
    return 3;
}

/* insert storage key extended */
uint64_t HELPER(iske)(CPUS390XState *env, uint64_t r2)
{
    static S390SKeysState *ss;
    static S390SKeysClass *skeyclass;
    uint64_t addr = wrap_address(env, r2);
    uint8_t key;

    if (addr > ram_size) {
        return 0;
    }

    if (unlikely(!ss)) {
        ss = s390_get_skeys_device();
        skeyclass = S390_SKEYS_GET_CLASS(ss);
    }

    if (skeyclass->get_skeys(ss, addr / TARGET_PAGE_SIZE, 1, &key)) {
        return 0;
    }
    return key;
}

/* set storage key extended */
void HELPER(sske)(CPUS390XState *env, uint64_t r1, uint64_t r2)
{
    static S390SKeysState *ss;
    static S390SKeysClass *skeyclass;
    uint64_t addr = wrap_address(env, r2);
    uint8_t key;

    if (addr > ram_size) {
        return;
    }

    if (unlikely(!ss)) {
        ss = s390_get_skeys_device();
        skeyclass = S390_SKEYS_GET_CLASS(ss);
    }

    key = (uint8_t) r1;
    skeyclass->set_skeys(ss, addr / TARGET_PAGE_SIZE, 1, &key);
    /*
     * As we can only flush by virtual address and not all the entries
     * that point to a physical address we have to flush the whole TLB.
     */
    tlb_flush_all_cpus_synced(env_cpu(env));
}

/* reset reference bit extended */
uint32_t HELPER(rrbe)(CPUS390XState *env, uint64_t r2)
{
    static S390SKeysState *ss;
    static S390SKeysClass *skeyclass;
    uint8_t re, key;

    if (r2 > ram_size) {
        return 0;
    }

    if (unlikely(!ss)) {
        ss = s390_get_skeys_device();
        skeyclass = S390_SKEYS_GET_CLASS(ss);
    }

    if (skeyclass->get_skeys(ss, r2 / TARGET_PAGE_SIZE, 1, &key)) {
        return 0;
    }

    re = key & (SK_R | SK_C);
    key &= ~SK_R;

    if (skeyclass->set_skeys(ss, r2 / TARGET_PAGE_SIZE, 1, &key)) {
        return 0;
    }
    /*
     * As we can only flush by virtual address and not all the entries
     * that point to a physical address we have to flush the whole TLB.
     */
    tlb_flush_all_cpus_synced(env_cpu(env));

    /*
     * cc
     *
     * 0  Reference bit zero; change bit zero
     * 1  Reference bit zero; change bit one
     * 2  Reference bit one; change bit zero
     * 3  Reference bit one; change bit one
     */
    return re >> 1;
}

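/* move to secondary - copy from the primary to the secondary address space */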
2119 uint32_t HELPER(mvcs)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2)
2121 const uint8_t psw_as = (env->psw.mask & PSW_MASK_ASC) >> PSW_SHIFT_ASC;
2122 S390Access srca, desta;
2123 uintptr_t ra = GETPC();
2124 int cc = 0;
2126 HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
2127 __func__, l, a1, a2);
2129 if (!(env->psw.mask & PSW_MASK_DAT) || !(env->cregs[0] & CR0_SECONDARY) ||
2130 psw_as == AS_HOME || psw_as == AS_ACCREG) {
2131 s390_program_interrupt(env, PGM_SPECIAL_OP, ILEN_AUTO, ra);
2134 l = wrap_length32(env, l);
2135 if (l > 256) {
2136 /* max 256 */
2137 l = 256;
2138 cc = 3;
2139 } else if (!l) {
2140 return cc;
2143 /* TODO: Access key handling */
2144 srca = access_prepare(env, a2, l, MMU_DATA_LOAD, MMU_PRIMARY_IDX, ra);
2145 desta = access_prepare(env, a1, l, MMU_DATA_STORE, MMU_SECONDARY_IDX, ra);
2146 access_memmove(env, &desta, &srca, ra);
2147 return cc;

uint32_t HELPER(mvcp)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2)
{
    const uint8_t psw_as = (env->psw.mask & PSW_MASK_ASC) >> PSW_SHIFT_ASC;
    S390Access srca, desta;
    uintptr_t ra = GETPC();
    int cc = 0;

    HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
               __func__, l, a1, a2);

    if (!(env->psw.mask & PSW_MASK_DAT) || !(env->cregs[0] & CR0_SECONDARY) ||
        psw_as == AS_HOME || psw_as == AS_ACCREG) {
        s390_program_interrupt(env, PGM_SPECIAL_OP, ILEN_AUTO, ra);
    }

    l = wrap_length32(env, l);
    if (l > 256) {
        /* max 256 */
        l = 256;
        cc = 3;
    } else if (!l) {
        return cc;
    }

    /* TODO: Access key handling */
    srca = access_prepare(env, a2, l, MMU_DATA_LOAD, MMU_SECONDARY_IDX, ra);
    desta = access_prepare(env, a1, l, MMU_DATA_STORE, MMU_PRIMARY_IDX, ra);
    access_memmove(env, &desta, &srca, ra);
    return cc;
}
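
/*
 * INVALIDATE DAT TABLE ENTRY. Unless r2 bit 0x800 is set, the
 * designated range of table entries is marked invalid in storage; in
 * all cases the TLB is then over-invalidated by flushing it
 * completely, which is why r3 can be ignored (see below).
 */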
void HELPER(idte)(CPUS390XState *env, uint64_t r1, uint64_t r2, uint32_t m4)
{
    CPUState *cs = env_cpu(env);
    const uintptr_t ra = GETPC();
    uint64_t table, entry, raddr;
    uint16_t entries, i, index = 0;

    if (r2 & 0xff000) {
        s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
    }

    if (!(r2 & 0x800)) {
        /* invalidation-and-clearing operation */
        table = r1 & ASCE_ORIGIN;
        entries = (r2 & 0x7ff) + 1;

        switch (r1 & ASCE_TYPE_MASK) {
        case ASCE_TYPE_REGION1:
            index = (r2 >> 53) & 0x7ff;
            break;
        case ASCE_TYPE_REGION2:
            index = (r2 >> 42) & 0x7ff;
            break;
        case ASCE_TYPE_REGION3:
            index = (r2 >> 31) & 0x7ff;
            break;
        case ASCE_TYPE_SEGMENT:
            index = (r2 >> 20) & 0x7ff;
            break;
        }
        for (i = 0; i < entries; i++) {
            /* addresses are not wrapped in 24/31-bit mode, but the table index is */
            raddr = table + ((index + i) & 0x7ff) * sizeof(entry);
            entry = cpu_ldq_real_ra(env, raddr, ra);
            if (!(entry & REGION_ENTRY_INV)) {
                /* we are allowed to not store if already invalid */
                entry |= REGION_ENTRY_INV;
                cpu_stq_real_ra(env, raddr, entry, ra);
            }
        }
    }

    /* We simply flush the complete TLB, therefore we can ignore r3. */
    if (m4 & 1) {
        tlb_flush(cs);
    } else {
        tlb_flush_all_cpus_synced(cs);
    }
}

/* invalidate pte */
void HELPER(ipte)(CPUS390XState *env, uint64_t pto, uint64_t vaddr,
                  uint32_t m4)
{
    CPUState *cs = env_cpu(env);
    const uintptr_t ra = GETPC();
    uint64_t page = vaddr & TARGET_PAGE_MASK;
    uint64_t pte_addr, pte;

    /* Compute the page table entry address */
    pte_addr = (pto & SEGMENT_ENTRY_ORIGIN);
    pte_addr += (vaddr & VADDR_PX) >> 9;
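    /*
     * Note: the page index (VADDR_PX) sits at bit 12; shifting right
     * by 9 rather than 12 implicitly multiplies the index by the
     * 8-byte entry size, yielding the byte offset into the page table.
     */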

    /* Mark the page table entry as invalid */
    pte = cpu_ldq_real_ra(env, pte_addr, ra);
    pte |= PAGE_INVALID;
    cpu_stq_real_ra(env, pte_addr, pte, ra);

    /* XXX we exploit the fact that Linux passes the exact virtual
       address here - it's not obliged to! */
    if (m4 & 1) {
        if (vaddr & ~VADDR_PX) {
            tlb_flush_page(cs, page);
            /* XXX 31-bit hack */
            tlb_flush_page(cs, page ^ 0x80000000);
        } else {
            /* looks like we don't have a valid virtual address */
            tlb_flush(cs);
        }
    } else {
        if (vaddr & ~VADDR_PX) {
            tlb_flush_page_all_cpus_synced(cs, page);
            /* XXX 31-bit hack */
            tlb_flush_page_all_cpus_synced(cs, page ^ 0x80000000);
        } else {
            /* looks like we don't have a valid virtual address */
            tlb_flush_all_cpus_synced(cs);
        }
    }
}

/* flush local tlb */
void HELPER(ptlb)(CPUS390XState *env)
{
    tlb_flush(env_cpu(env));
}

/* flush global tlb */
void HELPER(purge)(CPUS390XState *env)
{
    tlb_flush_all_cpus_synced(env_cpu(env));
}

/* load using real address */
uint64_t HELPER(lura)(CPUS390XState *env, uint64_t addr)
{
    return cpu_ldl_real_ra(env, wrap_address(env, addr), GETPC());
}

uint64_t HELPER(lurag)(CPUS390XState *env, uint64_t addr)
{
    return cpu_ldq_real_ra(env, wrap_address(env, addr), GETPC());
}
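
/*
 * Stores via real address must additionally raise a PER
 * storage-alteration event when PER is enabled in the PSW and CR9
 * selects store as well as store-using-real-address events, hence the
 * checks in both store helpers below.
 */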
/* store using real address */
void HELPER(stura)(CPUS390XState *env, uint64_t addr, uint64_t v1)
{
    cpu_stl_real_ra(env, wrap_address(env, addr), (uint32_t)v1, GETPC());

    if ((env->psw.mask & PSW_MASK_PER) &&
        (env->cregs[9] & PER_CR9_EVENT_STORE) &&
        (env->cregs[9] & PER_CR9_EVENT_STORE_REAL)) {
        /* PSW is saved just before calling the helper. */
        env->per_address = env->psw.addr;
        env->per_perc_atmid = PER_CODE_EVENT_STORE_REAL | get_per_atmid(env);
    }
}

void HELPER(sturg)(CPUS390XState *env, uint64_t addr, uint64_t v1)
{
    cpu_stq_real_ra(env, wrap_address(env, addr), v1, GETPC());

    if ((env->psw.mask & PSW_MASK_PER) &&
        (env->cregs[9] & PER_CR9_EVENT_STORE) &&
        (env->cregs[9] & PER_CR9_EVENT_STORE_REAL)) {
        /* PSW is saved just before calling the helper. */
        env->per_address = env->psw.addr;
        env->per_perc_atmid = PER_CODE_EVENT_STORE_REAL | get_per_atmid(env);
    }
}

/* load real address */
uint64_t HELPER(lra)(CPUS390XState *env, uint64_t addr)
{
    CPUState *cs = env_cpu(env);
    uint32_t cc = 0;
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;
    uint64_t ret;
    int old_exc, flags;

    /* XXX incomplete - has more corner cases */
    if (!(env->psw.mask & PSW_MASK_64) && (addr >> 32)) {
        s390_program_interrupt(env, PGM_SPECIAL_OP, 2, GETPC());
    }

    old_exc = cs->exception_index;
    if (mmu_translate(env, addr, 0, asc, &ret, &flags, true)) {
        cc = 3;
    }
    if (cs->exception_index == EXCP_PGM) {
        ret = env->int_pgm_code | 0x80000000;
    } else {
        ret |= addr & ~TARGET_PAGE_MASK;
    }
    cs->exception_index = old_exc;

    env->cc_op = cc;
    return ret;
}
#endif

/* load pair from quadword */
uint64_t HELPER(lpq)(CPUS390XState *env, uint64_t addr)
{
    uintptr_t ra = GETPC();
    uint64_t hi, lo;

    check_alignment(env, addr, 16, ra);
    hi = cpu_ldq_data_ra(env, addr + 0, ra);
    lo = cpu_ldq_data_ra(env, addr + 8, ra);

    env->retxl = lo;
    return hi;
}
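
/*
 * The _parallel variant is used when other CPUs may race on the same
 * quadword: it performs the 16-byte access as a single atomic
 * operation and is only reachable when HAVE_ATOMIC128 is true.
 */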
uint64_t HELPER(lpq_parallel)(CPUS390XState *env, uint64_t addr)
{
    uintptr_t ra = GETPC();
    uint64_t hi, lo;
    int mem_idx;
    TCGMemOpIdx oi;
    Int128 v;

    assert(HAVE_ATOMIC128);

    mem_idx = cpu_mmu_index(env, false);
    oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
    v = helper_atomic_ldo_be_mmu(env, addr, oi, ra);
    hi = int128_gethi(v);
    lo = int128_getlo(v);

    env->retxl = lo;
    return hi;
}

/* store pair to quadword */
void HELPER(stpq)(CPUS390XState *env, uint64_t addr,
                  uint64_t low, uint64_t high)
{
    uintptr_t ra = GETPC();

    check_alignment(env, addr, 16, ra);
    cpu_stq_data_ra(env, addr + 0, high, ra);
    cpu_stq_data_ra(env, addr + 8, low, ra);
}

void HELPER(stpq_parallel)(CPUS390XState *env, uint64_t addr,
                           uint64_t low, uint64_t high)
{
    uintptr_t ra = GETPC();
    int mem_idx;
    TCGMemOpIdx oi;
    Int128 v;

    assert(HAVE_ATOMIC128);

    mem_idx = cpu_mmu_index(env, false);
    oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
    v = int128_make128(low, high);
    helper_atomic_sto_be_mmu(env, addr, v, oi, ra);
}

/* Execute instruction. This instruction executes an insn modified with
   the contents of r1. It does not change the executed instruction in memory;
   it does not change the program counter.

   Perform this by recording the modified instruction in env->ex_value.
   This will be noticed by cpu_get_tb_cpu_state and thus tb translation. */
void HELPER(ex)(CPUS390XState *env, uint32_t ilen, uint64_t r1, uint64_t addr)
{
    uint64_t insn = cpu_lduw_code(env, addr);
    uint8_t opc = insn >> 8;

    /* Or in the contents of R1[56:63]. */
    insn |= r1 & 0xff;

    /* Load the rest of the instruction. */
    insn <<= 48;
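    /*
     * The first halfword now occupies bits 63..48; any remaining
     * halfwords are ORed in right below it, so the complete insn ends
     * up left-aligned in the 64-bit value.
     */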
    switch (get_ilen(opc)) {
    case 2:
        break;
    case 4:
        insn |= (uint64_t)cpu_lduw_code(env, addr + 2) << 32;
        break;
    case 6:
        insn |= (uint64_t)(uint32_t)cpu_ldl_code(env, addr + 2) << 16;
        break;
    default:
        g_assert_not_reached();
    }
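
    /*
     * Opcodes 0xd0-0xdf are the SS-format storage-to-storage ops; their
     * length and base/displacement fields can be decoded uniformly,
     * which is what makes the fast path below possible.
     */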
    /* The very most common cases can be sped up by avoiding a new TB. */
    if ((opc & 0xf0) == 0xd0) {
        typedef uint32_t (*dx_helper)(CPUS390XState *, uint32_t, uint64_t,
                                      uint64_t, uintptr_t);
        static const dx_helper dx[16] = {
            [0x0] = do_helper_trt_bkwd,
            [0x2] = do_helper_mvc,
            [0x4] = do_helper_nc,
            [0x5] = do_helper_clc,
            [0x6] = do_helper_oc,
            [0x7] = do_helper_xc,
            [0xc] = do_helper_tr,
            [0xd] = do_helper_trt_fwd,
        };
        dx_helper helper = dx[opc & 0xf];

        if (helper) {
            uint32_t l = extract64(insn, 48, 8);
            uint32_t b1 = extract64(insn, 44, 4);
            uint32_t d1 = extract64(insn, 32, 12);
            uint32_t b2 = extract64(insn, 28, 4);
            uint32_t d2 = extract64(insn, 16, 12);
            uint64_t a1 = wrap_address(env, env->regs[b1] + d1);
            uint64_t a2 = wrap_address(env, env->regs[b2] + d2);

            env->cc_op = helper(env, l, a1, a2, 0);
            env->psw.addr += ilen;
            return;
        }
    } else if (opc == 0x0a) {
        env->int_svc_code = extract64(insn, 48, 8);
        env->int_svc_ilen = ilen;
        helper_exception(env, EXCP_SVC);
        g_assert_not_reached();
    }

    /* Record the insn we want to execute as well as the ilen to use
       during the execution of the target insn. This will also ensure
       that ex_value is non-zero, which flags that we are in a state
       that requires such execution. */
    env->ex_value = insn | ilen;
}

uint32_t HELPER(mvcos)(CPUS390XState *env, uint64_t dest, uint64_t src,
                       uint64_t len)
{
    const uint8_t psw_key = (env->psw.mask & PSW_MASK_KEY) >> PSW_SHIFT_KEY;
    const uint8_t psw_as = (env->psw.mask & PSW_MASK_ASC) >> PSW_SHIFT_ASC;
    const uint64_t r0 = env->regs[0];
    const uintptr_t ra = GETPC();
    uint8_t dest_key, dest_as, dest_k, dest_a;
    uint8_t src_key, src_as, src_k, src_a;
    uint64_t val;
    int cc = 0;

    HELPER_LOG("%s dest %" PRIx64 ", src %" PRIx64 ", len %" PRIx64 "\n",
               __func__, dest, src, len);

    if (!(env->psw.mask & PSW_MASK_DAT)) {
        s390_program_interrupt(env, PGM_SPECIAL_OP, 6, ra);
    }
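
    /*
     * Each 16-bit OAC in r0 holds the access key in bits 0-3, the
     * address-space control in bits 8-9, and the key/AS validity bits
     * (K and A) in bits 14 and 15, which is what the shifts below pick
     * apart.
     */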
    /* OAC (operand access control) for the first operand -> dest */
    val = (r0 & 0xffff0000ULL) >> 16;
    dest_key = (val >> 12) & 0xf;
    dest_as = (val >> 6) & 0x3;
    dest_k = (val >> 1) & 0x1;
    dest_a = val & 0x1;

    /* OAC (operand access control) for the second operand -> src */
    val = (r0 & 0x0000ffffULL);
    src_key = (val >> 12) & 0xf;
    src_as = (val >> 6) & 0x3;
    src_k = (val >> 1) & 0x1;
    src_a = val & 0x1;

    if (!dest_k) {
        dest_key = psw_key;
    }
    if (!src_k) {
        src_key = psw_key;
    }
    if (!dest_a) {
        dest_as = psw_as;
    }
    if (!src_a) {
        src_as = psw_as;
    }

    if (dest_a && dest_as == AS_HOME && (env->psw.mask & PSW_MASK_PSTATE)) {
        s390_program_interrupt(env, PGM_SPECIAL_OP, 6, ra);
    }
    if (!(env->cregs[0] & CR0_SECONDARY) &&
        (dest_as == AS_SECONDARY || src_as == AS_SECONDARY)) {
        s390_program_interrupt(env, PGM_SPECIAL_OP, 6, ra);
    }
    if (!psw_key_valid(env, dest_key) || !psw_key_valid(env, src_key)) {
        s390_program_interrupt(env, PGM_PRIVILEGED, 6, ra);
    }

    len = wrap_length32(env, len);
    if (len > 4096) {
        cc = 3;
        len = 4096;
    }

    /* FIXME: AR-mode and proper problem state mode (using PSW keys) missing */
    if (src_as == AS_ACCREG || dest_as == AS_ACCREG ||
        (env->psw.mask & PSW_MASK_PSTATE)) {
        qemu_log_mask(LOG_UNIMP, "%s: AR-mode and PSTATE support missing\n",
                      __func__);
        s390_program_interrupt(env, PGM_ADDRESSING, 6, ra);
    }

    /* FIXME: Access using correct keys and AR-mode */
    if (len) {
        S390Access srca = access_prepare(env, src, len, MMU_DATA_LOAD,
                                         mmu_idx_from_as(src_as), ra);
        S390Access desta = access_prepare(env, dest, len, MMU_DATA_STORE,
                                          mmu_idx_from_as(dest_as), ra);

        access_memmove(env, &desta, &srca, ra);
    }

    return cc;
}

/* Decode a Unicode character. A return value < 0 indicates success, storing
   the UTF-32 result into OCHAR and the input length into OLEN. A return
   value >= 0 indicates failure, and the CC value to be returned. */
typedef int (*decode_unicode_fn)(CPUS390XState *env, uint64_t addr,
                                 uint64_t ilen, bool enh_check, uintptr_t ra,
                                 uint32_t *ochar, uint32_t *olen);

/* Encode a Unicode character. A return value < 0 indicates success, storing
   the bytes into ADDR and the output length into OLEN. A return value >= 0
   indicates failure, and the CC value to be returned. */
typedef int (*encode_unicode_fn)(CPUS390XState *env, uint64_t addr,
                                 uint64_t ilen, uintptr_t ra, uint32_t c,
                                 uint32_t *olen);

static int decode_utf8(CPUS390XState *env, uint64_t addr, uint64_t ilen,
                       bool enh_check, uintptr_t ra,
                       uint32_t *ochar, uint32_t *olen)
{
    uint8_t s0, s1, s2, s3;
    uint32_t c, l;

    if (ilen < 1) {
        return 0;
    }
    s0 = cpu_ldub_data_ra(env, addr, ra);
    if (s0 <= 0x7f) {
        /* one byte character */
        l = 1;
        c = s0;
    } else if (s0 <= (enh_check ? 0xc1 : 0xbf)) {
        /* invalid character */
        return 2;
    } else if (s0 <= 0xdf) {
        /* two byte character */
        l = 2;
        if (ilen < 2) {
            return 0;
        }
        s1 = cpu_ldub_data_ra(env, addr + 1, ra);
        c = s0 & 0x1f;
        c = (c << 6) | (s1 & 0x3f);
        if (enh_check && (s1 & 0xc0) != 0x80) {
            return 2;
        }
    } else if (s0 <= 0xef) {
        /* three byte character */
        l = 3;
        if (ilen < 3) {
            return 0;
        }
        s1 = cpu_ldub_data_ra(env, addr + 1, ra);
        s2 = cpu_ldub_data_ra(env, addr + 2, ra);
        c = s0 & 0x0f;
        c = (c << 6) | (s1 & 0x3f);
        c = (c << 6) | (s2 & 0x3f);
        /* Fold the byte-by-byte range descriptions in the PoO into
           tests against the complete value. It disallows encodings
           that could be smaller, and the UTF-16 surrogates. */
        if (enh_check
            && ((s1 & 0xc0) != 0x80
                || (s2 & 0xc0) != 0x80
                || c < 0x1000
                || (c >= 0xd800 && c <= 0xdfff))) {
            return 2;
        }
    } else if (s0 <= (enh_check ? 0xf4 : 0xf7)) {
        /* four byte character */
        l = 4;
        if (ilen < 4) {
            return 0;
        }
        s1 = cpu_ldub_data_ra(env, addr + 1, ra);
        s2 = cpu_ldub_data_ra(env, addr + 2, ra);
        s3 = cpu_ldub_data_ra(env, addr + 3, ra);
        c = s0 & 0x07;
        c = (c << 6) | (s1 & 0x3f);
        c = (c << 6) | (s2 & 0x3f);
        c = (c << 6) | (s3 & 0x3f);
        /* See above. */
        if (enh_check
            && ((s1 & 0xc0) != 0x80
                || (s2 & 0xc0) != 0x80
                || (s3 & 0xc0) != 0x80
                || c < 0x010000
                || c > 0x10ffff)) {
            return 2;
        }
    } else {
        /* invalid character */
        return 2;
    }

    *ochar = c;
    *olen = l;
    return -1;
}
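
/*
 * Surrogate decoding, worked example: U+10437 is D801 DC37 in UTF-16.
 * extract32(0xd801, 6, 4) + 1 = 1; folding in the low 6 bits of the
 * high surrogate gives 0x41, and appending the 10 payload bits of the
 * low surrogate (0x037) yields (0x41 << 10) | 0x037 = 0x10437.
 */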
static int decode_utf16(CPUS390XState *env, uint64_t addr, uint64_t ilen,
                        bool enh_check, uintptr_t ra,
                        uint32_t *ochar, uint32_t *olen)
{
    uint16_t s0, s1;
    uint32_t c, l;

    if (ilen < 2) {
        return 0;
    }
    s0 = cpu_lduw_data_ra(env, addr, ra);
    if ((s0 & 0xfc00) != 0xd800) {
        /* one word character */
        l = 2;
        c = s0;
    } else {
        /* two word character */
        l = 4;
        if (ilen < 4) {
            return 0;
        }
        s1 = cpu_lduw_data_ra(env, addr + 2, ra);
        c = extract32(s0, 6, 4) + 1;
        c = (c << 6) | (s0 & 0x3f);
        c = (c << 10) | (s1 & 0x3ff);
        if (enh_check && (s1 & 0xfc00) != 0xdc00) {
            /* invalid surrogate character */
            return 2;
        }
    }

    *ochar = c;
    *olen = l;
    return -1;
}

static int decode_utf32(CPUS390XState *env, uint64_t addr, uint64_t ilen,
                        bool enh_check, uintptr_t ra,
                        uint32_t *ochar, uint32_t *olen)
{
    uint32_t c;

    if (ilen < 4) {
        return 0;
    }
    c = cpu_ldl_data_ra(env, addr, ra);
    if ((c >= 0xd800 && c <= 0xdbff) || c > 0x10ffff) {
        /* invalid unicode character */
        return 2;
    }

    *ochar = c;
    *olen = 4;
    return -1;
}

static int encode_utf8(CPUS390XState *env, uint64_t addr, uint64_t ilen,
                       uintptr_t ra, uint32_t c, uint32_t *olen)
{
    uint8_t d[4];
    uint32_t l, i;

    if (c <= 0x7f) {
        /* one byte character */
        l = 1;
        d[0] = c;
    } else if (c <= 0x7ff) {
        /* two byte character */
        l = 2;
        d[1] = 0x80 | extract32(c, 0, 6);
        d[0] = 0xc0 | extract32(c, 6, 5);
    } else if (c <= 0xffff) {
        /* three byte character */
        l = 3;
        d[2] = 0x80 | extract32(c, 0, 6);
        d[1] = 0x80 | extract32(c, 6, 6);
        d[0] = 0xe0 | extract32(c, 12, 4);
    } else {
        /* four byte character */
        l = 4;
        d[3] = 0x80 | extract32(c, 0, 6);
        d[2] = 0x80 | extract32(c, 6, 6);
        d[1] = 0x80 | extract32(c, 12, 6);
        d[0] = 0xf0 | extract32(c, 18, 3);
    }

    if (ilen < l) {
        return 1;
    }
    for (i = 0; i < l; ++i) {
        cpu_stb_data_ra(env, addr + i, d[i], ra);
    }

    *olen = l;
    return -1;
}
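
/*
 * Surrogate encoding, the inverse of the decode above: bits 16-20 of
 * the codepoint, minus one, are deposited into bits 6-9 of the high
 * surrogate. E.g. U+10437 encodes back to D801 DC37.
 */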
static int encode_utf16(CPUS390XState *env, uint64_t addr, uint64_t ilen,
                        uintptr_t ra, uint32_t c, uint32_t *olen)
{
    uint16_t d0, d1;

    if (c <= 0xffff) {
        /* one word character */
        if (ilen < 2) {
            return 1;
        }
        cpu_stw_data_ra(env, addr, c, ra);
        *olen = 2;
    } else {
        /* two word character */
        if (ilen < 4) {
            return 1;
        }
        d1 = 0xdc00 | extract32(c, 0, 10);
        d0 = 0xd800 | extract32(c, 10, 6);
        d0 = deposit32(d0, 6, 4, extract32(c, 16, 5) - 1);
        cpu_stw_data_ra(env, addr + 0, d0, ra);
        cpu_stw_data_ra(env, addr + 2, d1, ra);
        *olen = 4;
    }

    return -1;
}

static int encode_utf32(CPUS390XState *env, uint64_t addr, uint64_t ilen,
                        uintptr_t ra, uint32_t c, uint32_t *olen)
{
    if (ilen < 4) {
        return 1;
    }
    cpu_stl_data_ra(env, addr, c, ra);
    *olen = 4;
    return -1;
}
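
/*
 * Common driver for the CUxx conversions. R1/R1+1 and R2/R2+1 are
 * even-odd register pairs holding the destination and source address
 * and length; they are written back on every exit, so the instruction
 * can be resumed after a partial completion (cc 3).
 */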
static inline uint32_t convert_unicode(CPUS390XState *env, uint32_t r1,
                                       uint32_t r2, uint32_t m3, uintptr_t ra,
                                       decode_unicode_fn decode,
                                       encode_unicode_fn encode)
{
    uint64_t dst = get_address(env, r1);
    uint64_t dlen = get_length(env, r1 + 1);
    uint64_t src = get_address(env, r2);
    uint64_t slen = get_length(env, r2 + 1);
    bool enh_check = m3 & 1;
    int cc, i;

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do. For now, let's cap at 256. */
    for (i = 0; i < 256; ++i) {
        uint32_t c, ilen, olen;

        cc = decode(env, src, slen, enh_check, ra, &c, &ilen);
        if (unlikely(cc >= 0)) {
            break;
        }
        cc = encode(env, dst, dlen, ra, c, &olen);
        if (unlikely(cc >= 0)) {
            break;
        }

        src += ilen;
        slen -= ilen;
        dst += olen;
        dlen -= olen;
        cc = 3;
    }

    set_address(env, r1, dst);
    set_length(env, r1 + 1, dlen);
    set_address(env, r2, src);
    set_length(env, r2 + 1, slen);

    return cc;
}

uint32_t HELPER(cu12)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3)
{
    return convert_unicode(env, r1, r2, m3, GETPC(),
                           decode_utf8, encode_utf16);
}

uint32_t HELPER(cu14)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3)
{
    return convert_unicode(env, r1, r2, m3, GETPC(),
                           decode_utf8, encode_utf32);
}

uint32_t HELPER(cu21)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3)
{
    return convert_unicode(env, r1, r2, m3, GETPC(),
                           decode_utf16, encode_utf8);
}

uint32_t HELPER(cu24)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3)
{
    return convert_unicode(env, r1, r2, m3, GETPC(),
                           decode_utf16, encode_utf32);
}

uint32_t HELPER(cu41)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3)
{
    return convert_unicode(env, r1, r2, m3, GETPC(),
                           decode_utf32, encode_utf8);
}

uint32_t HELPER(cu42)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3)
{
    return convert_unicode(env, r1, r2, m3, GETPC(),
                           decode_utf32, encode_utf16);
}

void probe_write_access(CPUS390XState *env, uint64_t addr, uint64_t len,
                        uintptr_t ra)
{
    /* test the actual access, not just any access to the page due to LAP */
    while (len) {
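        /*
         * -(addr | TARGET_PAGE_MASK) is the number of bytes remaining
         * on the current page (TARGET_PAGE_MASK is sign-extended), so
         * each probe stays within a single page.
         */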
        const uint64_t pagelen = -(addr | TARGET_PAGE_MASK);
        const uint64_t curlen = MIN(pagelen, len);

        probe_write(env, addr, curlen, cpu_mmu_index(env, false), ra);
        addr = wrap_address(env, addr + curlen);
        len -= curlen;
    }
}

void HELPER(probe_write_access)(CPUS390XState *env, uint64_t addr, uint64_t len)
{
    probe_write_access(env, addr, len, GETPC());
}