/*
 *  S/390 memory access helper routines
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2009 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "internal.h"
#include "tcg_s390x.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "qemu/int128.h"
#include "qemu/atomic128.h"
#include "tcg/tcg.h"

#if !defined(CONFIG_USER_ONLY)
#include "hw/s390x/storage-keys.h"
#include "hw/boards.h"
#endif

/*****************************************************************************/
/* Softmmu support */

/* #define DEBUG_HELPER */
#ifdef DEBUG_HELPER
#define HELPER_LOG(x...) qemu_log(x)
#else
#define HELPER_LOG(x...)
#endif

static inline bool psw_key_valid(CPUS390XState *env, uint8_t psw_key)
{
    uint16_t pkm = env->cregs[3] >> 16;

    if (env->psw.mask & PSW_MASK_PSTATE) {
        /* PSW key has range 0..15, it is valid if the bit is 1 in the PKM */
        return pkm & (0x80 >> psw_key);
    }
    return true;
}

static bool is_destructive_overlap(CPUS390XState *env, uint64_t dest,
                                   uint64_t src, uint32_t len)
{
    if (!len || src == dest) {
        return false;
    }
    /* Take care of wrapping at the end of address space. */
    if (unlikely(wrap_address(env, src + len - 1) < src)) {
        return dest > src || dest <= wrap_address(env, src + len - 1);
    }
    return dest > src && dest <= src + len - 1;
}
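
/*
 * As an illustration of the check above: with src = 0x1000 and len = 0x100,
 * any dest in (0x1000, 0x10ff] overlaps bytes that a left-to-right copy has
 * already written -- dest = 0x1010 is destructive, while dest = 0x0fff or
 * dest = 0x1100 is not.
 */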

/* Trigger a SPECIFICATION exception if an address or a length is not
   naturally aligned.  */
static inline void check_alignment(CPUS390XState *env, uint64_t v,
                                   int wordsize, uintptr_t ra)
{
    if (v % wordsize) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
    }
}

/* Load a value from memory according to its size.  */
static inline uint64_t cpu_ldusize_data_ra(CPUS390XState *env, uint64_t addr,
                                           int wordsize, uintptr_t ra)
{
    switch (wordsize) {
    case 1:
        return cpu_ldub_data_ra(env, addr, ra);
    case 2:
        return cpu_lduw_data_ra(env, addr, ra);
    default:
        abort();
    }
}

/* Store a value to memory according to its size.  */
static inline void cpu_stsize_data_ra(CPUS390XState *env, uint64_t addr,
                                      uint64_t value, int wordsize,
                                      uintptr_t ra)
{
    switch (wordsize) {
    case 1:
        cpu_stb_data_ra(env, addr, value, ra);
        break;
    case 2:
        cpu_stw_data_ra(env, addr, value, ra);
        break;
    default:
        abort();
    }
}

/* An access covers at most 4096 bytes and therefore at most two pages. */
typedef struct S390Access {
    target_ulong vaddr1;
    target_ulong vaddr2;
    char *haddr1;
    char *haddr2;
    uint16_t size1;
    uint16_t size2;
    /*
     * If we can't access the host page directly, we'll have to do I/O access
     * via ld/st helpers. These are internal details, so we store the
     * mmu idx to do the access here instead of passing it around in the
     * helpers. Maybe, one day we can get rid of ld/st access - once we can
     * handle TLB_NOTDIRTY differently. We don't expect these special accesses
     * to trigger exceptions - only if we would have TLB_NOTDIRTY on LAP
     * pages, we might trigger a new MMU translation - very unlikely that
     * the mapping changes in between and we would trigger a fault.
     */
    int mmu_idx;
} S390Access;

static S390Access access_prepare(CPUS390XState *env, vaddr vaddr, int size,
                                 MMUAccessType access_type, int mmu_idx,
                                 uintptr_t ra)
{
    S390Access access = {
        .vaddr1 = vaddr,
        .size1 = MIN(size, -(vaddr | TARGET_PAGE_MASK)),
        .mmu_idx = mmu_idx,
    };

    g_assert(size > 0 && size <= 4096);
    access.haddr1 = probe_access(env, access.vaddr1, access.size1, access_type,
                                 mmu_idx, ra);

    if (unlikely(access.size1 != size)) {
        /* The access crosses page boundaries. */
        access.vaddr2 = wrap_address(env, vaddr + access.size1);
        access.size2 = size - access.size1;
        access.haddr2 = probe_access(env, access.vaddr2, access.size2,
                                     access_type, mmu_idx, ra);
    }
    return access;
}
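
/*
 * Note on the size1 computation above: -(vaddr | TARGET_PAGE_MASK) is the
 * number of bytes from vaddr to the end of its page. E.g. with 4K pages,
 * vaddr = 0x1234 leaves 0x1000 - 0x234 = 0xdcc bytes on the page, so a
 * 4096-byte access is split into size1 = 0xdcc and size2 = 0x234.
 */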

/* Helper to handle memset on a single page. */
static void do_access_memset(CPUS390XState *env, vaddr vaddr, char *haddr,
                             uint8_t byte, uint16_t size, int mmu_idx,
                             uintptr_t ra)
{
#ifdef CONFIG_USER_ONLY
    g_assert(haddr);
    memset(haddr, byte, size);
#else
    TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
    int i;

    if (likely(haddr)) {
        memset(haddr, byte, size);
    } else {
        /*
         * Do a single access and test if we can then get access to the
         * page. This is especially relevant to speed up TLB_NOTDIRTY.
         */
        g_assert(size > 0);
        helper_ret_stb_mmu(env, vaddr, byte, oi, ra);
        haddr = tlb_vaddr_to_host(env, vaddr, MMU_DATA_STORE, mmu_idx);
        if (likely(haddr)) {
            memset(haddr + 1, byte, size - 1);
        } else {
            for (i = 1; i < size; i++) {
                helper_ret_stb_mmu(env, vaddr + i, byte, oi, ra);
            }
        }
    }
#endif
}

static void access_memset(CPUS390XState *env, S390Access *desta,
                          uint8_t byte, uintptr_t ra)
{
    do_access_memset(env, desta->vaddr1, desta->haddr1, byte, desta->size1,
                     desta->mmu_idx, ra);
    if (likely(!desta->size2)) {
        return;
    }
    do_access_memset(env, desta->vaddr2, desta->haddr2, byte, desta->size2,
                     desta->mmu_idx, ra);
}

static uint8_t do_access_get_byte(CPUS390XState *env, vaddr vaddr, char **haddr,
                                  int offset, int mmu_idx, uintptr_t ra)
{
#ifdef CONFIG_USER_ONLY
    return ldub_p(*haddr + offset);
#else
    TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
    uint8_t byte;

    if (likely(*haddr)) {
        return ldub_p(*haddr + offset);
    }
    /*
     * Do a single access and test if we can then get access to the
     * page. This is especially relevant to speed up TLB_NOTDIRTY.
     */
    byte = helper_ret_ldub_mmu(env, vaddr + offset, oi, ra);
    *haddr = tlb_vaddr_to_host(env, vaddr, MMU_DATA_LOAD, mmu_idx);
    return byte;
#endif
}

static uint8_t access_get_byte(CPUS390XState *env, S390Access *access,
                               int offset, uintptr_t ra)
{
    if (offset < access->size1) {
        return do_access_get_byte(env, access->vaddr1, &access->haddr1,
                                  offset, access->mmu_idx, ra);
    }
    return do_access_get_byte(env, access->vaddr2, &access->haddr2,
                              offset - access->size1, access->mmu_idx, ra);
}

static void do_access_set_byte(CPUS390XState *env, vaddr vaddr, char **haddr,
                               int offset, uint8_t byte, int mmu_idx,
                               uintptr_t ra)
{
#ifdef CONFIG_USER_ONLY
    stb_p(*haddr + offset, byte);
#else
    TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);

    if (likely(*haddr)) {
        stb_p(*haddr + offset, byte);
        return;
    }
    /*
     * Do a single access and test if we can then get access to the
     * page. This is especially relevant to speed up TLB_NOTDIRTY.
     */
    helper_ret_stb_mmu(env, vaddr + offset, byte, oi, ra);
    *haddr = tlb_vaddr_to_host(env, vaddr, MMU_DATA_STORE, mmu_idx);
#endif
}

static void access_set_byte(CPUS390XState *env, S390Access *access,
                            int offset, uint8_t byte, uintptr_t ra)
{
    if (offset < access->size1) {
        do_access_set_byte(env, access->vaddr1, &access->haddr1, offset, byte,
                           access->mmu_idx, ra);
    } else {
        do_access_set_byte(env, access->vaddr2, &access->haddr2,
                           offset - access->size1, byte, access->mmu_idx, ra);
    }
}

/*
 * Move data with the same semantics as memmove() in case ranges don't overlap
 * or src > dest. Undefined behavior on destructive overlaps.
 */
static void access_memmove(CPUS390XState *env, S390Access *desta,
                           S390Access *srca, uintptr_t ra)
{
    int diff;

    g_assert(desta->size1 + desta->size2 == srca->size1 + srca->size2);

    /* Fall back to slow access in case we don't have access to all host pages */
    if (unlikely(!desta->haddr1 || (desta->size2 && !desta->haddr2) ||
                 !srca->haddr1 || (srca->size2 && !srca->haddr2))) {
        int i;

        for (i = 0; i < desta->size1 + desta->size2; i++) {
            uint8_t byte = access_get_byte(env, srca, i, ra);

            access_set_byte(env, desta, i, byte, ra);
        }
        return;
    }

    if (srca->size1 == desta->size1) {
        memmove(desta->haddr1, srca->haddr1, srca->size1);
        if (unlikely(srca->size2)) {
            memmove(desta->haddr2, srca->haddr2, srca->size2);
        }
    } else if (srca->size1 < desta->size1) {
        diff = desta->size1 - srca->size1;
        memmove(desta->haddr1, srca->haddr1, srca->size1);
        memmove(desta->haddr1 + srca->size1, srca->haddr2, diff);
        if (likely(desta->size2)) {
            memmove(desta->haddr2, srca->haddr2 + diff, desta->size2);
        }
    } else {
        diff = srca->size1 - desta->size1;
        memmove(desta->haddr1, srca->haddr1, desta->size1);
        memmove(desta->haddr2, srca->haddr1 + desta->size1, diff);
        if (likely(srca->size2)) {
            memmove(desta->haddr2 + diff, srca->haddr2, srca->size2);
        }
    }
}
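
/*
 * Illustration of the unequal-split case above: for a 256-byte copy where
 * the source splits 100/156 across pages and the destination splits 150/106,
 * diff = 50 and the three memmove() calls copy 100 bytes from srca->haddr1,
 * then 50 and 106 bytes from srca->haddr2.
 */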

static int mmu_idx_from_as(uint8_t as)
{
    switch (as) {
    case AS_PRIMARY:
        return MMU_PRIMARY_IDX;
    case AS_SECONDARY:
        return MMU_SECONDARY_IDX;
    case AS_HOME:
        return MMU_HOME_IDX;
    default:
        /* FIXME AS_ACCREG */
        g_assert_not_reached();
    }
}

/* and on array */
static uint32_t do_helper_nc(CPUS390XState *env, uint32_t l, uint64_t dest,
                             uint64_t src, uintptr_t ra)
{
    const int mmu_idx = cpu_mmu_index(env, false);
    S390Access srca1, srca2, desta;
    uint32_t i;
    uint8_t c = 0;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __func__, l, dest, src);

    /* NC always processes one more byte than specified - maximum is 256 */
    l++;

    srca1 = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
    srca2 = access_prepare(env, dest, l, MMU_DATA_LOAD, mmu_idx, ra);
    desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra);
    for (i = 0; i < l; i++) {
        const uint8_t x = access_get_byte(env, &srca1, i, ra) &
                          access_get_byte(env, &srca2, i, ra);

        c |= x;
        access_set_byte(env, &desta, i, x, ra);
    }
    return c != 0;
}

uint32_t HELPER(nc)(CPUS390XState *env, uint32_t l, uint64_t dest,
                    uint64_t src)
{
    return do_helper_nc(env, l, dest, src, GETPC());
}

/* xor on array */
static uint32_t do_helper_xc(CPUS390XState *env, uint32_t l, uint64_t dest,
                             uint64_t src, uintptr_t ra)
{
    const int mmu_idx = cpu_mmu_index(env, false);
    S390Access srca1, srca2, desta;
    uint32_t i;
    uint8_t c = 0;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __func__, l, dest, src);

    /* XC always processes one more byte than specified - maximum is 256 */
    l++;

    srca1 = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
    srca2 = access_prepare(env, dest, l, MMU_DATA_LOAD, mmu_idx, ra);
    desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra);

    /* xor with itself is the same as memset(0) */
    if (src == dest) {
        access_memset(env, &desta, 0, ra);
        return 0;
    }

    for (i = 0; i < l; i++) {
        const uint8_t x = access_get_byte(env, &srca1, i, ra) ^
                          access_get_byte(env, &srca2, i, ra);

        c |= x;
        access_set_byte(env, &desta, i, x, ra);
    }
    return c != 0;
}

uint32_t HELPER(xc)(CPUS390XState *env, uint32_t l, uint64_t dest,
                    uint64_t src)
{
    return do_helper_xc(env, l, dest, src, GETPC());
}

/* or on array */
static uint32_t do_helper_oc(CPUS390XState *env, uint32_t l, uint64_t dest,
                             uint64_t src, uintptr_t ra)
{
    const int mmu_idx = cpu_mmu_index(env, false);
    S390Access srca1, srca2, desta;
    uint32_t i;
    uint8_t c = 0;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __func__, l, dest, src);

    /* OC always processes one more byte than specified - maximum is 256 */
    l++;

    srca1 = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
    srca2 = access_prepare(env, dest, l, MMU_DATA_LOAD, mmu_idx, ra);
    desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra);
    for (i = 0; i < l; i++) {
        const uint8_t x = access_get_byte(env, &srca1, i, ra) |
                          access_get_byte(env, &srca2, i, ra);

        c |= x;
        access_set_byte(env, &desta, i, x, ra);
    }
    return c != 0;
}

uint32_t HELPER(oc)(CPUS390XState *env, uint32_t l, uint64_t dest,
                    uint64_t src)
{
    return do_helper_oc(env, l, dest, src, GETPC());
}

/* memmove */
static uint32_t do_helper_mvc(CPUS390XState *env, uint32_t l, uint64_t dest,
                              uint64_t src, uintptr_t ra)
{
    const int mmu_idx = cpu_mmu_index(env, false);
    S390Access srca, desta;
    uint32_t i;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __func__, l, dest, src);

    /* MVC always copies one more byte than specified - maximum is 256 */
    l++;

    srca = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
    desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra);

    /*
     * "When the operands overlap, the result is obtained as if the operands
     * were processed one byte at a time". Only non-destructive overlaps
     * behave like memmove().
     */
    if (dest == src + 1) {
        access_memset(env, &desta, access_get_byte(env, &srca, 0, ra), ra);
    } else if (!is_destructive_overlap(env, dest, src, l)) {
        access_memmove(env, &desta, &srca, ra);
    } else {
        for (i = 0; i < l; i++) {
            uint8_t byte = access_get_byte(env, &srca, i, ra);

            access_set_byte(env, &desta, i, byte, ra);
        }
    }

    return env->cc_op;
}

void HELPER(mvc)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
{
    do_helper_mvc(env, l, dest, src, GETPC());
}
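
/*
 * The dest == src + 1 special case above implements the classic MVC memset
 * idiom: processed one byte at a time, each store becomes the source of the
 * next copy, so the byte at src is replicated across the whole destination.
 */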

/* move inverse */
void HELPER(mvcin)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
{
    const int mmu_idx = cpu_mmu_index(env, false);
    S390Access srca, desta;
    uintptr_t ra = GETPC();
    int i;

    /* MVCIN always copies one more byte than specified - maximum is 256 */
    l++;

    src = wrap_address(env, src - l + 1);
    srca = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
    desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra);
    for (i = 0; i < l; i++) {
        const uint8_t x = access_get_byte(env, &srca, l - i - 1, ra);

        access_set_byte(env, &desta, i, x, ra);
    }
}

/* move numerics */
void HELPER(mvn)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
{
    const int mmu_idx = cpu_mmu_index(env, false);
    S390Access srca1, srca2, desta;
    uintptr_t ra = GETPC();
    int i;

    /* MVN always copies one more byte than specified - maximum is 256 */
    l++;

    srca1 = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
    srca2 = access_prepare(env, dest, l, MMU_DATA_LOAD, mmu_idx, ra);
    desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra);
    for (i = 0; i < l; i++) {
        const uint8_t x = (access_get_byte(env, &srca1, i, ra) & 0x0f) |
                          (access_get_byte(env, &srca2, i, ra) & 0xf0);

        access_set_byte(env, &desta, i, x, ra);
    }
}

/* move with offset */
void HELPER(mvo)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
{
    const int mmu_idx = cpu_mmu_index(env, false);
    /* MVO always processes one more byte than specified - maximum is 16 */
    const int len_dest = (l >> 4) + 1;
    const int len_src = (l & 0xf) + 1;
    uintptr_t ra = GETPC();
    uint8_t byte_dest, byte_src;
    S390Access srca, desta;
    int i, j;

    srca = access_prepare(env, src, len_src, MMU_DATA_LOAD, mmu_idx, ra);
    desta = access_prepare(env, dest, len_dest, MMU_DATA_STORE, mmu_idx, ra);

    /* Handle rightmost byte */
    byte_dest = cpu_ldub_data_ra(env, dest + len_dest - 1, ra);
    byte_src = access_get_byte(env, &srca, len_src - 1, ra);
    byte_dest = (byte_dest & 0x0f) | (byte_src << 4);
    access_set_byte(env, &desta, len_dest - 1, byte_dest, ra);

    /* Process remaining bytes from right to left */
    for (i = len_dest - 2, j = len_src - 2; i >= 0; i--, j--) {
        byte_dest = byte_src >> 4;
        if (j >= 0) {
            byte_src = access_get_byte(env, &srca, j, ra);
        } else {
            byte_src = 0;
        }
        byte_dest |= byte_src << 4;
        access_set_byte(env, &desta, i, byte_dest, ra);
    }
}
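
/*
 * Worked example for MVO: with a 3-byte destination 0x77 0x88 0x9c and a
 * 2-byte source 0x12 0x34, the destination's rightmost (sign) nibble is
 * preserved and the source digits are shifted in, giving 0x01 0x23 0x4c.
 */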

/* move zones */
void HELPER(mvz)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
{
    const int mmu_idx = cpu_mmu_index(env, false);
    S390Access srca1, srca2, desta;
    uintptr_t ra = GETPC();
    int i;

    /* MVZ always copies one more byte than specified - maximum is 256 */
    l++;

    srca1 = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
    srca2 = access_prepare(env, dest, l, MMU_DATA_LOAD, mmu_idx, ra);
    desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra);
    for (i = 0; i < l; i++) {
        const uint8_t x = (access_get_byte(env, &srca1, i, ra) & 0xf0) |
                          (access_get_byte(env, &srca2, i, ra) & 0x0f);

        access_set_byte(env, &desta, i, x, ra);
    }
}

/* compare unsigned byte arrays */
static uint32_t do_helper_clc(CPUS390XState *env, uint32_t l, uint64_t s1,
                              uint64_t s2, uintptr_t ra)
{
    uint32_t i;
    uint32_t cc = 0;

    HELPER_LOG("%s l %d s1 %" PRIx64 " s2 %" PRIx64 "\n",
               __func__, l, s1, s2);

    for (i = 0; i <= l; i++) {
        uint8_t x = cpu_ldub_data_ra(env, s1 + i, ra);
        uint8_t y = cpu_ldub_data_ra(env, s2 + i, ra);
        HELPER_LOG("%02x (%c)/%02x (%c) ", x, x, y, y);
        if (x < y) {
            cc = 1;
            break;
        } else if (x > y) {
            cc = 2;
            break;
        }
    }

    HELPER_LOG("\n");
    return cc;
}

uint32_t HELPER(clc)(CPUS390XState *env, uint32_t l, uint64_t s1, uint64_t s2)
{
    return do_helper_clc(env, l, s1, s2, GETPC());
}

/* compare logical under mask */
uint32_t HELPER(clm)(CPUS390XState *env, uint32_t r1, uint32_t mask,
                     uint64_t addr)
{
    uintptr_t ra = GETPC();
    uint32_t cc = 0;

    HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%" PRIx64 "\n", __func__, r1,
               mask, addr);

    while (mask) {
        if (mask & 8) {
            uint8_t d = cpu_ldub_data_ra(env, addr, ra);
            uint8_t r = extract32(r1, 24, 8);
            HELPER_LOG("mask 0x%x %02x/%02x (0x%" PRIx64 ") ", mask, r, d,
                       addr);
            if (r < d) {
                cc = 1;
                break;
            } else if (r > d) {
                cc = 2;
                break;
            }
            addr++;
        }
        mask = (mask << 1) & 0xf;
        r1 <<= 8;
    }

    HELPER_LOG("\n");
    return cc;
}

static inline uint64_t get_address(CPUS390XState *env, int reg)
{
    return wrap_address(env, env->regs[reg]);
}

/*
 * Store the address to the given register, zeroing out unused leftmost
 * bits in bit positions 32-63 (24-bit and 31-bit mode only).
 */
static inline void set_address_zero(CPUS390XState *env, int reg,
                                    uint64_t address)
{
    if (env->psw.mask & PSW_MASK_64) {
        env->regs[reg] = address;
    } else {
        if (!(env->psw.mask & PSW_MASK_32)) {
            address &= 0x00ffffff;
        } else {
            address &= 0x7fffffff;
        }
        env->regs[reg] = deposit64(env->regs[reg], 0, 32, address);
    }
}
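
/*
 * E.g. in 24-bit mode, set_address_zero(env, r, 0x12345678) stores
 * 0x00345678 in the low word of the register: the address wraps to 24 bits
 * and the unused bits 32-39 are zeroed as well.
 */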

static inline void set_address(CPUS390XState *env, int reg, uint64_t address)
{
    if (env->psw.mask & PSW_MASK_64) {
        /* 64-Bit mode */
        env->regs[reg] = address;
    } else {
        if (!(env->psw.mask & PSW_MASK_32)) {
            /* 24-Bit mode. According to the PoO it is implementation
               dependent if bits 32-39 remain unchanged or are set to
               zeros. Choose the former so that the function can also be
               used for TRT. */
            env->regs[reg] = deposit64(env->regs[reg], 0, 24, address);
        } else {
            /* 31-Bit mode. According to the PoO it is implementation
               dependent if bit 32 remains unchanged or is set to zero.
               Choose the latter so that the function can also be used for
               TRT. */
            address &= 0x7fffffff;
            env->regs[reg] = deposit64(env->regs[reg], 0, 32, address);
        }
    }
}

static inline uint64_t wrap_length32(CPUS390XState *env, uint64_t length)
{
    if (!(env->psw.mask & PSW_MASK_64)) {
        return (uint32_t)length;
    }
    return length;
}

static inline uint64_t wrap_length31(CPUS390XState *env, uint64_t length)
{
    if (!(env->psw.mask & PSW_MASK_64)) {
        /* 24-Bit and 31-Bit mode */
        length &= 0x7fffffff;
    }
    return length;
}

static inline uint64_t get_length(CPUS390XState *env, int reg)
{
    return wrap_length31(env, env->regs[reg]);
}

static inline void set_length(CPUS390XState *env, int reg, uint64_t length)
{
    if (env->psw.mask & PSW_MASK_64) {
        /* 64-Bit mode */
        env->regs[reg] = length;
    } else {
        /* 24-Bit and 31-Bit mode */
        env->regs[reg] = deposit64(env->regs[reg], 0, 32, length);
    }
}

/* search string (c is byte to search, r2 is string, r1 end of string) */
void HELPER(srst)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    uintptr_t ra = GETPC();
    uint64_t end, str;
    uint32_t len;
    uint8_t v, c = env->regs[0];

    /* Bits 32-55 must contain all 0.  */
    if (env->regs[0] & 0xffffff00u) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
    }

    str = get_address(env, r2);
    end = get_address(env, r1);

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 8k.  */
    for (len = 0; len < 0x2000; ++len) {
        if (str + len == end) {
            /* Character not found.  R1 & R2 are unmodified.  */
            env->cc_op = 2;
            return;
        }
        v = cpu_ldub_data_ra(env, str + len, ra);
        if (v == c) {
            /* Character found.  Set R1 to the location; R2 is unmodified.  */
            env->cc_op = 1;
            set_address(env, r1, str + len);
            return;
        }
    }

    /* CPU-determined bytes processed.  Advance R2 to next byte to process.  */
    env->cc_op = 3;
    set_address(env, r2, str + len);
}

void HELPER(srstu)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    uintptr_t ra = GETPC();
    uint32_t len;
    uint16_t v, c = env->regs[0];
    uint64_t end, str, adj_end;

    /* Bits 32-47 of R0 must be zero.  */
    if (env->regs[0] & 0xffff0000u) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
    }

    str = get_address(env, r2);
    end = get_address(env, r1);

    /* If the LSB of the two addresses differ, use one extra byte.  */
    adj_end = end + ((str ^ end) & 1);

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 8k.  */
    for (len = 0; len < 0x2000; len += 2) {
        if (str + len == adj_end) {
            /* End of input found.  */
            env->cc_op = 2;
            return;
        }
        v = cpu_lduw_data_ra(env, str + len, ra);
        if (v == c) {
            /* Character found.  Set R1 to the location; R2 is unmodified.  */
            env->cc_op = 1;
            set_address(env, r1, str + len);
            return;
        }
    }

    /* CPU-determined bytes processed.  Advance R2 to next byte to process.  */
    env->cc_op = 3;
    set_address(env, r2, str + len);
}

/* unsigned string compare (c is string terminator) */
uint64_t HELPER(clst)(CPUS390XState *env, uint64_t c, uint64_t s1, uint64_t s2)
{
    uintptr_t ra = GETPC();
    uint32_t len;

    c = c & 0xff;
    s1 = wrap_address(env, s1);
    s2 = wrap_address(env, s2);

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 8k.  */
    for (len = 0; len < 0x2000; ++len) {
        uint8_t v1 = cpu_ldub_data_ra(env, s1 + len, ra);
        uint8_t v2 = cpu_ldub_data_ra(env, s2 + len, ra);
        if (v1 == v2) {
            if (v1 == c) {
                /* Equal.  CC=0, and don't advance the registers.  */
                env->cc_op = 0;
                env->retxl = s2;
                return s1;
            }
        } else {
            /* Unequal.  CC={1,2}, and advance the registers.  Note that
               the terminator need not be zero, but the string that contains
               the terminator is by definition "low".  */
            env->cc_op = (v1 == c ? 1 : v2 == c ? 2 : v1 < v2 ? 1 : 2);
            env->retxl = s2 + len;
            return s1 + len;
        }
    }

    /* CPU-determined bytes equal; advance the registers.  */
    env->cc_op = 3;
    env->retxl = s2 + len;
    return s1 + len;
}

/* move page */
uint32_t HELPER(mvpg)(CPUS390XState *env, uint64_t r0, uint64_t r1, uint64_t r2)
{
    const int mmu_idx = cpu_mmu_index(env, false);
    const bool f = extract64(r0, 11, 1);
    const bool s = extract64(r0, 10, 1);
    uintptr_t ra = GETPC();
    S390Access srca, desta;

    if ((f && s) || extract64(r0, 12, 4)) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC());
    }

    r1 = wrap_address(env, r1 & TARGET_PAGE_MASK);
    r2 = wrap_address(env, r2 & TARGET_PAGE_MASK);

    /*
     * TODO:
     * - Access key handling
     * - CC-option with suppression of page-translation exceptions
     * - Store r1/r2 register identifiers at real location 162
     */
    srca = access_prepare(env, r2, TARGET_PAGE_SIZE, MMU_DATA_LOAD, mmu_idx,
                          ra);
    desta = access_prepare(env, r1, TARGET_PAGE_SIZE, MMU_DATA_STORE, mmu_idx,
                           ra);
    access_memmove(env, &desta, &srca, ra);
    return 0; /* data moved */
}

/* string copy */
uint32_t HELPER(mvst)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    const int mmu_idx = cpu_mmu_index(env, false);
    const uint64_t d = get_address(env, r1);
    const uint64_t s = get_address(env, r2);
    const uint8_t c = env->regs[0];
    const int len = MIN(-(d | TARGET_PAGE_MASK), -(s | TARGET_PAGE_MASK));
    S390Access srca, desta;
    uintptr_t ra = GETPC();
    int i;

    if (env->regs[0] & 0xffffff00ull) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
    }

    /*
     * Our access should not exceed single pages, as we must not report access
     * exceptions exceeding the actually copied range (which we don't know at
     * this point). We might over-indicate watchpoints within the pages
     * (if we ever care, we have to limit processing to a single byte).
     */
    srca = access_prepare(env, s, len, MMU_DATA_LOAD, mmu_idx, ra);
    desta = access_prepare(env, d, len, MMU_DATA_STORE, mmu_idx, ra);
    for (i = 0; i < len; i++) {
        const uint8_t v = access_get_byte(env, &srca, i, ra);

        access_set_byte(env, &desta, i, v, ra);
        if (v == c) {
            set_address_zero(env, r1, d + i);
            return 1;
        }
    }
    set_address_zero(env, r1, d + len);
    set_address_zero(env, r2, s + len);
    return 3;
}

/* load access registers r1 to r3 from memory at a2 */
void HELPER(lam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    uintptr_t ra = GETPC();
    int i;

    if (a2 & 0x3) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
    }

    for (i = r1;; i = (i + 1) % 16) {
        env->aregs[i] = cpu_ldl_data_ra(env, a2, ra);
        a2 += 4;

        if (i == r3) {
            break;
        }
    }
}

/* store access registers r1 to r3 in memory at a2 */
void HELPER(stam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    uintptr_t ra = GETPC();
    int i;

    if (a2 & 0x3) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
    }

    for (i = r1;; i = (i + 1) % 16) {
        cpu_stl_data_ra(env, a2, env->aregs[i], ra);
        a2 += 4;

        if (i == r3) {
            break;
        }
    }
}

/* move long helper */
static inline uint32_t do_mvcl(CPUS390XState *env,
                               uint64_t *dest, uint64_t *destlen,
                               uint64_t *src, uint64_t *srclen,
                               uint16_t pad, int wordsize, uintptr_t ra)
{
    const int mmu_idx = cpu_mmu_index(env, false);
    int len = MIN(*destlen, -(*dest | TARGET_PAGE_MASK));
    S390Access srca, desta;
    int i, cc;

    if (*destlen == *srclen) {
        cc = 0;
    } else if (*destlen < *srclen) {
        cc = 1;
    } else {
        cc = 2;
    }

    if (!*destlen) {
        return cc;
    }

    /*
     * Only perform one type of operation (move/pad) at a time.
     * Stay within single pages.
     */
    if (*srclen) {
        /* Copy the src array */
        len = MIN(MIN(*srclen, -(*src | TARGET_PAGE_MASK)), len);
        *destlen -= len;
        *srclen -= len;
        srca = access_prepare(env, *src, len, MMU_DATA_LOAD, mmu_idx, ra);
        desta = access_prepare(env, *dest, len, MMU_DATA_STORE, mmu_idx, ra);
        access_memmove(env, &desta, &srca, ra);
        *src = wrap_address(env, *src + len);
        *dest = wrap_address(env, *dest + len);
    } else if (wordsize == 1) {
        /* Pad the remaining area */
        *destlen -= len;
        desta = access_prepare(env, *dest, len, MMU_DATA_STORE, mmu_idx, ra);
        access_memset(env, &desta, pad, ra);
        *dest = wrap_address(env, *dest + len);
    } else {
        desta = access_prepare(env, *dest, len, MMU_DATA_STORE, mmu_idx, ra);

        /* The remaining length selects the padding byte. */
        for (i = 0; i < len; (*destlen)--, i++) {
            if (*destlen & 1) {
                access_set_byte(env, &desta, i, pad, ra);
            } else {
                access_set_byte(env, &desta, i, pad >> 8, ra);
            }
        }
        *dest = wrap_address(env, *dest + len);
    }

    return *destlen ? 3 : cc;
}
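
/*
 * For the two-byte pad (MVCLU) above: the parity of the remaining
 * destination length selects which half of the pad is stored. E.g. with
 * pad = 0xabcd, a position with even remaining length gets 0xab and an odd
 * one gets 0xcd, so the destination is filled with ab cd ab cd ...
 */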

/* move long */
uint32_t HELPER(mvcl)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    const int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();
    uint64_t destlen = env->regs[r1 + 1] & 0xffffff;
    uint64_t dest = get_address(env, r1);
    uint64_t srclen = env->regs[r2 + 1] & 0xffffff;
    uint64_t src = get_address(env, r2);
    uint8_t pad = env->regs[r2 + 1] >> 24;
    CPUState *cs = env_cpu(env);
    S390Access srca, desta;
    uint32_t cc, cur_len;

    if (is_destructive_overlap(env, dest, src, MIN(srclen, destlen))) {
        cc = 3;
    } else if (srclen == destlen) {
        cc = 0;
    } else if (destlen < srclen) {
        cc = 1;
    } else {
        cc = 2;
    }

    /* We might have to zero-out some bits even if there was no action. */
    if (unlikely(!destlen || cc == 3)) {
        set_address_zero(env, r2, src);
        set_address_zero(env, r1, dest);
        return cc;
    } else if (!srclen) {
        set_address_zero(env, r2, src);
    }

    /*
     * Only perform one type of operation (move/pad) in one step.
     * Stay within single pages.
     */
    while (destlen) {
        cur_len = MIN(destlen, -(dest | TARGET_PAGE_MASK));
        if (!srclen) {
            desta = access_prepare(env, dest, cur_len, MMU_DATA_STORE, mmu_idx,
                                   ra);
            access_memset(env, &desta, pad, ra);
        } else {
            cur_len = MIN(MIN(srclen, -(src | TARGET_PAGE_MASK)), cur_len);

            srca = access_prepare(env, src, cur_len, MMU_DATA_LOAD, mmu_idx,
                                  ra);
            desta = access_prepare(env, dest, cur_len, MMU_DATA_STORE, mmu_idx,
                                   ra);
            access_memmove(env, &desta, &srca, ra);
            src = wrap_address(env, src + cur_len);
            srclen -= cur_len;
            env->regs[r2 + 1] = deposit64(env->regs[r2 + 1], 0, 24, srclen);
            set_address_zero(env, r2, src);
        }
        dest = wrap_address(env, dest + cur_len);
        destlen -= cur_len;
        env->regs[r1 + 1] = deposit64(env->regs[r1 + 1], 0, 24, destlen);
        set_address_zero(env, r1, dest);

        /*
         * MVCL is interruptible. Return to the main loop if requested after
         * writing back all state to registers. If no interrupt will get
         * injected, we'll end up back in this handler and continue processing
         * the remaining parts.
         */
        if (destlen && unlikely(cpu_loop_exit_requested(cs))) {
            cpu_loop_exit_restore(cs, ra);
        }
    }
    return cc;
}

/* move long extended */
uint32_t HELPER(mvcle)(CPUS390XState *env, uint32_t r1, uint64_t a2,
                       uint32_t r3)
{
    uintptr_t ra = GETPC();
    uint64_t destlen = get_length(env, r1 + 1);
    uint64_t dest = get_address(env, r1);
    uint64_t srclen = get_length(env, r3 + 1);
    uint64_t src = get_address(env, r3);
    uint8_t pad = a2;
    uint32_t cc;

    cc = do_mvcl(env, &dest, &destlen, &src, &srclen, pad, 1, ra);

    set_length(env, r1 + 1, destlen);
    set_length(env, r3 + 1, srclen);
    set_address(env, r1, dest);
    set_address(env, r3, src);

    return cc;
}

/* move long unicode */
uint32_t HELPER(mvclu)(CPUS390XState *env, uint32_t r1, uint64_t a2,
                       uint32_t r3)
{
    uintptr_t ra = GETPC();
    uint64_t destlen = get_length(env, r1 + 1);
    uint64_t dest = get_address(env, r1);
    uint64_t srclen = get_length(env, r3 + 1);
    uint64_t src = get_address(env, r3);
    uint16_t pad = a2;
    uint32_t cc;

    cc = do_mvcl(env, &dest, &destlen, &src, &srclen, pad, 2, ra);

    set_length(env, r1 + 1, destlen);
    set_length(env, r3 + 1, srclen);
    set_address(env, r1, dest);
    set_address(env, r3, src);

    return cc;
}

/* compare logical long helper */
static inline uint32_t do_clcl(CPUS390XState *env,
                               uint64_t *src1, uint64_t *src1len,
                               uint64_t *src3, uint64_t *src3len,
                               uint16_t pad, uint64_t limit,
                               int wordsize, uintptr_t ra)
{
    uint64_t len = MAX(*src1len, *src3len);
    uint32_t cc = 0;

    check_alignment(env, *src1len | *src3len, wordsize, ra);

    if (!len) {
        return cc;
    }

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  */
    if (len > limit) {
        len = limit;
        cc = 3;
    }

    for (; len; len -= wordsize) {
        uint16_t v1 = pad;
        uint16_t v3 = pad;

        if (*src1len) {
            v1 = cpu_ldusize_data_ra(env, *src1, wordsize, ra);
        }
        if (*src3len) {
            v3 = cpu_ldusize_data_ra(env, *src3, wordsize, ra);
        }

        if (v1 != v3) {
            cc = (v1 < v3) ? 1 : 2;
            break;
        }

        if (*src1len) {
            *src1 += wordsize;
            *src1len -= wordsize;
        }
        if (*src3len) {
            *src3 += wordsize;
            *src3len -= wordsize;
        }
    }

    return cc;
}

/* compare logical long */
uint32_t HELPER(clcl)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    uintptr_t ra = GETPC();
    uint64_t src1len = extract64(env->regs[r1 + 1], 0, 24);
    uint64_t src1 = get_address(env, r1);
    uint64_t src3len = extract64(env->regs[r2 + 1], 0, 24);
    uint64_t src3 = get_address(env, r2);
    uint8_t pad = env->regs[r2 + 1] >> 24;
    uint32_t cc;

    cc = do_clcl(env, &src1, &src1len, &src3, &src3len, pad, -1, 1, ra);

    env->regs[r1 + 1] = deposit64(env->regs[r1 + 1], 0, 24, src1len);
    env->regs[r2 + 1] = deposit64(env->regs[r2 + 1], 0, 24, src3len);
    set_address(env, r1, src1);
    set_address(env, r2, src3);

    return cc;
}

/* compare logical long extended memcompare insn with padding */
uint32_t HELPER(clcle)(CPUS390XState *env, uint32_t r1, uint64_t a2,
                       uint32_t r3)
{
    uintptr_t ra = GETPC();
    uint64_t src1len = get_length(env, r1 + 1);
    uint64_t src1 = get_address(env, r1);
    uint64_t src3len = get_length(env, r3 + 1);
    uint64_t src3 = get_address(env, r3);
    uint8_t pad = a2;
    uint32_t cc;

    cc = do_clcl(env, &src1, &src1len, &src3, &src3len, pad, 0x2000, 1, ra);

    set_length(env, r1 + 1, src1len);
    set_length(env, r3 + 1, src3len);
    set_address(env, r1, src1);
    set_address(env, r3, src3);

    return cc;
}

/* compare logical long unicode memcompare insn with padding */
uint32_t HELPER(clclu)(CPUS390XState *env, uint32_t r1, uint64_t a2,
                       uint32_t r3)
{
    uintptr_t ra = GETPC();
    uint64_t src1len = get_length(env, r1 + 1);
    uint64_t src1 = get_address(env, r1);
    uint64_t src3len = get_length(env, r3 + 1);
    uint64_t src3 = get_address(env, r3);
    uint16_t pad = a2;
    uint32_t cc = 0;

    cc = do_clcl(env, &src1, &src1len, &src3, &src3len, pad, 0x1000, 2, ra);

    set_length(env, r1 + 1, src1len);
    set_length(env, r3 + 1, src3len);
    set_address(env, r1, src1);
    set_address(env, r3, src3);

    return cc;
}

/* checksum */
uint64_t HELPER(cksm)(CPUS390XState *env, uint64_t r1,
                      uint64_t src, uint64_t src_len)
{
    uintptr_t ra = GETPC();
    uint64_t max_len, len;
    uint64_t cksm = (uint32_t)r1;

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 8k.  */
    max_len = (src_len > 0x2000 ? 0x2000 : src_len);

    /* Process full words as available.  */
    for (len = 0; len + 4 <= max_len; len += 4, src += 4) {
        cksm += (uint32_t)cpu_ldl_data_ra(env, src, ra);
    }

    switch (max_len - len) {
    case 1:
        cksm += cpu_ldub_data_ra(env, src, ra) << 24;
        len += 1;
        break;
    case 2:
        cksm += cpu_lduw_data_ra(env, src, ra) << 16;
        len += 2;
        break;
    case 3:
        cksm += cpu_lduw_data_ra(env, src, ra) << 16;
        cksm += cpu_ldub_data_ra(env, src + 2, ra) << 8;
        len += 3;
        break;
    }

    /* Fold the carry from the checksum.  Note that we can see carry-out
       during folding more than once (but probably not more than twice).  */
    while (cksm > 0xffffffffull) {
        cksm = (uint32_t)cksm + (cksm >> 32);
    }

    /* Indicate whether or not we've processed everything.  */
    env->cc_op = (len == src_len ? 0 : 3);

    /* Return both cksm and processed length.  */
    env->retxl = cksm;
    return len;
}
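
/*
 * Folding example: a running sum of 0x1_0000_0003 folds to
 * 0x0000_0003 + 0x1 = 4; a second iteration is only needed if adding the
 * carry itself overflows 32 bits again.
 */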

void HELPER(pack)(CPUS390XState *env, uint32_t len, uint64_t dest, uint64_t src)
{
    uintptr_t ra = GETPC();
    int len_dest = len >> 4;
    int len_src = len & 0xf;
    uint8_t b;

    dest += len_dest;
    src += len_src;

    /* last byte is special, it only flips the nibbles */
    b = cpu_ldub_data_ra(env, src, ra);
    cpu_stb_data_ra(env, dest, (b << 4) | (b >> 4), ra);
    src--;
    len_src--;

    /* now pack every value */
    while (len_dest > 0) {
        b = 0;

        if (len_src >= 0) {
            b = cpu_ldub_data_ra(env, src, ra) & 0x0f;
            src--;
            len_src--;
        }
        if (len_src >= 0) {
            b |= cpu_ldub_data_ra(env, src, ra) << 4;
            src--;
            len_src--;
        }

        len_dest--;
        dest--;
        cpu_stb_data_ra(env, dest, b, ra);
    }
}
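
/*
 * PACK example: zoned "123" (0xf1 0xf2 0xf3) with a 2-byte destination
 * becomes 0x12 0x3f -- the rightmost byte has its nibbles swapped so the
 * zone 0xf ends up in the sign position, and the remaining digits are
 * packed two per byte from right to left.
 */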

static inline void do_pkau(CPUS390XState *env, uint64_t dest, uint64_t src,
                           uint32_t srclen, int ssize, uintptr_t ra)
{
    int i;
    /* The destination operand is always 16 bytes long. */
    const int destlen = 16;

    /* The operands are processed from right to left. */
    src += srclen - 1;
    dest += destlen - 1;

    for (i = 0; i < destlen; i++) {
        uint8_t b = 0;

        /* Start with a positive sign */
        if (i == 0) {
            b = 0xc;
        } else if (srclen > ssize) {
            b = cpu_ldub_data_ra(env, src, ra) & 0x0f;
            src -= ssize;
            srclen -= ssize;
        }

        if (srclen > ssize) {
            b |= cpu_ldub_data_ra(env, src, ra) << 4;
            src -= ssize;
            srclen -= ssize;
        }

        cpu_stb_data_ra(env, dest, b, ra);
        dest--;
    }
}

void HELPER(pka)(CPUS390XState *env, uint64_t dest, uint64_t src,
                 uint32_t srclen)
{
    do_pkau(env, dest, src, srclen, 1, GETPC());
}

void HELPER(pku)(CPUS390XState *env, uint64_t dest, uint64_t src,
                 uint32_t srclen)
{
    do_pkau(env, dest, src, srclen, 2, GETPC());
}

void HELPER(unpk)(CPUS390XState *env, uint32_t len, uint64_t dest,
                  uint64_t src)
{
    uintptr_t ra = GETPC();
    int len_dest = len >> 4;
    int len_src = len & 0xf;
    uint8_t b;
    int second_nibble = 0;

    dest += len_dest;
    src += len_src;

    /* last byte is special, it only flips the nibbles */
    b = cpu_ldub_data_ra(env, src, ra);
    cpu_stb_data_ra(env, dest, (b << 4) | (b >> 4), ra);
    src--;
    len_src--;

    /* now pad every nibble with 0xf0 */

    while (len_dest > 0) {
        uint8_t cur_byte = 0;

        if (len_src > 0) {
            cur_byte = cpu_ldub_data_ra(env, src, ra);
        }

        len_dest--;
        dest--;

        /* only advance one nibble at a time */
        if (second_nibble) {
            cur_byte >>= 4;
            len_src--;
            src--;
        }
        second_nibble = !second_nibble;

        /* digit */
        cur_byte = (cur_byte & 0xf);
        /* zone bits */
        cur_byte |= 0xf0;

        cpu_stb_data_ra(env, dest, cur_byte, ra);
    }
}
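
/*
 * UNPK example: packed 0x12 0x3f with a 3-byte destination becomes zoned
 * 0xf1 0xf2 0xf3 -- the rightmost byte again only swaps nibbles, and every
 * remaining digit is expanded with 0xf0 zone bits.
 */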

static inline uint32_t do_unpkau(CPUS390XState *env, uint64_t dest,
                                 uint32_t destlen, int dsize, uint64_t src,
                                 uintptr_t ra)
{
    int i;
    uint32_t cc;
    uint8_t b;
    /* The source operand is always 16 bytes long. */
    const int srclen = 16;

    /* The operands are processed from right to left. */
    src += srclen - 1;
    dest += destlen - dsize;

    /* Check for the sign. */
    b = cpu_ldub_data_ra(env, src, ra);
    src--;
    switch (b & 0xf) {
    case 0xa:
    case 0xc:
    case 0xe ... 0xf:
        cc = 0;  /* plus */
        break;
    case 0xb:
    case 0xd:
        cc = 1;  /* minus */
        break;
    default:
    case 0x0 ... 0x9:
        cc = 3;  /* invalid */
        break;
    }

    /* Now pad every nibble with 0x30, advancing one nibble at a time. */
    for (i = 0; i < destlen; i += dsize) {
        if (i == (31 * dsize)) {
            /* If length is 32/64 bytes, the leftmost byte is 0. */
            b = 0;
        } else if (i % (2 * dsize)) {
            b = cpu_ldub_data_ra(env, src, ra);
            src--;
        } else {
            b >>= 4;
        }
        cpu_stsize_data_ra(env, dest, 0x30 + (b & 0xf), dsize, ra);
        dest -= dsize;
    }

    return cc;
}

uint32_t HELPER(unpka)(CPUS390XState *env, uint64_t dest, uint32_t destlen,
                       uint64_t src)
{
    return do_unpkau(env, dest, destlen, 1, src, GETPC());
}

uint32_t HELPER(unpku)(CPUS390XState *env, uint64_t dest, uint32_t destlen,
                       uint64_t src)
{
    return do_unpkau(env, dest, destlen, 2, src, GETPC());
}

uint32_t HELPER(tp)(CPUS390XState *env, uint64_t dest, uint32_t destlen)
{
    uintptr_t ra = GETPC();
    uint32_t cc = 0;
    int i;

    for (i = 0; i < destlen; i++) {
        uint8_t b = cpu_ldub_data_ra(env, dest + i, ra);
        /* digit */
        cc |= (b & 0xf0) > 0x90 ? 2 : 0;

        if (i == (destlen - 1)) {
            /* sign */
            cc |= (b & 0xf) < 0xa ? 1 : 0;
        } else {
            /* digit */
            cc |= (b & 0xf) > 0x9 ? 2 : 0;
        }
    }

    return cc;
}
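
/*
 * E.g. TEST DECIMAL on 0x12 0x3c yields cc 0 (all digits valid, sign
 * valid); on 0xa2 0x34 it yields cc 3, since 0xa is an invalid digit and
 * 0x4 is an invalid sign code.
 */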

static uint32_t do_helper_tr(CPUS390XState *env, uint32_t len, uint64_t array,
                             uint64_t trans, uintptr_t ra)
{
    uint32_t i;

    for (i = 0; i <= len; i++) {
        uint8_t byte = cpu_ldub_data_ra(env, array + i, ra);
        uint8_t new_byte = cpu_ldub_data_ra(env, trans + byte, ra);
        cpu_stb_data_ra(env, array + i, new_byte, ra);
    }

    return env->cc_op;
}

void HELPER(tr)(CPUS390XState *env, uint32_t len, uint64_t array,
                uint64_t trans)
{
    do_helper_tr(env, len, array, trans, GETPC());
}

uint64_t HELPER(tre)(CPUS390XState *env, uint64_t array,
                     uint64_t len, uint64_t trans)
{
    uintptr_t ra = GETPC();
    uint8_t end = env->regs[0] & 0xff;
    uint64_t l = len;
    uint64_t i;
    uint32_t cc = 0;

    if (!(env->psw.mask & PSW_MASK_64)) {
        array &= 0x7fffffff;
        l = (uint32_t)l;
    }

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 8k.  */
    if (l > 0x2000) {
        l = 0x2000;
        cc = 3;
    }

    for (i = 0; i < l; i++) {
        uint8_t byte, new_byte;

        byte = cpu_ldub_data_ra(env, array + i, ra);

        if (byte == end) {
            cc = 1;
            break;
        }

        new_byte = cpu_ldub_data_ra(env, trans + byte, ra);
        cpu_stb_data_ra(env, array + i, new_byte, ra);
    }

    env->cc_op = cc;
    env->retxl = len - i;
    return array + i;
}

static inline uint32_t do_helper_trt(CPUS390XState *env, int len,
                                     uint64_t array, uint64_t trans,
                                     int inc, uintptr_t ra)
{
    int i;

    for (i = 0; i <= len; i++) {
        uint8_t byte = cpu_ldub_data_ra(env, array + i * inc, ra);
        uint8_t sbyte = cpu_ldub_data_ra(env, trans + byte, ra);

        if (sbyte != 0) {
            set_address(env, 1, array + i * inc);
            env->regs[2] = deposit64(env->regs[2], 0, 8, sbyte);
            return (i == len) ? 2 : 1;
        }
    }

    return 0;
}

static uint32_t do_helper_trt_fwd(CPUS390XState *env, uint32_t len,
                                  uint64_t array, uint64_t trans,
                                  uintptr_t ra)
{
    return do_helper_trt(env, len, array, trans, 1, ra);
}

uint32_t HELPER(trt)(CPUS390XState *env, uint32_t len, uint64_t array,
                     uint64_t trans)
{
    return do_helper_trt(env, len, array, trans, 1, GETPC());
}

static uint32_t do_helper_trt_bkwd(CPUS390XState *env, uint32_t len,
                                   uint64_t array, uint64_t trans,
                                   uintptr_t ra)
{
    return do_helper_trt(env, len, array, trans, -1, ra);
}

uint32_t HELPER(trtr)(CPUS390XState *env, uint32_t len, uint64_t array,
                      uint64_t trans)
{
    return do_helper_trt(env, len, array, trans, -1, GETPC());
}

/* Translate one/two to one/two */
uint32_t HELPER(trXX)(CPUS390XState *env, uint32_t r1, uint32_t r2,
                      uint32_t tst, uint32_t sizes)
{
    uintptr_t ra = GETPC();
    int dsize = (sizes & 1) ? 1 : 2;
    int ssize = (sizes & 2) ? 1 : 2;
    uint64_t tbl = get_address(env, 1);
    uint64_t dst = get_address(env, r1);
    uint64_t len = get_length(env, r1 + 1);
    uint64_t src = get_address(env, r2);
    uint32_t cc = 3;
    int i;

    /* The lower address bits of TBL are ignored.  For TROO, TROT, it's
       the low 3 bits (double-word aligned).  For TRTO, TRTT, it's either
       the low 12 bits (4K, without ETF2-ENH) or 3 bits (with ETF2-ENH).  */
    if (ssize == 2 && !s390_has_feat(S390_FEAT_ETF2_ENH)) {
        tbl &= -4096;
    } else {
        tbl &= -8;
    }

    check_alignment(env, len, ssize, ra);

    /* Lest we fail to service interrupts in a timely manner, */
    /* limit the amount of work we're willing to do.  */
    for (i = 0; i < 0x2000; i++) {
        uint16_t sval = cpu_ldusize_data_ra(env, src, ssize, ra);
        uint64_t tble = tbl + (sval * dsize);
        uint16_t dval = cpu_ldusize_data_ra(env, tble, dsize, ra);
        if (dval == tst) {
            cc = 1;
            break;
        }
        cpu_stsize_data_ra(env, dst, dval, dsize, ra);

        len -= ssize;
        src += ssize;
        dst += dsize;

        if (len == 0) {
            cc = 0;
            break;
        }
    }

    set_address(env, r1, dst);
    set_length(env, r1 + 1, len);
    set_address(env, r2, src);

    return cc;
}

void HELPER(cdsg)(CPUS390XState *env, uint64_t addr,
                  uint32_t r1, uint32_t r3)
{
    uintptr_t ra = GETPC();
    Int128 cmpv = int128_make128(env->regs[r1 + 1], env->regs[r1]);
    Int128 newv = int128_make128(env->regs[r3 + 1], env->regs[r3]);
    Int128 oldv;
    uint64_t oldh, oldl;
    bool fail;

    check_alignment(env, addr, 16, ra);

    oldh = cpu_ldq_data_ra(env, addr + 0, ra);
    oldl = cpu_ldq_data_ra(env, addr + 8, ra);

    oldv = int128_make128(oldl, oldh);
    fail = !int128_eq(oldv, cmpv);
    if (fail) {
        newv = oldv;
    }

    cpu_stq_data_ra(env, addr + 0, int128_gethi(newv), ra);
    cpu_stq_data_ra(env, addr + 8, int128_getlo(newv), ra);

    env->cc_op = fail;
    env->regs[r1] = int128_gethi(oldv);
    env->regs[r1 + 1] = int128_getlo(oldv);
}

void HELPER(cdsg_parallel)(CPUS390XState *env, uint64_t addr,
                           uint32_t r1, uint32_t r3)
{
    uintptr_t ra = GETPC();
    Int128 cmpv = int128_make128(env->regs[r1 + 1], env->regs[r1]);
    Int128 newv = int128_make128(env->regs[r3 + 1], env->regs[r3]);
    int mem_idx;
    TCGMemOpIdx oi;
    Int128 oldv;
    bool fail;

    assert(HAVE_CMPXCHG128);

    mem_idx = cpu_mmu_index(env, false);
    oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
    oldv = helper_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oi, ra);
    fail = !int128_eq(oldv, cmpv);

    env->cc_op = fail;
    env->regs[r1] = int128_gethi(oldv);
    env->regs[r1 + 1] = int128_getlo(oldv);
}

static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1,
                        uint64_t a2, bool parallel)
{
    uint32_t mem_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();
    uint32_t fc = extract32(env->regs[0], 0, 8);
    uint32_t sc = extract32(env->regs[0], 8, 8);
    uint64_t pl = get_address(env, 1) & -16;
    uint64_t svh, svl;
    uint32_t cc;

    /* Sanity check the function code and storage characteristic.  */
    if (fc > 1 || sc > 3) {
        if (!s390_has_feat(S390_FEAT_COMPARE_AND_SWAP_AND_STORE_2)) {
            goto spec_exception;
        }
        if (fc > 2 || sc > 4 || (fc == 2 && (r3 & 1))) {
            goto spec_exception;
        }
    }

    /* Sanity check the alignments.  */
    if (extract32(a1, 0, fc + 2) || extract32(a2, 0, sc)) {
        goto spec_exception;
    }

    /* Sanity check writability of the store address.  */
    probe_write(env, a2, 1 << sc, mem_idx, ra);

    /*
     * Note that the compare-and-swap is atomic, and the store is atomic,
     * but the complete operation is not.  Therefore we do not need to
     * assert serial context in order to implement this.  That said,
     * restart early if we can't support either operation that is supposed
     * to be atomic.
     */
    if (parallel) {
        uint32_t max = 2;
#ifdef CONFIG_ATOMIC64
        max = 3;
#endif
        if ((HAVE_CMPXCHG128 ? 0 : fc + 2 > max) ||
            (HAVE_ATOMIC128 ? 0 : sc > max)) {
            cpu_loop_exit_atomic(env_cpu(env), ra);
        }
    }

    /* All loads happen before all stores.  For simplicity, load the entire
       store value area from the parameter list.  */
    svh = cpu_ldq_data_ra(env, pl + 16, ra);
    svl = cpu_ldq_data_ra(env, pl + 24, ra);

    switch (fc) {
    case 0:
        {
            uint32_t nv = cpu_ldl_data_ra(env, pl, ra);
            uint32_t cv = env->regs[r3];
            uint32_t ov;

            if (parallel) {
#ifdef CONFIG_USER_ONLY
                uint32_t *haddr = g2h(a1);
                ov = qatomic_cmpxchg__nocheck(haddr, cv, nv);
#else
                TCGMemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN, mem_idx);
                ov = helper_atomic_cmpxchgl_be_mmu(env, a1, cv, nv, oi, ra);
#endif
            } else {
                ov = cpu_ldl_data_ra(env, a1, ra);
                cpu_stl_data_ra(env, a1, (ov == cv ? nv : ov), ra);
            }
            cc = (ov != cv);
            env->regs[r3] = deposit64(env->regs[r3], 32, 32, ov);
        }
        break;

    case 1:
        {
            uint64_t nv = cpu_ldq_data_ra(env, pl, ra);
            uint64_t cv = env->regs[r3];
            uint64_t ov;

            if (parallel) {
#ifdef CONFIG_ATOMIC64
# ifdef CONFIG_USER_ONLY
                uint64_t *haddr = g2h(a1);
                ov = qatomic_cmpxchg__nocheck(haddr, cv, nv);
# else
                TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN, mem_idx);
                ov = helper_atomic_cmpxchgq_be_mmu(env, a1, cv, nv, oi, ra);
# endif
#else
                /* Note that we asserted !parallel above.  */
                g_assert_not_reached();
#endif
            } else {
                ov = cpu_ldq_data_ra(env, a1, ra);
                cpu_stq_data_ra(env, a1, (ov == cv ? nv : ov), ra);
            }
            cc = (ov != cv);
            env->regs[r3] = ov;
        }
        break;

    case 2:
        {
            uint64_t nvh = cpu_ldq_data_ra(env, pl, ra);
            uint64_t nvl = cpu_ldq_data_ra(env, pl + 8, ra);
            Int128 nv = int128_make128(nvl, nvh);
            Int128 cv = int128_make128(env->regs[r3 + 1], env->regs[r3]);
            Int128 ov;

            if (!parallel) {
                uint64_t oh = cpu_ldq_data_ra(env, a1 + 0, ra);
                uint64_t ol = cpu_ldq_data_ra(env, a1 + 8, ra);

                ov = int128_make128(ol, oh);
                cc = !int128_eq(ov, cv);
                if (cc) {
                    nv = ov;
                }

                cpu_stq_data_ra(env, a1 + 0, int128_gethi(nv), ra);
                cpu_stq_data_ra(env, a1 + 8, int128_getlo(nv), ra);
            } else if (HAVE_CMPXCHG128) {
                TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
                ov = helper_atomic_cmpxchgo_be_mmu(env, a1, cv, nv, oi, ra);
                cc = !int128_eq(ov, cv);
            } else {
                /* Note that we asserted !parallel above.  */
                g_assert_not_reached();
            }

            env->regs[r3 + 0] = int128_gethi(ov);
            env->regs[r3 + 1] = int128_getlo(ov);
        }
        break;

    default:
        g_assert_not_reached();
    }

    /* Store only if the comparison succeeded.  Note that above we use a pair
       of 64-bit big-endian loads, so for sc < 3 we must extract the value
       from the most-significant bits of svh.  */
    if (cc == 0) {
        switch (sc) {
        case 0:
            cpu_stb_data_ra(env, a2, svh >> 56, ra);
            break;
        case 1:
            cpu_stw_data_ra(env, a2, svh >> 48, ra);
            break;
        case 2:
            cpu_stl_data_ra(env, a2, svh >> 32, ra);
            break;
        case 3:
            cpu_stq_data_ra(env, a2, svh, ra);
            break;
        case 4:
            if (!parallel) {
                cpu_stq_data_ra(env, a2 + 0, svh, ra);
                cpu_stq_data_ra(env, a2 + 8, svl, ra);
            } else if (HAVE_ATOMIC128) {
                TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
                Int128 sv = int128_make128(svl, svh);
                helper_atomic_sto_be_mmu(env, a2, sv, oi, ra);
            } else {
                /* Note that we asserted !parallel above.  */
                g_assert_not_reached();
            }
            break;
        default:
            g_assert_not_reached();
        }
    }

    return cc;

 spec_exception:
    tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
}

uint32_t HELPER(csst)(CPUS390XState *env, uint32_t r3, uint64_t a1, uint64_t a2)
{
    return do_csst(env, r3, a1, a2, false);
}

uint32_t HELPER(csst_parallel)(CPUS390XState *env, uint32_t r3, uint64_t a1,
                               uint64_t a2)
{
    return do_csst(env, r3, a1, a2, true);
}

#if !defined(CONFIG_USER_ONLY)
void HELPER(lctlg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    uintptr_t ra = GETPC();
    bool PERchanged = false;
    uint64_t src = a2;
    uint32_t i;

    if (src & 0x7) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
    }

    for (i = r1;; i = (i + 1) % 16) {
        uint64_t val = cpu_ldq_data_ra(env, src, ra);
        if (env->cregs[i] != val && i >= 9 && i <= 11) {
            PERchanged = true;
        }
        env->cregs[i] = val;
        HELPER_LOG("load ctl %d from 0x%" PRIx64 " == 0x%" PRIx64 "\n",
                   i, src, val);
        src += sizeof(uint64_t);

        if (i == r3) {
            break;
        }
    }

    if (PERchanged && env->psw.mask & PSW_MASK_PER) {
        s390_cpu_recompute_watchpoints(env_cpu(env));
    }

    tlb_flush(env_cpu(env));
}

void HELPER(lctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    uintptr_t ra = GETPC();
    bool PERchanged = false;
    uint64_t src = a2;
    uint32_t i;

    if (src & 0x3) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
    }

    for (i = r1;; i = (i + 1) % 16) {
        uint32_t val = cpu_ldl_data_ra(env, src, ra);
        if ((uint32_t)env->cregs[i] != val && i >= 9 && i <= 11) {
            PERchanged = true;
        }
        env->cregs[i] = deposit64(env->cregs[i], 0, 32, val);
        HELPER_LOG("load ctl %d from 0x%" PRIx64 " == 0x%x\n", i, src, val);
        src += sizeof(uint32_t);

        if (i == r3) {
            break;
        }
    }

    if (PERchanged && env->psw.mask & PSW_MASK_PER) {
        s390_cpu_recompute_watchpoints(env_cpu(env));
    }

    tlb_flush(env_cpu(env));
}

void HELPER(stctg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    uintptr_t ra = GETPC();
    uint64_t dest = a2;
    uint32_t i;

    if (dest & 0x7) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
    }

    for (i = r1;; i = (i + 1) % 16) {
        cpu_stq_data_ra(env, dest, env->cregs[i], ra);
        dest += sizeof(uint64_t);

        if (i == r3) {
            break;
        }
    }
}

void HELPER(stctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    uintptr_t ra = GETPC();
    uint64_t dest = a2;
    uint32_t i;

    if (dest & 0x3) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
    }

    for (i = r1;; i = (i + 1) % 16) {
        cpu_stl_data_ra(env, dest, env->cregs[i], ra);
        dest += sizeof(uint32_t);

        if (i == r3) {
            break;
        }
    }
}

uint32_t HELPER(testblock)(CPUS390XState *env, uint64_t real_addr)
{
    uintptr_t ra = GETPC();
    int i;

    real_addr = wrap_address(env, real_addr) & TARGET_PAGE_MASK;

    for (i = 0; i < TARGET_PAGE_SIZE; i += 8) {
        cpu_stq_mmuidx_ra(env, real_addr + i, 0, MMU_REAL_IDX, ra);
    }

    return 0;
}

uint32_t HELPER(tprot)(CPUS390XState *env, uint64_t a1, uint64_t a2)
{
    S390CPU *cpu = env_archcpu(env);
    CPUState *cs = env_cpu(env);

    /*
     * TODO: we currently don't handle all access protection types
     * (including access-list and key-controlled) as well as AR mode.
     */
    if (!s390_cpu_virt_mem_check_write(cpu, a1, 0, 1)) {
        /* Fetching permitted; storing permitted */
        return 0;
    }

    if (env->int_pgm_code == PGM_PROTECTION) {
        /* retry if reading is possible */
        cs->exception_index = -1;
        if (!s390_cpu_virt_mem_check_read(cpu, a1, 0, 1)) {
            /* Fetching permitted; storing not permitted */
            return 1;
        }
    }

    switch (env->int_pgm_code) {
    case PGM_PROTECTION:
        /* Fetching not permitted; storing not permitted */
        cs->exception_index = -1;
        return 2;
    case PGM_ADDRESSING:
    case PGM_TRANS_SPEC:
        /* exceptions forwarded to the guest */
        s390_cpu_virt_mem_handle_exc(cpu, GETPC());
        return 0;
    }

    /* Translation not available */
    cs->exception_index = -1;
    return 3;
}

/* insert storage key extended */
uint64_t HELPER(iske)(CPUS390XState *env, uint64_t r2)
{
    MachineState *ms = MACHINE(qdev_get_machine());
    static S390SKeysState *ss;
    static S390SKeysClass *skeyclass;
    uint64_t addr = wrap_address(env, r2);
    uint8_t key;

    if (addr > ms->ram_size) {
        return 0;
    }

    if (unlikely(!ss)) {
        ss = s390_get_skeys_device();
        skeyclass = S390_SKEYS_GET_CLASS(ss);
    }

    if (skeyclass->get_skeys(ss, addr / TARGET_PAGE_SIZE, 1, &key)) {
        return 0;
    }
    return key;
}

/* set storage key extended */
void HELPER(sske)(CPUS390XState *env, uint64_t r1, uint64_t r2)
{
    MachineState *ms = MACHINE(qdev_get_machine());
    static S390SKeysState *ss;
    static S390SKeysClass *skeyclass;
    uint64_t addr = wrap_address(env, r2);
    uint8_t key;

    if (addr > ms->ram_size) {
        return;
    }

    if (unlikely(!ss)) {
        ss = s390_get_skeys_device();
        skeyclass = S390_SKEYS_GET_CLASS(ss);
    }

    key = (uint8_t) r1;
    skeyclass->set_skeys(ss, addr / TARGET_PAGE_SIZE, 1, &key);
    /*
     * As we can only flush by virtual address and not all the entries
     * that point to a physical address we have to flush the whole TLB.
     */
    tlb_flush_all_cpus_synced(env_cpu(env));
}
/* reset reference bit extended */
uint32_t HELPER(rrbe)(CPUS390XState *env, uint64_t r2)
{
    MachineState *ms = MACHINE(qdev_get_machine());
    static S390SKeysState *ss;
    static S390SKeysClass *skeyclass;
    uint8_t re, key;

    if (r2 > ms->ram_size) {
        return 0;
    }

    if (unlikely(!ss)) {
        ss = s390_get_skeys_device();
        skeyclass = S390_SKEYS_GET_CLASS(ss);
    }

    if (skeyclass->get_skeys(ss, r2 / TARGET_PAGE_SIZE, 1, &key)) {
        return 0;
    }

    re = key & (SK_R | SK_C);
    key &= ~SK_R;

    if (skeyclass->set_skeys(ss, r2 / TARGET_PAGE_SIZE, 1, &key)) {
        return 0;
    }
    /*
     * As we can only flush by virtual address and not all the entries
     * that point to a physical address we have to flush the whole TLB.
     */
    tlb_flush_all_cpus_synced(env_cpu(env));

    /*
     * cc
     *
     * 0  Reference bit zero; change bit zero
     * 1  Reference bit zero; change bit one
     * 2  Reference bit one; change bit zero
     * 3  Reference bit one; change bit one
     */
    return re >> 1;
}

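/*
 * Illustration of the key byte handled by ISKE/SSKE/RRBE (layout per the
 * architecture; the named bits match the SK_* constants used above):
 *
 *   bits 0-3: ACC - access-control key
 *   bit    4: F   - fetch-protection bit
 *   bit    5: R   - reference bit            (SK_R)
 *   bit    6: C   - change bit               (SK_C)
 *   bit    7: unused
 *
 * so "re >> 1" in RRBE places R in cc bit 1 and C in cc bit 0, matching
 * the cc table above.
 */
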
uint32_t HELPER(mvcs)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2)
{
    const uint8_t psw_as = (env->psw.mask & PSW_MASK_ASC) >> PSW_SHIFT_ASC;
    S390Access srca, desta;
    uintptr_t ra = GETPC();
    int cc = 0;

    HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
               __func__, l, a1, a2);

    if (!(env->psw.mask & PSW_MASK_DAT) || !(env->cregs[0] & CR0_SECONDARY) ||
        psw_as == AS_HOME || psw_as == AS_ACCREG) {
        s390_program_interrupt(env, PGM_SPECIAL_OP, ra);
    }

    l = wrap_length32(env, l);
    if (l > 256) {
        /* max 256 */
        l = 256;
        cc = 3;
    } else if (!l) {
        return cc;
    }

    /* TODO: Access key handling */
    srca = access_prepare(env, a2, l, MMU_DATA_LOAD, MMU_PRIMARY_IDX, ra);
    desta = access_prepare(env, a1, l, MMU_DATA_STORE, MMU_SECONDARY_IDX, ra);
    access_memmove(env, &desta, &srca, ra);
    return cc;
}

uint32_t HELPER(mvcp)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2)
{
    const uint8_t psw_as = (env->psw.mask & PSW_MASK_ASC) >> PSW_SHIFT_ASC;
    S390Access srca, desta;
    uintptr_t ra = GETPC();
    int cc = 0;

    HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
               __func__, l, a1, a2);

    if (!(env->psw.mask & PSW_MASK_DAT) || !(env->cregs[0] & CR0_SECONDARY) ||
        psw_as == AS_HOME || psw_as == AS_ACCREG) {
        s390_program_interrupt(env, PGM_SPECIAL_OP, ra);
    }

    l = wrap_length32(env, l);
    if (l > 256) {
        /* max 256 */
        l = 256;
        cc = 3;
    } else if (!l) {
        return cc;
    }

    /* TODO: Access key handling */
    srca = access_prepare(env, a2, l, MMU_DATA_LOAD, MMU_SECONDARY_IDX, ra);
    desta = access_prepare(env, a1, l, MMU_DATA_STORE, MMU_PRIMARY_IDX, ra);
    access_memmove(env, &desta, &srca, ra);
    return cc;
}

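/*
 * Illustrative use of the pair above: with DAT on and the CR0
 * secondary-space-control bit set, a supervisor can copy to and from the
 * secondary address space, at most 256 bytes per execution:
 *
 *     MVCP  0(R4,R2),0(R3),R5    # secondary -> primary, key in R5
 *     MVCS  0(R4,R2),0(R3),R5    # primary -> secondary
 *
 * A length in excess of 256 caps the move at 256 bytes and sets cc 3, so
 * callers loop until everything is moved.  (Key handling is still a TODO
 * here, as noted above.)
 */
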
void HELPER(idte)(CPUS390XState *env, uint64_t r1, uint64_t r2, uint32_t m4)
{
    CPUState *cs = env_cpu(env);
    const uintptr_t ra = GETPC();
    uint64_t table, entry, raddr;
    uint16_t entries, i, index = 0;

    if (r2 & 0xff000) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
    }

    if (!(r2 & 0x800)) {
        /* invalidation-and-clearing operation */
        table = r1 & ASCE_ORIGIN;
        entries = (r2 & 0x7ff) + 1;

        switch (r1 & ASCE_TYPE_MASK) {
        case ASCE_TYPE_REGION1:
            index = (r2 >> 53) & 0x7ff;
            break;
        case ASCE_TYPE_REGION2:
            index = (r2 >> 42) & 0x7ff;
            break;
        case ASCE_TYPE_REGION3:
            index = (r2 >> 31) & 0x7ff;
            break;
        case ASCE_TYPE_SEGMENT:
            index = (r2 >> 20) & 0x7ff;
            break;
        }
        for (i = 0; i < entries; i++) {
            /* addresses are not wrapped in 24/31-bit mode but the table index is */
            raddr = table + ((index + i) & 0x7ff) * sizeof(entry);
            entry = cpu_ldq_mmuidx_ra(env, raddr, MMU_REAL_IDX, ra);
            if (!(entry & REGION_ENTRY_I)) {
                /* we are allowed to not store if already invalid */
                entry |= REGION_ENTRY_I;
                cpu_stq_mmuidx_ra(env, raddr, entry, MMU_REAL_IDX, ra);
            }
        }
    }

    /* We simply flush the complete TLB, therefore we can ignore r3. */
    if (m4 & 1) {
        tlb_flush(cs);
    } else {
        tlb_flush_all_cpus_synced(cs);
    }
}

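/*
 * Example of the index selection above (illustrative): for a
 * segment-table ASCE in r1, the segment index is bits 33-43 of the
 * virtual address in r2, i.e. (r2 >> 20) & 0x7ff.  With r2 bit 52
 * (0x800) clear and an additional-entries count of 3 in r2 bits 53-63,
 * four consecutive segment-table entries get their I bit set.
 */
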
/* invalidate pte */
void HELPER(ipte)(CPUS390XState *env, uint64_t pto, uint64_t vaddr,
                  uint32_t m4)
{
    CPUState *cs = env_cpu(env);
    const uintptr_t ra = GETPC();
    uint64_t page = vaddr & TARGET_PAGE_MASK;
    uint64_t pte_addr, pte;

    /* Compute the page table entry address */
    pte_addr = (pto & SEGMENT_ENTRY_ORIGIN);
    pte_addr += VADDR_PAGE_TX(vaddr) * 8;

    /* Mark the page table entry as invalid */
    pte = cpu_ldq_mmuidx_ra(env, pte_addr, MMU_REAL_IDX, ra);
    pte |= PAGE_ENTRY_I;
    cpu_stq_mmuidx_ra(env, pte_addr, pte, MMU_REAL_IDX, ra);

    /* XXX we exploit the fact that Linux passes the exact virtual
       address here - it's not obliged to! */
    if (m4 & 1) {
        if (vaddr & ~VADDR_PAGE_TX_MASK) {
            tlb_flush_page(cs, page);
            /* XXX 31-bit hack */
            tlb_flush_page(cs, page ^ 0x80000000);
        } else {
            /* looks like we don't have a valid virtual address */
            tlb_flush(cs);
        }
    } else {
        if (vaddr & ~VADDR_PAGE_TX_MASK) {
            tlb_flush_page_all_cpus_synced(cs, page);
            /* XXX 31-bit hack */
            tlb_flush_page_all_cpus_synced(cs, page ^ 0x80000000);
        } else {
            /* looks like we don't have a valid virtual address */
            tlb_flush_all_cpus_synced(cs);
        }
    }
}

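/*
 * The entry address computed above, spelled out: the second operand
 * ("vaddr") supplies the page index PX in bits 44-51, so the entry being
 * invalidated is
 *
 *     pte_addr = (pto & SEGMENT_ENTRY_ORIGIN) + ((vaddr >> 12) & 0xff) * 8;
 *
 * which is what VADDR_PAGE_TX(vaddr) * 8 works out to for a 256-entry
 * page table.
 */
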
/* flush local tlb */
void HELPER(ptlb)(CPUS390XState *env)
{
    tlb_flush(env_cpu(env));
}

/* flush global tlb */
void HELPER(purge)(CPUS390XState *env)
{
    tlb_flush_all_cpus_synced(env_cpu(env));
}

/* load real address */
uint64_t HELPER(lra)(CPUS390XState *env, uint64_t addr)
{
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;
    uint64_t ret, tec;
    int flags, exc, cc;

    /* XXX incomplete - has more corner cases */
    if (!(env->psw.mask & PSW_MASK_64) && (addr >> 32)) {
        tcg_s390_program_interrupt(env, PGM_SPECIAL_OP, GETPC());
    }

    exc = mmu_translate(env, addr, 0, asc, &ret, &flags, &tec);
    if (exc) {
        cc = 3;
        ret = exc | 0x80000000;
    } else {
        cc = 0;
        ret |= addr & ~TARGET_PAGE_MASK;
    }

    env->cc_op = cc;
    return ret;
}

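/*
 * Result layout (from the code above): on success, cc 0 and the real
 * address with the page offset merged back in; on a translation
 * exception, cc 3 and the exception code in the low bits with bit
 * 0x80000000 set.  This is only a subset of the architected LRA
 * behaviour, per the XXX above.
 */
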
#endif

/* load pair from quadword */
uint64_t HELPER(lpq)(CPUS390XState *env, uint64_t addr)
{
    uintptr_t ra = GETPC();
    uint64_t hi, lo;

    check_alignment(env, addr, 16, ra);
    hi = cpu_ldq_data_ra(env, addr + 0, ra);
    lo = cpu_ldq_data_ra(env, addr + 8, ra);

    env->retxl = lo;
    return hi;
}

uint64_t HELPER(lpq_parallel)(CPUS390XState *env, uint64_t addr)
{
    uintptr_t ra = GETPC();
    uint64_t hi, lo;
    int mem_idx;
    TCGMemOpIdx oi;
    Int128 v;

    assert(HAVE_ATOMIC128);

    mem_idx = cpu_mmu_index(env, false);
    oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
    v = helper_atomic_ldo_be_mmu(env, addr, oi, ra);
    hi = int128_gethi(v);
    lo = int128_getlo(v);

    env->retxl = lo;
    return hi;
}

/* store pair to quadword */
void HELPER(stpq)(CPUS390XState *env, uint64_t addr,
                  uint64_t low, uint64_t high)
{
    uintptr_t ra = GETPC();

    check_alignment(env, addr, 16, ra);
    cpu_stq_data_ra(env, addr + 0, high, ra);
    cpu_stq_data_ra(env, addr + 8, low, ra);
}

void HELPER(stpq_parallel)(CPUS390XState *env, uint64_t addr,
                           uint64_t low, uint64_t high)
{
    uintptr_t ra = GETPC();
    int mem_idx;
    TCGMemOpIdx oi;
    Int128 v;

    assert(HAVE_ATOMIC128);

    mem_idx = cpu_mmu_index(env, false);
    oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
    v = int128_make128(low, high);
    helper_atomic_sto_be_mmu(env, addr, v, oi, ra);
}

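/*
 * Concurrency note: the _parallel variants are used when other CPUs may
 * race and a host 128-bit atomic path is available (HAVE_ATOMIC128); the
 * plain variants above perform two 8-byte accesses and rely on the
 * translator only selecting them when that is safe (e.g. under exclusive
 * execution).  Both enforce the architectural 16-byte alignment, either
 * via check_alignment() or MO_ALIGN_16.
 */
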
/* Execute instruction.  This instruction executes an insn modified with
   the contents of r1.  It does not change the executed instruction in memory;
   it does not change the program counter.

   Perform this by recording the modified instruction in env->ex_value.
   This will be noticed by cpu_get_tb_cpu_state and thus tb translation.  */
void HELPER(ex)(CPUS390XState *env, uint32_t ilen, uint64_t r1, uint64_t addr)
{
    uint64_t insn = cpu_lduw_code(env, addr);
    uint8_t opc = insn >> 8;

    /* Or in the contents of R1[56:63]. */
    insn |= r1 & 0xff;

    /* Load the rest of the instruction. */
    insn <<= 48;
    switch (get_ilen(opc)) {
    case 2:
        break;
    case 4:
        insn |= (uint64_t)cpu_lduw_code(env, addr + 2) << 32;
        break;
    case 6:
        insn |= (uint64_t)(uint32_t)cpu_ldl_code(env, addr + 2) << 16;
        break;
    default:
        g_assert_not_reached();
    }

    /* The most common cases can be sped up by avoiding a new TB. */
    if ((opc & 0xf0) == 0xd0) {
        typedef uint32_t (*dx_helper)(CPUS390XState *, uint32_t, uint64_t,
                                      uint64_t, uintptr_t);
        static const dx_helper dx[16] = {
            [0x0] = do_helper_trt_bkwd,
            [0x2] = do_helper_mvc,
            [0x4] = do_helper_nc,
            [0x5] = do_helper_clc,
            [0x6] = do_helper_oc,
            [0x7] = do_helper_xc,
            [0xc] = do_helper_tr,
            [0xd] = do_helper_trt_fwd,
        };
        dx_helper helper = dx[opc & 0xf];

        if (helper) {
            uint32_t l = extract64(insn, 48, 8);
            uint32_t b1 = extract64(insn, 44, 4);
            uint32_t d1 = extract64(insn, 32, 12);
            uint32_t b2 = extract64(insn, 28, 4);
            uint32_t d2 = extract64(insn, 16, 12);
            uint64_t a1 = wrap_address(env, (b1 ? env->regs[b1] : 0) + d1);
            uint64_t a2 = wrap_address(env, (b2 ? env->regs[b2] : 0) + d2);

            env->cc_op = helper(env, l, a1, a2, 0);
            env->psw.addr += ilen;
            return;
        }
    } else if (opc == 0x0a) {
        env->int_svc_code = extract64(insn, 48, 8);
        env->int_svc_ilen = ilen;
        helper_exception(env, EXCP_SVC);
        g_assert_not_reached();
    }

    /* Record the insn we want to execute as well as the ilen to use
       during the execution of the target insn.  This will also ensure
       that ex_value is non-zero, which flags that we are in a state
       that requires such execution.  */
    env->ex_value = insn | ilen;
}

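/*
 * Classic EXECUTE idiom, for illustration: a variable-length MVC whose
 * length byte comes from a register at run time:
 *
 *     LA    1,0(R_LEN)        # length - 1 into bits 56-63 of r1
 *     EX    1,COPY            # execute COPY with its length OR'd in
 *     ...
 *     COPY  MVC   0(1,R8),0(R9)
 *
 * Since MVC has opcode 0xd2, this takes the fast path above
 * (dx[0x2] == do_helper_mvc) and never leaves the current TB.
 */
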
uint32_t HELPER(mvcos)(CPUS390XState *env, uint64_t dest, uint64_t src,
                       uint64_t len)
{
    const uint8_t psw_key = (env->psw.mask & PSW_MASK_KEY) >> PSW_SHIFT_KEY;
    const uint8_t psw_as = (env->psw.mask & PSW_MASK_ASC) >> PSW_SHIFT_ASC;
    const uint64_t r0 = env->regs[0];
    const uintptr_t ra = GETPC();
    uint8_t dest_key, dest_as, dest_k, dest_a;
    uint8_t src_key, src_as, src_k, src_a;
    uint64_t val;
    int cc = 0;

    HELPER_LOG("%s dest %" PRIx64 ", src %" PRIx64 ", len %" PRIx64 "\n",
               __func__, dest, src, len);

    if (!(env->psw.mask & PSW_MASK_DAT)) {
        tcg_s390_program_interrupt(env, PGM_SPECIAL_OP, ra);
    }

    /* OAC (operand access control) for the first operand -> dest */
    val = (r0 & 0xffff0000ULL) >> 16;
    dest_key = (val >> 12) & 0xf;
    dest_as = (val >> 6) & 0x3;
    dest_k = (val >> 1) & 0x1;
    dest_a = val & 0x1;

    /* OAC (operand access control) for the second operand -> src */
    val = (r0 & 0x0000ffffULL);
    src_key = (val >> 12) & 0xf;
    src_as = (val >> 6) & 0x3;
    src_k = (val >> 1) & 0x1;
    src_a = val & 0x1;

    if (!dest_k) {
        dest_key = psw_key;
    }
    if (!src_k) {
        src_key = psw_key;
    }
    if (!dest_a) {
        dest_as = psw_as;
    }
    if (!src_a) {
        src_as = psw_as;
    }

    if (dest_a && dest_as == AS_HOME && (env->psw.mask & PSW_MASK_PSTATE)) {
        tcg_s390_program_interrupt(env, PGM_SPECIAL_OP, ra);
    }
    if (!(env->cregs[0] & CR0_SECONDARY) &&
        (dest_as == AS_SECONDARY || src_as == AS_SECONDARY)) {
        tcg_s390_program_interrupt(env, PGM_SPECIAL_OP, ra);
    }
    if (!psw_key_valid(env, dest_key) || !psw_key_valid(env, src_key)) {
        tcg_s390_program_interrupt(env, PGM_PRIVILEGED, ra);
    }

    len = wrap_length32(env, len);
    if (len > 4096) {
        cc = 3;
        len = 4096;
    }

    /* FIXME: AR-mode and proper problem state mode (using PSW keys) missing */
    if (src_as == AS_ACCREG || dest_as == AS_ACCREG ||
        (env->psw.mask & PSW_MASK_PSTATE)) {
        qemu_log_mask(LOG_UNIMP, "%s: AR-mode and PSTATE support missing\n",
                      __func__);
        tcg_s390_program_interrupt(env, PGM_ADDRESSING, ra);
    }

    /* FIXME: Access using correct keys and AR-mode */
    if (len) {
        S390Access srca = access_prepare(env, src, len, MMU_DATA_LOAD,
                                         mmu_idx_from_as(src_as), ra);
        S390Access desta = access_prepare(env, dest, len, MMU_DATA_STORE,
                                          mmu_idx_from_as(dest_as), ra);

        access_memmove(env, &desta, &srca, ra);
    }

    return cc;
}

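/*
 * Layout of the operand-access controls decoded above (one 16-bit OAC
 * per operand; dest in r0 bits 32-47, src in r0 bits 48-63):
 *
 *   bits 0-3: access key
 *   bits 8-9: address-space control (AS_*)
 *   bit   14: K - access key valid
 *   bit   15: A - address-space control valid
 *
 * With K or A zero, the PSW key or PSW ASC is used instead, as
 * implemented above.
 */
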
/* Decode a Unicode character.  A return value < 0 indicates success, storing
   the UTF-32 result into OCHAR and the input length into OLEN.  A return
   value >= 0 indicates failure, and the CC value to be returned.  */
typedef int (*decode_unicode_fn)(CPUS390XState *env, uint64_t addr,
                                 uint64_t ilen, bool enh_check, uintptr_t ra,
                                 uint32_t *ochar, uint32_t *olen);

/* Encode a Unicode character.  A return value < 0 indicates success, storing
   the bytes into ADDR and the output length into OLEN.  A return value >= 0
   indicates failure, and the CC value to be returned.  */
typedef int (*encode_unicode_fn)(CPUS390XState *env, uint64_t addr,
                                 uint64_t ilen, uintptr_t ra, uint32_t c,
                                 uint32_t *olen);

static int decode_utf8(CPUS390XState *env, uint64_t addr, uint64_t ilen,
                       bool enh_check, uintptr_t ra,
                       uint32_t *ochar, uint32_t *olen)
{
    uint8_t s0, s1, s2, s3;
    uint32_t c, l;

    if (ilen < 1) {
        return 0;
    }
    s0 = cpu_ldub_data_ra(env, addr, ra);
    if (s0 <= 0x7f) {
        /* one byte character */
        l = 1;
        c = s0;
    } else if (s0 <= (enh_check ? 0xc1 : 0xbf)) {
        /* invalid character */
        return 2;
    } else if (s0 <= 0xdf) {
        /* two byte character */
        l = 2;
        if (ilen < 2) {
            return 0;
        }
        s1 = cpu_ldub_data_ra(env, addr + 1, ra);
        c = s0 & 0x1f;
        c = (c << 6) | (s1 & 0x3f);
        if (enh_check && (s1 & 0xc0) != 0x80) {
            return 2;
        }
    } else if (s0 <= 0xef) {
        /* three byte character */
        l = 3;
        if (ilen < 3) {
            return 0;
        }
        s1 = cpu_ldub_data_ra(env, addr + 1, ra);
        s2 = cpu_ldub_data_ra(env, addr + 2, ra);
        c = s0 & 0x0f;
        c = (c << 6) | (s1 & 0x3f);
        c = (c << 6) | (s2 & 0x3f);
        /* Fold the byte-by-byte range descriptions in the PoO into
           tests against the complete value.  It disallows encodings
           that could be smaller (the minimal three-byte value is
           U+0800), and the UTF-16 surrogates.  */
        if (enh_check
            && ((s1 & 0xc0) != 0x80
                || (s2 & 0xc0) != 0x80
                || c < 0x800
                || (c >= 0xd800 && c <= 0xdfff))) {
            return 2;
        }
    } else if (s0 <= (enh_check ? 0xf4 : 0xf7)) {
        /* four byte character */
        l = 4;
        if (ilen < 4) {
            return 0;
        }
        s1 = cpu_ldub_data_ra(env, addr + 1, ra);
        s2 = cpu_ldub_data_ra(env, addr + 2, ra);
        s3 = cpu_ldub_data_ra(env, addr + 3, ra);
        c = s0 & 0x07;
        c = (c << 6) | (s1 & 0x3f);
        c = (c << 6) | (s2 & 0x3f);
        c = (c << 6) | (s3 & 0x3f);
        /* See above. */
        if (enh_check
            && ((s1 & 0xc0) != 0x80
                || (s2 & 0xc0) != 0x80
                || (s3 & 0xc0) != 0x80
                || c < 0x010000
                || c > 0x10ffff)) {
            return 2;
        }
    } else {
        /* invalid character */
        return 2;
    }

    *ochar = c;
    *olen = l;
    return -1;
}

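/*
 * Worked example (illustrative): the three-byte sequence 0xe2 0x82 0xac
 * (EURO SIGN) decodes as
 *
 *     c = (0xe2 & 0x0f)              = 0x2
 *     c = (c << 6) | (0x82 & 0x3f)   = 0x82
 *     c = (c << 6) | (0xac & 0x3f)   = 0x20ac
 *
 * passing the enhanced check (continuation bytes, c >= 0x800, not a
 * surrogate) and returning -1 with *ochar = U+20AC, *olen = 3.
 */
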
static int decode_utf16(CPUS390XState *env, uint64_t addr, uint64_t ilen,
                        bool enh_check, uintptr_t ra,
                        uint32_t *ochar, uint32_t *olen)
{
    uint16_t s0, s1;
    uint32_t c, l;

    if (ilen < 2) {
        return 0;
    }
    s0 = cpu_lduw_data_ra(env, addr, ra);
    if ((s0 & 0xfc00) != 0xd800) {
        /* one word character */
        l = 2;
        c = s0;
    } else {
        /* two word character */
        l = 4;
        if (ilen < 4) {
            return 0;
        }
        s1 = cpu_lduw_data_ra(env, addr + 2, ra);
        c = extract32(s0, 6, 4) + 1;
        c = (c << 6) | (s0 & 0x3f);
        c = (c << 10) | (s1 & 0x3ff);
        if (enh_check && (s1 & 0xfc00) != 0xdc00) {
            /* invalid surrogate character */
            return 2;
        }
    }

    *ochar = c;
    *olen = l;
    return -1;
}

static int decode_utf32(CPUS390XState *env, uint64_t addr, uint64_t ilen,
                        bool enh_check, uintptr_t ra,
                        uint32_t *ochar, uint32_t *olen)
{
    uint32_t c;

    if (ilen < 4) {
        return 0;
    }
    c = cpu_ldl_data_ra(env, addr, ra);
    if ((c >= 0xd800 && c <= 0xdbff) || c > 0x10ffff) {
        /* invalid unicode character */
        return 2;
    }

    *ochar = c;
    *olen = 4;
    return -1;
}

static int encode_utf8(CPUS390XState *env, uint64_t addr, uint64_t ilen,
                       uintptr_t ra, uint32_t c, uint32_t *olen)
{
    uint8_t d[4];
    uint32_t l, i;

    if (c <= 0x7f) {
        /* one byte character */
        l = 1;
        d[0] = c;
    } else if (c <= 0x7ff) {
        /* two byte character */
        l = 2;
        d[1] = 0x80 | extract32(c, 0, 6);
        d[0] = 0xc0 | extract32(c, 6, 5);
    } else if (c <= 0xffff) {
        /* three byte character */
        l = 3;
        d[2] = 0x80 | extract32(c, 0, 6);
        d[1] = 0x80 | extract32(c, 6, 6);
        d[0] = 0xe0 | extract32(c, 12, 4);
    } else {
        /* four byte character */
        l = 4;
        d[3] = 0x80 | extract32(c, 0, 6);
        d[2] = 0x80 | extract32(c, 6, 6);
        d[1] = 0x80 | extract32(c, 12, 6);
        d[0] = 0xf0 | extract32(c, 18, 3);
    }

    if (ilen < l) {
        return 1;
    }
    for (i = 0; i < l; ++i) {
        cpu_stb_data_ra(env, addr + i, d[i], ra);
    }

    *olen = l;
    return -1;
}

static int encode_utf16(CPUS390XState *env, uint64_t addr, uint64_t ilen,
                        uintptr_t ra, uint32_t c, uint32_t *olen)
{
    uint16_t d0, d1;

    if (c <= 0xffff) {
        /* one word character */
        if (ilen < 2) {
            return 1;
        }
        cpu_stw_data_ra(env, addr, c, ra);
        *olen = 2;
    } else {
        /* two word character */
        if (ilen < 4) {
            return 1;
        }
        d1 = 0xdc00 | extract32(c, 0, 10);
        d0 = 0xd800 | extract32(c, 10, 6);
        d0 = deposit32(d0, 6, 4, extract32(c, 16, 5) - 1);
        cpu_stw_data_ra(env, addr + 0, d0, ra);
        cpu_stw_data_ra(env, addr + 2, d1, ra);
        *olen = 4;
    }

    return -1;
}

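/*
 * Worked example (illustrative): U+1F600 encodes as the surrogate pair
 * 0xd83d 0xde00:
 *
 *     d1 = 0xdc00 | extract32(0x1f600, 0, 10)   = 0xde00
 *     d0 = 0xd800 | extract32(0x1f600, 10, 6)   = 0xd83d
 *          (the deposit of extract32(0x1f600, 16, 5) - 1 == 0 is a no-op)
 *
 * i.e. the +1/-1 in decode_utf16/encode_utf16 is the standard UTF-16
 * 0x10000 offset folded into the high bits.
 */
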
static int encode_utf32(CPUS390XState *env, uint64_t addr, uint64_t ilen,
                        uintptr_t ra, uint32_t c, uint32_t *olen)
{
    if (ilen < 4) {
        return 1;
    }
    cpu_stl_data_ra(env, addr, c, ra);
    *olen = 4;
    return -1;
}

static inline uint32_t convert_unicode(CPUS390XState *env, uint32_t r1,
                                       uint32_t r2, uint32_t m3, uintptr_t ra,
                                       decode_unicode_fn decode,
                                       encode_unicode_fn encode)
{
    uint64_t dst = get_address(env, r1);
    uint64_t dlen = get_length(env, r1 + 1);
    uint64_t src = get_address(env, r2);
    uint64_t slen = get_length(env, r2 + 1);
    bool enh_check = m3 & 1;
    int cc, i;

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 256.  */
    for (i = 0; i < 256; ++i) {
        uint32_t c, ilen, olen;

        cc = decode(env, src, slen, enh_check, ra, &c, &ilen);
        if (unlikely(cc >= 0)) {
            break;
        }
        cc = encode(env, dst, dlen, ra, c, &olen);
        if (unlikely(cc >= 0)) {
            break;
        }

        src += ilen;
        slen -= ilen;
        dst += olen;
        dlen -= olen;
        cc = 3;
    }

    set_address(env, r1, dst);
    set_length(env, r1 + 1, dlen);
    set_address(env, r2, src);
    set_length(env, r2 + 1, slen);

    return cc;
}

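/*
 * cc 3 above means "CPU-determined number of characters processed"; the
 * guest is expected to retry.  An illustrative guest loop:
 *
 *     0:  CU12  R2,R4          # convert UTF-8 (R4 pair) to UTF-16 (R2 pair)
 *         BRC   1,0b           # cc 3 -> not done yet, continue
 *
 * (mask 1 selects cc 3).  The even/odd register pairs carry the address
 * and remaining length, updated by set_address()/set_length() above.
 */
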
uint32_t HELPER(cu12)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3)
{
    return convert_unicode(env, r1, r2, m3, GETPC(),
                           decode_utf8, encode_utf16);
}

uint32_t HELPER(cu14)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3)
{
    return convert_unicode(env, r1, r2, m3, GETPC(),
                           decode_utf8, encode_utf32);
}

uint32_t HELPER(cu21)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3)
{
    return convert_unicode(env, r1, r2, m3, GETPC(),
                           decode_utf16, encode_utf8);
}

uint32_t HELPER(cu24)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3)
{
    return convert_unicode(env, r1, r2, m3, GETPC(),
                           decode_utf16, encode_utf32);
}

uint32_t HELPER(cu41)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3)
{
    return convert_unicode(env, r1, r2, m3, GETPC(),
                           decode_utf32, encode_utf8);
}

uint32_t HELPER(cu42)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3)
{
    return convert_unicode(env, r1, r2, m3, GETPC(),
                           decode_utf32, encode_utf16);
}

void probe_write_access(CPUS390XState *env, uint64_t addr, uint64_t len,
                        uintptr_t ra)
{
    /* test the actual access, not just any access to the page due to LAP */
    while (len) {
        const uint64_t pagelen = -(addr | TARGET_PAGE_MASK);
        const uint64_t curlen = MIN(pagelen, len);

        probe_write(env, addr, curlen, cpu_mmu_index(env, false), ra);
        addr = wrap_address(env, addr + curlen);
        len -= curlen;
    }
}

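/*
 * Arithmetic note: -(addr | TARGET_PAGE_MASK) is the number of bytes
 * left on addr's page.  E.g. with 4K pages and addr = 0x1ff8,
 * addr | TARGET_PAGE_MASK = ...fffff8, so pagelen = 8 and the probe is
 * split at the page boundary.
 */
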
void HELPER(probe_write_access)(CPUS390XState *env, uint64_t addr, uint64_t len)
{
    probe_write_access(env, addr, len, GETPC());
}