/*
 *  S/390 memory access helper routines
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2009 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "internal.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "qemu/int128.h"
#include "qemu/atomic128.h"

#if !defined(CONFIG_USER_ONLY)
#include "hw/s390x/storage-keys.h"
#endif

/*****************************************************************************/
/* Softmmu support */
#if !defined(CONFIG_USER_ONLY)
/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(CPUState *cs, target_ulong addr, int size,
              MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
    int ret = s390_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
    if (unlikely(ret != 0)) {
        cpu_loop_exit_restore(cs, retaddr);
    }
}

#endif
/* #define DEBUG_HELPER */
#ifdef DEBUG_HELPER
#define HELPER_LOG(x...) qemu_log(x)
#else
#define HELPER_LOG(x...)
#endif
static inline bool psw_key_valid(CPUS390XState *env, uint8_t psw_key)
{
    uint16_t pkm = env->cregs[3] >> 16;

    if (env->psw.mask & PSW_MASK_PSTATE) {
        /* PSW key has range 0..15, it is valid if the bit is 1 in the PKM */
        return pkm & (0x80 >> psw_key);
    }
    return true;
}
/* Reduce the length so that addr + len doesn't cross a page boundary.  */
static inline uint32_t adj_len_to_page(uint32_t len, uint64_t addr)
{
#ifndef CONFIG_USER_ONLY
    if ((addr & ~TARGET_PAGE_MASK) + len - 1 >= TARGET_PAGE_SIZE) {
        return -(addr | TARGET_PAGE_MASK);
    }
#endif
    return len;
}
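
/*
 * Illustrative note (editorial, not in the original source): since
 * TARGET_PAGE_MASK is ~(TARGET_PAGE_SIZE - 1), -(addr | TARGET_PAGE_MASK)
 * is exactly the number of bytes from addr to the end of its page.  With
 * 4 KiB pages, addr = 0x12f8 gives addr | TARGET_PAGE_MASK =
 * 0xfffffffffffff2f8, whose two's-complement negation truncates to
 * 0xd08 = 0x1000 - 0x2f8; a cross-page request such as len = 0x2000 is
 * therefore clipped to 0xd08 bytes.
 */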
/* Trigger a SPECIFICATION exception if an address or a length is not
   naturally aligned.  */
static inline void check_alignment(CPUS390XState *env, uint64_t v,
                                   int wordsize, uintptr_t ra)
{
    if (v % wordsize) {
        s390_program_interrupt(env, PGM_SPECIFICATION, 6, ra);
    }
}
/* Load a value from memory according to its size.  */
static inline uint64_t cpu_ldusize_data_ra(CPUS390XState *env, uint64_t addr,
                                           int wordsize, uintptr_t ra)
{
    switch (wordsize) {
    case 1:
        return cpu_ldub_data_ra(env, addr, ra);
    case 2:
        return cpu_lduw_data_ra(env, addr, ra);
    default:
        abort();
    }
}

/* Store a value to memory according to its size.  */
static inline void cpu_stsize_data_ra(CPUS390XState *env, uint64_t addr,
                                      uint64_t value, int wordsize,
                                      uintptr_t ra)
{
    switch (wordsize) {
    case 1:
        cpu_stb_data_ra(env, addr, value, ra);
        break;
    case 2:
        cpu_stw_data_ra(env, addr, value, ra);
        break;
    default:
        abort();
    }
}
static void fast_memset(CPUS390XState *env, uint64_t dest, uint8_t byte,
                        uint32_t l, uintptr_t ra)
{
    int mmu_idx = cpu_mmu_index(env, false);

    while (l > 0) {
        void *p = tlb_vaddr_to_host(env, dest, MMU_DATA_STORE, mmu_idx);
        if (p) {
            /* Access to the whole page in write mode granted.  */
            uint32_t l_adj = adj_len_to_page(l, dest);
            memset(p, byte, l_adj);
            dest += l_adj;
            l -= l_adj;
        } else {
            /* We failed to get access to the whole page. The next write
               access will likely fill the QEMU TLB for the next iteration.  */
            cpu_stb_data_ra(env, dest, byte, ra);
            dest++;
            l--;
        }
    }
}
#ifndef CONFIG_USER_ONLY
static void fast_memmove_idx(CPUS390XState *env, uint64_t dest, uint64_t src,
                             uint32_t len, int dest_idx, int src_idx,
                             uintptr_t ra)
{
    TCGMemOpIdx oi_dest = make_memop_idx(MO_UB, dest_idx);
    TCGMemOpIdx oi_src = make_memop_idx(MO_UB, src_idx);
    uint32_t len_adj;
    void *src_p;
    void *dest_p;
    uint8_t x;

    while (len > 0) {
        src = wrap_address(env, src);
        dest = wrap_address(env, dest);
        src_p = tlb_vaddr_to_host(env, src, MMU_DATA_LOAD, src_idx);
        dest_p = tlb_vaddr_to_host(env, dest, MMU_DATA_STORE, dest_idx);

        if (src_p && dest_p) {
            /* Access to both whole pages granted.  */
            len_adj = adj_len_to_page(adj_len_to_page(len, src), dest);
            memmove(dest_p, src_p, len_adj);
        } else {
            /* We failed to get access to one or both whole pages. The next
               read or write access will likely fill the QEMU TLB for the
               next iteration.  */
            len_adj = 1;
            x = helper_ret_ldub_mmu(env, src, oi_src, ra);
            helper_ret_stb_mmu(env, dest, x, oi_dest, ra);
        }
        src += len_adj;
        dest += len_adj;
        len -= len_adj;
    }
}

static int mmu_idx_from_as(uint8_t as)
{
    switch (as) {
    case AS_PRIMARY:
        return MMU_PRIMARY_IDX;
    case AS_SECONDARY:
        return MMU_SECONDARY_IDX;
    case AS_HOME:
        return MMU_HOME_IDX;
    default:
        /* FIXME AS_ACCREG */
        g_assert_not_reached();
    }
}

static void fast_memmove_as(CPUS390XState *env, uint64_t dest, uint64_t src,
                            uint32_t len, uint8_t dest_as, uint8_t src_as,
                            uintptr_t ra)
{
    int src_idx = mmu_idx_from_as(src_as);
    int dest_idx = mmu_idx_from_as(dest_as);

    fast_memmove_idx(env, dest, src, len, dest_idx, src_idx, ra);
}
#endif
static void fast_memmove(CPUS390XState *env, uint64_t dest, uint64_t src,
                         uint32_t l, uintptr_t ra)
{
    int mmu_idx = cpu_mmu_index(env, false);

    while (l > 0) {
        void *src_p = tlb_vaddr_to_host(env, src, MMU_DATA_LOAD, mmu_idx);
        void *dest_p = tlb_vaddr_to_host(env, dest, MMU_DATA_STORE, mmu_idx);
        if (src_p && dest_p) {
            /* Access to both whole pages granted.  */
            uint32_t l_adj = adj_len_to_page(l, src);
            l_adj = adj_len_to_page(l_adj, dest);
            memmove(dest_p, src_p, l_adj);
            src += l_adj;
            dest += l_adj;
            l -= l_adj;
        } else {
            /* We failed to get access to one or both whole pages. The next
               read or write access will likely fill the QEMU TLB for the
               next iteration.  */
            cpu_stb_data_ra(env, dest, cpu_ldub_data_ra(env, src, ra), ra);
            src++;
            dest++;
            l--;
        }
    }
}
/* and on array */
static uint32_t do_helper_nc(CPUS390XState *env, uint32_t l, uint64_t dest,
                             uint64_t src, uintptr_t ra)
{
    uint32_t i;
    uint8_t c = 0;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __func__, l, dest, src);

    for (i = 0; i <= l; i++) {
        uint8_t x = cpu_ldub_data_ra(env, src + i, ra);
        x &= cpu_ldub_data_ra(env, dest + i, ra);
        c |= x;
        cpu_stb_data_ra(env, dest + i, x, ra);
    }
    return c != 0;
}

uint32_t HELPER(nc)(CPUS390XState *env, uint32_t l, uint64_t dest,
                    uint64_t src)
{
    return do_helper_nc(env, l, dest, src, GETPC());
}

/* xor on array */
static uint32_t do_helper_xc(CPUS390XState *env, uint32_t l, uint64_t dest,
                             uint64_t src, uintptr_t ra)
{
    uint32_t i;
    uint8_t c = 0;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __func__, l, dest, src);

    /* xor with itself is the same as memset(0) */
    if (src == dest) {
        fast_memset(env, dest, 0, l + 1, ra);
        return 0;
    }

    for (i = 0; i <= l; i++) {
        uint8_t x = cpu_ldub_data_ra(env, src + i, ra);
        x ^= cpu_ldub_data_ra(env, dest + i, ra);
        c |= x;
        cpu_stb_data_ra(env, dest + i, x, ra);
    }
    return c != 0;
}

uint32_t HELPER(xc)(CPUS390XState *env, uint32_t l, uint64_t dest,
                    uint64_t src)
{
    return do_helper_xc(env, l, dest, src, GETPC());
}

/* or on array */
static uint32_t do_helper_oc(CPUS390XState *env, uint32_t l, uint64_t dest,
                             uint64_t src, uintptr_t ra)
{
    uint32_t i;
    uint8_t c = 0;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __func__, l, dest, src);

    for (i = 0; i <= l; i++) {
        uint8_t x = cpu_ldub_data_ra(env, src + i, ra);
        x |= cpu_ldub_data_ra(env, dest + i, ra);
        c |= x;
        cpu_stb_data_ra(env, dest + i, x, ra);
    }
    return c != 0;
}

uint32_t HELPER(oc)(CPUS390XState *env, uint32_t l, uint64_t dest,
                    uint64_t src)
{
    return do_helper_oc(env, l, dest, src, GETPC());
}
/* memmove */
static uint32_t do_helper_mvc(CPUS390XState *env, uint32_t l, uint64_t dest,
                              uint64_t src, uintptr_t ra)
{
    uint32_t i;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __func__, l, dest, src);

    /* mvc and memmove do not behave the same when areas overlap! */
    /* mvc with source pointing to the byte after the destination is the
       same as memset with the first source byte */
    if (dest == src + 1) {
        fast_memset(env, dest, cpu_ldub_data_ra(env, src, ra), l + 1, ra);
    } else if (dest < src || src + l < dest) {
        fast_memmove(env, dest, src, l + 1, ra);
    } else {
        /* slow version with byte accesses which always work */
        for (i = 0; i <= l; i++) {
            uint8_t x = cpu_ldub_data_ra(env, src + i, ra);
            cpu_stb_data_ra(env, dest + i, x, ra);
        }
    }

    return env->cc_op;
}

void HELPER(mvc)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
{
    do_helper_mvc(env, l, dest, src, GETPC());
}
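
/*
 * Illustrative note (editorial, not in the original source): the
 * dest == src + 1 special case implements the classic MVC idiom for
 * propagating a byte through a buffer.  E.g. with l = 3 (four bytes
 * moved), src = 0x100 and dest = 0x101, each byte is conceptually read
 * after the previous store, so the byte at 0x100 is replicated over
 * 0x101..0x104 -- which is exactly fast_memset() with the first source
 * byte.
 */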
/* move inverse  */
void HELPER(mvcin)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
{
    uintptr_t ra = GETPC();
    int i;

    for (i = 0; i <= l; i++) {
        uint8_t v = cpu_ldub_data_ra(env, src - i, ra);
        cpu_stb_data_ra(env, dest + i, v, ra);
    }
}

/* move numerics  */
void HELPER(mvn)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
{
    uintptr_t ra = GETPC();
    int i;

    for (i = 0; i <= l; i++) {
        uint8_t v = cpu_ldub_data_ra(env, dest + i, ra) & 0xf0;
        v |= cpu_ldub_data_ra(env, src + i, ra) & 0x0f;
        cpu_stb_data_ra(env, dest + i, v, ra);
    }
}
/* move with offset  */
void HELPER(mvo)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
{
    uintptr_t ra = GETPC();
    int len_dest = l >> 4;
    int len_src = l & 0xf;
    uint8_t byte_dest, byte_src;
    int i;

    src += len_src;
    dest += len_dest;

    /* Handle rightmost byte */
    byte_src = cpu_ldub_data_ra(env, src, ra);
    byte_dest = cpu_ldub_data_ra(env, dest, ra);
    byte_dest = (byte_dest & 0x0f) | (byte_src << 4);
    cpu_stb_data_ra(env, dest, byte_dest, ra);

    /* Process remaining bytes from right to left */
    for (i = 1; i <= len_dest; i++) {
        byte_dest = byte_src >> 4;
        if (len_src - i >= 0) {
            byte_src = cpu_ldub_data_ra(env, src - i, ra);
        } else {
            byte_src = 0;
        }
        byte_dest |= byte_src << 4;
        cpu_stb_data_ra(env, dest - i, byte_dest, ra);
    }
}
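
/*
 * Worked example (editorial, not in the original source): for l = 0x10
 * (len_dest = 1, len_src = 0), a one-byte source 0x12 and a two-byte
 * destination 0x34 0x56, the rightmost byte becomes (0x56 & 0x0f) |
 * (0x12 << 4) = 0x26 and the byte to its left becomes 0x12 >> 4 = 0x01,
 * giving 0x01 0x26: the source digits shifted one nibble left over the
 * preserved rightmost destination nibble.
 */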
/* move zones  */
void HELPER(mvz)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
{
    uintptr_t ra = GETPC();
    int i;

    for (i = 0; i <= l; i++) {
        uint8_t b = cpu_ldub_data_ra(env, dest + i, ra) & 0x0f;
        b |= cpu_ldub_data_ra(env, src + i, ra) & 0xf0;
        cpu_stb_data_ra(env, dest + i, b, ra);
    }
}
/* compare unsigned byte arrays */
static uint32_t do_helper_clc(CPUS390XState *env, uint32_t l, uint64_t s1,
                              uint64_t s2, uintptr_t ra)
{
    uint32_t i;
    uint32_t cc = 0;

    HELPER_LOG("%s l %d s1 %" PRIx64 " s2 %" PRIx64 "\n",
               __func__, l, s1, s2);

    for (i = 0; i <= l; i++) {
        uint8_t x = cpu_ldub_data_ra(env, s1 + i, ra);
        uint8_t y = cpu_ldub_data_ra(env, s2 + i, ra);
        HELPER_LOG("%02x (%c)/%02x (%c) ", x, x, y, y);
        if (x < y) {
            cc = 1;
            break;
        } else if (x > y) {
            cc = 2;
            break;
        }
    }

    HELPER_LOG("\n");
    return cc;
}

uint32_t HELPER(clc)(CPUS390XState *env, uint32_t l, uint64_t s1, uint64_t s2)
{
    return do_helper_clc(env, l, s1, s2, GETPC());
}
/* compare logical under mask */
uint32_t HELPER(clm)(CPUS390XState *env, uint32_t r1, uint32_t mask,
                     uint64_t addr)
{
    uintptr_t ra = GETPC();
    uint32_t cc = 0;

    HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%" PRIx64 "\n", __func__, r1,
               mask, addr);

    while (mask) {
        if (mask & 8) {
            uint8_t d = cpu_ldub_data_ra(env, addr, ra);
            uint8_t r = extract32(r1, 24, 8);
            HELPER_LOG("mask 0x%x %02x/%02x (0x%" PRIx64 ") ", mask, r, d,
                       addr);
            if (r < d) {
                cc = 1;
                break;
            } else if (r > d) {
                cc = 2;
                break;
            }
            addr++;
        }
        mask = (mask << 1) & 0xf;
        r1 <<= 8;
    }

    HELPER_LOG("\n");
    return cc;
}
static inline uint64_t get_address(CPUS390XState *env, int reg)
{
    return wrap_address(env, env->regs[reg]);
}

static inline void set_address(CPUS390XState *env, int reg, uint64_t address)
{
    if (env->psw.mask & PSW_MASK_64) {
        /* 64-Bit mode */
        env->regs[reg] = address;
    } else {
        if (!(env->psw.mask & PSW_MASK_32)) {
            /* 24-Bit mode. According to the PoO it is implementation
               dependent if bits 32-39 remain unchanged or are set to
               zeros.  Choose the former so that the function can also be
               used for TRT.  */
            env->regs[reg] = deposit64(env->regs[reg], 0, 24, address);
        } else {
            /* 31-Bit mode. According to the PoO it is implementation
               dependent if bit 32 remains unchanged or is set to zero.
               Choose the latter so that the function can also be used for
               TRT.  */
            address &= 0x7fffffff;
            env->regs[reg] = deposit64(env->regs[reg], 0, 32, address);
        }
    }
}
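
/*
 * Illustrative note (editorial, not in the original source): in 24-bit
 * mode only the low 24 bits of the register (PoO bits 40-63) are
 * replaced.  E.g. with env->regs[reg] = 0xaabbccdd11223344 and
 * address = 0x654321, deposit64(..., 0, 24, address) yields
 * 0xaabbccdd11654321, while 31-bit mode replaces the whole low 32 bits
 * with address & 0x7fffffff.
 */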
static inline uint64_t wrap_length(CPUS390XState *env, uint64_t length)
{
    if (!(env->psw.mask & PSW_MASK_64)) {
        /* 24-Bit and 31-Bit mode */
        length &= 0x7fffffff;
    }
    return length;
}

static inline uint64_t get_length(CPUS390XState *env, int reg)
{
    return wrap_length(env, env->regs[reg]);
}

static inline void set_length(CPUS390XState *env, int reg, uint64_t length)
{
    if (env->psw.mask & PSW_MASK_64) {
        /* 64-Bit mode */
        env->regs[reg] = length;
    } else {
        /* 24-Bit and 31-Bit mode */
        env->regs[reg] = deposit64(env->regs[reg], 0, 32, length);
    }
}
/* search string (c is byte to search, r2 is string, r1 end of string) */
void HELPER(srst)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    uintptr_t ra = GETPC();
    uint64_t end, str;
    uint32_t len;
    uint8_t v, c = env->regs[0];

    /* Bits 32-55 must contain all 0.  */
    if (env->regs[0] & 0xffffff00u) {
        s390_program_interrupt(env, PGM_SPECIFICATION, 6, ra);
    }

    str = get_address(env, r2);
    end = get_address(env, r1);

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 8k.  */
    for (len = 0; len < 0x2000; ++len) {
        if (str + len == end) {
            /* Character not found.  R1 & R2 are unmodified.  */
            env->cc_op = 2;
            return;
        }
        v = cpu_ldub_data_ra(env, str + len, ra);
        if (v == c) {
            /* Character found.  Set R1 to the location; R2 is unmodified.  */
            env->cc_op = 1;
            set_address(env, r1, str + len);
            return;
        }
    }

    /* CPU-determined bytes processed.  Advance R2 to next byte to process.  */
    env->cc_op = 3;
    set_address(env, r2, str + len);
}
void HELPER(srstu)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    uintptr_t ra = GETPC();
    uint32_t len;
    uint16_t v, c = env->regs[0];
    uint64_t end, str, adj_end;

    /* Bits 32-47 of R0 must be zero.  */
    if (env->regs[0] & 0xffff0000u) {
        s390_program_interrupt(env, PGM_SPECIFICATION, 6, ra);
    }

    str = get_address(env, r2);
    end = get_address(env, r1);

    /* If the LSB of the two addresses differ, use one extra byte.  */
    adj_end = end + ((str ^ end) & 1);

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 8k.  */
    for (len = 0; len < 0x2000; len += 2) {
        if (str + len == adj_end) {
            /* End of input found.  */
            env->cc_op = 2;
            return;
        }
        v = cpu_lduw_data_ra(env, str + len, ra);
        if (v == c) {
            /* Character found.  Set R1 to the location; R2 is unmodified.  */
            env->cc_op = 1;
            set_address(env, r1, str + len);
            return;
        }
    }

    /* CPU-determined bytes processed.  Advance R2 to next byte to process.  */
    env->cc_op = 3;
    set_address(env, r2, str + len);
}
/* unsigned string compare (c is string terminator) */
uint64_t HELPER(clst)(CPUS390XState *env, uint64_t c, uint64_t s1, uint64_t s2)
{
    uintptr_t ra = GETPC();
    uint32_t len;

    c = c & 0xff;
    s1 = wrap_address(env, s1);
    s2 = wrap_address(env, s2);

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 8k.  */
    for (len = 0; len < 0x2000; ++len) {
        uint8_t v1 = cpu_ldub_data_ra(env, s1 + len, ra);
        uint8_t v2 = cpu_ldub_data_ra(env, s2 + len, ra);
        if (v1 == v2) {
            if (v1 == c) {
                /* Equal.  CC=0, and don't advance the registers.  */
                env->cc_op = 0;
                env->retxl = s2;
                return s1;
            }
        } else {
            /* Unequal.  CC={1,2}, and advance the registers.  Note that
               the terminator need not be zero, but the string that contains
               the terminator is by definition "low".  */
            env->cc_op = (v1 == c ? 1 : v2 == c ? 2 : v1 < v2 ? 1 : 2);
            env->retxl = s2 + len;
            return s1 + len;
        }
    }

    /* CPU-determined bytes equal; advance the registers.  */
    env->cc_op = 3;
    env->retxl = s2 + len;
    return s1 + len;
}
/* move page */
uint32_t HELPER(mvpg)(CPUS390XState *env, uint64_t r0, uint64_t r1, uint64_t r2)
{
    /* ??? missing r0 handling, which includes access keys, but more
       importantly optional suppression of the exception!  */
    fast_memmove(env, r1, r2, TARGET_PAGE_SIZE, GETPC());
    return 0; /* data moved */
}
/* string copy (c is string terminator) */
uint64_t HELPER(mvst)(CPUS390XState *env, uint64_t c, uint64_t d, uint64_t s)
{
    uintptr_t ra = GETPC();
    uint32_t len;

    c = c & 0xff;
    d = wrap_address(env, d);
    s = wrap_address(env, s);

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 8k.  */
    for (len = 0; len < 0x2000; ++len) {
        uint8_t v = cpu_ldub_data_ra(env, s + len, ra);
        cpu_stb_data_ra(env, d + len, v, ra);
        if (v == c) {
            /* Complete.  Set CC=1 and advance R1.  */
            env->cc_op = 1;
            env->retxl = s;
            return d + len;
        }
    }

    /* Incomplete.  Set CC=3 and signal to advance R1 and R2.  */
    env->cc_op = 3;
    env->retxl = s + len;
    return d + len;
}
/* load access registers r1 to r3 from memory at a2 */
void HELPER(lam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    uintptr_t ra = GETPC();
    int i;

    if (a2 & 0x3) {
        /* we either came here by lam or lamy, which have different lengths */
        s390_program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO, ra);
    }

    for (i = r1;; i = (i + 1) % 16) {
        env->aregs[i] = cpu_ldl_data_ra(env, a2, ra);
        a2 += 4;

        if (i == r3) {
            break;
        }
    }
}

/* store access registers r1 to r3 in memory at a2 */
void HELPER(stam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    uintptr_t ra = GETPC();
    int i;

    if (a2 & 0x3) {
        s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
    }

    for (i = r1;; i = (i + 1) % 16) {
        cpu_stl_data_ra(env, a2, env->aregs[i], ra);
        a2 += 4;

        if (i == r3) {
            break;
        }
    }
}
/* move long helper */
static inline uint32_t do_mvcl(CPUS390XState *env,
                               uint64_t *dest, uint64_t *destlen,
                               uint64_t *src, uint64_t *srclen,
                               uint16_t pad, int wordsize, uintptr_t ra)
{
    uint64_t len = MIN(*srclen, *destlen);
    uint32_t cc;

    if (*destlen == *srclen) {
        cc = 0;
    } else if (*destlen < *srclen) {
        cc = 1;
    } else {
        cc = 2;
    }

    /* Copy the src array */
    fast_memmove(env, *dest, *src, len, ra);
    *src += len;
    *srclen -= len;
    *dest += len;
    *destlen -= len;

    /* Pad the remaining area */
    if (wordsize == 1) {
        fast_memset(env, *dest, pad, *destlen, ra);
        *dest += *destlen;
        *destlen = 0;
    } else {
        /* If remaining length is odd, pad with odd byte first.  */
        if (*destlen & 1) {
            cpu_stb_data_ra(env, *dest, pad & 0xff, ra);
            *dest += 1;
            *destlen -= 1;
        }
        /* The remaining length is even, pad using words.  */
        for (; *destlen; *dest += 2, *destlen -= 2) {
            cpu_stw_data_ra(env, *dest, pad, ra);
        }
    }

    return cc;
}
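
/*
 * Illustrative note (editorial, not in the original source): with
 * destlen = 5 and srclen = 2 the helper returns cc = 2 (first operand
 * longer), copies the two available source bytes, and pads the remaining
 * three destination bytes with the pad character.  For the two-byte
 * (unicode) variant, an odd remainder is padded with the low byte of pad
 * first, so that the remaining length becomes even and can be finished
 * with word-sized stores.
 */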
/* move long */
uint32_t HELPER(mvcl)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    uintptr_t ra = GETPC();
    uint64_t destlen = env->regs[r1 + 1] & 0xffffff;
    uint64_t dest = get_address(env, r1);
    uint64_t srclen = env->regs[r2 + 1] & 0xffffff;
    uint64_t src = get_address(env, r2);
    uint8_t pad = env->regs[r2 + 1] >> 24;
    uint32_t cc;

    cc = do_mvcl(env, &dest, &destlen, &src, &srclen, pad, 1, ra);

    env->regs[r1 + 1] = deposit64(env->regs[r1 + 1], 0, 24, destlen);
    env->regs[r2 + 1] = deposit64(env->regs[r2 + 1], 0, 24, srclen);
    set_address(env, r1, dest);
    set_address(env, r2, src);

    return cc;
}

/* move long extended */
uint32_t HELPER(mvcle)(CPUS390XState *env, uint32_t r1, uint64_t a2,
                       uint32_t r3)
{
    uintptr_t ra = GETPC();
    uint64_t destlen = get_length(env, r1 + 1);
    uint64_t dest = get_address(env, r1);
    uint64_t srclen = get_length(env, r3 + 1);
    uint64_t src = get_address(env, r3);
    uint8_t pad = a2;
    uint32_t cc;

    cc = do_mvcl(env, &dest, &destlen, &src, &srclen, pad, 1, ra);

    set_length(env, r1 + 1, destlen);
    set_length(env, r3 + 1, srclen);
    set_address(env, r1, dest);
    set_address(env, r3, src);

    return cc;
}

/* move long unicode */
uint32_t HELPER(mvclu)(CPUS390XState *env, uint32_t r1, uint64_t a2,
                       uint32_t r3)
{
    uintptr_t ra = GETPC();
    uint64_t destlen = get_length(env, r1 + 1);
    uint64_t dest = get_address(env, r1);
    uint64_t srclen = get_length(env, r3 + 1);
    uint64_t src = get_address(env, r3);
    uint16_t pad = a2;
    uint32_t cc;

    cc = do_mvcl(env, &dest, &destlen, &src, &srclen, pad, 2, ra);

    set_length(env, r1 + 1, destlen);
    set_length(env, r3 + 1, srclen);
    set_address(env, r1, dest);
    set_address(env, r3, src);

    return cc;
}
/* compare logical long helper */
static inline uint32_t do_clcl(CPUS390XState *env,
                               uint64_t *src1, uint64_t *src1len,
                               uint64_t *src3, uint64_t *src3len,
                               uint16_t pad, uint64_t limit,
                               int wordsize, uintptr_t ra)
{
    uint64_t len = MAX(*src1len, *src3len);
    uint32_t cc = 0;

    check_alignment(env, *src1len | *src3len, wordsize, ra);

    if (!len) {
        return cc;
    }

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  */
    if (len > limit) {
        len = limit;
        cc = 3;
    }

    for (; len; len -= wordsize) {
        uint16_t v1 = pad;
        uint16_t v3 = pad;

        if (*src1len) {
            v1 = cpu_ldusize_data_ra(env, *src1, wordsize, ra);
        }
        if (*src3len) {
            v3 = cpu_ldusize_data_ra(env, *src3, wordsize, ra);
        }

        if (v1 != v3) {
            cc = (v1 < v3) ? 1 : 2;
            break;
        }

        if (*src1len) {
            *src1 += wordsize;
            *src1len -= wordsize;
        }
        if (*src3len) {
            *src3 += wordsize;
            *src3len -= wordsize;
        }
    }

    return cc;
}
/* compare logical long */
uint32_t HELPER(clcl)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    uintptr_t ra = GETPC();
    uint64_t src1len = extract64(env->regs[r1 + 1], 0, 24);
    uint64_t src1 = get_address(env, r1);
    uint64_t src3len = extract64(env->regs[r2 + 1], 0, 24);
    uint64_t src3 = get_address(env, r2);
    uint8_t pad = env->regs[r2 + 1] >> 24;
    uint32_t cc;

    cc = do_clcl(env, &src1, &src1len, &src3, &src3len, pad, -1, 1, ra);

    env->regs[r1 + 1] = deposit64(env->regs[r1 + 1], 0, 24, src1len);
    env->regs[r2 + 1] = deposit64(env->regs[r2 + 1], 0, 24, src3len);
    set_address(env, r1, src1);
    set_address(env, r2, src3);

    return cc;
}

/* compare logical long extended memcompare insn with padding */
uint32_t HELPER(clcle)(CPUS390XState *env, uint32_t r1, uint64_t a2,
                       uint32_t r3)
{
    uintptr_t ra = GETPC();
    uint64_t src1len = get_length(env, r1 + 1);
    uint64_t src1 = get_address(env, r1);
    uint64_t src3len = get_length(env, r3 + 1);
    uint64_t src3 = get_address(env, r3);
    uint8_t pad = a2;
    uint32_t cc;

    cc = do_clcl(env, &src1, &src1len, &src3, &src3len, pad, 0x2000, 1, ra);

    set_length(env, r1 + 1, src1len);
    set_length(env, r3 + 1, src3len);
    set_address(env, r1, src1);
    set_address(env, r3, src3);

    return cc;
}

/* compare logical long unicode memcompare insn with padding */
uint32_t HELPER(clclu)(CPUS390XState *env, uint32_t r1, uint64_t a2,
                       uint32_t r3)
{
    uintptr_t ra = GETPC();
    uint64_t src1len = get_length(env, r1 + 1);
    uint64_t src1 = get_address(env, r1);
    uint64_t src3len = get_length(env, r3 + 1);
    uint64_t src3 = get_address(env, r3);
    uint16_t pad = a2;
    uint32_t cc = 0;

    cc = do_clcl(env, &src1, &src1len, &src3, &src3len, pad, 0x1000, 2, ra);

    set_length(env, r1 + 1, src1len);
    set_length(env, r3 + 1, src3len);
    set_address(env, r1, src1);
    set_address(env, r3, src3);

    return cc;
}
/* checksum */
uint64_t HELPER(cksm)(CPUS390XState *env, uint64_t r1,
                      uint64_t src, uint64_t src_len)
{
    uintptr_t ra = GETPC();
    uint64_t max_len, len;
    uint64_t cksm = (uint32_t)r1;

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 8k.  */
    max_len = (src_len > 0x2000 ? 0x2000 : src_len);

    /* Process full words as available.  */
    for (len = 0; len + 4 <= max_len; len += 4, src += 4) {
        cksm += (uint32_t)cpu_ldl_data_ra(env, src, ra);
    }

    switch (max_len - len) {
    case 1:
        cksm += cpu_ldub_data_ra(env, src, ra) << 24;
        len += 1;
        break;
    case 2:
        cksm += cpu_lduw_data_ra(env, src, ra) << 16;
        len += 2;
        break;
    case 3:
        cksm += cpu_lduw_data_ra(env, src, ra) << 16;
        cksm += cpu_ldub_data_ra(env, src + 2, ra) << 8;
        len += 3;
        break;
    }

    /* Fold the carry from the checksum.  Note that we can see carry-out
       during folding more than once (but probably not more than twice).  */
    while (cksm > 0xffffffffull) {
        cksm = (uint32_t)cksm + (cksm >> 32);
    }

    /* Indicate whether or not we've processed everything.  */
    env->cc_op = (len == src_len ? 0 : 3);

    /* Return both cksm and processed length.  */
    env->retxl = cksm;
    return len;
}
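
/*
 * Worked example (editorial, not in the original source): the fold can
 * carry out twice.  For cksm = 0x1ffffffff the first pass computes
 * 0xffffffff + 1 = 0x100000000, still above 32 bits; the second pass
 * computes 0 + 1 = 1, the final end-around-carry sum.
 */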
void HELPER(pack)(CPUS390XState *env, uint32_t len, uint64_t dest, uint64_t src)
{
    uintptr_t ra = GETPC();
    int len_dest = len >> 4;
    int len_src = len & 0xf;
    uint8_t b;

    dest += len_dest;
    src += len_src;

    /* last byte is special, it only flips the nibbles */
    b = cpu_ldub_data_ra(env, src, ra);
    cpu_stb_data_ra(env, dest, (b << 4) | (b >> 4), ra);
    src--;
    len_src--;

    /* now pack every value */
    while (len_dest > 0) {
        b = 0;

        if (len_src >= 0) {
            b = cpu_ldub_data_ra(env, src, ra) & 0x0f;
            src--;
            len_src--;
        }
        if (len_src >= 0) {
            b |= cpu_ldub_data_ra(env, src, ra) << 4;
            src--;
            len_src--;
        }

        len_dest--;
        dest--;
        cpu_stb_data_ra(env, dest, b, ra);
    }
}
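
/*
 * Worked example (editorial, not in the original source): packing the
 * zoned decimal "123" = f1 f2 c3 with len = 0x12 (len_dest = 1,
 * len_src = 2) first stores the nibble-swapped last byte c3 -> 3c, then
 * combines the digit of f2 with the digit of f1 shifted into the high
 * nibble, producing the packed value 0x12 0x3c.
 */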
static inline void do_pkau(CPUS390XState *env, uint64_t dest, uint64_t src,
                           uint32_t srclen, int ssize, uintptr_t ra)
{
    int i;
    /* The destination operand is always 16 bytes long.  */
    const int destlen = 16;

    /* The operands are processed from right to left.  */
    src += srclen - 1;
    dest += destlen - 1;

    for (i = 0; i < destlen; i++) {
        uint8_t b = 0;

        /* Start with a positive sign */
        if (i == 0) {
            b = 0xc;
        } else if (srclen > ssize) {
            b = cpu_ldub_data_ra(env, src, ra) & 0x0f;
            src -= ssize;
            srclen -= ssize;
        }

        if (srclen > ssize) {
            b |= cpu_ldub_data_ra(env, src, ra) << 4;
            src -= ssize;
            srclen -= ssize;
        }

        cpu_stb_data_ra(env, dest, b, ra);
        dest--;
    }
}

void HELPER(pka)(CPUS390XState *env, uint64_t dest, uint64_t src,
                 uint32_t srclen)
{
    do_pkau(env, dest, src, srclen, 1, GETPC());
}

void HELPER(pku)(CPUS390XState *env, uint64_t dest, uint64_t src,
                 uint32_t srclen)
{
    do_pkau(env, dest, src, srclen, 2, GETPC());
}
void HELPER(unpk)(CPUS390XState *env, uint32_t len, uint64_t dest,
                  uint64_t src)
{
    uintptr_t ra = GETPC();
    int len_dest = len >> 4;
    int len_src = len & 0xf;
    uint8_t b;
    int second_nibble = 0;

    dest += len_dest;
    src += len_src;

    /* last byte is special, it only flips the nibbles */
    b = cpu_ldub_data_ra(env, src, ra);
    cpu_stb_data_ra(env, dest, (b << 4) | (b >> 4), ra);
    src--;
    len_src--;

    /* now pad every nibble with 0xf0 */

    while (len_dest > 0) {
        uint8_t cur_byte = 0;

        if (len_src > 0) {
            cur_byte = cpu_ldub_data_ra(env, src, ra);
        }

        len_dest--;
        dest--;

        /* only advance one nibble at a time */
        if (second_nibble) {
            cur_byte >>= 4;
            len_src--;
            src--;
        }
        second_nibble = !second_nibble;

        /* digit */
        cur_byte = (cur_byte & 0xf);
        /* zone bits */
        cur_byte |= 0xf0;

        cpu_stb_data_ra(env, dest, cur_byte, ra);
    }
}
static inline uint32_t do_unpkau(CPUS390XState *env, uint64_t dest,
                                 uint32_t destlen, int dsize, uint64_t src,
                                 uintptr_t ra)
{
    int i;
    uint32_t cc;
    uint8_t b;
    /* The source operand is always 16 bytes long.  */
    const int srclen = 16;

    /* The operands are processed from right to left.  */
    src += srclen - 1;
    dest += destlen - dsize;

    /* Check for the sign.  */
    b = cpu_ldub_data_ra(env, src, ra);
    src--;
    switch (b & 0xf) {
    case 0xa:
    case 0xc:
    case 0xe ... 0xf:
        cc = 0;  /* plus */
        break;
    case 0xb:
    case 0xd:
        cc = 1;  /* minus */
        break;
    default:
    case 0x0 ... 0x9:
        cc = 3;  /* invalid */
        break;
    }

    /* Now pad every nibble with 0x30, advancing one nibble at a time.  */
    for (i = 0; i < destlen; i += dsize) {
        if (i == (31 * dsize)) {
            /* If length is 32/64 bytes, the leftmost byte is 0.  */
            b = 0;
        } else if (i % (2 * dsize)) {
            b = cpu_ldub_data_ra(env, src, ra);
            src--;
        } else {
            b >>= 4;
        }
        cpu_stsize_data_ra(env, dest, 0x30 + (b & 0xf), dsize, ra);
        dest -= dsize;
    }

    return cc;
}

uint32_t HELPER(unpka)(CPUS390XState *env, uint64_t dest, uint32_t destlen,
                       uint64_t src)
{
    return do_unpkau(env, dest, destlen, 1, src, GETPC());
}

uint32_t HELPER(unpku)(CPUS390XState *env, uint64_t dest, uint32_t destlen,
                       uint64_t src)
{
    return do_unpkau(env, dest, destlen, 2, src, GETPC());
}
uint32_t HELPER(tp)(CPUS390XState *env, uint64_t dest, uint32_t destlen)
{
    uintptr_t ra = GETPC();
    uint32_t cc = 0;
    int i;

    for (i = 0; i < destlen; i++) {
        uint8_t b = cpu_ldub_data_ra(env, dest + i, ra);
        /* digit */
        cc |= (b & 0xf0) > 0x90 ? 2 : 0;

        if (i == (destlen - 1)) {
            /* sign */
            cc |= (b & 0xf) < 0xa ? 1 : 0;
        } else {
            /* digit */
            cc |= (b & 0xf) > 0x9 ? 2 : 0;
        }
    }

    return cc;
}
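
/*
 * Illustrative note (editorial, not in the original source): for a valid
 * packed field such as 0x12 0x3c the helper returns cc = 0.  An invalid
 * digit nibble ORs 2 into cc (e.g. 0x1a 0x2c -> cc = 2), an invalid sign
 * nibble ORs 1 (e.g. 0x12 0x34, sign 0x4 < 0xa -> cc = 1), so cc = 3
 * means both a bad digit and a bad sign were seen.
 */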
static uint32_t do_helper_tr(CPUS390XState *env, uint32_t len, uint64_t array,
                             uint64_t trans, uintptr_t ra)
{
    uint32_t i;

    for (i = 0; i <= len; i++) {
        uint8_t byte = cpu_ldub_data_ra(env, array + i, ra);
        uint8_t new_byte = cpu_ldub_data_ra(env, trans + byte, ra);
        cpu_stb_data_ra(env, array + i, new_byte, ra);
    }

    return env->cc_op;
}

void HELPER(tr)(CPUS390XState *env, uint32_t len, uint64_t array,
                uint64_t trans)
{
    do_helper_tr(env, len, array, trans, GETPC());
}

uint64_t HELPER(tre)(CPUS390XState *env, uint64_t array,
                     uint64_t len, uint64_t trans)
{
    uintptr_t ra = GETPC();
    uint8_t end = env->regs[0] & 0xff;
    uint64_t l = len;
    uint64_t i;
    uint32_t cc = 0;

    if (!(env->psw.mask & PSW_MASK_64)) {
        array &= 0x7fffffff;
        l = (uint32_t)l;
    }

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 8k.  */
    if (l > 0x2000) {
        l = 0x2000;
        cc = 3;
    }

    for (i = 0; i < l; i++) {
        uint8_t byte, new_byte;

        byte = cpu_ldub_data_ra(env, array + i, ra);

        if (byte == end) {
            cc = 1;
            break;
        }

        new_byte = cpu_ldub_data_ra(env, trans + byte, ra);
        cpu_stb_data_ra(env, array + i, new_byte, ra);
    }

    env->cc_op = cc;
    env->retxl = len - i;
    return array + i;
}
static inline uint32_t do_helper_trt(CPUS390XState *env, int len,
                                     uint64_t array, uint64_t trans,
                                     int inc, uintptr_t ra)
{
    int i;

    for (i = 0; i <= len; i++) {
        uint8_t byte = cpu_ldub_data_ra(env, array + i * inc, ra);
        uint8_t sbyte = cpu_ldub_data_ra(env, trans + byte, ra);

        if (sbyte != 0) {
            set_address(env, 1, array + i * inc);
            env->regs[2] = deposit64(env->regs[2], 0, 8, sbyte);
            return (i == len) ? 2 : 1;
        }
    }

    return 0;
}

static uint32_t do_helper_trt_fwd(CPUS390XState *env, uint32_t len,
                                  uint64_t array, uint64_t trans,
                                  uintptr_t ra)
{
    return do_helper_trt(env, len, array, trans, 1, ra);
}

uint32_t HELPER(trt)(CPUS390XState *env, uint32_t len, uint64_t array,
                     uint64_t trans)
{
    return do_helper_trt(env, len, array, trans, 1, GETPC());
}

static uint32_t do_helper_trt_bkwd(CPUS390XState *env, uint32_t len,
                                   uint64_t array, uint64_t trans,
                                   uintptr_t ra)
{
    return do_helper_trt(env, len, array, trans, -1, ra);
}

uint32_t HELPER(trtr)(CPUS390XState *env, uint32_t len, uint64_t array,
                      uint64_t trans)
{
    return do_helper_trt(env, len, array, trans, -1, GETPC());
}
/* Translate one/two to one/two */
uint32_t HELPER(trXX)(CPUS390XState *env, uint32_t r1, uint32_t r2,
                      uint32_t tst, uint32_t sizes)
{
    uintptr_t ra = GETPC();
    int dsize = (sizes & 1) ? 1 : 2;
    int ssize = (sizes & 2) ? 1 : 2;
    uint64_t tbl = get_address(env, 1);
    uint64_t dst = get_address(env, r1);
    uint64_t len = get_length(env, r1 + 1);
    uint64_t src = get_address(env, r2);
    uint32_t cc = 3;
    int i;

    /* The lower address bits of TBL are ignored.  For TROO, TROT, it's
       the low 3 bits (double-word aligned).  For TRTO, TRTT, it's either
       the low 12 bits (4K, without ETF2-ENH) or 3 bits (with ETF2-ENH).  */
    if (ssize == 2 && !s390_has_feat(S390_FEAT_ETF2_ENH)) {
        tbl &= -4096;
    } else {
        tbl &= -8;
    }

    check_alignment(env, len, ssize, ra);

    /* Lest we fail to service interrupts in a timely manner, */
    /* limit the amount of work we're willing to do.   */
    for (i = 0; i < 0x2000; i++) {
        uint16_t sval = cpu_ldusize_data_ra(env, src, ssize, ra);
        uint64_t tble = tbl + (sval * dsize);
        uint16_t dval = cpu_ldusize_data_ra(env, tble, dsize, ra);
        if (dval == tst) {
            cc = 1;
            break;
        }
        cpu_stsize_data_ra(env, dst, dval, dsize, ra);

        len -= ssize;
        src += ssize;
        dst += dsize;

        if (len == 0) {
            cc = 0;
            break;
        }
    }

    set_address(env, r1, dst);
    set_length(env, r1 + 1, len);
    set_address(env, r2, src);

    return cc;
}
void HELPER(cdsg)(CPUS390XState *env, uint64_t addr,
                  uint32_t r1, uint32_t r3)
{
    uintptr_t ra = GETPC();
    Int128 cmpv = int128_make128(env->regs[r1 + 1], env->regs[r1]);
    Int128 newv = int128_make128(env->regs[r3 + 1], env->regs[r3]);
    Int128 oldv;
    uint64_t oldh, oldl;
    bool fail;

    check_alignment(env, addr, 16, ra);

    oldh = cpu_ldq_data_ra(env, addr + 0, ra);
    oldl = cpu_ldq_data_ra(env, addr + 8, ra);

    oldv = int128_make128(oldl, oldh);
    fail = !int128_eq(oldv, cmpv);
    if (fail) {
        newv = oldv;
    }

    cpu_stq_data_ra(env, addr + 0, int128_gethi(newv), ra);
    cpu_stq_data_ra(env, addr + 8, int128_getlo(newv), ra);

    env->cc_op = fail;
    env->regs[r1] = int128_gethi(oldv);
    env->regs[r1 + 1] = int128_getlo(oldv);
}

void HELPER(cdsg_parallel)(CPUS390XState *env, uint64_t addr,
                           uint32_t r1, uint32_t r3)
{
    uintptr_t ra = GETPC();
    Int128 cmpv = int128_make128(env->regs[r1 + 1], env->regs[r1]);
    Int128 newv = int128_make128(env->regs[r3 + 1], env->regs[r3]);
    int mem_idx;
    TCGMemOpIdx oi;
    Int128 oldv;
    bool fail;

    assert(HAVE_CMPXCHG128);

    mem_idx = cpu_mmu_index(env, false);
    oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
    oldv = helper_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oi, ra);
    fail = !int128_eq(oldv, cmpv);

    env->cc_op = fail;
    env->regs[r1] = int128_gethi(oldv);
    env->regs[r1 + 1] = int128_getlo(oldv);
}
static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1,
                        uint64_t a2, bool parallel)
{
    uint32_t mem_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();
    uint32_t fc = extract32(env->regs[0], 0, 8);
    uint32_t sc = extract32(env->regs[0], 8, 8);
    uint64_t pl = get_address(env, 1) & -16;
    uint64_t svh, svl;
    uint32_t cc;

    /* Sanity check the function code and storage characteristic.  */
    if (fc > 1 || sc > 3) {
        if (!s390_has_feat(S390_FEAT_COMPARE_AND_SWAP_AND_STORE_2)) {
            goto spec_exception;
        }
        if (fc > 2 || sc > 4 || (fc == 2 && (r3 & 1))) {
            goto spec_exception;
        }
    }

    /* Sanity check the alignments.  */
    if (extract32(a1, 0, fc + 2) || extract32(a2, 0, sc)) {
        goto spec_exception;
    }

    /* Sanity check writability of the store address.  */
#ifndef CONFIG_USER_ONLY
    probe_write(env, a2, 0, mem_idx, ra);
#endif

    /*
     * Note that the compare-and-swap is atomic, and the store is atomic,
     * but the complete operation is not.  Therefore we do not need to
     * assert serial context in order to implement this.  That said,
     * restart early if we can't support either operation that is supposed
     * to be atomic.
     */
    if (parallel) {
        uint32_t max = 2;
#ifdef CONFIG_ATOMIC64
        max = 3;
#endif
        if ((HAVE_CMPXCHG128 ? 0 : fc + 2 > max) ||
            (HAVE_ATOMIC128 ? 0 : sc > max)) {
            cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
        }
    }

    /* All loads happen before all stores.  For simplicity, load the entire
       store value area from the parameter list.  */
    svh = cpu_ldq_data_ra(env, pl + 16, ra);
    svl = cpu_ldq_data_ra(env, pl + 24, ra);

    switch (fc) {
    case 0:
        {
            uint32_t nv = cpu_ldl_data_ra(env, pl, ra);
            uint32_t cv = env->regs[r3];
            uint32_t ov;

            if (parallel) {
#ifdef CONFIG_USER_ONLY
                uint32_t *haddr = g2h(a1);
                ov = atomic_cmpxchg__nocheck(haddr, cv, nv);
#else
                TCGMemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN, mem_idx);
                ov = helper_atomic_cmpxchgl_be_mmu(env, a1, cv, nv, oi, ra);
#endif
            } else {
                ov = cpu_ldl_data_ra(env, a1, ra);
                cpu_stl_data_ra(env, a1, (ov == cv ? nv : ov), ra);
            }
            cc = (ov != cv);
            env->regs[r3] = deposit64(env->regs[r3], 32, 32, ov);
        }
        break;

    case 1:
        {
            uint64_t nv = cpu_ldq_data_ra(env, pl, ra);
            uint64_t cv = env->regs[r3];
            uint64_t ov;

            if (parallel) {
#ifdef CONFIG_ATOMIC64
# ifdef CONFIG_USER_ONLY
                uint64_t *haddr = g2h(a1);
                ov = atomic_cmpxchg__nocheck(haddr, cv, nv);
# else
                TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN, mem_idx);
                ov = helper_atomic_cmpxchgq_be_mmu(env, a1, cv, nv, oi, ra);
# endif
#else
                /* Note that we asserted !parallel above.  */
                g_assert_not_reached();
#endif
            } else {
                ov = cpu_ldq_data_ra(env, a1, ra);
                cpu_stq_data_ra(env, a1, (ov == cv ? nv : ov), ra);
            }
            cc = (ov != cv);
            env->regs[r3] = ov;
        }
        break;

    case 2:
        {
            uint64_t nvh = cpu_ldq_data_ra(env, pl, ra);
            uint64_t nvl = cpu_ldq_data_ra(env, pl + 8, ra);
            Int128 nv = int128_make128(nvl, nvh);
            Int128 cv = int128_make128(env->regs[r3 + 1], env->regs[r3]);
            Int128 ov;

            if (!parallel) {
                uint64_t oh = cpu_ldq_data_ra(env, a1 + 0, ra);
                uint64_t ol = cpu_ldq_data_ra(env, a1 + 8, ra);

                ov = int128_make128(ol, oh);
                cc = !int128_eq(ov, cv);
                if (cc) {
                    nv = ov;
                }

                cpu_stq_data_ra(env, a1 + 0, int128_gethi(nv), ra);
                cpu_stq_data_ra(env, a1 + 8, int128_getlo(nv), ra);
            } else if (HAVE_CMPXCHG128) {
                TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
                ov = helper_atomic_cmpxchgo_be_mmu(env, a1, cv, nv, oi, ra);
                cc = !int128_eq(ov, cv);
            } else {
                /* Note that we asserted !parallel above.  */
                g_assert_not_reached();
            }

            env->regs[r3 + 0] = int128_gethi(ov);
            env->regs[r3 + 1] = int128_getlo(ov);
        }
        break;

    default:
        g_assert_not_reached();
    }

    /* Store only if the comparison succeeded.  Note that above we use a pair
       of 64-bit big-endian loads, so for sc < 3 we must extract the value
       from the most-significant bits of svh.  */
    if (cc == 0) {
        switch (sc) {
        case 0:
            cpu_stb_data_ra(env, a2, svh >> 56, ra);
            break;
        case 1:
            cpu_stw_data_ra(env, a2, svh >> 48, ra);
            break;
        case 2:
            cpu_stl_data_ra(env, a2, svh >> 32, ra);
            break;
        case 3:
            cpu_stq_data_ra(env, a2, svh, ra);
            break;
        case 4:
            if (!parallel) {
                cpu_stq_data_ra(env, a2 + 0, svh, ra);
                cpu_stq_data_ra(env, a2 + 8, svl, ra);
            } else if (HAVE_ATOMIC128) {
                TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
                Int128 sv = int128_make128(svl, svh);
                helper_atomic_sto_be_mmu(env, a2, sv, oi, ra);
            } else {
                /* Note that we asserted !parallel above.  */
                g_assert_not_reached();
            }
            break;
        default:
            g_assert_not_reached();
        }
    }

    return cc;

 spec_exception:
    s390_program_interrupt(env, PGM_SPECIFICATION, 6, ra);
    g_assert_not_reached();
}
uint32_t HELPER(csst)(CPUS390XState *env, uint32_t r3, uint64_t a1, uint64_t a2)
{
    return do_csst(env, r3, a1, a2, false);
}

uint32_t HELPER(csst_parallel)(CPUS390XState *env, uint32_t r3, uint64_t a1,
                               uint64_t a2)
{
    return do_csst(env, r3, a1, a2, true);
}
#if !defined(CONFIG_USER_ONLY)
void HELPER(lctlg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    uintptr_t ra = GETPC();
    S390CPU *cpu = s390_env_get_cpu(env);
    bool PERchanged = false;
    uint64_t src = a2;
    uint32_t i;

    if (src & 0x7) {
        s390_program_interrupt(env, PGM_SPECIFICATION, 6, ra);
    }

    for (i = r1;; i = (i + 1) % 16) {
        uint64_t val = cpu_ldq_data_ra(env, src, ra);
        if (env->cregs[i] != val && i >= 9 && i <= 11) {
            PERchanged = true;
        }
        env->cregs[i] = val;
        HELPER_LOG("load ctl %d from 0x%" PRIx64 " == 0x%" PRIx64 "\n",
                   i, src, val);
        src += sizeof(uint64_t);

        if (i == r3) {
            break;
        }
    }

    if (PERchanged && env->psw.mask & PSW_MASK_PER) {
        s390_cpu_recompute_watchpoints(CPU(cpu));
    }

    tlb_flush(CPU(cpu));
}

void HELPER(lctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    uintptr_t ra = GETPC();
    S390CPU *cpu = s390_env_get_cpu(env);
    bool PERchanged = false;
    uint64_t src = a2;
    uint32_t i;

    if (src & 0x3) {
        s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
    }

    for (i = r1;; i = (i + 1) % 16) {
        uint32_t val = cpu_ldl_data_ra(env, src, ra);
        if ((uint32_t)env->cregs[i] != val && i >= 9 && i <= 11) {
            PERchanged = true;
        }
        env->cregs[i] = deposit64(env->cregs[i], 0, 32, val);
        HELPER_LOG("load ctl %d from 0x%" PRIx64 " == 0x%x\n", i, src, val);
        src += sizeof(uint32_t);

        if (i == r3) {
            break;
        }
    }

    if (PERchanged && env->psw.mask & PSW_MASK_PER) {
        s390_cpu_recompute_watchpoints(CPU(cpu));
    }

    tlb_flush(CPU(cpu));
}

void HELPER(stctg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    uintptr_t ra = GETPC();
    uint64_t dest = a2;
    uint32_t i;

    if (dest & 0x7) {
        s390_program_interrupt(env, PGM_SPECIFICATION, 6, ra);
    }

    for (i = r1;; i = (i + 1) % 16) {
        cpu_stq_data_ra(env, dest, env->cregs[i], ra);
        dest += sizeof(uint64_t);

        if (i == r3) {
            break;
        }
    }
}

void HELPER(stctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    uintptr_t ra = GETPC();
    uint64_t dest = a2;
    uint32_t i;

    if (dest & 0x3) {
        s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
    }

    for (i = r1;; i = (i + 1) % 16) {
        cpu_stl_data_ra(env, dest, env->cregs[i], ra);
        dest += sizeof(uint32_t);

        if (i == r3) {
            break;
        }
    }
}
uint32_t HELPER(testblock)(CPUS390XState *env, uint64_t real_addr)
{
    uintptr_t ra = GETPC();
    int i;

    real_addr = wrap_address(env, real_addr) & TARGET_PAGE_MASK;

    for (i = 0; i < TARGET_PAGE_SIZE; i += 8) {
        cpu_stq_real_ra(env, real_addr + i, 0, ra);
    }

    return 0;
}
uint32_t HELPER(tprot)(CPUS390XState *env, uint64_t a1, uint64_t a2)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    /*
     * TODO: we currently don't handle all access protection types
     * (including access-list and key-controlled) as well as AR mode.
     */
    if (!s390_cpu_virt_mem_check_write(cpu, a1, 0, 1)) {
        /* Fetching permitted; storing permitted */
        return 0;
    }

    if (env->int_pgm_code == PGM_PROTECTION) {
        /* retry if reading is possible */
        cs->exception_index = 0;
        if (!s390_cpu_virt_mem_check_read(cpu, a1, 0, 1)) {
            /* Fetching permitted; storing not permitted */
            return 1;
        }
    }

    switch (env->int_pgm_code) {
    case PGM_PROTECTION:
        /* Fetching not permitted; storing not permitted */
        cs->exception_index = 0;
        return 2;
    case PGM_ADDRESSING:
    case PGM_TRANS_SPEC:
        /* exceptions forwarded to the guest */
        s390_cpu_virt_mem_handle_exc(cpu, GETPC());
        return 0;
    }

    /* Translation not available */
    cs->exception_index = 0;
    return 3;
}
/* insert storage key extended */
uint64_t HELPER(iske)(CPUS390XState *env, uint64_t r2)
{
    static S390SKeysState *ss;
    static S390SKeysClass *skeyclass;
    uint64_t addr = wrap_address(env, r2);
    uint8_t key;

    if (addr > ram_size) {
        return 0;
    }

    if (unlikely(!ss)) {
        ss = s390_get_skeys_device();
        skeyclass = S390_SKEYS_GET_CLASS(ss);
    }

    if (skeyclass->get_skeys(ss, addr / TARGET_PAGE_SIZE, 1, &key)) {
        return 0;
    }
    return key;
}

/* set storage key extended */
void HELPER(sske)(CPUS390XState *env, uint64_t r1, uint64_t r2)
{
    static S390SKeysState *ss;
    static S390SKeysClass *skeyclass;
    uint64_t addr = wrap_address(env, r2);
    uint8_t key;

    if (addr > ram_size) {
        return;
    }

    if (unlikely(!ss)) {
        ss = s390_get_skeys_device();
        skeyclass = S390_SKEYS_GET_CLASS(ss);
    }

    key = (uint8_t) r1;
    skeyclass->set_skeys(ss, addr / TARGET_PAGE_SIZE, 1, &key);
}

/* reset reference bit extended */
uint32_t HELPER(rrbe)(CPUS390XState *env, uint64_t r2)
{
    static S390SKeysState *ss;
    static S390SKeysClass *skeyclass;
    uint8_t re, key;

    if (r2 > ram_size) {
        return 0;
    }

    if (unlikely(!ss)) {
        ss = s390_get_skeys_device();
        skeyclass = S390_SKEYS_GET_CLASS(ss);
    }

    if (skeyclass->get_skeys(ss, r2 / TARGET_PAGE_SIZE, 1, &key)) {
        return 0;
    }

    re = key & (SK_R | SK_C);
    key &= ~SK_R;

    if (skeyclass->set_skeys(ss, r2 / TARGET_PAGE_SIZE, 1, &key)) {
        return 0;
    }

    /*
     * cc
     *
     * 0  Reference bit zero; change bit zero
     * 1  Reference bit zero; change bit one
     * 2  Reference bit one; change bit zero
     * 3  Reference bit one; change bit one
     */

    return re >> 1;
}
uint32_t HELPER(mvcs)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2)
{
    uintptr_t ra = GETPC();
    int cc = 0, i;

    HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
               __func__, l, a1, a2);

    if (l > 256) {
        /* max 256 */
        l = 256;
        cc = 3;
    }

    /* XXX replace w/ memcpy */
    for (i = 0; i < l; i++) {
        uint8_t x = cpu_ldub_primary_ra(env, a2 + i, ra);
        cpu_stb_secondary_ra(env, a1 + i, x, ra);
    }

    return cc;
}

uint32_t HELPER(mvcp)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2)
{
    uintptr_t ra = GETPC();
    int cc = 0, i;

    HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
               __func__, l, a1, a2);

    if (l > 256) {
        /* max 256 */
        l = 256;
        cc = 3;
    }

    /* XXX replace w/ memcpy */
    for (i = 0; i < l; i++) {
        uint8_t x = cpu_ldub_secondary_ra(env, a2 + i, ra);
        cpu_stb_primary_ra(env, a1 + i, x, ra);
    }

    return cc;
}
void HELPER(idte)(CPUS390XState *env, uint64_t r1, uint64_t r2, uint32_t m4)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));
    const uintptr_t ra = GETPC();
    uint64_t table, entry, raddr;
    uint16_t entries, i, index = 0;

    if (r2 & 0xff000) {
        s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
    }

    if (!(r2 & 0x800)) {
        /* invalidation-and-clearing operation */
        table = r1 & ASCE_ORIGIN;
        entries = (r2 & 0x7ff) + 1;

        switch (r1 & ASCE_TYPE_MASK) {
        case ASCE_TYPE_REGION1:
            index = (r2 >> 53) & 0x7ff;
            break;
        case ASCE_TYPE_REGION2:
            index = (r2 >> 42) & 0x7ff;
            break;
        case ASCE_TYPE_REGION3:
            index = (r2 >> 31) & 0x7ff;
            break;
        case ASCE_TYPE_SEGMENT:
            index = (r2 >> 20) & 0x7ff;
            break;
        }
        for (i = 0; i < entries; i++) {
            /* addresses are not wrapped in 24/31bit mode but table index is */
            raddr = table + ((index + i) & 0x7ff) * sizeof(entry);
            entry = cpu_ldq_real_ra(env, raddr, ra);
            if (!(entry & REGION_ENTRY_INV)) {
                /* we are allowed to not store if already invalid */
                entry |= REGION_ENTRY_INV;
                cpu_stq_real_ra(env, raddr, entry, ra);
            }
        }
    }

    /* We simply flush the complete tlb, therefore we can ignore r3. */
    if (m4 & 1) {
        tlb_flush(cs);
    } else {
        tlb_flush_all_cpus_synced(cs);
    }
}
/* invalidate pte */
void HELPER(ipte)(CPUS390XState *env, uint64_t pto, uint64_t vaddr,
                  uint32_t m4)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));
    const uintptr_t ra = GETPC();
    uint64_t page = vaddr & TARGET_PAGE_MASK;
    uint64_t pte_addr, pte;

    /* Compute the page table entry address */
    pte_addr = (pto & SEGMENT_ENTRY_ORIGIN);
    pte_addr += (vaddr & VADDR_PX) >> 9;

    /* Mark the page table entry as invalid */
    pte = cpu_ldq_real_ra(env, pte_addr, ra);
    pte |= PAGE_INVALID;
    cpu_stq_real_ra(env, pte_addr, pte, ra);

    /* XXX we exploit the fact that Linux passes the exact virtual
       address here - it's not obliged to! */
    if (m4 & 1) {
        if (vaddr & ~VADDR_PX) {
            tlb_flush_page(cs, page);
            /* XXX 31-bit hack */
            tlb_flush_page(cs, page ^ 0x80000000);
        } else {
            /* looks like we don't have a valid virtual address */
            tlb_flush(cs);
        }
    } else {
        if (vaddr & ~VADDR_PX) {
            tlb_flush_page_all_cpus_synced(cs, page);
            /* XXX 31-bit hack */
            tlb_flush_page_all_cpus_synced(cs, page ^ 0x80000000);
        } else {
            /* looks like we don't have a valid virtual address */
            tlb_flush_all_cpus_synced(cs);
        }
    }
}
/* flush local tlb */
void HELPER(ptlb)(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);

    tlb_flush(CPU(cpu));
}

/* flush global tlb */
void HELPER(purge)(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);

    tlb_flush_all_cpus_synced(CPU(cpu));
}

/* load using real address */
uint64_t HELPER(lura)(CPUS390XState *env, uint64_t addr)
{
    return cpu_ldl_real_ra(env, wrap_address(env, addr), GETPC());
}

uint64_t HELPER(lurag)(CPUS390XState *env, uint64_t addr)
{
    return cpu_ldq_real_ra(env, wrap_address(env, addr), GETPC());
}

/* store using real address */
void HELPER(stura)(CPUS390XState *env, uint64_t addr, uint64_t v1)
{
    cpu_stl_real_ra(env, wrap_address(env, addr), (uint32_t)v1, GETPC());

    if ((env->psw.mask & PSW_MASK_PER) &&
        (env->cregs[9] & PER_CR9_EVENT_STORE) &&
        (env->cregs[9] & PER_CR9_EVENT_STORE_REAL)) {
        /* PSW is saved just before calling the helper.  */
        env->per_address = env->psw.addr;
        env->per_perc_atmid = PER_CODE_EVENT_STORE_REAL | get_per_atmid(env);
    }
}

void HELPER(sturg)(CPUS390XState *env, uint64_t addr, uint64_t v1)
{
    cpu_stq_real_ra(env, wrap_address(env, addr), v1, GETPC());

    if ((env->psw.mask & PSW_MASK_PER) &&
        (env->cregs[9] & PER_CR9_EVENT_STORE) &&
        (env->cregs[9] & PER_CR9_EVENT_STORE_REAL)) {
        /* PSW is saved just before calling the helper.  */
        env->per_address = env->psw.addr;
        env->per_perc_atmid = PER_CODE_EVENT_STORE_REAL | get_per_atmid(env);
    }
}
/* load real address */
uint64_t HELPER(lra)(CPUS390XState *env, uint64_t addr)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));
    uint32_t cc = 0;
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;
    uint64_t ret;
    int old_exc, flags;

    /* XXX incomplete - has more corner cases */
    if (!(env->psw.mask & PSW_MASK_64) && (addr >> 32)) {
        s390_program_interrupt(env, PGM_SPECIAL_OP, 2, GETPC());
    }

    old_exc = cs->exception_index;
    if (mmu_translate(env, addr, 0, asc, &ret, &flags, true)) {
        cc = 3;
    }
    if (cs->exception_index == EXCP_PGM) {
        ret = env->int_pgm_code | 0x80000000;
    } else {
        ret |= addr & ~TARGET_PAGE_MASK;
    }
    cs->exception_index = old_exc;

    env->cc_op = cc;
    return ret;
}
#endif
/* load pair from quadword */
uint64_t HELPER(lpq)(CPUS390XState *env, uint64_t addr)
{
    uintptr_t ra = GETPC();
    uint64_t hi, lo;

    check_alignment(env, addr, 16, ra);
    hi = cpu_ldq_data_ra(env, addr + 0, ra);
    lo = cpu_ldq_data_ra(env, addr + 8, ra);

    env->retxl = lo;
    return hi;
}

uint64_t HELPER(lpq_parallel)(CPUS390XState *env, uint64_t addr)
{
    uintptr_t ra = GETPC();
    uint64_t hi, lo;
    int mem_idx;
    TCGMemOpIdx oi;
    Int128 v;

    assert(HAVE_ATOMIC128);

    mem_idx = cpu_mmu_index(env, false);
    oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
    v = helper_atomic_ldo_be_mmu(env, addr, oi, ra);
    hi = int128_gethi(v);
    lo = int128_getlo(v);

    env->retxl = lo;
    return hi;
}

/* store pair to quadword */
void HELPER(stpq)(CPUS390XState *env, uint64_t addr,
                  uint64_t low, uint64_t high)
{
    uintptr_t ra = GETPC();

    check_alignment(env, addr, 16, ra);
    cpu_stq_data_ra(env, addr + 0, high, ra);
    cpu_stq_data_ra(env, addr + 8, low, ra);
}

void HELPER(stpq_parallel)(CPUS390XState *env, uint64_t addr,
                           uint64_t low, uint64_t high)
{
    uintptr_t ra = GETPC();
    int mem_idx;
    TCGMemOpIdx oi;
    Int128 v;

    assert(HAVE_ATOMIC128);

    mem_idx = cpu_mmu_index(env, false);
    oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
    v = int128_make128(low, high);
    helper_atomic_sto_be_mmu(env, addr, v, oi, ra);
}
2159 /* Execute instruction. This instruction executes an insn modified with
2160 the contents of r1. It does not change the executed instruction in memory;
2161 it does not change the program counter.
2163 Perform this by recording the modified instruction in env->ex_value.
2164 This will be noticed by cpu_get_tb_cpu_state and thus tb translation.
2166 void HELPER(ex)(CPUS390XState *env, uint32_t ilen, uint64_t r1, uint64_t addr)
2168 uint64_t insn = cpu_lduw_code(env, addr);
2169 uint8_t opc = insn >> 8;
2171 /* Or in the contents of R1[56:63]. */
2172 insn |= r1 & 0xff;
2174 /* Load the rest of the instruction. */
2175 insn <<= 48;
2176 switch (get_ilen(opc)) {
2177 case 2:
2178 break;
2179 case 4:
2180 insn |= (uint64_t)cpu_lduw_code(env, addr + 2) << 32;
2181 break;
2182 case 6:
2183 insn |= (uint64_t)(uint32_t)cpu_ldl_code(env, addr + 2) << 16;
2184 break;
2185 default:
2186 g_assert_not_reached();

    /* The most common cases can be sped up by avoiding a new TB. */
    if ((opc & 0xf0) == 0xd0) {
        typedef uint32_t (*dx_helper)(CPUS390XState *, uint32_t, uint64_t,
                                      uint64_t, uintptr_t);
        static const dx_helper dx[16] = {
            [0x0] = do_helper_trt_bkwd,
            [0x2] = do_helper_mvc,
            [0x4] = do_helper_nc,
            [0x5] = do_helper_clc,
            [0x6] = do_helper_oc,
            [0x7] = do_helper_xc,
            [0xc] = do_helper_tr,
            [0xd] = do_helper_trt_fwd,
        };
        dx_helper helper = dx[opc & 0xf];

        if (helper) {
            uint32_t l = extract64(insn, 48, 8);
            uint32_t b1 = extract64(insn, 44, 4);
            uint32_t d1 = extract64(insn, 32, 12);
            uint32_t b2 = extract64(insn, 28, 4);
            uint32_t d2 = extract64(insn, 16, 12);
            uint64_t a1 = wrap_address(env, env->regs[b1] + d1);
            uint64_t a2 = wrap_address(env, env->regs[b2] + d2);

            env->cc_op = helper(env, l, a1, a2, 0);
            env->psw.addr += ilen;
            return;
        }
    } else if (opc == 0x0a) {
        env->int_svc_code = extract64(insn, 48, 8);
        env->int_svc_ilen = ilen;
        helper_exception(env, EXCP_SVC);
        g_assert_not_reached();
    }

    /* Record the insn we want to execute as well as the ilen to use
       during the execution of the target insn.  This will also ensure
       that ex_value is non-zero, which flags that we are in a state
       that requires such execution.  */
    env->ex_value = insn | ilen;
}
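
/* Worked example with illustrative values: EXECUTE of TM 0(r1),0 -- raw
   bytes 91 00 10 00 -- where the low byte of r1 is 0x80.  The first
   halfword 0x9100 is OR'ed with r1[56:63] to give 0x9180 and shifted to
   the top of the 64-bit value; get_ilen(0x91) = 4 pulls in the remaining
   halfword 0x1000 at bit 32:

       insn     = 0x9180000000000000 | ((uint64_t)0x1000 << 32);
       ex_value = insn | 4;   yields 0x9180100000000004, as EX is 4 bytes

   The target insn occupies the high bytes, so the 2/4/6-byte ilen in the
   low bits can never collide with it.  (A D2xx MVC target would instead
   take the fast path above and never reach ex_value.)  */
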
uint32_t HELPER(mvcos)(CPUS390XState *env, uint64_t dest, uint64_t src,
                       uint64_t len)
{
    const uint8_t psw_key = (env->psw.mask & PSW_MASK_KEY) >> PSW_SHIFT_KEY;
    const uint8_t psw_as = (env->psw.mask & PSW_MASK_ASC) >> PSW_SHIFT_ASC;
    const uint64_t r0 = env->regs[0];
    const uintptr_t ra = GETPC();
    uint8_t dest_key, dest_as, dest_k, dest_a;
    uint8_t src_key, src_as, src_k, src_a;
    uint64_t val;
    int cc = 0;

    HELPER_LOG("%s dest %" PRIx64 ", src %" PRIx64 ", len %" PRIx64 "\n",
               __func__, dest, src, len);

    if (!(env->psw.mask & PSW_MASK_DAT)) {
        s390_program_interrupt(env, PGM_SPECIAL_OP, 6, ra);
    }
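
    /* Each 16-bit OAC in r0 is unpacked below as follows (bit 0 being the
       least significant bit of the halfword):

           bits 15..12   access key
           bits  7..6    address-space control (AS)
           bit   1       K: if 0, fall back to the PSW key
           bit   0       A: if 0, fall back to the PSW AS

       The high halfword of r0 controls the first operand (dest), the low
       halfword the second operand (src).  */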

    /* OAC (operand access control) for the first operand -> dest */
    val = (r0 & 0xffff0000ULL) >> 16;
    dest_key = (val >> 12) & 0xf;
    dest_as = (val >> 6) & 0x3;
    dest_k = (val >> 1) & 0x1;
    dest_a = val & 0x1;

    /* OAC (operand access control) for the second operand -> src */
    val = (r0 & 0x0000ffffULL);
    src_key = (val >> 12) & 0xf;
    src_as = (val >> 6) & 0x3;
    src_k = (val >> 1) & 0x1;
    src_a = val & 0x1;

    if (!dest_k) {
        dest_key = psw_key;
    }
    if (!src_k) {
        src_key = psw_key;
    }
    if (!dest_a) {
        dest_as = psw_as;
    }
    if (!src_a) {
        src_as = psw_as;
    }

    if (dest_a && dest_as == AS_HOME && (env->psw.mask & PSW_MASK_PSTATE)) {
        s390_program_interrupt(env, PGM_SPECIAL_OP, 6, ra);
    }
    if (!(env->cregs[0] & CR0_SECONDARY) &&
        (dest_as == AS_SECONDARY || src_as == AS_SECONDARY)) {
        s390_program_interrupt(env, PGM_SPECIAL_OP, 6, ra);
    }
    if (!psw_key_valid(env, dest_key) || !psw_key_valid(env, src_key)) {
        s390_program_interrupt(env, PGM_PRIVILEGED, 6, ra);
    }

    len = wrap_length(env, len);
    if (len > 4096) {
        cc = 3;
        len = 4096;
    }

    /* FIXME: AR-mode and proper problem state mode (using PSW keys) missing */
    if (src_as == AS_ACCREG || dest_as == AS_ACCREG ||
        (env->psw.mask & PSW_MASK_PSTATE)) {
        qemu_log_mask(LOG_UNIMP, "%s: AR-mode and PSTATE support missing\n",
                      __func__);
        s390_program_interrupt(env, PGM_ADDRESSING, 6, ra);
    }

    /* FIXME: a) LAP
     *        b) Access using correct keys
     *        c) AR-mode
     */
#ifdef CONFIG_USER_ONLY
    /* PSW keys are never valid in user mode; we will never reach this. */
    g_assert_not_reached();
#else
    fast_memmove_as(env, dest, src, len, dest_as, src_as, ra);
#endif

    return cc;
}

/* Decode a Unicode character.  A return value < 0 indicates success, storing
   the UTF-32 result into OCHAR and the input length into OLEN.  A return
   value >= 0 indicates failure, and the CC value to be returned.  */
typedef int (*decode_unicode_fn)(CPUS390XState *env, uint64_t addr,
                                 uint64_t ilen, bool enh_check, uintptr_t ra,
                                 uint32_t *ochar, uint32_t *olen);

/* Encode a Unicode character.  A return value < 0 indicates success, storing
   the bytes into ADDR and the output length into OLEN.  A return value >= 0
   indicates failure, and the CC value to be returned.  */
typedef int (*encode_unicode_fn)(CPUS390XState *env, uint64_t addr,
                                 uint64_t ilen, uintptr_t ra, uint32_t c,
                                 uint32_t *olen);

static int decode_utf8(CPUS390XState *env, uint64_t addr, uint64_t ilen,
                       bool enh_check, uintptr_t ra,
                       uint32_t *ochar, uint32_t *olen)
{
    uint8_t s0, s1, s2, s3;
    uint32_t c, l;

    if (ilen < 1) {
        return 0;
    }
    s0 = cpu_ldub_data_ra(env, addr, ra);
    if (s0 <= 0x7f) {
        /* one byte character */
        l = 1;
        c = s0;
    } else if (s0 <= (enh_check ? 0xc1 : 0xbf)) {
        /* invalid character */
        return 2;
    } else if (s0 <= 0xdf) {
        /* two byte character */
        l = 2;
        if (ilen < 2) {
            return 0;
        }
        s1 = cpu_ldub_data_ra(env, addr + 1, ra);
        c = s0 & 0x1f;
        c = (c << 6) | (s1 & 0x3f);
        if (enh_check && (s1 & 0xc0) != 0x80) {
            return 2;
        }
    } else if (s0 <= 0xef) {
        /* three byte character */
        l = 3;
        if (ilen < 3) {
            return 0;
        }
        s1 = cpu_ldub_data_ra(env, addr + 1, ra);
        s2 = cpu_ldub_data_ra(env, addr + 2, ra);
        c = s0 & 0x0f;
        c = (c << 6) | (s1 & 0x3f);
        c = (c << 6) | (s2 & 0x3f);
        /* Fold the byte-by-byte range descriptions in the PoO into
           tests against the complete value.  It disallows encodings
           that could be smaller (anything below 0x800 here), and
           the UTF-16 surrogates.  */
        if (enh_check
            && ((s1 & 0xc0) != 0x80
                || (s2 & 0xc0) != 0x80
                || c < 0x800
                || (c >= 0xd800 && c <= 0xdfff))) {
            return 2;
        }
    } else if (s0 <= (enh_check ? 0xf4 : 0xf7)) {
        /* four byte character */
        l = 4;
        if (ilen < 4) {
            return 0;
        }
        s1 = cpu_ldub_data_ra(env, addr + 1, ra);
        s2 = cpu_ldub_data_ra(env, addr + 2, ra);
        s3 = cpu_ldub_data_ra(env, addr + 3, ra);
        c = s0 & 0x07;
        c = (c << 6) | (s1 & 0x3f);
        c = (c << 6) | (s2 & 0x3f);
        c = (c << 6) | (s3 & 0x3f);
        /* See above: minimum is 0x10000, maximum is U+10FFFF.  */
        if (enh_check
            && ((s1 & 0xc0) != 0x80
                || (s2 & 0xc0) != 0x80
                || (s3 & 0xc0) != 0x80
                || c < 0x010000
                || c > 0x10ffff)) {
            return 2;
        }
    } else {
        /* invalid character */
        return 2;
    }

    *ochar = c;
    *olen = l;
    return -1;
}
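
/* Worked example with illustrative values: the three byte sequence
   e2 82 ac (U+20AC, EURO SIGN) decodes as

       c = 0xe2 & 0x0f;                 now 0x02
       c = (c << 6) | (0x82 & 0x3f);    now 0x82
       c = (c << 6) | (0xac & 0x3f);    now 0x20ac

   Under enh_check, both continuation bytes match 10xxxxxx and 0x20ac is
   neither overlong (>= 0x800) nor a surrogate, so the sequence is
   accepted with *ochar = 0x20ac and *olen = 3.  */
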
static int decode_utf16(CPUS390XState *env, uint64_t addr, uint64_t ilen,
                        bool enh_check, uintptr_t ra,
                        uint32_t *ochar, uint32_t *olen)
{
    uint16_t s0, s1;
    uint32_t c, l;

    if (ilen < 2) {
        return 0;
    }
    s0 = cpu_lduw_data_ra(env, addr, ra);
    if ((s0 & 0xfc00) != 0xd800) {
        /* one word character */
        l = 2;
        c = s0;
    } else {
        /* two word character */
        l = 4;
        if (ilen < 4) {
            return 0;
        }
        s1 = cpu_lduw_data_ra(env, addr + 2, ra);
        c = extract32(s0, 6, 4) + 1;
        c = (c << 6) | (s0 & 0x3f);
        c = (c << 10) | (s1 & 0x3ff);
        if (enh_check && (s1 & 0xfc00) != 0xdc00) {
            /* invalid surrogate character */
            return 2;
        }
    }

    *ochar = c;
    *olen = l;
    return -1;
}
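
/* Worked example with illustrative values: the surrogate pair d801 dc37
   decodes to U+10437:

       c = extract32(0xd801, 6, 4) + 1;     now 0 + 1 = 1
       c = (c << 6) | (0xd801 & 0x3f);      now 0x41
       c = (c << 10) | (0xdc37 & 0x3ff);    now 0x10437

   This is the standard uuuuu = wwww + 1 recomposition: the four wwww bits
   of the high surrogate are incremented before the remaining six
   high-surrogate bits and the ten low-surrogate bits are appended.  */
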
static int decode_utf32(CPUS390XState *env, uint64_t addr, uint64_t ilen,
                        bool enh_check, uintptr_t ra,
                        uint32_t *ochar, uint32_t *olen)
{
    uint32_t c;

    if (ilen < 4) {
        return 0;
    }
    c = cpu_ldl_data_ra(env, addr, ra);
    if ((c >= 0xd800 && c <= 0xdbff) || c > 0x10ffff) {
        /* invalid unicode character */
        return 2;
    }

    *ochar = c;
    *olen = 4;
    return -1;
}

static int encode_utf8(CPUS390XState *env, uint64_t addr, uint64_t ilen,
                       uintptr_t ra, uint32_t c, uint32_t *olen)
{
    uint8_t d[4];
    uint32_t l, i;

    if (c <= 0x7f) {
        /* one byte character */
        l = 1;
        d[0] = c;
    } else if (c <= 0x7ff) {
        /* two byte character */
        l = 2;
        d[1] = 0x80 | extract32(c, 0, 6);
        d[0] = 0xc0 | extract32(c, 6, 5);
    } else if (c <= 0xffff) {
        /* three byte character */
        l = 3;
        d[2] = 0x80 | extract32(c, 0, 6);
        d[1] = 0x80 | extract32(c, 6, 6);
        d[0] = 0xe0 | extract32(c, 12, 4);
    } else {
        /* four byte character */
        l = 4;
        d[3] = 0x80 | extract32(c, 0, 6);
        d[2] = 0x80 | extract32(c, 6, 6);
        d[1] = 0x80 | extract32(c, 12, 6);
        d[0] = 0xf0 | extract32(c, 18, 3);
    }

    if (ilen < l) {
        return 1;
    }
    for (i = 0; i < l; ++i) {
        cpu_stb_data_ra(env, addr + i, d[i], ra);
    }

    *olen = l;
    return -1;
}
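
/* Worked example, the reverse of the decode_utf8 one: c = 0x20ac falls
   into the three byte range and produces e2 82 ac:

       d[2] = 0x80 | extract32(0x20ac, 0, 6);    now 0xac
       d[1] = 0x80 | extract32(0x20ac, 6, 6);    now 0x82
       d[0] = 0xe0 | extract32(0x20ac, 12, 4);   now 0xe2

   Note that the encoder trusts its input: it never emits an overlong form
   because each branch is selected by the magnitude of c.  */
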
static int encode_utf16(CPUS390XState *env, uint64_t addr, uint64_t ilen,
                        uintptr_t ra, uint32_t c, uint32_t *olen)
{
    uint16_t d0, d1;

    if (c <= 0xffff) {
        /* one word character */
        if (ilen < 2) {
            return 1;
        }
        cpu_stw_data_ra(env, addr, c, ra);
        *olen = 2;
    } else {
        /* two word character */
        if (ilen < 4) {
            return 1;
        }
        d1 = 0xdc00 | extract32(c, 0, 10);
        d0 = 0xd800 | extract32(c, 10, 6);
        d0 = deposit32(d0, 6, 4, extract32(c, 16, 5) - 1);
        cpu_stw_data_ra(env, addr + 0, d0, ra);
        cpu_stw_data_ra(env, addr + 2, d1, ra);
        *olen = 4;
    }

    return -1;
}
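
/* Worked example, the reverse of the decode_utf16 one: c = 0x10437
   becomes the surrogate pair d801 dc37:

       d1 = 0xdc00 | extract32(0x10437, 0, 10);    now 0xdc37
       d0 = 0xd800 | extract32(0x10437, 10, 6);    now 0xd801
       d0 = deposit32(d0, 6, 4,
                      extract32(0x10437, 16, 5) - 1);

   The deposit writes wwww = uuuuu - 1 = 0 into bits 6..9, leaving
   d0 = 0xd801.  */
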
static int encode_utf32(CPUS390XState *env, uint64_t addr, uint64_t ilen,
                        uintptr_t ra, uint32_t c, uint32_t *olen)
{
    if (ilen < 4) {
        return 1;
    }
    cpu_stl_data_ra(env, addr, c, ra);
    *olen = 4;
    return -1;
}

static inline uint32_t convert_unicode(CPUS390XState *env, uint32_t r1,
                                       uint32_t r2, uint32_t m3, uintptr_t ra,
                                       decode_unicode_fn decode,
                                       encode_unicode_fn encode)
{
    uint64_t dst = get_address(env, r1);
    uint64_t dlen = get_length(env, r1 + 1);
    uint64_t src = get_address(env, r2);
    uint64_t slen = get_length(env, r2 + 1);
    bool enh_check = m3 & 1;
    int cc, i;

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 256.  */
    for (i = 0; i < 256; ++i) {
        uint32_t c, ilen, olen;

        cc = decode(env, src, slen, enh_check, ra, &c, &ilen);
        if (unlikely(cc >= 0)) {
            break;
        }
        cc = encode(env, dst, dlen, ra, c, &olen);
        if (unlikely(cc >= 0)) {
            break;
        }

        src += ilen;
        slen -= ilen;
        dst += olen;
        dlen -= olen;
        cc = 3;
    }

    set_address(env, r1, dst);
    set_length(env, r1 + 1, dlen);
    set_address(env, r2, src);
    set_length(env, r2 + 1, slen);

    return cc;
}
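
/* The CC encoding follows the PoO definition for the CUxx instructions:
   0 (from decode, source exhausted), 1 (from encode, destination full),
   2 (invalid character under the enhanced check), and 3 (the 256-character
   cap above was hit before either operand ran out).  Because R1/R1+1 and
   R2/R2+1 have already been advanced past everything converted so far, a
   guest resumes after CC 3 simply by branching back to the instruction,
   which is what the CPU-determined-completion model expects.  */
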
uint32_t HELPER(cu12)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3)
{
    return convert_unicode(env, r1, r2, m3, GETPC(),
                           decode_utf8, encode_utf16);
}

uint32_t HELPER(cu14)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3)
{
    return convert_unicode(env, r1, r2, m3, GETPC(),
                           decode_utf8, encode_utf32);
}

uint32_t HELPER(cu21)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3)
{
    return convert_unicode(env, r1, r2, m3, GETPC(),
                           decode_utf16, encode_utf8);
}

uint32_t HELPER(cu24)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3)
{
    return convert_unicode(env, r1, r2, m3, GETPC(),
                           decode_utf16, encode_utf32);
}

uint32_t HELPER(cu41)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3)
{
    return convert_unicode(env, r1, r2, m3, GETPC(),
                           decode_utf32, encode_utf8);
}

uint32_t HELPER(cu42)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3)
{
    return convert_unicode(env, r1, r2, m3, GETPC(),
                           decode_utf32, encode_utf16);
}

void probe_write_access(CPUS390XState *env, uint64_t addr, uint64_t len,
                        uintptr_t ra)
{
#ifdef CONFIG_USER_ONLY
    if (!h2g_valid(addr) || !h2g_valid(addr + len - 1) ||
        page_check_range(addr, len, PAGE_WRITE) < 0) {
        s390_program_interrupt(env, PGM_ADDRESSING, ILEN_AUTO, ra);
    }
#else
    /* test the actual access, not just any access to the page due to LAP */
    while (len) {
        const uint64_t pagelen = -(addr | TARGET_PAGE_MASK);
        const uint64_t curlen = MIN(pagelen, len);

        probe_write(env, addr, curlen, cpu_mmu_index(env, false), ra);
        addr = wrap_address(env, addr + curlen);
        len -= curlen;
    }
#endif
}
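
/* The pagelen expression is the same trick as in adj_len_to_page: with
   4 KiB pages TARGET_PAGE_MASK is 0xfffffffffffff000, so for example
   addr = 0x12345678 gives

       addr | TARGET_PAGE_MASK     = 0xfffffffffffff678
       -(addr | TARGET_PAGE_MASK)  = 0x988

   i.e. exactly the number of bytes from addr up to the next page
   boundary, so every probe_write call is confined to a single page.  */
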
void HELPER(probe_write_access)(CPUS390XState *env, uint64_t addr, uint64_t len)
{
    probe_write_access(env, addr, len, GETPC());
}