target/s390x: fix adj_len_to_page
[qemu/ar7.git] / target / s390x / mem_helper.c
blob2326f0bdb9046d97bfba9e134701c9c74d41a7fd
1 /*
2 * S/390 memory access helper routines
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2009 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "exec/address-spaces.h"
24 #include "exec/helper-proto.h"
25 #include "exec/exec-all.h"
26 #include "exec/cpu_ldst.h"
27 #include "qemu/int128.h"
29 #if !defined(CONFIG_USER_ONLY)
30 #include "hw/s390x/storage-keys.h"
31 #endif
33 /*****************************************************************************/
34 /* Softmmu support */
35 #if !defined(CONFIG_USER_ONLY)
37 /* try to fill the TLB and return an exception if error. If retaddr is
38 NULL, it means that the function was called in C code (i.e. not
39 from generated code or from helper.c) */
40 /* XXX: fix it to restore all registers */
/* Resolve a TLB miss for ADDR.  On translation failure the CPU loop is
   exited and guest state is restored from RETADDR (the host return
   address inside the translated block), so the access faults precisely. */
void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
              int mmu_idx, uintptr_t retaddr)
{
    int ret = s390_cpu_handle_mmu_fault(cs, addr, access_type, mmu_idx);
    if (unlikely(ret != 0)) {
        /* Translation failed: unwind to the faulting guest instruction. */
        cpu_loop_exit_restore(cs, retaddr);
    }
}
50 #endif
52 /* #define DEBUG_HELPER */
53 #ifdef DEBUG_HELPER
54 #define HELPER_LOG(x...) qemu_log(x)
55 #else
56 #define HELPER_LOG(x...)
57 #endif
59 /* Reduce the length so that addr + len doesn't cross a page boundary. */
60 static inline uint32_t adj_len_to_page(uint32_t len, uint64_t addr)
62 #ifndef CONFIG_USER_ONLY
63 if ((addr & ~TARGET_PAGE_MASK) + len - 1 >= TARGET_PAGE_SIZE) {
64 return -(addr | TARGET_PAGE_MASK);
66 #endif
67 return len;
/* Store BYTE into L bytes of guest memory at DEST.  Uses direct host
   stores whenever the QEMU TLB already maps the page writable, falling
   back to one-byte MMU accesses (which may fault precisely via RA). */
static void fast_memset(CPUS390XState *env, uint64_t dest, uint8_t byte,
                        uint32_t l, uintptr_t ra)
{
    int mmu_idx = cpu_mmu_index(env, false);

    while (l > 0) {
        void *p = tlb_vaddr_to_host(env, dest, MMU_DATA_STORE, mmu_idx);
        if (p) {
            /* Access to the whole page in write mode granted.  Fill up
               to the end of the current page in one host memset. */
            uint32_t l_adj = adj_len_to_page(l, dest);
            memset(p, byte, l_adj);
            dest += l_adj;
            l -= l_adj;
        } else {
            /* We failed to get access to the whole page. The next write
               access will likely fill the QEMU TLB for the next iteration. */
            cpu_stb_data_ra(env, dest, byte, ra);
            dest++;
            l--;
        }
    }
}
/* Copy L bytes of guest memory from SRC to DEST (overlap-safe, like
   memmove).  Whole-page spans present in the TLB are copied with one
   host memmove; otherwise single bytes go through the MMU helpers. */
static void fast_memmove(CPUS390XState *env, uint64_t dest, uint64_t src,
                         uint32_t l, uintptr_t ra)
{
    int mmu_idx = cpu_mmu_index(env, false);

    while (l > 0) {
        void *src_p = tlb_vaddr_to_host(env, src, MMU_DATA_LOAD, mmu_idx);
        void *dest_p = tlb_vaddr_to_host(env, dest, MMU_DATA_STORE, mmu_idx);
        if (src_p && dest_p) {
            /* Access to both whole pages granted.  Clamp to whichever
               of the two pages ends first. */
            uint32_t l_adj = adj_len_to_page(l, src);
            l_adj = adj_len_to_page(l_adj, dest);
            memmove(dest_p, src_p, l_adj);
            src += l_adj;
            dest += l_adj;
            l -= l_adj;
        } else {
            /* We failed to get access to one or both whole pages. The next
               read or write access will likely fill the QEMU TLB for the
               next iteration. */
            cpu_stb_data_ra(env, dest, cpu_ldub_data_ra(env, src, ra), ra);
            src++;
            dest++;
            l--;
        }
    }
}
/* and on array */
/* AND the L+1 bytes at DEST with the bytes at SRC, storing back into
   DEST.  Returns nonzero iff any result byte is nonzero (CC basis).
   Note: L is the instruction's length code, i.e. byte count minus 1. */
static uint32_t do_helper_nc(CPUS390XState *env, uint32_t l, uint64_t dest,
                             uint64_t src, uintptr_t ra)
{
    uint32_t i;
    uint8_t c = 0;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __func__, l, dest, src);

    for (i = 0; i <= l; i++) {
        /* Load src before dest: the access order is guest-visible if
           either access faults. */
        uint8_t x = cpu_ldub_data_ra(env, src + i, ra);
        x &= cpu_ldub_data_ra(env, dest + i, ra);
        c |= x;
        cpu_stb_data_ra(env, dest + i, x, ra);
    }
    return c != 0;
}
/* NC instruction entry point; GETPC() must be taken here, in the
   outermost helper, to identify the calling translated block. */
uint32_t HELPER(nc)(CPUS390XState *env, uint32_t l, uint64_t dest,
                    uint64_t src)
{
    return do_helper_nc(env, l, dest, src, GETPC());
}
/* xor on array */
/* XOR the L+1 bytes at DEST with the bytes at SRC, storing back into
   DEST.  Returns nonzero iff any result byte is nonzero (CC basis). */
static uint32_t do_helper_xc(CPUS390XState *env, uint32_t l, uint64_t dest,
                             uint64_t src, uintptr_t ra)
{
    uint32_t i;
    uint8_t c = 0;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __func__, l, dest, src);

    /* xor with itself is the same as memset(0) */
    if (src == dest) {
        fast_memset(env, dest, 0, l + 1, ra);
        return 0;
    }

    for (i = 0; i <= l; i++) {
        uint8_t x = cpu_ldub_data_ra(env, src + i, ra);
        x ^= cpu_ldub_data_ra(env, dest + i, ra);
        c |= x;
        cpu_stb_data_ra(env, dest + i, x, ra);
    }
    return c != 0;
}
/* XC instruction entry point; GETPC() must be taken in the outermost
   helper. */
uint32_t HELPER(xc)(CPUS390XState *env, uint32_t l, uint64_t dest,
                    uint64_t src)
{
    return do_helper_xc(env, l, dest, src, GETPC());
}
/* or on array */
/* OR the L+1 bytes at DEST with the bytes at SRC, storing back into
   DEST.  Returns nonzero iff any result byte is nonzero (CC basis). */
static uint32_t do_helper_oc(CPUS390XState *env, uint32_t l, uint64_t dest,
                             uint64_t src, uintptr_t ra)
{
    uint32_t i;
    uint8_t c = 0;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __func__, l, dest, src);

    for (i = 0; i <= l; i++) {
        uint8_t x = cpu_ldub_data_ra(env, src + i, ra);
        x |= cpu_ldub_data_ra(env, dest + i, ra);
        c |= x;
        cpu_stb_data_ra(env, dest + i, x, ra);
    }
    return c != 0;
}
/* OC instruction entry point; GETPC() must be taken in the outermost
   helper. */
uint32_t HELPER(oc)(CPUS390XState *env, uint32_t l, uint64_t dest,
                    uint64_t src)
{
    return do_helper_oc(env, l, dest, src, GETPC());
}
/* memmove */
/* MVC worker: copy L+1 bytes from SRC to DEST, byte by byte in
   ascending order (architecturally required when the areas overlap,
   which makes destructive propagation patterns work). */
static uint32_t do_helper_mvc(CPUS390XState *env, uint32_t l, uint64_t dest,
                              uint64_t src, uintptr_t ra)
{
    uint32_t i;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __func__, l, dest, src);

    /* mvc and memmove do not behave the same when areas overlap! */
    /* mvc with source pointing to the byte after the destination is the
       same as memset with the first source byte */
    if (dest == src + 1) {
        fast_memset(env, dest, cpu_ldub_data_ra(env, src, ra), l + 1, ra);
    } else if (dest < src || src + l < dest) {
        /* Non-destructive overlap (or none): a plain block copy gives
           the same result as the byte-by-byte semantics. */
        fast_memmove(env, dest, src, l + 1, ra);
    } else {
        /* slow version with byte accesses which always work */
        for (i = 0; i <= l; i++) {
            uint8_t x = cpu_ldub_data_ra(env, src + i, ra);
            cpu_stb_data_ra(env, dest + i, x, ra);
        }
    }

    /* NOTE(review): returns the current cc_op unchanged; the HELPER(mvc)
       wrapper ignores this value — confirm callers of do_helper_mvc. */
    return env->cc_op;
}
/* MVC instruction entry point; GETPC() must be taken in the outermost
   helper. */
void HELPER(mvc)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
{
    do_helper_mvc(env, l, dest, src, GETPC());
}
/* move inverse */
/* MVCIN: copy L+1 bytes, reading SRC backwards (SRC addresses the
   rightmost source byte) while writing DEST forwards. */
void HELPER(mvcin)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
{
    uintptr_t ra = GETPC();
    int i;

    for (i = 0; i <= l; i++) {
        uint8_t v = cpu_ldub_data_ra(env, src - i, ra);
        cpu_stb_data_ra(env, dest + i, v, ra);
    }
}
/* move numerics */
/* MVN: for each of the L+1 bytes, replace the numeric (low) nibble of
   DEST with the numeric nibble of SRC, keeping DEST's zone nibble. */
void HELPER(mvn)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
{
    uintptr_t ra = GETPC();
    int i;

    for (i = 0; i <= l; i++) {
        uint8_t v = cpu_ldub_data_ra(env, dest + i, ra) & 0xf0;
        v |= cpu_ldub_data_ra(env, src + i, ra) & 0x0f;
        cpu_stb_data_ra(env, dest + i, v, ra);
    }
}
/* move with offset */
/* MVO: move the source field into the destination shifted left by one
   nibble, preserving the destination's rightmost (sign) nibble.  The
   two 4-bit fields of L encode the destination and source lengths. */
void HELPER(mvo)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
{
    uintptr_t ra = GETPC();
    int len_dest = l >> 4;
    int len_src = l & 0xf;
    uint8_t byte_dest, byte_src;
    int i;

    /* Point at the rightmost byte of each operand. */
    src += len_src;
    dest += len_dest;

    /* Handle rightmost byte */
    byte_src = cpu_ldub_data_ra(env, src, ra);
    byte_dest = cpu_ldub_data_ra(env, dest, ra);
    /* Keep the destination's low nibble; shift in the source's low one. */
    byte_dest = (byte_dest & 0x0f) | (byte_src << 4);
    cpu_stb_data_ra(env, dest, byte_dest, ra);

    /* Process remaining bytes from right to left */
    for (i = 1; i <= len_dest; i++) {
        /* High nibble of the previous source byte becomes this byte's
           low nibble. */
        byte_dest = byte_src >> 4;
        if (len_src - i >= 0) {
            byte_src = cpu_ldub_data_ra(env, src - i, ra);
        } else {
            /* Source exhausted: pad with zero nibbles. */
            byte_src = 0;
        }
        byte_dest |= byte_src << 4;
        cpu_stb_data_ra(env, dest - i, byte_dest, ra);
    }
}
/* move zones */
/* MVZ: for each of the L+1 bytes, replace the zone (high) nibble of
   DEST with the zone nibble of SRC, keeping DEST's numeric nibble. */
void HELPER(mvz)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
{
    uintptr_t ra = GETPC();
    int i;

    for (i = 0; i <= l; i++) {
        uint8_t b = cpu_ldub_data_ra(env, dest + i, ra) & 0x0f;
        b |= cpu_ldub_data_ra(env, src + i, ra) & 0xf0;
        cpu_stb_data_ra(env, dest + i, b, ra);
    }
}
/* compare unsigned byte arrays */
/* CLC worker: compare L+1 bytes at S1 against S2, left to right.
   Returns the condition code: 0 equal, 1 first operand low, 2 first
   operand high. */
static uint32_t do_helper_clc(CPUS390XState *env, uint32_t l, uint64_t s1,
                              uint64_t s2, uintptr_t ra)
{
    uint32_t i;
    uint32_t cc = 0;

    HELPER_LOG("%s l %d s1 %" PRIx64 " s2 %" PRIx64 "\n",
               __func__, l, s1, s2);

    for (i = 0; i <= l; i++) {
        uint8_t x = cpu_ldub_data_ra(env, s1 + i, ra);
        uint8_t y = cpu_ldub_data_ra(env, s2 + i, ra);
        HELPER_LOG("%02x (%c)/%02x (%c) ", x, x, y, y);
        if (x < y) {
            cc = 1;
            break;
        } else if (x > y) {
            cc = 2;
            break;
        }
    }

    HELPER_LOG("\n");
    return cc;
}
/* CLC instruction entry point; GETPC() must be taken in the outermost
   helper. */
uint32_t HELPER(clc)(CPUS390XState *env, uint32_t l, uint64_t s1, uint64_t s2)
{
    return do_helper_clc(env, l, s1, s2, GETPC());
}
/* compare logical under mask */
/* CLM: compare the R1 bytes selected by the 4-bit MASK (one bit per
   byte, MSB first) against successive bytes at ADDR.  Returns CC:
   0 all equal, 1 first operand low, 2 first operand high. */
uint32_t HELPER(clm)(CPUS390XState *env, uint32_t r1, uint32_t mask,
                     uint64_t addr)
{
    uintptr_t ra = GETPC();
    uint32_t cc = 0;

    HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%" PRIx64 "\n", __func__, r1,
               mask, addr);

    while (mask) {
        if (mask & 8) {
            uint8_t d = cpu_ldub_data_ra(env, addr, ra);
            uint8_t r = extract32(r1, 24, 8);   /* current top byte of r1 */
            HELPER_LOG("mask 0x%x %02x/%02x (0x%" PRIx64 ") ", mask, r, d,
                       addr);
            if (r < d) {
                cc = 1;
                break;
            } else if (r > d) {
                cc = 2;
                break;
            }
            /* Memory advances only for selected bytes. */
            addr++;
        }
        /* Shift to the next mask bit / register byte. */
        mask = (mask << 1) & 0xf;
        r1 <<= 8;
    }

    HELPER_LOG("\n");
    return cc;
}
368 static inline uint64_t wrap_address(CPUS390XState *env, uint64_t a)
370 if (!(env->psw.mask & PSW_MASK_64)) {
371 if (!(env->psw.mask & PSW_MASK_32)) {
372 /* 24-Bit mode */
373 a &= 0x00ffffff;
374 } else {
375 /* 31-Bit mode */
376 a &= 0x7fffffff;
379 return a;
/* Read general register REG as an address, wrapped to the current
   addressing mode. */
static inline uint64_t get_address(CPUS390XState *env, int reg)
{
    return wrap_address(env, env->regs[reg]);
}
387 static inline void set_address(CPUS390XState *env, int reg, uint64_t address)
389 if (env->psw.mask & PSW_MASK_64) {
390 /* 64-Bit mode */
391 env->regs[reg] = address;
392 } else {
393 if (!(env->psw.mask & PSW_MASK_32)) {
394 /* 24-Bit mode. According to the PoO it is implementation
395 dependent if bits 32-39 remain unchanged or are set to
396 zeros. Choose the former so that the function can also be
397 used for TRT. */
398 env->regs[reg] = deposit64(env->regs[reg], 0, 24, address);
399 } else {
400 /* 31-Bit mode. According to the PoO it is implementation
401 dependent if bit 32 remains unchanged or is set to zero.
402 Choose the latter so that the function can also be used for
403 TRT. */
404 address &= 0x7fffffff;
405 env->regs[reg] = deposit64(env->regs[reg], 0, 32, address);
410 static inline uint64_t wrap_length(CPUS390XState *env, uint64_t length)
412 if (!(env->psw.mask & PSW_MASK_64)) {
413 /* 24-Bit and 31-Bit mode */
414 length &= 0x7fffffff;
416 return length;
/* Read general register REG as an operand length, wrapped to the
   current addressing mode. */
static inline uint64_t get_length(CPUS390XState *env, int reg)
{
    return wrap_length(env, env->regs[reg]);
}
424 static inline void set_length(CPUS390XState *env, int reg, uint64_t length)
426 if (env->psw.mask & PSW_MASK_64) {
427 /* 64-Bit mode */
428 env->regs[reg] = length;
429 } else {
430 /* 24-Bit and 31-Bit mode */
431 env->regs[reg] = deposit64(env->regs[reg], 0, 32, length);
/* search string (c is byte to search, r2 is string, r1 end of string) */
/* SRST: scan [str, end) for byte c (low 8 bits of r0).  cc_op is set to
   1 (found), 2 (not found) or 3 (CPU-determined limit hit); the return
   value is the new R1 and env->retxl the new R2. */
uint64_t HELPER(srst)(CPUS390XState *env, uint64_t r0, uint64_t end,
                      uint64_t str)
{
    uintptr_t ra = GETPC();
    uint32_t len;
    uint8_t v, c = r0;

    str = wrap_address(env, str);
    end = wrap_address(env, end);

    /* Assume for now that R2 is unmodified. */
    env->retxl = str;

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 8k. */
    for (len = 0; len < 0x2000; ++len) {
        if (str + len == end) {
            /* Character not found.  R1 & R2 are unmodified. */
            env->cc_op = 2;
            return end;
        }
        v = cpu_ldub_data_ra(env, str + len, ra);
        if (v == c) {
            /* Character found.  Set R1 to the location; R2 is unmodified. */
            env->cc_op = 1;
            return str + len;
        }
    }

    /* CPU-determined bytes processed.  Advance R2 to next byte to process. */
    env->retxl = str + len;
    env->cc_op = 3;
    return end;
}
/* unsigned string compare (c is string terminator) */
/* CLST: compare the strings at s1 and s2 up to the terminator byte c.
   Sets cc_op (0 equal, 1/2 low/high, 3 limit reached); returns the new
   R1 value and leaves the new R2 value in env->retxl. */
uint64_t HELPER(clst)(CPUS390XState *env, uint64_t c, uint64_t s1, uint64_t s2)
{
    uintptr_t ra = GETPC();
    uint32_t len;

    c = c & 0xff;
    s1 = wrap_address(env, s1);
    s2 = wrap_address(env, s2);

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 8k. */
    for (len = 0; len < 0x2000; ++len) {
        uint8_t v1 = cpu_ldub_data_ra(env, s1 + len, ra);
        uint8_t v2 = cpu_ldub_data_ra(env, s2 + len, ra);
        if (v1 == v2) {
            if (v1 == c) {
                /* Equal.  CC=0, and don't advance the registers.  */
                env->cc_op = 0;
                env->retxl = s2;
                return s1;
            }
        } else {
            /* Unequal.  CC={1,2}, and advance the registers.  Note that
               the terminator need not be zero, but the string that contains
               the terminator is by definition "low".  */
            env->cc_op = (v1 == c ? 1 : v2 == c ? 2 : v1 < v2 ? 1 : 2);
            env->retxl = s2 + len;
            return s1 + len;
        }
    }

    /* CPU-determined bytes equal; advance the registers.  */
    env->cc_op = 3;
    env->retxl = s2 + len;
    return s1 + len;
}
/* move page */
/* MVPG: copy one page from r2 to r1.  Both addresses are expected to be
   page-aligned by the caller. */
uint32_t HELPER(mvpg)(CPUS390XState *env, uint64_t r0, uint64_t r1, uint64_t r2)
{
    /* ??? missing r0 handling, which includes access keys, but more
       importantly optional suppression of the exception!  */
    fast_memmove(env, r1, r2, TARGET_PAGE_SIZE, GETPC());
    return 0; /* data moved */
}
/* string copy (c is string terminator) */
/* MVST: copy the string at s to d up to and including terminator c.
   Sets cc_op 1 on completion (returning the address of the terminator
   in the destination) or 3 at the work limit; new R2 goes in retxl. */
uint64_t HELPER(mvst)(CPUS390XState *env, uint64_t c, uint64_t d, uint64_t s)
{
    uintptr_t ra = GETPC();
    uint32_t len;

    c = c & 0xff;
    d = wrap_address(env, d);
    s = wrap_address(env, s);

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 8k.  */
    for (len = 0; len < 0x2000; ++len) {
        uint8_t v = cpu_ldub_data_ra(env, s + len, ra);
        cpu_stb_data_ra(env, d + len, v, ra);
        if (v == c) {
            /* Complete.  Set CC=1 and advance R1.  */
            env->cc_op = 1;
            env->retxl = s;
            return d + len;
        }
    }

    /* Incomplete.  Set CC=3 and signal to advance R1 and R2.  */
    env->cc_op = 3;
    env->retxl = s + len;
    return d + len;
}
/* load access registers r1 to r3 from memory at a2 */
/* Register numbers wrap modulo 16, so r1 > r3 is a valid wrap-around
   range; the loop always executes at least once. */
void HELPER(lam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    uintptr_t ra = GETPC();
    int i;

    for (i = r1;; i = (i + 1) % 16) {
        env->aregs[i] = cpu_ldl_data_ra(env, a2, ra);
        a2 += 4;

        if (i == r3) {
            break;
        }
    }
}
/* store access registers r1 to r3 in memory at a2 */
/* Register numbers wrap modulo 16, so r1 > r3 is a valid wrap-around
   range; the loop always executes at least once. */
void HELPER(stam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    uintptr_t ra = GETPC();
    int i;

    for (i = r1;; i = (i + 1) % 16) {
        cpu_stl_data_ra(env, a2, env->aregs[i], ra);
        a2 += 4;

        if (i == r3) {
            break;
        }
    }
}
/* move long */
/* MVCL: copy up to destlen bytes from (r2, r2+1) to (r1, r1+1), padding
   the destination with the pad byte once the source is exhausted.  The
   24-bit lengths and the pad byte live in the odd registers.  Returns
   the condition code from comparing the original lengths. */
uint32_t HELPER(mvcl)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    uintptr_t ra = GETPC();
    uint64_t destlen = env->regs[r1 + 1] & 0xffffff;   /* 24-bit length */
    uint64_t dest = get_address(env, r1);
    uint64_t srclen = env->regs[r2 + 1] & 0xffffff;    /* 24-bit length */
    uint64_t src = get_address(env, r2);
    uint8_t pad = env->regs[r2 + 1] >> 24;  /* pad byte above the length */
    uint8_t v;
    uint32_t cc;

    /* CC reflects the comparison of the original lengths. */
    if (destlen == srclen) {
        cc = 0;
    } else if (destlen < srclen) {
        cc = 1;
    } else {
        cc = 2;
    }

    if (srclen > destlen) {
        srclen = destlen;
    }

    for (; destlen && srclen; src++, dest++, destlen--, srclen--) {
        v = cpu_ldub_data_ra(env, src, ra);
        cpu_stb_data_ra(env, dest, v, ra);
    }

    /* Pad out the remaining destination bytes. */
    for (; destlen; dest++, destlen--) {
        cpu_stb_data_ra(env, dest, pad, ra);
    }

    env->regs[r1 + 1] = destlen;
    /* can't use srclen here, we trunc'ed it */
    env->regs[r2 + 1] -= src - env->regs[r2];
    set_address(env, r1, dest);
    set_address(env, r2, src);

    return cc;
}
621 /* move long extended another memcopy insn with more bells and whistles */
622 uint32_t HELPER(mvcle)(CPUS390XState *env, uint32_t r1, uint64_t a2,
623 uint32_t r3)
625 uintptr_t ra = GETPC();
626 uint64_t destlen = get_length(env, r1 + 1);
627 uint64_t dest = get_address(env, r1);
628 uint64_t srclen = get_length(env, r3 + 1);
629 uint64_t src = get_address(env, r3);
630 uint8_t pad = a2 & 0xff;
631 uint8_t v;
632 uint32_t cc;
634 if (destlen == srclen) {
635 cc = 0;
636 } else if (destlen < srclen) {
637 cc = 1;
638 } else {
639 cc = 2;
642 if (srclen > destlen) {
643 srclen = destlen;
646 for (; destlen && srclen; src++, dest++, destlen--, srclen--) {
647 v = cpu_ldub_data_ra(env, src, ra);
648 cpu_stb_data_ra(env, dest, v, ra);
651 for (; destlen; dest++, destlen--) {
652 cpu_stb_data_ra(env, dest, pad, ra);
655 set_length(env, r1 + 1 , destlen);
656 /* can't use srclen here, we trunc'ed it */
657 set_length(env, r3 + 1, env->regs[r3 + 1] - src - env->regs[r3]);
658 set_address(env, r1, dest);
659 set_address(env, r3, src);
661 return cc;
/* compare logical long helper */
/* Shared worker for CLCL/CLCLE: compare the two operands byte by byte,
   substituting PAD for whichever operand is exhausted.  Updates the
   address/length cursors in place and returns the condition code
   (0 equal, 1 first low, 2 first high, 3 LIMIT reached first). */
static inline uint32_t do_clcl(CPUS390XState *env,
                               uint64_t *src1, uint64_t *src1len,
                               uint64_t *src3, uint64_t *src3len,
                               uint8_t pad, uint64_t limit,
                               uintptr_t ra)
{
    uint64_t len = MAX(*src1len, *src3len);
    uint32_t cc = 0;

    if (!len) {
        return cc;
    }

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  */
    if (len > limit) {
        len = limit;
        cc = 3;
    }

    for (; len; len--) {
        uint8_t v1 = pad;
        uint8_t v3 = pad;

        if (*src1len) {
            v1 = cpu_ldub_data_ra(env, *src1, ra);
        }
        if (*src3len) {
            v3 = cpu_ldub_data_ra(env, *src3, ra);
        }

        if (v1 != v3) {
            cc = (v1 < v3) ? 1 : 2;
            break;
        }

        /* Only advance operands that still have bytes left. */
        if (*src1len) {
            *src1 += 1;
            *src1len -= 1;
        }
        if (*src3len) {
            *src3 += 1;
            *src3len -= 1;
        }
    }

    return cc;
}
/* compare logical long */
/* CLCL: 24-bit lengths and the pad byte come from the odd registers;
   limit is effectively unbounded (-1). */
uint32_t HELPER(clcl)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    uintptr_t ra = GETPC();
    uint64_t src1len = extract64(env->regs[r1 + 1], 0, 24);
    uint64_t src1 = get_address(env, r1);
    uint64_t src3len = extract64(env->regs[r2 + 1], 0, 24);
    uint64_t src3 = get_address(env, r2);
    uint8_t pad = env->regs[r2 + 1] >> 24;
    uint32_t cc;

    cc = do_clcl(env, &src1, &src1len, &src3, &src3len, pad, -1, ra);

    /* Write back the updated 24-bit lengths and addresses. */
    env->regs[r1 + 1] = deposit64(env->regs[r1 + 1], 0, 24, src1len);
    env->regs[r2 + 1] = deposit64(env->regs[r2 + 1], 0, 24, src3len);
    set_address(env, r1, src1);
    set_address(env, r2, src3);

    return cc;
}
/* compare logical long extended memcompare insn with padding */
/* CLCLE: like CLCL but with mode-aware lengths, the pad byte from A2,
   and a CPU-determined work limit (8k) that yields CC=3. */
uint32_t HELPER(clcle)(CPUS390XState *env, uint32_t r1, uint64_t a2,
                       uint32_t r3)
{
    uintptr_t ra = GETPC();
    uint64_t src1len = get_length(env, r1 + 1);
    uint64_t src1 = get_address(env, r1);
    uint64_t src3len = get_length(env, r3 + 1);
    uint64_t src3 = get_address(env, r3);
    uint8_t pad = a2;
    uint32_t cc;

    cc = do_clcl(env, &src1, &src1len, &src3, &src3len, pad, 0x2000, ra);

    set_length(env, r1 + 1, src1len);
    set_length(env, r3 + 1, src3len);
    set_address(env, r1, src1);
    set_address(env, r3, src3);

    return cc;
}
/* checksum */
/* CKSM: accumulate a 32-bit ones'-complement-folded checksum over up to
   8k bytes at SRC into the low word of R1.  Returns the number of bytes
   processed; the folded checksum is returned in env->retxl and cc_op is
   0 when the whole operand was consumed, else 3. */
uint64_t HELPER(cksm)(CPUS390XState *env, uint64_t r1,
                      uint64_t src, uint64_t src_len)
{
    uintptr_t ra = GETPC();
    uint64_t max_len, len;
    uint64_t cksm = (uint32_t)r1;

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 8k.  */
    max_len = (src_len > 0x2000 ? 0x2000 : src_len);

    /* Process full words as available.  */
    for (len = 0; len + 4 <= max_len; len += 4, src += 4) {
        cksm += (uint32_t)cpu_ldl_data_ra(env, src, ra);
    }

    /* Handle the 1-3 trailing bytes, zero-padded on the right. */
    switch (max_len - len) {
    case 1:
        cksm += cpu_ldub_data_ra(env, src, ra) << 24;
        len += 1;
        break;
    case 2:
        cksm += cpu_lduw_data_ra(env, src, ra) << 16;
        len += 2;
        break;
    case 3:
        cksm += cpu_lduw_data_ra(env, src, ra) << 16;
        cksm += cpu_ldub_data_ra(env, src + 2, ra) << 8;
        len += 3;
        break;
    }

    /* Fold the carry from the checksum.  Note that we can see carry-out
       during folding more than once (but probably not more than twice). */
    while (cksm > 0xffffffffull) {
        cksm = (uint32_t)cksm + (cksm >> 32);
    }

    /* Indicate whether or not we've processed everything.  */
    env->cc_op = (len == src_len ? 0 : 3);

    /* Return both cksm and processed length.  */
    env->retxl = cksm;
    return len;
}
/* PACK: convert the zoned-decimal source into packed-decimal at DEST,
   working right to left.  LEN's two 4-bit fields encode the destination
   and source length codes. */
void HELPER(pack)(CPUS390XState *env, uint32_t len, uint64_t dest, uint64_t src)
{
    uintptr_t ra = GETPC();
    int len_dest = len >> 4;
    int len_src = len & 0xf;
    uint8_t b;

    /* Point at the rightmost byte of each operand. */
    dest += len_dest;
    src += len_src;

    /* last byte is special, it only flips the nibbles */
    b = cpu_ldub_data_ra(env, src, ra);
    cpu_stb_data_ra(env, dest, (b << 4) | (b >> 4), ra);
    src--;
    len_src--;

    /* now pack every value */
    while (len_dest >= 0) {
        b = 0;

        /* Take the digit (low) nibbles of up to two source bytes;
           missing source bytes pack as zero digits. */
        if (len_src > 0) {
            b = cpu_ldub_data_ra(env, src, ra) & 0x0f;
            src--;
            len_src--;
        }
        if (len_src > 0) {
            b |= cpu_ldub_data_ra(env, src, ra) << 4;
            src--;
            len_src--;
        }

        len_dest--;
        dest--;
        cpu_stb_data_ra(env, dest, b, ra);
    }
}
/* UNPK: convert the packed-decimal source into zoned decimal at DEST,
   working right to left; each digit nibble gets a 0xF zone nibble.
   LEN's two 4-bit fields encode the destination and source lengths. */
void HELPER(unpk)(CPUS390XState *env, uint32_t len, uint64_t dest,
                  uint64_t src)
{
    uintptr_t ra = GETPC();
    int len_dest = len >> 4;
    int len_src = len & 0xf;
    uint8_t b;
    int second_nibble = 0;

    /* Point at the rightmost byte of each operand. */
    dest += len_dest;
    src += len_src;

    /* last byte is special, it only flips the nibbles */
    b = cpu_ldub_data_ra(env, src, ra);
    cpu_stb_data_ra(env, dest, (b << 4) | (b >> 4), ra);
    src--;
    len_src--;

    /* now pad every nibble with 0xf0 */

    while (len_dest > 0) {
        uint8_t cur_byte = 0;

        if (len_src > 0) {
            cur_byte = cpu_ldub_data_ra(env, src, ra);
        }

        len_dest--;
        dest--;

        /* only advance one nibble at a time */
        if (second_nibble) {
            cur_byte >>= 4;
            len_src--;
            src--;
        }
        second_nibble = !second_nibble;

        /* digit */
        cur_byte = (cur_byte & 0xf);
        /* zone bits */
        cur_byte |= 0xf0;

        cpu_stb_data_ra(env, dest, cur_byte, ra);
    }
}
/* TR worker: translate LEN+1 bytes at ARRAY in place through the
   256-byte table at TRANS. */
static uint32_t do_helper_tr(CPUS390XState *env, uint32_t len, uint64_t array,
                             uint64_t trans, uintptr_t ra)
{
    uint32_t i;

    for (i = 0; i <= len; i++) {
        uint8_t byte = cpu_ldub_data_ra(env, array + i, ra);
        uint8_t new_byte = cpu_ldub_data_ra(env, trans + byte, ra);
        cpu_stb_data_ra(env, array + i, new_byte, ra);
    }

    /* NOTE(review): returns cc_op unchanged; HELPER(tr) ignores it. */
    return env->cc_op;
}
/* TR instruction entry point; GETPC() must be taken in the outermost
   helper. */
void HELPER(tr)(CPUS390XState *env, uint32_t len, uint64_t array,
                uint64_t trans)
{
    do_helper_tr(env, len, array, trans, GETPC());
}
/* TRE: translate bytes at ARRAY through TRANS until the test byte
   (low 8 bits of R0) is found, the length runs out, or the 8k work
   limit is hit.  cc_op gets 0/1/3; the remaining length goes to retxl
   and the updated array address is returned. */
uint64_t HELPER(tre)(CPUS390XState *env, uint64_t array,
                     uint64_t len, uint64_t trans)
{
    uintptr_t ra = GETPC();
    uint8_t end = env->regs[0] & 0xff;
    uint64_t l = len;
    uint64_t i;
    uint32_t cc = 0;

    if (!(env->psw.mask & PSW_MASK_64)) {
        /* Below 64-bit mode: 31-bit address, 32-bit length. */
        array &= 0x7fffffff;
        l = (uint32_t)l;
    }

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 8k.  */
    if (l > 0x2000) {
        l = 0x2000;
        cc = 3;
    }

    for (i = 0; i < l; i++) {
        uint8_t byte, new_byte;

        byte = cpu_ldub_data_ra(env, array + i, ra);

        if (byte == end) {
            /* Test byte found: stop before translating it. */
            cc = 1;
            break;
        }

        new_byte = cpu_ldub_data_ra(env, trans + byte, ra);
        cpu_stb_data_ra(env, array + i, new_byte, ra);
    }

    env->cc_op = cc;
    env->retxl = len - i;
    return array + i;
}
/* TRT worker: scan LEN+1 bytes at ARRAY; the first byte whose table
   entry at TRANS is nonzero stops the scan, storing its address into
   R1 and the function byte into R2.  Returns the condition code:
   0 none found, 1 found before the end, 2 found at the last byte. */
static uint32_t do_helper_trt(CPUS390XState *env, uint32_t len, uint64_t array,
                              uint64_t trans, uintptr_t ra)
{
    uint32_t i;

    for (i = 0; i <= len; i++) {
        uint8_t byte = cpu_ldub_data_ra(env, array + i, ra);
        uint8_t sbyte = cpu_ldub_data_ra(env, trans + byte, ra);

        if (sbyte != 0) {
            set_address(env, 1, array + i);
            env->regs[2] = deposit64(env->regs[2], 0, 8, sbyte);
            return (i == len) ? 2 : 1;
        }
    }

    return 0;
}
/* TRT instruction entry point; GETPC() must be taken in the outermost
   helper. */
uint32_t HELPER(trt)(CPUS390XState *env, uint32_t len, uint64_t array,
                     uint64_t trans)
{
    return do_helper_trt(env, len, array, trans, GETPC());
}
/* CDSG: 128-bit compare-and-swap at ADDR, comparing against the
   register pair r1/r1+1 and conditionally storing the pair r3/r3+1.
   Under MTTCG a real atomic cmpxchg is required (or the slow path is
   taken via cpu_loop_exit_atomic); single-threaded mode emulates it
   with plain loads/stores.  cc_op records success (0) / failure (1). */
void HELPER(cdsg)(CPUS390XState *env, uint64_t addr,
                  uint32_t r1, uint32_t r3)
{
    uintptr_t ra = GETPC();
    Int128 cmpv = int128_make128(env->regs[r1 + 1], env->regs[r1]);
    Int128 newv = int128_make128(env->regs[r3 + 1], env->regs[r3]);
    Int128 oldv;
    bool fail;

    if (parallel_cpus) {
#ifndef CONFIG_ATOMIC128
        /* No host 128-bit atomics: restart the op exclusively. */
        cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
#else
        int mem_idx = cpu_mmu_index(env, false);
        TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
        oldv = helper_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oi, ra);
        fail = !int128_eq(oldv, cmpv);
#endif
    } else {
        uint64_t oldh, oldl;

        oldh = cpu_ldq_data_ra(env, addr + 0, ra);
        oldl = cpu_ldq_data_ra(env, addr + 8, ra);

        oldv = int128_make128(oldl, oldh);
        fail = !int128_eq(oldv, cmpv);
        if (fail) {
            /* On mismatch the memory operand is left unchanged
               (rewritten with its old value). */
            newv = oldv;
        }

        cpu_stq_data_ra(env, addr + 0, int128_gethi(newv), ra);
        cpu_stq_data_ra(env, addr + 8, int128_getlo(newv), ra);
    }

    env->cc_op = fail;
    /* On failure the old memory value lands in the r1 pair. */
    env->regs[r1] = int128_gethi(oldv);
    env->regs[r1 + 1] = int128_getlo(oldv);
}
1013 #if !defined(CONFIG_USER_ONLY)
/* LCTLG: load 64-bit control registers r1..r3 (wrapping mod 16) from
   memory at a2.  Recomputes PER watchpoints if CR9-CR11 changed and
   flushes the TLB since translation controls may have changed. */
void HELPER(lctlg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    uintptr_t ra = GETPC();
    S390CPU *cpu = s390_env_get_cpu(env);
    bool PERchanged = false;
    uint64_t src = a2;
    uint32_t i;

    for (i = r1;; i = (i + 1) % 16) {
        uint64_t val = cpu_ldq_data_ra(env, src, ra);
        /* CR9-CR11 hold the PER control fields. */
        if (env->cregs[i] != val && i >= 9 && i <= 11) {
            PERchanged = true;
        }
        env->cregs[i] = val;
        HELPER_LOG("load ctl %d from 0x%" PRIx64 " == 0x%" PRIx64 "\n",
                   i, src, val);
        src += sizeof(uint64_t);

        if (i == r3) {
            break;
        }
    }

    if (PERchanged && env->psw.mask & PSW_MASK_PER) {
        s390_cpu_recompute_watchpoints(CPU(cpu));
    }

    tlb_flush(CPU(cpu));
}
/* LCTL: load the low 32 bits of control registers r1..r3 (wrapping mod
   16) from memory at a2; upper halves are preserved.  PER watchpoints
   and the TLB are refreshed as in LCTLG. */
void HELPER(lctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    uintptr_t ra = GETPC();
    S390CPU *cpu = s390_env_get_cpu(env);
    bool PERchanged = false;
    uint64_t src = a2;
    uint32_t i;

    for (i = r1;; i = (i + 1) % 16) {
        uint32_t val = cpu_ldl_data_ra(env, src, ra);
        /* CR9-CR11 hold the PER control fields. */
        if ((uint32_t)env->cregs[i] != val && i >= 9 && i <= 11) {
            PERchanged = true;
        }
        env->cregs[i] = deposit64(env->cregs[i], 0, 32, val);
        HELPER_LOG("load ctl %d from 0x%" PRIx64 " == 0x%x\n", i, src, val);
        src += sizeof(uint32_t);

        if (i == r3) {
            break;
        }
    }

    if (PERchanged && env->psw.mask & PSW_MASK_PER) {
        s390_cpu_recompute_watchpoints(CPU(cpu));
    }

    tlb_flush(CPU(cpu));
}
/* STCTG: store 64-bit control registers r1..r3 (wrapping mod 16) to
   memory at a2. */
void HELPER(stctg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    uintptr_t ra = GETPC();
    uint64_t dest = a2;
    uint32_t i;

    for (i = r1;; i = (i + 1) % 16) {
        cpu_stq_data_ra(env, dest, env->cregs[i], ra);
        dest += sizeof(uint64_t);

        if (i == r3) {
            break;
        }
    }
}
/* STCTL: store the low 32 bits of control registers r1..r3 (wrapping
   mod 16) to memory at a2. */
void HELPER(stctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    uintptr_t ra = GETPC();
    uint64_t dest = a2;
    uint32_t i;

    for (i = r1;; i = (i + 1) % 16) {
        cpu_stl_data_ra(env, dest, env->cregs[i], ra);
        dest += sizeof(uint32_t);

        if (i == r3) {
            break;
        }
    }
}
/* TB (TEST BLOCK): validate and zero the 4K page at REAL_ADDR.  Raises
   an addressing exception if the page is not backed by memory and a
   protection exception for low-address-protected pages; returns the
   condition code (0 usable, 1 not). */
uint32_t HELPER(testblock)(CPUS390XState *env, uint64_t real_addr)
{
    uintptr_t ra = GETPC();
    CPUState *cs = CPU(s390_env_get_cpu(env));
    uint64_t abs_addr;
    int i;

    real_addr = wrap_address(env, real_addr);
    abs_addr = mmu_real2abs(env, real_addr) & TARGET_PAGE_MASK;
    if (!address_space_access_valid(&address_space_memory, abs_addr,
                                    TARGET_PAGE_SIZE, true)) {
        /* Restore guest state before raising the program interrupt. */
        cpu_restore_state(cs, ra);
        program_interrupt(env, PGM_ADDRESSING, 4);
        return 1;
    }

    /* Check low-address protection */
    if ((env->cregs[0] & CR0_LOWPROT) && real_addr < 0x2000) {
        cpu_restore_state(cs, ra);
        program_interrupt(env, PGM_PROTECTION, 4);
        return 1;
    }

    /* Zero the whole page, one quadword at a time. */
    for (i = 0; i < TARGET_PAGE_SIZE; i += 8) {
        stq_phys(cs->as, abs_addr + i, 0);
    }

    return 0;
}
/* TPROT (TEST PROTECTION): stub that always reports "fetch and store
   permitted" (CC 0). */
uint32_t HELPER(tprot)(uint64_t a1, uint64_t a2)
{
    /* XXX implement */
    return 0;
}
/* insert storage key extended */
/* ISKE: return the storage key of the page containing R2, or 0 when
   the address is outside guest RAM or the skeys device reports an
   error.  The skeys device is looked up once and cached. */
uint64_t HELPER(iske)(CPUS390XState *env, uint64_t r2)
{
    static S390SKeysState *ss;
    static S390SKeysClass *skeyclass;
    uint64_t addr = wrap_address(env, r2);
    uint8_t key;

    /* NOTE(review): addr == ram_size is already out of bounds; ">="
       looks intended here — confirm against the storage-key code. */
    if (addr > ram_size) {
        return 0;
    }

    if (unlikely(!ss)) {
        ss = s390_get_skeys_device();
        skeyclass = S390_SKEYS_GET_CLASS(ss);
    }

    if (skeyclass->get_skeys(ss, addr / TARGET_PAGE_SIZE, 1, &key)) {
        return 0;
    }
    return key;
}
/* set storage key extended */
/* SSKE: set the storage key of the page containing R2 to the low byte
   of R1; silently ignores addresses outside guest RAM. */
void HELPER(sske)(CPUS390XState *env, uint64_t r1, uint64_t r2)
{
    static S390SKeysState *ss;
    static S390SKeysClass *skeyclass;
    uint64_t addr = wrap_address(env, r2);
    uint8_t key;

    if (addr > ram_size) {
        return;
    }

    if (unlikely(!ss)) {
        ss = s390_get_skeys_device();
        skeyclass = S390_SKEYS_GET_CLASS(ss);
    }

    key = (uint8_t) r1;
    skeyclass->set_skeys(ss, addr / TARGET_PAGE_SIZE, 1, &key);
}
/* reset reference bit extended */
/* RRBE: clear the reference bit in the storage key of the page at R2
   and return a CC encoding the previous reference/change bits. */
uint32_t HELPER(rrbe)(CPUS390XState *env, uint64_t r2)
{
    static S390SKeysState *ss;
    static S390SKeysClass *skeyclass;
    uint8_t re, key;

    if (r2 > ram_size) {
        return 0;
    }

    if (unlikely(!ss)) {
        ss = s390_get_skeys_device();
        skeyclass = S390_SKEYS_GET_CLASS(ss);
    }

    if (skeyclass->get_skeys(ss, r2 / TARGET_PAGE_SIZE, 1, &key)) {
        return 0;
    }

    /* Remember R and C, then clear only the reference bit. */
    re = key & (SK_R | SK_C);
    key &= ~SK_R;

    if (skeyclass->set_skeys(ss, r2 / TARGET_PAGE_SIZE, 1, &key)) {
        return 0;
    }

    /*
     * cc
     *
     * 0  Reference bit zero; change bit zero
     * 1  Reference bit zero; change bit one
     * 2  Reference bit one; change bit zero
     * 3  Reference bit one; change bit one
     */

    return re >> 1;
}
/* MVCS (MOVE TO SECONDARY): copy up to 256 bytes read from the primary
   address space to the secondary space.  Returns CC 3 if the length was
   capped, else 0. */
uint32_t HELPER(mvcs)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2)
{
    uintptr_t ra = GETPC();
    int cc = 0, i;

    HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
               __func__, l, a1, a2);

    if (l > 256) {
        /* max 256 */
        l = 256;
        cc = 3;
    }

    /* XXX replace w/ memcpy */
    for (i = 0; i < l; i++) {
        uint8_t x = cpu_ldub_primary_ra(env, a2 + i, ra);
        cpu_stb_secondary_ra(env, a1 + i, x, ra);
    }

    return cc;
}
/* MVCP (MOVE TO PRIMARY): copy up to 256 bytes read from the secondary
   address space to the primary space.  Returns CC 3 if the length was
   capped, else 0. */
uint32_t HELPER(mvcp)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2)
{
    uintptr_t ra = GETPC();
    int cc = 0, i;

    HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
               __func__, l, a1, a2);

    if (l > 256) {
        /* max 256 */
        l = 256;
        cc = 3;
    }

    /* XXX replace w/ memcpy */
    for (i = 0; i < l; i++) {
        uint8_t x = cpu_ldub_secondary_ra(env, a2 + i, ra);
        cpu_stb_primary_ra(env, a1 + i, x, ra);
    }

    return cc;
}
/* invalidate pte */
/* IPTE: mark the page-table entry for VADDR (within the segment table
   entry PTO) invalid in guest memory, then flush the matching QEMU TLB
   entries — locally only if the local-clearing bit of M4 is set. */
void HELPER(ipte)(CPUS390XState *env, uint64_t pto, uint64_t vaddr,
                  uint32_t m4)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));
    uint64_t page = vaddr & TARGET_PAGE_MASK;
    uint64_t pte_addr, pte;

    /* Compute the page table entry address */
    pte_addr = (pto & _SEGMENT_ENTRY_ORIGIN);
    pte_addr += (vaddr & VADDR_PX) >> 9;

    /* Mark the page table entry as invalid */
    pte = ldq_phys(cs->as, pte_addr);
    pte |= _PAGE_INVALID;
    stq_phys(cs->as, pte_addr, pte);

    /* XXX we exploit the fact that Linux passes the exact virtual
       address here - it's not obliged to! */
    /* XXX: the LC bit should be considered as 0 if the local-TLB-clearing
       facility is not installed.  */
    if (m4 & 1) {
        tlb_flush_page(cs, page);
    } else {
        tlb_flush_page_all_cpus_synced(cs, page);
    }

    /* XXX 31-bit hack */
    if (m4 & 1) {
        tlb_flush_page(cs, page ^ 0x80000000);
    } else {
        tlb_flush_page_all_cpus_synced(cs, page ^ 0x80000000);
    }
}
1305 /* flush local tlb */
1306 void HELPER(ptlb)(CPUS390XState *env)
1308 S390CPU *cpu = s390_env_get_cpu(env);
1310 tlb_flush(CPU(cpu));
1313 /* flush global tlb */
1314 void HELPER(purge)(CPUS390XState *env)
1316 S390CPU *cpu = s390_env_get_cpu(env);
1318 tlb_flush_all_cpus_synced(CPU(cpu));
1321 /* load using real address */
1322 uint64_t HELPER(lura)(CPUS390XState *env, uint64_t addr)
1324 CPUState *cs = CPU(s390_env_get_cpu(env));
1326 return (uint32_t)ldl_phys(cs->as, wrap_address(env, addr));
1329 uint64_t HELPER(lurag)(CPUS390XState *env, uint64_t addr)
1331 CPUState *cs = CPU(s390_env_get_cpu(env));
1333 return ldq_phys(cs->as, wrap_address(env, addr));
1336 /* store using real address */
1337 void HELPER(stura)(CPUS390XState *env, uint64_t addr, uint64_t v1)
1339 CPUState *cs = CPU(s390_env_get_cpu(env));
1341 stl_phys(cs->as, wrap_address(env, addr), (uint32_t)v1);
1343 if ((env->psw.mask & PSW_MASK_PER) &&
1344 (env->cregs[9] & PER_CR9_EVENT_STORE) &&
1345 (env->cregs[9] & PER_CR9_EVENT_STORE_REAL)) {
1346 /* PSW is saved just before calling the helper. */
1347 env->per_address = env->psw.addr;
1348 env->per_perc_atmid = PER_CODE_EVENT_STORE_REAL | get_per_atmid(env);
1352 void HELPER(sturg)(CPUS390XState *env, uint64_t addr, uint64_t v1)
1354 CPUState *cs = CPU(s390_env_get_cpu(env));
1356 stq_phys(cs->as, wrap_address(env, addr), v1);
1358 if ((env->psw.mask & PSW_MASK_PER) &&
1359 (env->cregs[9] & PER_CR9_EVENT_STORE) &&
1360 (env->cregs[9] & PER_CR9_EVENT_STORE_REAL)) {
1361 /* PSW is saved just before calling the helper. */
1362 env->per_address = env->psw.addr;
1363 env->per_perc_atmid = PER_CODE_EVENT_STORE_REAL | get_per_atmid(env);
/* load real address */
/* Translate the virtual address for LRA.  On success the real address
   (with the page offset merged back in) is returned and cc is 0; on a
   translation exception the program-interruption code is returned with
   bit 0x80000000 set and cc is 3.  cc is delivered via env->cc_op. */
uint64_t HELPER(lra)(CPUS390XState *env, uint64_t addr)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));
    uint32_t cc = 0;
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;
    uint64_t ret;
    int old_exc, flags;

    /* XXX incomplete - has more corner cases */
    /* In 31-bit mode an address with bits above 32 set is a
       special-operation exception. */
    if (!(env->psw.mask & PSW_MASK_64) && (addr >> 32)) {
        cpu_restore_state(cs, GETPC());
        program_interrupt(env, PGM_SPECIAL_OP, 2);
    }

    /* mmu_translate may set cs->exception_index as a side effect;
       save it so we can restore it below instead of taking the fault. */
    old_exc = cs->exception_index;
    if (mmu_translate(env, addr, 0, asc, &ret, &flags, true)) {
        cc = 3;
    }
    if (cs->exception_index == EXCP_PGM) {
        /* Report the program-interruption code instead of an address. */
        ret = env->int_pgm_code | 0x80000000;
    } else {
        /* Merge the intra-page offset back into the translated frame. */
        ret |= addr & ~TARGET_PAGE_MASK;
    }
    cs->exception_index = old_exc;

    env->cc_op = cc;
    return ret;
}
1396 #endif
/* Execute instruction. This instruction executes an insn modified with
   the contents of r1. It does not change the executed instruction in memory;
   it does not change the program counter.

   Perform this by recording the modified instruction in env->ex_value.
   This will be noticed by cpu_get_tb_cpu_state and thus tb translation.  */
void HELPER(ex)(CPUS390XState *env, uint32_t ilen, uint64_t r1, uint64_t addr)
{
    /* First halfword of the target instruction. */
    uint64_t insn = cpu_lduw_code(env, addr);
    uint8_t opc = insn >> 8;

    /* Or in the contents of R1[56:63]. */
    insn |= r1 & 0xff;

    /* Load the rest of the instruction. */
    /* The insn is left-justified in the 64-bit value; ilen in bytes
       determined from the opcode tells how many more halfwords to read. */
    insn <<= 48;
    switch (get_ilen(opc)) {
    case 2:
        break;
    case 4:
        insn |= (uint64_t)cpu_lduw_code(env, addr + 2) << 32;
        break;
    case 6:
        insn |= (uint64_t)(uint32_t)cpu_ldl_code(env, addr + 2) << 16;
        break;
    default:
        g_assert_not_reached();
    }

    /* The very most common cases can be sped up by avoiding a new TB. */
    if ((opc & 0xf0) == 0xd0) {
        /* SS-format storage-to-storage insns: dispatch directly to the
           corresponding helper instead of retranslating. */
        typedef uint32_t (*dx_helper)(CPUS390XState *, uint32_t, uint64_t,
                                      uint64_t, uintptr_t);
        static const dx_helper dx[16] = {
            [0x2] = do_helper_mvc,
            [0x4] = do_helper_nc,
            [0x5] = do_helper_clc,
            [0x6] = do_helper_oc,
            [0x7] = do_helper_xc,
            [0xc] = do_helper_tr,
            [0xd] = do_helper_trt,
        };
        dx_helper helper = dx[opc & 0xf];

        if (helper) {
            /* Decode the SS operand fields from the left-justified insn. */
            uint32_t l = extract64(insn, 48, 8);
            uint32_t b1 = extract64(insn, 44, 4);
            uint32_t d1 = extract64(insn, 32, 12);
            uint32_t b2 = extract64(insn, 28, 4);
            uint32_t d2 = extract64(insn, 16, 12);
            uint64_t a1 = wrap_address(env, env->regs[b1] + d1);
            uint64_t a2 = wrap_address(env, env->regs[b2] + d2);

            env->cc_op = helper(env, l, a1, a2, 0);
            /* Advance past the EXECUTE insn itself. */
            env->psw.addr += ilen;
            return;
        }
    } else if (opc == 0x0a) {
        /* SVC: raise the supervisor-call exception directly. */
        env->int_svc_code = extract64(insn, 48, 8);
        env->int_svc_ilen = ilen;
        helper_exception(env, EXCP_SVC);
        g_assert_not_reached();
    }

    /* Record the insn we want to execute as well as the ilen to use
       during the execution of the target insn. This will also ensure
       that ex_value is non-zero, which flags that we are in a state
       that requires such execution. */
    env->ex_value = insn | ilen;
}