target/s390x/mem_helper.c
/*
 * S/390 memory access helper routines
 *
 * Copyright (c) 2009 Ulrich Hecht
 * Copyright (c) 2009 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "qemu/int128.h"

#if !defined(CONFIG_USER_ONLY)
#include "hw/s390x/storage-keys.h"
#endif

/*****************************************************************************/
/* Softmmu support */
#if !defined(CONFIG_USER_ONLY)

/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
              int mmu_idx, uintptr_t retaddr)
{
    int ret;

    ret = s390_cpu_handle_mmu_fault(cs, addr, access_type, mmu_idx);
    if (unlikely(ret != 0)) {
        if (likely(retaddr)) {
            /* now we have a real cpu fault */
            cpu_restore_state(cs, retaddr);
        }
        cpu_loop_exit(cs);
    }
}

#endif

/* #define DEBUG_HELPER */
#ifdef DEBUG_HELPER
#define HELPER_LOG(x...) qemu_log(x)
#else
#define HELPER_LOG(x...)
#endif

/* Reduce the length so that addr + len doesn't cross a page boundary. */
static inline uint64_t adj_len_to_page(uint64_t len, uint64_t addr)
{
#ifndef CONFIG_USER_ONLY
    if ((addr & ~TARGET_PAGE_MASK) + len - 1 >= TARGET_PAGE_SIZE) {
        return -addr & ~TARGET_PAGE_MASK;
    }
#endif
    return len;
}

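/*
 * Worked example (with 4 KiB pages): adj_len_to_page(0x100, 0xfff0)
 * returns 0x10, the number of bytes left before the page boundary, so
 * callers such as fast_memset()/fast_memmove() never let a single host
 * memset()/memmove() spill into the next guest page.
 */
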
static void fast_memset(CPUS390XState *env, uint64_t dest, uint8_t byte,
                        uint32_t l)
{
    int mmu_idx = cpu_mmu_index(env, false);

    while (l > 0) {
        void *p = tlb_vaddr_to_host(env, dest, MMU_DATA_STORE, mmu_idx);
        if (p) {
            /* Access to the whole page in write mode granted. */
            int l_adj = adj_len_to_page(l, dest);
            memset(p, byte, l_adj);
            dest += l_adj;
            l -= l_adj;
        } else {
            /* We failed to get access to the whole page. The next write
               access will likely fill the QEMU TLB for the next iteration. */
            cpu_stb_data(env, dest, byte);
            dest++;
            l--;
        }
    }
}

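/*
 * Note: tlb_vaddr_to_host() only hands back a host pointer when the whole
 * page is already present in the softmmu TLB for the requested access
 * (writable here).  Otherwise the byte-wise cpu_stb_data() fallback above
 * goes through the normal slow path, which, as the in-loop comment says,
 * will likely install the TLB entry that lets the next iteration take the
 * fast host memset()/memmove() path again.
 */
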
static void fast_memmove(CPUS390XState *env, uint64_t dest, uint64_t src,
                         uint32_t l)
{
    int mmu_idx = cpu_mmu_index(env, false);

    while (l > 0) {
        void *src_p = tlb_vaddr_to_host(env, src, MMU_DATA_LOAD, mmu_idx);
        void *dest_p = tlb_vaddr_to_host(env, dest, MMU_DATA_STORE, mmu_idx);
        if (src_p && dest_p) {
            /* Access to both whole pages granted. */
            int l_adj = adj_len_to_page(l, src);
            l_adj = adj_len_to_page(l_adj, dest);
            memmove(dest_p, src_p, l_adj);
            src += l_adj;
            dest += l_adj;
            l -= l_adj;
        } else {
            /* We failed to get access to one or both whole pages. The next
               read or write access will likely fill the QEMU TLB for the
               next iteration. */
            cpu_stb_data(env, dest, cpu_ldub_data(env, src));
            src++;
            dest++;
            l--;
        }
    }
}

/* and on array */
uint32_t HELPER(nc)(CPUS390XState *env, uint32_t l, uint64_t dest,
                    uint64_t src)
{
    int i;
    unsigned char x;
    uint32_t cc = 0;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __func__, l, dest, src);
    for (i = 0; i <= l; i++) {
        x = cpu_ldub_data(env, dest + i) & cpu_ldub_data(env, src + i);
        if (x) {
            cc = 1;
        }
        cpu_stb_data(env, dest + i, x);
    }
    return cc;
}

/* xor on array */
uint32_t HELPER(xc)(CPUS390XState *env, uint32_t l, uint64_t dest,
                    uint64_t src)
{
    int i;
    unsigned char x;
    uint32_t cc = 0;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __func__, l, dest, src);

    /* xor with itself is the same as memset(0) */
    if (src == dest) {
        fast_memset(env, dest, 0, l + 1);
        return 0;
    }

    for (i = 0; i <= l; i++) {
        x = cpu_ldub_data(env, dest + i) ^ cpu_ldub_data(env, src + i);
        if (x) {
            cc = 1;
        }
        cpu_stb_data(env, dest + i, x);
    }
    return cc;
}

/* or on array */
uint32_t HELPER(oc)(CPUS390XState *env, uint32_t l, uint64_t dest,
                    uint64_t src)
{
    int i;
    unsigned char x;
    uint32_t cc = 0;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __func__, l, dest, src);
    for (i = 0; i <= l; i++) {
        x = cpu_ldub_data(env, dest + i) | cpu_ldub_data(env, src + i);
        if (x) {
            cc = 1;
        }
        cpu_stb_data(env, dest + i, x);
    }
    return cc;
}

/* memmove */
void HELPER(mvc)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
{
    int i = 0;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __func__, l, dest, src);

    /* mvc with source pointing to the byte after the destination is the
       same as memset with the first source byte */
    if (dest == (src + 1)) {
        fast_memset(env, dest, cpu_ldub_data(env, src), l + 1);
        return;
    }

    /* mvc and memmove do not behave the same when areas overlap! */
    if ((dest < src) || (src + l < dest)) {
        fast_memmove(env, dest, src, l + 1);
        return;
    }

    /* slow version with byte accesses which always work */
    for (i = 0; i <= l; i++) {
        cpu_stb_data(env, dest + i, cpu_ldub_data(env, src + i));
    }
}

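/*
 * Note on the dest == src + 1 special case above: MVC copies strictly left
 * to right, one byte at a time, so when the destination starts one byte
 * past the source each stored byte becomes the next byte to be read.  The
 * first source byte therefore propagates across the whole destination,
 * the classic MVC storage-fill idiom, which is exactly a memset with that
 * byte.
 */
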
/* compare unsigned byte arrays */
uint32_t HELPER(clc)(CPUS390XState *env, uint32_t l, uint64_t s1, uint64_t s2)
{
    int i;
    unsigned char x, y;
    uint32_t cc;

    HELPER_LOG("%s l %d s1 %" PRIx64 " s2 %" PRIx64 "\n",
               __func__, l, s1, s2);
    for (i = 0; i <= l; i++) {
        x = cpu_ldub_data(env, s1 + i);
        y = cpu_ldub_data(env, s2 + i);
        HELPER_LOG("%02x (%c)/%02x (%c) ", x, x, y, y);
        if (x < y) {
            cc = 1;
            goto done;
        } else if (x > y) {
            cc = 2;
            goto done;
        }
    }
    cc = 0;
 done:
    HELPER_LOG("\n");
    return cc;
}

/* compare logical under mask */
uint32_t HELPER(clm)(CPUS390XState *env, uint32_t r1, uint32_t mask,
                     uint64_t addr)
{
    uint8_t r, d;
    uint32_t cc;

    HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%" PRIx64 "\n", __func__, r1,
               mask, addr);
    cc = 0;
    while (mask) {
        if (mask & 8) {
            d = cpu_ldub_data(env, addr);
            r = (r1 & 0xff000000UL) >> 24;
            HELPER_LOG("mask 0x%x %02x/%02x (0x%" PRIx64 ") ", mask, r, d,
                       addr);
            if (r < d) {
                cc = 1;
                break;
            } else if (r > d) {
                cc = 2;
                break;
            }
            addr++;
        }
        mask = (mask << 1) & 0xf;
        r1 <<= 8;
    }
    HELPER_LOG("\n");
    return cc;
}

static inline uint64_t fix_address(CPUS390XState *env, uint64_t a)
{
    /* 31-Bit mode */
    if (!(env->psw.mask & PSW_MASK_64)) {
        a &= 0x7fffffff;
    }
    return a;
}

static inline uint64_t get_address(CPUS390XState *env, int x2, int b2, int d2)
{
    uint64_t r = d2;
    if (x2) {
        r += env->regs[x2];
    }
    if (b2) {
        r += env->regs[b2];
    }
    return fix_address(env, r);
}

static inline uint64_t get_address_31fix(CPUS390XState *env, int reg)
{
    return fix_address(env, env->regs[reg]);
}

/* search string (c is byte to search, r2 is string, r1 end of string) */
uint64_t HELPER(srst)(CPUS390XState *env, uint64_t r0, uint64_t end,
                      uint64_t str)
{
    uint32_t len;
    uint8_t v, c = r0;

    str = fix_address(env, str);
    end = fix_address(env, end);

    /* Assume for now that R2 is unmodified. */
    env->retxl = str;

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do. For now, let's cap at 8k. */
    for (len = 0; len < 0x2000; ++len) {
        if (str + len == end) {
            /* Character not found. R1 & R2 are unmodified. */
            env->cc_op = 2;
            return end;
        }
        v = cpu_ldub_data(env, str + len);
        if (v == c) {
            /* Character found. Set R1 to the location; R2 is unmodified. */
            env->cc_op = 1;
            return str + len;
        }
    }

    /* CPU-determined bytes processed. Advance R2 to next byte to process. */
    env->retxl = str + len;
    env->cc_op = 3;
    return end;
}

/* unsigned string compare (c is string terminator) */
uint64_t HELPER(clst)(CPUS390XState *env, uint64_t c, uint64_t s1, uint64_t s2)
{
    uint32_t len;

    c = c & 0xff;
    s1 = fix_address(env, s1);
    s2 = fix_address(env, s2);

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do. For now, let's cap at 8k. */
    for (len = 0; len < 0x2000; ++len) {
        uint8_t v1 = cpu_ldub_data(env, s1 + len);
        uint8_t v2 = cpu_ldub_data(env, s2 + len);
        if (v1 == v2) {
            if (v1 == c) {
                /* Equal. CC=0, and don't advance the registers. */
                env->cc_op = 0;
                env->retxl = s2;
                return s1;
            }
        } else {
            /* Unequal. CC={1,2}, and advance the registers. Note that
               the terminator need not be zero, but the string that contains
               the terminator is by definition "low". */
            env->cc_op = (v1 == c ? 1 : v2 == c ? 2 : v1 < v2 ? 1 : 2);
            env->retxl = s2 + len;
            return s1 + len;
        }
    }

    /* CPU-determined bytes equal; advance the registers. */
    env->cc_op = 3;
    env->retxl = s2 + len;
    return s1 + len;
}

/* move page */
void HELPER(mvpg)(CPUS390XState *env, uint64_t r0, uint64_t r1, uint64_t r2)
{
    /* XXX missing r0 handling */
    env->cc_op = 0;
    fast_memmove(env, r1, r2, TARGET_PAGE_SIZE);
}

/* string copy (c is string terminator) */
uint64_t HELPER(mvst)(CPUS390XState *env, uint64_t c, uint64_t d, uint64_t s)
{
    uint32_t len;

    c = c & 0xff;
    d = fix_address(env, d);
    s = fix_address(env, s);

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do. For now, let's cap at 8k. */
    for (len = 0; len < 0x2000; ++len) {
        uint8_t v = cpu_ldub_data(env, s + len);
        cpu_stb_data(env, d + len, v);
        if (v == c) {
            /* Complete. Set CC=1 and advance R1. */
            env->cc_op = 1;
            env->retxl = s;
            return d + len;
        }
    }

    /* Incomplete. Set CC=3 and signal to advance R1 and R2. */
    env->cc_op = 3;
    env->retxl = s + len;
    return d + len;
}

static uint32_t helper_icm(CPUS390XState *env, uint32_t r1, uint64_t address,
                           uint32_t mask)
{
    int pos = 24; /* top of the lower half of r1 */
    uint64_t rmask = 0xff000000ULL;
    uint8_t val = 0;
    int ccd = 0;
    uint32_t cc = 0;

    while (mask) {
        if (mask & 8) {
            env->regs[r1] &= ~rmask;
            val = cpu_ldub_data(env, address);
            if ((val & 0x80) && !ccd) {
                cc = 1;
            }
            ccd = 1;
            if (val && cc == 0) {
                cc = 2;
            }
            env->regs[r1] |= (uint64_t)val << pos;
            address++;
        }
        mask = (mask << 1) & 0xf;
        pos -= 8;
        rmask >>= 8;
    }

    return cc;
}

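/*
 * The condition code produced above follows the ICM definition: 0 when all
 * inserted bits are zero (or the mask is zero), 1 when the leftmost
 * inserted bit is one, and 2 otherwise.
 */
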
/* execute instruction
   this instruction executes an insn modified with the contents of r1
   it does not change the executed instruction in memory
   it does not change the program counter
   in other words: tricky...
   currently implemented by interpreting the cases it is most commonly used in
 */
uint32_t HELPER(ex)(CPUS390XState *env, uint32_t cc, uint64_t v1,
                    uint64_t addr, uint64_t ret)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint16_t insn = cpu_lduw_code(env, addr);

    HELPER_LOG("%s: v1 0x%lx addr 0x%lx insn 0x%x\n", __func__, v1, addr,
               insn);
    if ((insn & 0xf0ff) == 0xd000) {
        uint32_t l, insn2, b1, b2, d1, d2;

        l = v1 & 0xff;
        insn2 = cpu_ldl_code(env, addr + 2);
        b1 = (insn2 >> 28) & 0xf;
        b2 = (insn2 >> 12) & 0xf;
        d1 = (insn2 >> 16) & 0xfff;
        d2 = insn2 & 0xfff;
        switch (insn & 0xf00) {
        case 0x200:
            helper_mvc(env, l, get_address(env, 0, b1, d1),
                       get_address(env, 0, b2, d2));
            break;
        case 0x400:
            cc = helper_nc(env, l, get_address(env, 0, b1, d1),
                           get_address(env, 0, b2, d2));
            break;
        case 0x500:
            cc = helper_clc(env, l, get_address(env, 0, b1, d1),
                            get_address(env, 0, b2, d2));
            break;
        case 0x600:
            cc = helper_oc(env, l, get_address(env, 0, b1, d1),
                           get_address(env, 0, b2, d2));
            break;
        case 0x700:
            cc = helper_xc(env, l, get_address(env, 0, b1, d1),
                           get_address(env, 0, b2, d2));
            break;
        case 0xc00:
            helper_tr(env, l, get_address(env, 0, b1, d1),
                      get_address(env, 0, b2, d2));
            break;
        case 0xd00:
            cc = helper_trt(env, l, get_address(env, 0, b1, d1),
                            get_address(env, 0, b2, d2));
            break;
        default:
            goto abort;
        }
    } else if ((insn & 0xff00) == 0x0a00) {
        /* supervisor call */
        HELPER_LOG("%s: svc %ld via execute\n", __func__, (insn | v1) & 0xff);
        env->psw.addr = ret - 4;
        env->int_svc_code = (insn | v1) & 0xff;
        env->int_svc_ilen = 4;
        helper_exception(env, EXCP_SVC);
    } else if ((insn & 0xff00) == 0xbf00) {
        uint32_t insn2, r1, r3, b2, d2;

        insn2 = cpu_ldl_code(env, addr + 2);
        r1 = (insn2 >> 20) & 0xf;
        r3 = (insn2 >> 16) & 0xf;
        b2 = (insn2 >> 12) & 0xf;
        d2 = insn2 & 0xfff;
        cc = helper_icm(env, r1, get_address(env, 0, b2, d2), r3);
    } else {
    abort:
        cpu_abort(CPU(cpu), "EXECUTE on instruction prefix 0x%x not implemented\n",
                  insn);
    }
    return cc;
}

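/*
 * Informational note on HELPER(ex): architecturally, EXECUTE ORs the low
 * byte of R1 (bits 56-63) into bits 8-15 of the target instruction before
 * running it.  For the SS-format cases handled above that byte is the
 * length field, which this helper approximates by taking the length
 * straight from v1; for SVC it is folded into the interruption code via
 * "(insn | v1) & 0xff".  Anything else falls through to cpu_abort() as
 * unimplemented, per the comment above the function.
 */
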
/* load access registers r1 to r3 from memory at a2 */
void HELPER(lam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    int i;

    for (i = r1;; i = (i + 1) % 16) {
        env->aregs[i] = cpu_ldl_data(env, a2);
        a2 += 4;

        if (i == r3) {
            break;
        }
    }
}

/* store access registers r1 to r3 in memory at a2 */
void HELPER(stam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    int i;

    for (i = r1;; i = (i + 1) % 16) {
        cpu_stl_data(env, a2, env->aregs[i]);
        a2 += 4;

        if (i == r3) {
            break;
        }
    }
}

/* move long */
uint32_t HELPER(mvcl)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    uint64_t destlen = env->regs[r1 + 1] & 0xffffff;
    uint64_t dest = get_address_31fix(env, r1);
    uint64_t srclen = env->regs[r2 + 1] & 0xffffff;
    uint64_t src = get_address_31fix(env, r2);
    uint8_t pad = env->regs[r2 + 1] >> 24;
    uint8_t v;
    uint32_t cc;

    if (destlen == srclen) {
        cc = 0;
    } else if (destlen < srclen) {
        cc = 1;
    } else {
        cc = 2;
    }

    if (srclen > destlen) {
        srclen = destlen;
    }

    for (; destlen && srclen; src++, dest++, destlen--, srclen--) {
        v = cpu_ldub_data(env, src);
        cpu_stb_data(env, dest, v);
    }

    for (; destlen; dest++, destlen--) {
        cpu_stb_data(env, dest, pad);
    }

    env->regs[r1 + 1] = destlen;
    /* can't use srclen here, we trunc'ed it */
    env->regs[r2 + 1] -= src - env->regs[r2];
    env->regs[r1] = dest;
    env->regs[r2] = src;

    return cc;
}

/* move long extended: another memcopy insn with more bells and whistles */
uint32_t HELPER(mvcle)(CPUS390XState *env, uint32_t r1, uint64_t a2,
                       uint32_t r3)
{
    uint64_t destlen = env->regs[r1 + 1];
    uint64_t dest = env->regs[r1];
    uint64_t srclen = env->regs[r3 + 1];
    uint64_t src = env->regs[r3];
    uint8_t pad = a2 & 0xff;
    uint8_t v;
    uint32_t cc;

    if (!(env->psw.mask & PSW_MASK_64)) {
        destlen = (uint32_t)destlen;
        srclen = (uint32_t)srclen;
        dest &= 0x7fffffff;
        src &= 0x7fffffff;
    }

    if (destlen == srclen) {
        cc = 0;
    } else if (destlen < srclen) {
        cc = 1;
    } else {
        cc = 2;
    }

    if (srclen > destlen) {
        srclen = destlen;
    }

    for (; destlen && srclen; src++, dest++, destlen--, srclen--) {
        v = cpu_ldub_data(env, src);
        cpu_stb_data(env, dest, v);
    }

    for (; destlen; dest++, destlen--) {
        cpu_stb_data(env, dest, pad);
    }

    env->regs[r1 + 1] = destlen;
    /* can't use srclen here, we trunc'ed it */
    /* FIXME: 31-bit mode! */
    env->regs[r3 + 1] -= src - env->regs[r3];
    env->regs[r1] = dest;
    env->regs[r3] = src;

    return cc;
}

/* compare logical long extended: memcompare insn with padding */
uint32_t HELPER(clcle)(CPUS390XState *env, uint32_t r1, uint64_t a2,
                       uint32_t r3)
{
    uint64_t destlen = env->regs[r1 + 1];
    uint64_t dest = get_address_31fix(env, r1);
    uint64_t srclen = env->regs[r3 + 1];
    uint64_t src = get_address_31fix(env, r3);
    uint8_t pad = a2 & 0xff;
    uint8_t v1 = 0, v2 = 0;
    uint32_t cc = 0;

    if (!(destlen || srclen)) {
        return cc;
    }

    if (srclen > destlen) {
        srclen = destlen;
    }

    for (; destlen || srclen; src++, dest++, destlen--, srclen--) {
        v1 = srclen ? cpu_ldub_data(env, src) : pad;
        v2 = destlen ? cpu_ldub_data(env, dest) : pad;
        if (v1 != v2) {
            cc = (v1 < v2) ? 1 : 2;
            break;
        }
    }

    env->regs[r1 + 1] = destlen;
    /* can't use srclen here, we trunc'ed it */
    env->regs[r3 + 1] -= src - env->regs[r3];
    env->regs[r1] = dest;
    env->regs[r3] = src;

    return cc;
}

/* checksum */
uint64_t HELPER(cksm)(CPUS390XState *env, uint64_t r1,
                      uint64_t src, uint64_t src_len)
{
    uint64_t max_len, len;
    uint64_t cksm = (uint32_t)r1;

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do. For now, let's cap at 8k. */
    max_len = (src_len > 0x2000 ? 0x2000 : src_len);

    /* Process full words as available. */
    for (len = 0; len + 4 <= max_len; len += 4, src += 4) {
        cksm += (uint32_t)cpu_ldl_data(env, src);
    }

    switch (max_len - len) {
    case 1:
        cksm += cpu_ldub_data(env, src) << 24;
        len += 1;
        break;
    case 2:
        cksm += cpu_lduw_data(env, src) << 16;
        len += 2;
        break;
    case 3:
        cksm += cpu_lduw_data(env, src) << 16;
        cksm += cpu_ldub_data(env, src + 2) << 8;
        len += 3;
        break;
    }

    /* Fold the carry from the checksum. Note that we can see carry-out
       during folding more than once (but probably not more than twice). */
    while (cksm > 0xffffffffull) {
        cksm = (uint32_t)cksm + (cksm >> 32);
    }

    /* Indicate whether or not we've processed everything. */
    env->cc_op = (len == src_len ? 0 : 3);

    /* Return both cksm and processed length. */
    env->retxl = cksm;
    return len;
}

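/*
 * Example of the end-around carry fold above: a running sum of 0x100000002
 * becomes (uint32_t)0x100000002 + (0x100000002 >> 32) = 0x2 + 0x1 = 0x3,
 * i.e. the carry out of bit 31 is added back into the low 32 bits, which is
 * the wrap-around addition CKSM requires.
 */
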
void HELPER(unpk)(CPUS390XState *env, uint32_t len, uint64_t dest,
                  uint64_t src)
{
    int len_dest = len >> 4;
    int len_src = len & 0xf;
    uint8_t b;
    int second_nibble = 0;

    dest += len_dest;
    src += len_src;

    /* last byte is special, it only flips the nibbles */
    b = cpu_ldub_data(env, src);
    cpu_stb_data(env, dest, (b << 4) | (b >> 4));
    src--;
    len_src--;

    /* now pad every nibble with 0xf0 */

    while (len_dest > 0) {
        uint8_t cur_byte = 0;

        if (len_src > 0) {
            cur_byte = cpu_ldub_data(env, src);
        }

        len_dest--;
        dest--;

        /* only advance one nibble at a time */
        if (second_nibble) {
            cur_byte >>= 4;
            len_src--;
            src--;
        }
        second_nibble = !second_nibble;

        /* digit */
        cur_byte = (cur_byte & 0xf);
        /* zone bits */
        cur_byte |= 0xf0;

        cpu_stb_data(env, dest, cur_byte);
    }
}

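/*
 * The loop above walks the packed operand right to left one nibble at a
 * time, emitting each digit with an 0xF zone nibble on top.  Only the
 * rightmost result byte, written before the loop, keeps the sign nibble,
 * swapped into the high position as the zoned format requires.
 */
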
void HELPER(tr)(CPUS390XState *env, uint32_t len, uint64_t array,
                uint64_t trans)
{
    int i;

    for (i = 0; i <= len; i++) {
        uint8_t byte = cpu_ldub_data(env, array + i);
        uint8_t new_byte = cpu_ldub_data(env, trans + byte);

        cpu_stb_data(env, array + i, new_byte);
    }
}

uint64_t HELPER(tre)(CPUS390XState *env, uint64_t array,
                     uint64_t len, uint64_t trans)
{
    uint8_t end = env->regs[0] & 0xff;
    uint64_t l = len;
    uint64_t i;

    if (!(env->psw.mask & PSW_MASK_64)) {
        array &= 0x7fffffff;
        l = (uint32_t)l;
    }

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do. For now, let's cap at 8k. */
    if (l > 0x2000) {
        l = 0x2000;
        env->cc_op = 3;
    } else {
        env->cc_op = 0;
    }

    for (i = 0; i < l; i++) {
        uint8_t byte, new_byte;

        byte = cpu_ldub_data(env, array + i);

        if (byte == end) {
            env->cc_op = 1;
            break;
        }

        new_byte = cpu_ldub_data(env, trans + byte);
        cpu_stb_data(env, array + i, new_byte);
    }

    env->retxl = len - i;
    return array + i;
}

uint32_t HELPER(trt)(CPUS390XState *env, uint32_t len, uint64_t array,
                     uint64_t trans)
{
    uint32_t cc = 0;
    int i;

    for (i = 0; i <= len; i++) {
        uint8_t byte = cpu_ldub_data(env, array + i);
        uint8_t sbyte = cpu_ldub_data(env, trans + byte);

        if (sbyte != 0) {
            env->regs[1] = array + i;
            env->regs[2] = (env->regs[2] & ~0xff) | sbyte;
            cc = (i == len) ? 2 : 1;
            break;
        }
    }

    return cc;
}

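/*
 * TRT above scans the first operand, using each byte as an index into the
 * 256-byte function table at 'trans'.  The first nonzero function byte
 * stops the scan, leaving the address of that operand byte in R1 and the
 * function byte in the low byte of R2, with cc 1 (cc 2 when it was the
 * last byte tested); cc stays 0 when every function byte was zero.
 */
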
void HELPER(cdsg)(CPUS390XState *env, uint64_t addr,
                  uint32_t r1, uint32_t r3)
{
    uintptr_t ra = GETPC();
    Int128 cmpv = int128_make128(env->regs[r1 + 1], env->regs[r1]);
    Int128 newv = int128_make128(env->regs[r3 + 1], env->regs[r3]);
    Int128 oldv;
    bool fail;

    if (parallel_cpus) {
#ifndef CONFIG_ATOMIC128
        cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
#else
        int mem_idx = cpu_mmu_index(env, false);
        TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
        oldv = helper_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oi, ra);
        fail = !int128_eq(oldv, cmpv);
#endif
    } else {
        uint64_t oldh, oldl;

        oldh = cpu_ldq_data_ra(env, addr + 0, ra);
        oldl = cpu_ldq_data_ra(env, addr + 8, ra);

        oldv = int128_make128(oldl, oldh);
        fail = !int128_eq(oldv, cmpv);
        if (fail) {
            newv = oldv;
        }

        cpu_stq_data_ra(env, addr + 0, int128_gethi(newv), ra);
        cpu_stq_data_ra(env, addr + 8, int128_getlo(newv), ra);
    }

    env->cc_op = fail;
    env->regs[r1] = int128_gethi(oldv);
    env->regs[r1 + 1] = int128_getlo(oldv);
}

#if !defined(CONFIG_USER_ONLY)
void HELPER(lctlg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    bool PERchanged = false;
    int i;
    uint64_t src = a2;
    uint64_t val;

    for (i = r1;; i = (i + 1) % 16) {
        val = cpu_ldq_data(env, src);
        if (env->cregs[i] != val && i >= 9 && i <= 11) {
            PERchanged = true;
        }
        env->cregs[i] = val;
        HELPER_LOG("load ctl %d from 0x%" PRIx64 " == 0x%" PRIx64 "\n",
                   i, src, env->cregs[i]);
        src += sizeof(uint64_t);

        if (i == r3) {
            break;
        }
    }

    if (PERchanged && env->psw.mask & PSW_MASK_PER) {
        s390_cpu_recompute_watchpoints(CPU(cpu));
    }

    tlb_flush(CPU(cpu));
}

void HELPER(lctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    bool PERchanged = false;
    int i;
    uint64_t src = a2;
    uint32_t val;

    for (i = r1;; i = (i + 1) % 16) {
        val = cpu_ldl_data(env, src);
        if ((uint32_t)env->cregs[i] != val && i >= 9 && i <= 11) {
            PERchanged = true;
        }
        env->cregs[i] = (env->cregs[i] & 0xFFFFFFFF00000000ULL) | val;
        src += sizeof(uint32_t);

        if (i == r3) {
            break;
        }
    }

    if (PERchanged && env->psw.mask & PSW_MASK_PER) {
        s390_cpu_recompute_watchpoints(CPU(cpu));
    }

    tlb_flush(CPU(cpu));
}

void HELPER(stctg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    int i;
    uint64_t dest = a2;

    for (i = r1;; i = (i + 1) % 16) {
        cpu_stq_data(env, dest, env->cregs[i]);
        dest += sizeof(uint64_t);

        if (i == r3) {
            break;
        }
    }
}

void HELPER(stctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    int i;
    uint64_t dest = a2;

    for (i = r1;; i = (i + 1) % 16) {
        cpu_stl_data(env, dest, env->cregs[i]);
        dest += sizeof(uint32_t);

        if (i == r3) {
            break;
        }
    }
}

uint32_t HELPER(tprot)(uint64_t a1, uint64_t a2)
{
    /* XXX implement */

    return 0;
}

/* insert storage key extended */
uint64_t HELPER(iske)(CPUS390XState *env, uint64_t r2)
{
    static S390SKeysState *ss;
    static S390SKeysClass *skeyclass;
    uint64_t addr = get_address(env, 0, 0, r2);
    uint8_t key;

    if (addr > ram_size) {
        return 0;
    }

    if (unlikely(!ss)) {
        ss = s390_get_skeys_device();
        skeyclass = S390_SKEYS_GET_CLASS(ss);
    }

    if (skeyclass->get_skeys(ss, addr / TARGET_PAGE_SIZE, 1, &key)) {
        return 0;
    }
    return key;
}

/* set storage key extended */
void HELPER(sske)(CPUS390XState *env, uint64_t r1, uint64_t r2)
{
    static S390SKeysState *ss;
    static S390SKeysClass *skeyclass;
    uint64_t addr = get_address(env, 0, 0, r2);
    uint8_t key;

    if (addr > ram_size) {
        return;
    }

    if (unlikely(!ss)) {
        ss = s390_get_skeys_device();
        skeyclass = S390_SKEYS_GET_CLASS(ss);
    }

    key = (uint8_t) r1;
    skeyclass->set_skeys(ss, addr / TARGET_PAGE_SIZE, 1, &key);
}

/* reset reference bit extended */
uint32_t HELPER(rrbe)(CPUS390XState *env, uint64_t r2)
{
    static S390SKeysState *ss;
    static S390SKeysClass *skeyclass;
    uint8_t re, key;

    if (r2 > ram_size) {
        return 0;
    }

    if (unlikely(!ss)) {
        ss = s390_get_skeys_device();
        skeyclass = S390_SKEYS_GET_CLASS(ss);
    }

    if (skeyclass->get_skeys(ss, r2 / TARGET_PAGE_SIZE, 1, &key)) {
        return 0;
    }

    re = key & (SK_R | SK_C);
    key &= ~SK_R;

    if (skeyclass->set_skeys(ss, r2 / TARGET_PAGE_SIZE, 1, &key)) {
        return 0;
    }

    /*
     * cc
     *
     * 0  Reference bit zero; change bit zero
     * 1  Reference bit zero; change bit one
     * 2  Reference bit one; change bit zero
     * 3  Reference bit one; change bit one
     */

    return re >> 1;
}

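/*
 * Assuming the usual storage-key bit layout (SK_R = 0x04, SK_C = 0x02),
 * the reference and change bits are adjacent, so (key & (SK_R | SK_C)) >> 1
 * yields exactly the cc encoding tabulated above.
 */
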
/* compare and swap and purge */
uint32_t HELPER(csp)(CPUS390XState *env, uint32_t r1, uint64_t r2)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint32_t cc;
    uint32_t o1 = env->regs[r1];
    uint64_t a2 = r2 & ~3ULL;
    uint32_t o2 = cpu_ldl_data(env, a2);

    if (o1 == o2) {
        cpu_stl_data(env, a2, env->regs[(r1 + 1) & 15]);
        if (r2 & 0x3) {
            /* flush TLB / ALB */
            tlb_flush(CPU(cpu));
        }
        cc = 0;
    } else {
        env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | o2;
        cc = 1;
    }

    return cc;
}

uint32_t HELPER(mvcs)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2)
{
    int cc = 0, i;

    HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
               __func__, l, a1, a2);

    if (l > 256) {
        /* max 256 */
        l = 256;
        cc = 3;
    }

    /* XXX replace w/ memcpy */
    for (i = 0; i < l; i++) {
        cpu_stb_secondary(env, a1 + i, cpu_ldub_primary(env, a2 + i));
    }

    return cc;
}

uint32_t HELPER(mvcp)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2)
{
    int cc = 0, i;

    HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
               __func__, l, a1, a2);

    if (l > 256) {
        /* max 256 */
        l = 256;
        cc = 3;
    }

    /* XXX replace w/ memcpy */
    for (i = 0; i < l; i++) {
        cpu_stb_primary(env, a1 + i, cpu_ldub_secondary(env, a2 + i));
    }

    return cc;
}

/* invalidate pte */
void HELPER(ipte)(CPUS390XState *env, uint64_t pte_addr, uint64_t vaddr)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));
    uint64_t page = vaddr & TARGET_PAGE_MASK;
    uint64_t pte = 0;

    /* XXX broadcast to other CPUs */

    /* XXX Linux is nice enough to give us the exact pte address.
       According to spec we'd have to find it out ourselves */
    /* XXX Linux is fine with overwriting the pte, the spec requires
       us to only set the invalid bit */
    stq_phys(cs->as, pte_addr, pte | _PAGE_INVALID);

    /* XXX we exploit the fact that Linux passes the exact virtual
       address here - it's not obliged to! */
    tlb_flush_page(cs, page);

    /* XXX 31-bit hack */
    if (page & 0x80000000) {
        tlb_flush_page(cs, page & ~0x80000000);
    } else {
        tlb_flush_page(cs, page | 0x80000000);
    }
}

/* flush local tlb */
void HELPER(ptlb)(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);

    tlb_flush(CPU(cpu));
}

/* load using real address */
uint64_t HELPER(lura)(CPUS390XState *env, uint64_t addr)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));

    return (uint32_t)ldl_phys(cs->as, get_address(env, 0, 0, addr));
}

uint64_t HELPER(lurag)(CPUS390XState *env, uint64_t addr)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));

    return ldq_phys(cs->as, get_address(env, 0, 0, addr));
}

/* store using real address */
void HELPER(stura)(CPUS390XState *env, uint64_t addr, uint64_t v1)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));

    stl_phys(cs->as, get_address(env, 0, 0, addr), (uint32_t)v1);

    if ((env->psw.mask & PSW_MASK_PER) &&
        (env->cregs[9] & PER_CR9_EVENT_STORE) &&
        (env->cregs[9] & PER_CR9_EVENT_STORE_REAL)) {
        /* PSW is saved just before calling the helper. */
        env->per_address = env->psw.addr;
        env->per_perc_atmid = PER_CODE_EVENT_STORE_REAL | get_per_atmid(env);
    }
}

void HELPER(sturg)(CPUS390XState *env, uint64_t addr, uint64_t v1)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));

    stq_phys(cs->as, get_address(env, 0, 0, addr), v1);

    if ((env->psw.mask & PSW_MASK_PER) &&
        (env->cregs[9] & PER_CR9_EVENT_STORE) &&
        (env->cregs[9] & PER_CR9_EVENT_STORE_REAL)) {
        /* PSW is saved just before calling the helper. */
        env->per_address = env->psw.addr;
        env->per_perc_atmid = PER_CODE_EVENT_STORE_REAL | get_per_atmid(env);
    }
}

/* load real address */
uint64_t HELPER(lra)(CPUS390XState *env, uint64_t addr)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));
    uint32_t cc = 0;
    int old_exc = cs->exception_index;
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;
    uint64_t ret;
    int flags;

    /* XXX incomplete - has more corner cases */
    if (!(env->psw.mask & PSW_MASK_64) && (addr >> 32)) {
        program_interrupt(env, PGM_SPECIAL_OP, 2);
    }

    cs->exception_index = old_exc;
    if (mmu_translate(env, addr, 0, asc, &ret, &flags, true)) {
        cc = 3;
    }
    if (cs->exception_index == EXCP_PGM) {
        ret = env->int_pgm_code | 0x80000000;
    } else {
        ret |= addr & ~TARGET_PAGE_MASK;
    }
    cs->exception_index = old_exc;

    env->cc_op = cc;
    return ret;
}
#endif