/*
 *  S/390 memory access helper routines
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2009 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
#include "hw/s390x/storage-keys.h"

/*****************************************************************************/
/* Softmmu support */
#if !defined(CONFIG_USER_ONLY)

/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx,
              uintptr_t retaddr)
{
    int ret;

    ret = s390_cpu_handle_mmu_fault(cs, addr, is_write, mmu_idx);
    if (unlikely(ret != 0)) {
        if (likely(retaddr)) {
            /* now we have a real cpu fault */
            cpu_restore_state(cs, retaddr);
        }
        cpu_loop_exit(cs);
    }
}

#endif

/* #define DEBUG_HELPER */
#ifdef DEBUG_HELPER
#define HELPER_LOG(x...) qemu_log(x)
#else
#define HELPER_LOG(x...)
#endif

/* Reduce the length so that addr + len doesn't cross a page boundary. */
static inline uint64_t adj_len_to_page(uint64_t len, uint64_t addr)
{
#ifndef CONFIG_USER_ONLY
    if ((addr & ~TARGET_PAGE_MASK) + len - 1 >= TARGET_PAGE_SIZE) {
        return -addr & ~TARGET_PAGE_MASK;
    }
#endif
    return len;
}

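/* The fast_memset/fast_memmove helpers below use tlb_vaddr_to_host() to get
   a direct host pointer when the softmmu TLB already maps the whole guest
   page with the required access, and fall back to byte-wise
   cpu_ldub_data()/cpu_stb_data() accesses (which can fault and refill the
   TLB) otherwise. */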
static void fast_memset(CPUS390XState *env, uint64_t dest, uint8_t byte,
                        uint32_t l)
{
    int mmu_idx = cpu_mmu_index(env, false);

    while (l > 0) {
        void *p = tlb_vaddr_to_host(env, dest, MMU_DATA_STORE, mmu_idx);
        if (p) {
            /* Access to the whole page in write mode granted. */
            int l_adj = adj_len_to_page(l, dest);
            memset(p, byte, l_adj);
            dest += l_adj;
            l -= l_adj;
        } else {
            /* We failed to get access to the whole page. The next write
               access will likely fill the QEMU TLB for the next iteration. */
            cpu_stb_data(env, dest, byte);
            dest++;
            l--;
        }
    }
}

static void fast_memmove(CPUS390XState *env, uint64_t dest, uint64_t src,
                         uint32_t l)
{
    int mmu_idx = cpu_mmu_index(env, false);

    while (l > 0) {
        void *src_p = tlb_vaddr_to_host(env, src, MMU_DATA_LOAD, mmu_idx);
        void *dest_p = tlb_vaddr_to_host(env, dest, MMU_DATA_STORE, mmu_idx);
        if (src_p && dest_p) {
            /* Access to both whole pages granted. */
            int l_adj = adj_len_to_page(l, src);
            l_adj = adj_len_to_page(l_adj, dest);
            memmove(dest_p, src_p, l_adj);
            src += l_adj;
            dest += l_adj;
            l -= l_adj;
        } else {
            /* We failed to get access to one or both whole pages. The next
               read or write access will likely fill the QEMU TLB for the
               next iteration. */
            cpu_stb_data(env, dest, cpu_ldub_data(env, src));
            src++;
            dest++;
            l--;
        }
    }
}

/* and on array */
uint32_t HELPER(nc)(CPUS390XState *env, uint32_t l, uint64_t dest,
                    uint64_t src)
{
    int i;
    unsigned char x;
    uint32_t cc = 0;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __func__, l, dest, src);
    for (i = 0; i <= l; i++) {
        x = cpu_ldub_data(env, dest + i) & cpu_ldub_data(env, src + i);
        if (x) {
            cc = 1;
        }
        cpu_stb_data(env, dest + i, x);
    }
    return cc;
}

/* xor on array */
uint32_t HELPER(xc)(CPUS390XState *env, uint32_t l, uint64_t dest,
                    uint64_t src)
{
    int i;
    unsigned char x;
    uint32_t cc = 0;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __func__, l, dest, src);

    /* xor with itself is the same as memset(0) */
    if (src == dest) {
        fast_memset(env, dest, 0, l + 1);
        return 0;
    }

    for (i = 0; i <= l; i++) {
        x = cpu_ldub_data(env, dest + i) ^ cpu_ldub_data(env, src + i);
        if (x) {
            cc = 1;
        }
        cpu_stb_data(env, dest + i, x);
    }
    return cc;
}

/* or on array */
uint32_t HELPER(oc)(CPUS390XState *env, uint32_t l, uint64_t dest,
                    uint64_t src)
{
    int i;
    unsigned char x;
    uint32_t cc = 0;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __func__, l, dest, src);
    for (i = 0; i <= l; i++) {
        x = cpu_ldub_data(env, dest + i) | cpu_ldub_data(env, src + i);
        if (x) {
            cc = 1;
        }
        cpu_stb_data(env, dest + i, x);
    }
    return cc;
}

/* memmove */
void HELPER(mvc)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
{
    int i = 0;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __func__, l, dest, src);

    /* mvc with source pointing to the byte after the destination is the
       same as memset with the first source byte */
    if (dest == (src + 1)) {
        fast_memset(env, dest, cpu_ldub_data(env, src), l + 1);
        return;
    }

    /* mvc and memmove do not behave the same when areas overlap! */
    if ((dest < src) || (src + l < dest)) {
        fast_memmove(env, dest, src, l + 1);
        return;
    }

    /* slow version with byte accesses which always work */
    for (i = 0; i <= l; i++) {
        cpu_stb_data(env, dest + i, cpu_ldub_data(env, src + i));
    }
}

/* compare unsigned byte arrays */
uint32_t HELPER(clc)(CPUS390XState *env, uint32_t l, uint64_t s1, uint64_t s2)
{
    int i;
    unsigned char x, y;
    uint32_t cc;

    HELPER_LOG("%s l %d s1 %" PRIx64 " s2 %" PRIx64 "\n",
               __func__, l, s1, s2);
    for (i = 0; i <= l; i++) {
        x = cpu_ldub_data(env, s1 + i);
        y = cpu_ldub_data(env, s2 + i);
        HELPER_LOG("%02x (%c)/%02x (%c) ", x, x, y, y);
        if (x < y) {
            cc = 1;
            goto done;
        } else if (x > y) {
            cc = 2;
            goto done;
        }
    }
    cc = 0;
 done:
    HELPER_LOG("\n");
    return cc;
}

/* compare logical under mask */
uint32_t HELPER(clm)(CPUS390XState *env, uint32_t r1, uint32_t mask,
                     uint64_t addr)
{
    uint8_t r, d;
    uint32_t cc;

    HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%" PRIx64 "\n", __func__, r1,
               mask, addr);
    cc = 0;
    while (mask) {
        if (mask & 8) {
            d = cpu_ldub_data(env, addr);
            r = (r1 & 0xff000000UL) >> 24;
            HELPER_LOG("mask 0x%x %02x/%02x (0x%" PRIx64 ") ", mask, r, d,
                       addr);
            if (r < d) {
                cc = 1;
                break;
            } else if (r > d) {
                cc = 2;
                break;
            }
            addr++;
        }
        mask = (mask << 1) & 0xf;
        r1 <<= 8;
    }
    HELPER_LOG("\n");
    return cc;
}

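/* Effective-address helpers: fold base/index/displacement together and,
   when the PSW says we are not in 64-bit addressing mode, wrap the result
   into the 31-bit address space. */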
static inline uint64_t fix_address(CPUS390XState *env, uint64_t a)
{
    /* 31-Bit mode */
    if (!(env->psw.mask & PSW_MASK_64)) {
        a &= 0x7fffffff;
    }
    return a;
}

static inline uint64_t get_address(CPUS390XState *env, int x2, int b2, int d2)
{
    uint64_t r = d2;
    if (x2) {
        r += env->regs[x2];
    }
    if (b2) {
        r += env->regs[b2];
    }
    return fix_address(env, r);
}

static inline uint64_t get_address_31fix(CPUS390XState *env, int reg)
{
    return fix_address(env, env->regs[reg]);
}

/* search string (c is byte to search, r2 is string, r1 end of string) */
uint64_t HELPER(srst)(CPUS390XState *env, uint64_t r0, uint64_t end,
                      uint64_t str)
{
    uint32_t len;
    uint8_t v, c = r0;

    str = fix_address(env, str);
    end = fix_address(env, end);

    /* Assume for now that R2 is unmodified. */
    env->retxl = str;

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do. For now, let's cap at 8k. */
    for (len = 0; len < 0x2000; ++len) {
        if (str + len == end) {
            /* Character not found. R1 & R2 are unmodified. */
            env->cc_op = 2;
            return end;
        }
        v = cpu_ldub_data(env, str + len);
        if (v == c) {
            /* Character found. Set R1 to the location; R2 is unmodified. */
            env->cc_op = 1;
            return str + len;
        }
    }

    /* CPU-determined bytes processed. Advance R2 to next byte to process. */
    env->retxl = str + len;
    env->cc_op = 3;
    return end;
}

/* unsigned string compare (c is string terminator) */
uint64_t HELPER(clst)(CPUS390XState *env, uint64_t c, uint64_t s1, uint64_t s2)
{
    uint32_t len;

    c = c & 0xff;
    s1 = fix_address(env, s1);
    s2 = fix_address(env, s2);

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do. For now, let's cap at 8k. */
    for (len = 0; len < 0x2000; ++len) {
        uint8_t v1 = cpu_ldub_data(env, s1 + len);
        uint8_t v2 = cpu_ldub_data(env, s2 + len);
        if (v1 == v2) {
            if (v1 == c) {
                /* Equal. CC=0, and don't advance the registers. */
                env->cc_op = 0;
                env->retxl = s2;
                return s1;
            }
        } else {
            /* Unequal. CC={1,2}, and advance the registers. Note that
               the terminator need not be zero, but the string that contains
               the terminator is by definition "low". */
            env->cc_op = (v1 == c ? 1 : v2 == c ? 2 : v1 < v2 ? 1 : 2);
            env->retxl = s2 + len;
            return s1 + len;
        }
    }

    /* CPU-determined bytes equal; advance the registers. */
    env->cc_op = 3;
    env->retxl = s2 + len;
    return s1 + len;
}

/* move page */
void HELPER(mvpg)(CPUS390XState *env, uint64_t r0, uint64_t r1, uint64_t r2)
{
    /* XXX missing r0 handling */
    env->cc_op = 0;
    fast_memmove(env, r1, r2, TARGET_PAGE_SIZE);
}

/* string copy (c is string terminator) */
uint64_t HELPER(mvst)(CPUS390XState *env, uint64_t c, uint64_t d, uint64_t s)
{
    uint32_t len;

    c = c & 0xff;
    d = fix_address(env, d);
    s = fix_address(env, s);

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do. For now, let's cap at 8k. */
    for (len = 0; len < 0x2000; ++len) {
        uint8_t v = cpu_ldub_data(env, s + len);
        cpu_stb_data(env, d + len, v);
        if (v == c) {
            /* Complete. Set CC=1 and advance R1. */
            env->cc_op = 1;
            env->retxl = s;
            return d + len;
        }
    }

    /* Incomplete. Set CC=3 and signal to advance R1 and R2. */
    env->cc_op = 3;
    env->retxl = s + len;
    return d + len;
}

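/* Insert Characters under Mask: replace the bytes of the low word of r1
   selected by MASK with successive bytes from ADDRESS, and derive the
   condition code from the inserted bits (first inserted bit one -> 1,
   otherwise any bit one -> 2, all zeros -> 0). Used by the EXECUTE
   handler below. */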
static uint32_t helper_icm(CPUS390XState *env, uint32_t r1, uint64_t address,
                           uint32_t mask)
{
    int pos = 24; /* top of the lower half of r1 */
    uint64_t rmask = 0xff000000ULL;
    uint8_t val = 0;
    int ccd = 0;
    uint32_t cc = 0;

    while (mask) {
        if (mask & 8) {
            env->regs[r1] &= ~rmask;
            val = cpu_ldub_data(env, address);
            if ((val & 0x80) && !ccd) {
                cc = 1;
            }
            ccd = 1;
            if (val && cc == 0) {
                cc = 2;
            }
            env->regs[r1] |= (uint64_t)val << pos;
            address++;
        }
        mask = (mask << 1) & 0xf;
        pos -= 8;
        rmask >>= 8;
    }

    return cc;
}

/* execute instruction
   this instruction executes an insn modified with the contents of r1
   it does not change the executed instruction in memory
   it does not change the program counter
   in other words: tricky...
   currently implemented by interpreting the cases it is most commonly used in
 */
uint32_t HELPER(ex)(CPUS390XState *env, uint32_t cc, uint64_t v1,
                    uint64_t addr, uint64_t ret)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint16_t insn = cpu_lduw_code(env, addr);

    HELPER_LOG("%s: v1 0x%lx addr 0x%lx insn 0x%x\n", __func__, v1, addr,
               insn);
    if ((insn & 0xf0ff) == 0xd000) {
        uint32_t l, insn2, b1, b2, d1, d2;

        l = v1 & 0xff;
        insn2 = cpu_ldl_code(env, addr + 2);
        b1 = (insn2 >> 28) & 0xf;
        b2 = (insn2 >> 12) & 0xf;
        d1 = (insn2 >> 16) & 0xfff;
        d2 = insn2 & 0xfff;
        switch (insn & 0xf00) {
        case 0x200:
            helper_mvc(env, l, get_address(env, 0, b1, d1),
                       get_address(env, 0, b2, d2));
            break;
        case 0x400:
            cc = helper_nc(env, l, get_address(env, 0, b1, d1),
                           get_address(env, 0, b2, d2));
            break;
        case 0x500:
            cc = helper_clc(env, l, get_address(env, 0, b1, d1),
                            get_address(env, 0, b2, d2));
            break;
        case 0x600:
            cc = helper_oc(env, l, get_address(env, 0, b1, d1),
                           get_address(env, 0, b2, d2));
            break;
        case 0x700:
            cc = helper_xc(env, l, get_address(env, 0, b1, d1),
                           get_address(env, 0, b2, d2));
            break;
        case 0xc00:
            helper_tr(env, l, get_address(env, 0, b1, d1),
                      get_address(env, 0, b2, d2));
            break;
        case 0xd00:
            cc = helper_trt(env, l, get_address(env, 0, b1, d1),
                            get_address(env, 0, b2, d2));
            break;
        default:
            goto abort;
        }
    } else if ((insn & 0xff00) == 0x0a00) {
        /* supervisor call */
        HELPER_LOG("%s: svc %ld via execute\n", __func__, (insn | v1) & 0xff);
        env->psw.addr = ret - 4;
        env->int_svc_code = (insn | v1) & 0xff;
        env->int_svc_ilen = 4;
        helper_exception(env, EXCP_SVC);
    } else if ((insn & 0xff00) == 0xbf00) {
        uint32_t insn2, r1, r3, b2, d2;

        insn2 = cpu_ldl_code(env, addr + 2);
        r1 = (insn2 >> 20) & 0xf;
        r3 = (insn2 >> 16) & 0xf;
        b2 = (insn2 >> 12) & 0xf;
        d2 = insn2 & 0xfff;
        cc = helper_icm(env, r1, get_address(env, 0, b2, d2), r3);
    } else {
    abort:
        cpu_abort(CPU(cpu),
                  "EXECUTE on instruction prefix 0x%x not implemented\n",
                  insn);
    }
    return cc;
}

/* load access registers r1 to r3 from memory at a2 */
void HELPER(lam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    int i;

    for (i = r1;; i = (i + 1) % 16) {
        env->aregs[i] = cpu_ldl_data(env, a2);
        a2 += 4;

        if (i == r3) {
            break;
        }
    }
}

/* store access registers r1 to r3 in memory at a2 */
void HELPER(stam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    int i;

    for (i = r1;; i = (i + 1) % 16) {
        cpu_stl_data(env, a2, env->aregs[i]);
        a2 += 4;

        if (i == r3) {
            break;
        }
    }
}

/* move long */
uint32_t HELPER(mvcl)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    uint64_t destlen = env->regs[r1 + 1] & 0xffffff;
    uint64_t dest = get_address_31fix(env, r1);
    uint64_t srclen = env->regs[r2 + 1] & 0xffffff;
    uint64_t src = get_address_31fix(env, r2);
    uint8_t pad = env->regs[r2 + 1] >> 24;
    uint8_t v;
    uint32_t cc;

    if (destlen == srclen) {
        cc = 0;
    } else if (destlen < srclen) {
        cc = 1;
    } else {
        cc = 2;
    }

    if (srclen > destlen) {
        srclen = destlen;
    }

    for (; destlen && srclen; src++, dest++, destlen--, srclen--) {
        v = cpu_ldub_data(env, src);
        cpu_stb_data(env, dest, v);
    }

    for (; destlen; dest++, destlen--) {
        cpu_stb_data(env, dest, pad);
    }

    env->regs[r1 + 1] = destlen;
    /* can't use srclen here, we trunc'ed it */
    env->regs[r2 + 1] -= src - env->regs[r2];
    env->regs[r1] = dest;
    env->regs[r2] = src;

    return cc;
}

/* move long extended: another memcopy insn with more bells and whistles */
uint32_t HELPER(mvcle)(CPUS390XState *env, uint32_t r1, uint64_t a2,
                       uint32_t r3)
{
    uint64_t destlen = env->regs[r1 + 1];
    uint64_t dest = env->regs[r1];
    uint64_t srclen = env->regs[r3 + 1];
    uint64_t src = env->regs[r3];
    uint8_t pad = a2 & 0xff;
    uint8_t v;
    uint32_t cc;

    if (!(env->psw.mask & PSW_MASK_64)) {
        destlen = (uint32_t)destlen;
        srclen = (uint32_t)srclen;
        dest &= 0x7fffffff;
        src &= 0x7fffffff;
    }

    if (destlen == srclen) {
        cc = 0;
    } else if (destlen < srclen) {
        cc = 1;
    } else {
        cc = 2;
    }

    if (srclen > destlen) {
        srclen = destlen;
    }

    for (; destlen && srclen; src++, dest++, destlen--, srclen--) {
        v = cpu_ldub_data(env, src);
        cpu_stb_data(env, dest, v);
    }

    for (; destlen; dest++, destlen--) {
        cpu_stb_data(env, dest, pad);
    }

    env->regs[r1 + 1] = destlen;
    /* can't use srclen here, we trunc'ed it */
    /* FIXME: 31-bit mode! */
    env->regs[r3 + 1] -= src - env->regs[r3];
    env->regs[r1] = dest;
    env->regs[r3] = src;

    return cc;
}

/* compare logical long extended: memcompare insn with padding */
uint32_t HELPER(clcle)(CPUS390XState *env, uint32_t r1, uint64_t a2,
                       uint32_t r3)
{
    uint64_t destlen = env->regs[r1 + 1];
    uint64_t dest = get_address_31fix(env, r1);
    uint64_t srclen = env->regs[r3 + 1];
    uint64_t src = get_address_31fix(env, r3);
    uint8_t pad = a2 & 0xff;
    uint8_t v1 = 0, v2 = 0;
    uint32_t cc = 0;

    if (!(destlen || srclen)) {
        return cc;
    }

    if (srclen > destlen) {
        srclen = destlen;
    }

    for (; destlen || srclen; src++, dest++, destlen--, srclen--) {
        v1 = srclen ? cpu_ldub_data(env, src) : pad;
        v2 = destlen ? cpu_ldub_data(env, dest) : pad;
        if (v1 != v2) {
            cc = (v1 < v2) ? 1 : 2;
            break;
        }
    }

    env->regs[r1 + 1] = destlen;
    /* can't use srclen here, we trunc'ed it */
    env->regs[r3 + 1] -= src - env->regs[r3];
    env->regs[r1] = dest;
    env->regs[r3] = src;

    return cc;
}

/* checksum */
uint64_t HELPER(cksm)(CPUS390XState *env, uint64_t r1,
                      uint64_t src, uint64_t src_len)
{
    uint64_t max_len, len;
    uint64_t cksm = (uint32_t)r1;

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do. For now, let's cap at 8k. */
    max_len = (src_len > 0x2000 ? 0x2000 : src_len);

    /* Process full words as available. */
    for (len = 0; len + 4 <= max_len; len += 4, src += 4) {
        cksm += (uint32_t)cpu_ldl_data(env, src);
    }

    switch (max_len - len) {
    case 1:
        cksm += cpu_ldub_data(env, src) << 24;
        len += 1;
        break;
    case 2:
        cksm += cpu_lduw_data(env, src) << 16;
        len += 2;
        break;
    case 3:
        cksm += cpu_lduw_data(env, src) << 16;
        cksm += cpu_ldub_data(env, src + 2) << 8;
        len += 3;
        break;
    }

    /* Fold the carry from the checksum. Note that we can see carry-out
       during folding more than once (but probably not more than twice). */
    while (cksm > 0xffffffffull) {
        cksm = (uint32_t)cksm + (cksm >> 32);
    }

    /* Indicate whether or not we've processed everything. */
    env->cc_op = (len == src_len ? 0 : 3);

    /* Return both cksm and processed length. */
    env->retxl = cksm;
    return len;
}

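/* unpack: convert the packed decimal operand at src into zoned format at
   dest, moving right to left and giving each digit a 0xf zone nibble; the
   rightmost byte only has its nibbles swapped. */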
void HELPER(unpk)(CPUS390XState *env, uint32_t len, uint64_t dest,
                  uint64_t src)
{
    int len_dest = len >> 4;
    int len_src = len & 0xf;
    uint8_t b;
    int second_nibble = 0;

    dest += len_dest;
    src += len_src;

    /* last byte is special, it only flips the nibbles */
    b = cpu_ldub_data(env, src);
    cpu_stb_data(env, dest, (b << 4) | (b >> 4));
    src--;
    len_src--;

    /* now pad every nibble with 0xf0 */

    while (len_dest > 0) {
        uint8_t cur_byte = 0;

        if (len_src > 0) {
            cur_byte = cpu_ldub_data(env, src);
        }

        len_dest--;
        dest--;

        /* only advance one nibble at a time */
        if (second_nibble) {
            cur_byte >>= 4;
            len_src--;
            src--;
        }
        second_nibble = !second_nibble;

        /* digit */
        cur_byte = (cur_byte & 0xf);
        /* zone bits */
        cur_byte |= 0xf0;

        cpu_stb_data(env, dest, cur_byte);
    }
}

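/* translate: replace each of the len+1 bytes of the array with its lookup
   in the 256-byte translation table at trans. */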
void HELPER(tr)(CPUS390XState *env, uint32_t len, uint64_t array,
                uint64_t trans)
{
    int i;

    for (i = 0; i <= len; i++) {
        uint8_t byte = cpu_ldub_data(env, array + i);
        uint8_t new_byte = cpu_ldub_data(env, trans + byte);

        cpu_stb_data(env, array + i, new_byte);
    }
}

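/* translate extended: like tr, but the length comes from a register,
   translation stops at the test byte held in r0, and the condition code
   and remaining length are returned to the caller. */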
uint64_t HELPER(tre)(CPUS390XState *env, uint64_t array,
                     uint64_t len, uint64_t trans)
{
    uint8_t end = env->regs[0] & 0xff;
    uint64_t l = len;
    uint64_t i;

    if (!(env->psw.mask & PSW_MASK_64)) {
        array &= 0x7fffffff;
        l = (uint32_t)l;
    }

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do. For now, let's cap at 8k. */
    if (l > 0x2000) {
        l = 0x2000;
        env->cc_op = 3;
    } else {
        env->cc_op = 0;
    }

    for (i = 0; i < l; i++) {
        uint8_t byte, new_byte;

        byte = cpu_ldub_data(env, array + i);

        if (byte == end) {
            env->cc_op = 1;
            break;
        }

        new_byte = cpu_ldub_data(env, trans + byte);
        cpu_stb_data(env, array + i, new_byte);
    }

    env->retxl = len - i;
    return array + i;
}

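/* translate and test: scan the array and stop at the first byte whose
   table entry (function byte) is non-zero, recording the address in r1
   and the function byte in r2. */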
uint32_t HELPER(trt)(CPUS390XState *env, uint32_t len, uint64_t array,
                     uint64_t trans)
{
    uint32_t cc = 0;
    int i;

    for (i = 0; i <= len; i++) {
        uint8_t byte = cpu_ldub_data(env, array + i);
        uint8_t sbyte = cpu_ldub_data(env, trans + byte);

        if (sbyte != 0) {
            env->regs[1] = array + i;
            env->regs[2] = (env->regs[2] & ~0xff) | sbyte;
            cc = (i == len) ? 2 : 1;
            break;
        }
    }

    return cc;
}

#if !defined(CONFIG_USER_ONLY)
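/* load control registers r1 to r3 (64-bit) from memory at a2; changes to
   the PER control registers 9-11 require the watchpoints to be
   recomputed. */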
void HELPER(lctlg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    bool PERchanged = false;
    int i;
    uint64_t src = a2;
    uint64_t val;

    for (i = r1;; i = (i + 1) % 16) {
        val = cpu_ldq_data(env, src);
        if (env->cregs[i] != val && i >= 9 && i <= 11) {
            PERchanged = true;
        }
        env->cregs[i] = val;
        HELPER_LOG("load ctl %d from 0x%" PRIx64 " == 0x%" PRIx64 "\n",
                   i, src, env->cregs[i]);
        src += sizeof(uint64_t);

        if (i == r3) {
            break;
        }
    }

    if (PERchanged && env->psw.mask & PSW_MASK_PER) {
        s390_cpu_recompute_watchpoints(CPU(cpu));
    }

    tlb_flush(CPU(cpu), 1);
}

void HELPER(lctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    bool PERchanged = false;
    int i;
    uint64_t src = a2;
    uint32_t val;

    for (i = r1;; i = (i + 1) % 16) {
        val = cpu_ldl_data(env, src);
        if ((uint32_t)env->cregs[i] != val && i >= 9 && i <= 11) {
            PERchanged = true;
        }
        env->cregs[i] = (env->cregs[i] & 0xFFFFFFFF00000000ULL) | val;
        src += sizeof(uint32_t);

        if (i == r3) {
            break;
        }
    }

    if (PERchanged && env->psw.mask & PSW_MASK_PER) {
        s390_cpu_recompute_watchpoints(CPU(cpu));
    }

    tlb_flush(CPU(cpu), 1);
}

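/* store control registers r1 to r3 to memory at a2 (64-bit and 32-bit
   variants). */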
void HELPER(stctg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    int i;
    uint64_t dest = a2;

    for (i = r1;; i = (i + 1) % 16) {
        cpu_stq_data(env, dest, env->cregs[i]);
        dest += sizeof(uint64_t);

        if (i == r3) {
            break;
        }
    }
}

void HELPER(stctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    int i;
    uint64_t dest = a2;

    for (i = r1;; i = (i + 1) % 16) {
        cpu_stl_data(env, dest, env->cregs[i]);
        dest += sizeof(uint32_t);

        if (i == r3) {
            break;
        }
    }
}

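/* test protection: not implemented, always reports "fetch and store
   allowed" (cc 0). */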
uint32_t HELPER(tprot)(uint64_t a1, uint64_t a2)
{
    /* XXX implement */

    return 0;
}

/* insert storage key extended */
uint64_t HELPER(iske)(CPUS390XState *env, uint64_t r2)
{
    static S390SKeysState *ss;
    static S390SKeysClass *skeyclass;
    uint64_t addr = get_address(env, 0, 0, r2);
    uint8_t key;

    if (addr > ram_size) {
        return 0;
    }

    if (unlikely(!ss)) {
        ss = s390_get_skeys_device();
        skeyclass = S390_SKEYS_GET_CLASS(ss);
    }

    if (skeyclass->get_skeys(ss, addr / TARGET_PAGE_SIZE, 1, &key)) {
        return 0;
    }
    return key;
}

/* set storage key extended */
void HELPER(sske)(CPUS390XState *env, uint64_t r1, uint64_t r2)
{
    static S390SKeysState *ss;
    static S390SKeysClass *skeyclass;
    uint64_t addr = get_address(env, 0, 0, r2);
    uint8_t key;

    if (addr > ram_size) {
        return;
    }

    if (unlikely(!ss)) {
        ss = s390_get_skeys_device();
        skeyclass = S390_SKEYS_GET_CLASS(ss);
    }

    key = (uint8_t) r1;
    skeyclass->set_skeys(ss, addr / TARGET_PAGE_SIZE, 1, &key);
}

/* reset reference bit extended */
uint32_t HELPER(rrbe)(CPUS390XState *env, uint64_t r2)
{
    static S390SKeysState *ss;
    static S390SKeysClass *skeyclass;
    uint8_t re, key;

    if (r2 > ram_size) {
        return 0;
    }

    if (unlikely(!ss)) {
        ss = s390_get_skeys_device();
        skeyclass = S390_SKEYS_GET_CLASS(ss);
    }

    if (skeyclass->get_skeys(ss, r2 / TARGET_PAGE_SIZE, 1, &key)) {
        return 0;
    }

    re = key & (SK_R | SK_C);
    key &= ~SK_R;

    if (skeyclass->set_skeys(ss, r2 / TARGET_PAGE_SIZE, 1, &key)) {
        return 0;
    }

    /*
     * cc
     *
     * 0  Reference bit zero; change bit zero
     * 1  Reference bit zero; change bit one
     * 2  Reference bit one; change bit zero
     * 3  Reference bit one; change bit one
     */

    return re >> 1;
}

/* compare and swap and purge */
uint32_t HELPER(csp)(CPUS390XState *env, uint32_t r1, uint64_t r2)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint32_t cc;
    uint32_t o1 = env->regs[r1];
    uint64_t a2 = r2 & ~3ULL;
    uint32_t o2 = cpu_ldl_data(env, a2);

    if (o1 == o2) {
        cpu_stl_data(env, a2, env->regs[(r1 + 1) & 15]);
        if (r2 & 0x3) {
            /* flush TLB / ALB */
            tlb_flush(CPU(cpu), 1);
        }
        cc = 0;
    } else {
        env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | o2;
        cc = 1;
    }

    return cc;
}

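/* move to secondary / move to primary: copy up to 256 bytes between the
   primary and secondary address spaces (cc 3 if the requested length was
   truncated). */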
uint32_t HELPER(mvcs)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2)
{
    int cc = 0, i;

    HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
               __func__, l, a1, a2);

    if (l > 256) {
        /* max 256 */
        l = 256;
        cc = 3;
    }

    /* XXX replace w/ memcpy */
    for (i = 0; i < l; i++) {
        cpu_stb_secondary(env, a1 + i, cpu_ldub_primary(env, a2 + i));
    }

    return cc;
}

uint32_t HELPER(mvcp)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2)
{
    int cc = 0, i;

    HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
               __func__, l, a1, a2);

    if (l > 256) {
        /* max 256 */
        l = 256;
        cc = 3;
    }

    /* XXX replace w/ memcpy */
    for (i = 0; i < l; i++) {
        cpu_stb_primary(env, a1 + i, cpu_ldub_secondary(env, a2 + i));
    }

    return cc;
}

/* invalidate pte */
void HELPER(ipte)(CPUS390XState *env, uint64_t pte_addr, uint64_t vaddr)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));
    uint64_t page = vaddr & TARGET_PAGE_MASK;
    uint64_t pte = 0;

    /* XXX broadcast to other CPUs */

    /* XXX Linux is nice enough to give us the exact pte address.
       According to spec we'd have to find it out ourselves */
    /* XXX Linux is fine with overwriting the pte, the spec requires
       us to only set the invalid bit */
    stq_phys(cs->as, pte_addr, pte | _PAGE_INVALID);

    /* XXX we exploit the fact that Linux passes the exact virtual
       address here - it's not obliged to! */
    tlb_flush_page(cs, page);

    /* XXX 31-bit hack */
    if (page & 0x80000000) {
        tlb_flush_page(cs, page & ~0x80000000);
    } else {
        tlb_flush_page(cs, page | 0x80000000);
    }
}

/* flush local tlb */
void HELPER(ptlb)(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);

    tlb_flush(CPU(cpu), 1);
}

/* load using real address */
uint64_t HELPER(lura)(CPUS390XState *env, uint64_t addr)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));

    return (uint32_t)ldl_phys(cs->as, get_address(env, 0, 0, addr));
}

uint64_t HELPER(lurag)(CPUS390XState *env, uint64_t addr)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));

    return ldq_phys(cs->as, get_address(env, 0, 0, addr));
}

/* store using real address */
void HELPER(stura)(CPUS390XState *env, uint64_t addr, uint64_t v1)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));

    stl_phys(cs->as, get_address(env, 0, 0, addr), (uint32_t)v1);

    if ((env->psw.mask & PSW_MASK_PER) &&
        (env->cregs[9] & PER_CR9_EVENT_STORE) &&
        (env->cregs[9] & PER_CR9_EVENT_STORE_REAL)) {
        /* PSW is saved just before calling the helper. */
        env->per_address = env->psw.addr;
        env->per_perc_atmid = PER_CODE_EVENT_STORE_REAL | get_per_atmid(env);
    }
}

void HELPER(sturg)(CPUS390XState *env, uint64_t addr, uint64_t v1)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));

    stq_phys(cs->as, get_address(env, 0, 0, addr), v1);

    if ((env->psw.mask & PSW_MASK_PER) &&
        (env->cregs[9] & PER_CR9_EVENT_STORE) &&
        (env->cregs[9] & PER_CR9_EVENT_STORE_REAL)) {
        /* PSW is saved just before calling the helper. */
        env->per_address = env->psw.addr;
        env->per_perc_atmid = PER_CODE_EVENT_STORE_REAL | get_per_atmid(env);
    }
}

/* load real address */
uint64_t HELPER(lra)(CPUS390XState *env, uint64_t addr)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));
    uint32_t cc = 0;
    int old_exc = cs->exception_index;
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;
    uint64_t ret;
    int flags;

    /* XXX incomplete - has more corner cases */
    if (!(env->psw.mask & PSW_MASK_64) && (addr >> 32)) {
        program_interrupt(env, PGM_SPECIAL_OP, 2);
    }

    cs->exception_index = old_exc;
    if (mmu_translate(env, addr, 0, asc, &ret, &flags, true)) {
        cc = 3;
    }
    if (cs->exception_index == EXCP_PGM) {
        ret = env->int_pgm_code | 0x80000000;
    } else {
        ret |= addr & ~TARGET_PAGE_MASK;
    }
    cs->exception_index = old_exc;

    env->cc_op = cc;
    return ret;
}
#endif