target-s390x/mem_helper.c
/*
 *  S/390 memory access helper routines
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2009 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
#include "hw/s390x/storage-keys.h"

/*****************************************************************************/
/* Softmmu support */
#if !defined(CONFIG_USER_ONLY)
/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx,
              uintptr_t retaddr)
{
    int ret;

    ret = s390_cpu_handle_mmu_fault(cs, addr, is_write, mmu_idx);
    if (unlikely(ret != 0)) {
        if (likely(retaddr)) {
            /* now we have a real cpu fault */
            cpu_restore_state(cs, retaddr);
        }
        cpu_loop_exit(cs);
    }
}

#endif
/* #define DEBUG_HELPER */
#ifdef DEBUG_HELPER
#define HELPER_LOG(x...) qemu_log(x)
#else
#define HELPER_LOG(x...)
#endif
/* Reduce the length so that addr + len doesn't cross a page boundary.  */
static inline uint64_t adj_len_to_page(uint64_t len, uint64_t addr)
{
#ifndef CONFIG_USER_ONLY
    if ((addr & ~TARGET_PAGE_MASK) + len - 1 >= TARGET_PAGE_SIZE) {
        return -addr & ~TARGET_PAGE_MASK;
    }
#endif
    return len;
}
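/* For example, with 4 KiB pages, addr = 0x1ffe and len = 16 gives
   0xffe + 15 >= 0x1000, so only the 2 bytes up to the end of the page are
   handled in this round; the callers below loop for the remainder. */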
static void fast_memset(CPUS390XState *env, uint64_t dest, uint8_t byte,
                        uint32_t l)
{
    int mmu_idx = cpu_mmu_index(env, false);

    while (l > 0) {
        void *p = tlb_vaddr_to_host(env, dest, MMU_DATA_STORE, mmu_idx);
        if (p) {
            /* Access to the whole page in write mode granted.  */
            int l_adj = adj_len_to_page(l, dest);
            memset(p, byte, l_adj);
            dest += l_adj;
            l -= l_adj;
        } else {
            /* We failed to get access to the whole page. The next write
               access will likely fill the QEMU TLB for the next iteration.  */
            cpu_stb_data(env, dest, byte);
            dest++;
            l--;
        }
    }
}
static void fast_memmove(CPUS390XState *env, uint64_t dest, uint64_t src,
                         uint32_t l)
{
    int mmu_idx = cpu_mmu_index(env, false);

    while (l > 0) {
        void *src_p = tlb_vaddr_to_host(env, src, MMU_DATA_LOAD, mmu_idx);
        void *dest_p = tlb_vaddr_to_host(env, dest, MMU_DATA_STORE, mmu_idx);
        if (src_p && dest_p) {
            /* Access to both whole pages granted.  */
            int l_adj = adj_len_to_page(l, src);
            l_adj = adj_len_to_page(l_adj, dest);
            memmove(dest_p, src_p, l_adj);
            src += l_adj;
            dest += l_adj;
            l -= l_adj;
        } else {
            /* We failed to get access to one or both whole pages. The next
               read or write access will likely fill the QEMU TLB for the
               next iteration.  */
            cpu_stb_data(env, dest, cpu_ldub_data(env, src));
            src++;
            dest++;
            l--;
        }
    }
}
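/* For the SS-format storage-to-storage helpers below (NC, XC, OC, MVC, CLC,
   TR, TRT), the length argument is the encoded L field of the instruction,
   i.e. the operand length minus one, which is why the loops run l + 1 times. */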
/* and on array */
uint32_t HELPER(nc)(CPUS390XState *env, uint32_t l, uint64_t dest,
                    uint64_t src)
{
    int i;
    unsigned char x;
    uint32_t cc = 0;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __func__, l, dest, src);
    for (i = 0; i <= l; i++) {
        x = cpu_ldub_data(env, dest + i) & cpu_ldub_data(env, src + i);
        if (x) {
            cc = 1;
        }
        cpu_stb_data(env, dest + i, x);
    }
    return cc;
}

/* xor on array */
uint32_t HELPER(xc)(CPUS390XState *env, uint32_t l, uint64_t dest,
                    uint64_t src)
{
    int i;
    unsigned char x;
    uint32_t cc = 0;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __func__, l, dest, src);

    /* xor with itself is the same as memset(0) */
    if (src == dest) {
        fast_memset(env, dest, 0, l + 1);
        return 0;
    }

    for (i = 0; i <= l; i++) {
        x = cpu_ldub_data(env, dest + i) ^ cpu_ldub_data(env, src + i);
        if (x) {
            cc = 1;
        }
        cpu_stb_data(env, dest + i, x);
    }
    return cc;
}

/* or on array */
uint32_t HELPER(oc)(CPUS390XState *env, uint32_t l, uint64_t dest,
                    uint64_t src)
{
    int i;
    unsigned char x;
    uint32_t cc = 0;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __func__, l, dest, src);
    for (i = 0; i <= l; i++) {
        x = cpu_ldub_data(env, dest + i) | cpu_ldub_data(env, src + i);
        if (x) {
            cc = 1;
        }
        cpu_stb_data(env, dest + i, x);
    }
    return cc;
}
/* memmove */
void HELPER(mvc)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
{
    int i = 0;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __func__, l, dest, src);

    /* mvc with source pointing to the byte after the destination is the
       same as memset with the first source byte */
    if (dest == (src + 1)) {
        fast_memset(env, dest, cpu_ldub_data(env, src), l + 1);
        return;
    }

    /* mvc and memmove do not behave the same when areas overlap! */
    if ((dest < src) || (src + l < dest)) {
        fast_memmove(env, dest, src, l + 1);
        return;
    }

    /* slow version with byte accesses which always work */
    for (i = 0; i <= l; i++) {
        cpu_stb_data(env, dest + i, cpu_ldub_data(env, src + i));
    }
}

/* compare unsigned byte arrays */
uint32_t HELPER(clc)(CPUS390XState *env, uint32_t l, uint64_t s1, uint64_t s2)
{
    int i;
    unsigned char x, y;
    uint32_t cc;

    HELPER_LOG("%s l %d s1 %" PRIx64 " s2 %" PRIx64 "\n",
               __func__, l, s1, s2);
    for (i = 0; i <= l; i++) {
        x = cpu_ldub_data(env, s1 + i);
        y = cpu_ldub_data(env, s2 + i);
        HELPER_LOG("%02x (%c)/%02x (%c) ", x, x, y, y);
        if (x < y) {
            cc = 1;
            goto done;
        } else if (x > y) {
            cc = 2;
            goto done;
        }
    }
    cc = 0;
 done:
    HELPER_LOG("\n");
    return cc;
}
/* compare logical under mask */
uint32_t HELPER(clm)(CPUS390XState *env, uint32_t r1, uint32_t mask,
                     uint64_t addr)
{
    uint8_t r, d;
    uint32_t cc;

    HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%" PRIx64 "\n", __func__, r1,
               mask, addr);
    cc = 0;
    while (mask) {
        if (mask & 8) {
            d = cpu_ldub_data(env, addr);
            r = (r1 & 0xff000000UL) >> 24;
            HELPER_LOG("mask 0x%x %02x/%02x (0x%" PRIx64 ") ", mask, r, d,
                       addr);
            if (r < d) {
                cc = 1;
                break;
            } else if (r > d) {
                cc = 2;
                break;
            }
            addr++;
        }
        mask = (mask << 1) & 0xf;
        r1 <<= 8;
    }
    HELPER_LOG("\n");
    return cc;
}
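/* For example, mask 0b1010 compares register bytes 0 and 2 (counting from the
   left of the low word of r1) against two consecutive storage bytes; register
   bytes whose mask bit is zero consume no storage bytes. */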
static inline uint64_t fix_address(CPUS390XState *env, uint64_t a)
{
    /* 31-Bit mode */
    if (!(env->psw.mask & PSW_MASK_64)) {
        a &= 0x7fffffff;
    }
    return a;
}
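/* Compute base + index + displacement for an operand address, truncated to
   31 bits when the PSW is not in 64-bit addressing mode. */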
static inline uint64_t get_address(CPUS390XState *env, int x2, int b2, int d2)
{
    uint64_t r = d2;
    if (x2) {
        r += env->regs[x2];
    }
    if (b2) {
        r += env->regs[b2];
    }
    return fix_address(env, r);
}

static inline uint64_t get_address_31fix(CPUS390XState *env, int reg)
{
    return fix_address(env, env->regs[reg]);
}

/* search string (c is byte to search, r2 is string, r1 end of string) */
uint64_t HELPER(srst)(CPUS390XState *env, uint64_t r0, uint64_t end,
                      uint64_t str)
{
    uint32_t len;
    uint8_t v, c = r0;

    str = fix_address(env, str);
    end = fix_address(env, end);

    /* Assume for now that R2 is unmodified.  */
    env->retxl = str;

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 8k.  */
    for (len = 0; len < 0x2000; ++len) {
        if (str + len == end) {
            /* Character not found.  R1 & R2 are unmodified.  */
            env->cc_op = 2;
            return end;
        }
        v = cpu_ldub_data(env, str + len);
        if (v == c) {
            /* Character found.  Set R1 to the location; R2 is unmodified.  */
            env->cc_op = 1;
            return str + len;
        }
    }

    /* CPU-determined bytes processed.  Advance R2 to next byte to process.  */
    env->retxl = str + len;
    env->cc_op = 3;
    return end;
}
/* unsigned string compare (c is string terminator) */
uint64_t HELPER(clst)(CPUS390XState *env, uint64_t c, uint64_t s1, uint64_t s2)
{
    uint32_t len;

    c = c & 0xff;
    s1 = fix_address(env, s1);
    s2 = fix_address(env, s2);

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 8k.  */
    for (len = 0; len < 0x2000; ++len) {
        uint8_t v1 = cpu_ldub_data(env, s1 + len);
        uint8_t v2 = cpu_ldub_data(env, s2 + len);
        if (v1 == v2) {
            if (v1 == c) {
                /* Equal.  CC=0, and don't advance the registers.  */
                env->cc_op = 0;
                env->retxl = s2;
                return s1;
            }
        } else {
            /* Unequal.  CC={1,2}, and advance the registers.  Note that
               the terminator need not be zero, but the string that contains
               the terminator is by definition "low".  */
            env->cc_op = (v1 == c ? 1 : v2 == c ? 2 : v1 < v2 ? 1 : 2);
            env->retxl = s2 + len;
            return s1 + len;
        }
    }

    /* CPU-determined bytes equal; advance the registers.  */
    env->cc_op = 3;
    env->retxl = s2 + len;
    return s1 + len;
}

/* move page */
void HELPER(mvpg)(CPUS390XState *env, uint64_t r0, uint64_t r1, uint64_t r2)
{
    /* XXX missing r0 handling */
    env->cc_op = 0;
    fast_memmove(env, r1, r2, TARGET_PAGE_SIZE);
}
/* string copy (c is string terminator) */
uint64_t HELPER(mvst)(CPUS390XState *env, uint64_t c, uint64_t d, uint64_t s)
{
    uint32_t len;

    c = c & 0xff;
    d = fix_address(env, d);
    s = fix_address(env, s);

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 8k.  */
    for (len = 0; len < 0x2000; ++len) {
        uint8_t v = cpu_ldub_data(env, s + len);
        cpu_stb_data(env, d + len, v);
        if (v == c) {
            /* Complete.  Set CC=1 and advance R1.  */
            env->cc_op = 1;
            env->retxl = s;
            return d + len;
        }
    }

    /* Incomplete.  Set CC=3 and signal to advance R1 and R2.  */
    env->cc_op = 3;
    env->retxl = s + len;
    return d + len;
}
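/* insert characters under mask: load the storage bytes selected by the mask
   into the low word of r1.  cc is 0 if all inserted bits are zero, 1 if the
   leftmost inserted bit is one, and 2 if the leftmost inserted bit is zero
   but some inserted bit is one. */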
static uint32_t helper_icm(CPUS390XState *env, uint32_t r1, uint64_t address,
                           uint32_t mask)
{
    int pos = 24; /* top of the lower half of r1 */
    uint64_t rmask = 0xff000000ULL;
    uint8_t val = 0;
    int ccd = 0;
    uint32_t cc = 0;

    while (mask) {
        if (mask & 8) {
            env->regs[r1] &= ~rmask;
            val = cpu_ldub_data(env, address);
            if ((val & 0x80) && !ccd) {
                cc = 1;
            }
            ccd = 1;
            if (val && cc == 0) {
                cc = 2;
            }
            env->regs[r1] |= (uint64_t)val << pos;
            address++;
        }
        mask = (mask << 1) & 0xf;
        pos -= 8;
        rmask >>= 8;
    }

    return cc;
}
/* execute instruction
   this instruction executes an insn modified with the contents of r1
   it does not change the executed instruction in memory
   it does not change the program counter
   in other words: tricky...
   currently implemented by interpreting the cases it is most commonly used in
*/
uint32_t HELPER(ex)(CPUS390XState *env, uint32_t cc, uint64_t v1,
                    uint64_t addr, uint64_t ret)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint16_t insn = cpu_lduw_code(env, addr);

    HELPER_LOG("%s: v1 0x%lx addr 0x%lx insn 0x%x\n", __func__, v1, addr,
               insn);
    if ((insn & 0xf0ff) == 0xd000) {
        uint32_t l, insn2, b1, b2, d1, d2;

        l = v1 & 0xff;
        insn2 = cpu_ldl_code(env, addr + 2);
        b1 = (insn2 >> 28) & 0xf;
        b2 = (insn2 >> 12) & 0xf;
        d1 = (insn2 >> 16) & 0xfff;
        d2 = insn2 & 0xfff;
        switch (insn & 0xf00) {
        case 0x200:
            helper_mvc(env, l, get_address(env, 0, b1, d1),
                       get_address(env, 0, b2, d2));
            break;
        case 0x400:
            cc = helper_nc(env, l, get_address(env, 0, b1, d1),
                           get_address(env, 0, b2, d2));
            break;
        case 0x500:
            cc = helper_clc(env, l, get_address(env, 0, b1, d1),
                            get_address(env, 0, b2, d2));
            break;
        case 0x600:
            cc = helper_oc(env, l, get_address(env, 0, b1, d1),
                           get_address(env, 0, b2, d2));
            break;
        case 0x700:
            cc = helper_xc(env, l, get_address(env, 0, b1, d1),
                           get_address(env, 0, b2, d2));
            break;
        case 0xc00:
            helper_tr(env, l, get_address(env, 0, b1, d1),
                      get_address(env, 0, b2, d2));
            break;
        case 0xd00:
            cc = helper_trt(env, l, get_address(env, 0, b1, d1),
                            get_address(env, 0, b2, d2));
            break;
        default:
            goto abort;
        }
    } else if ((insn & 0xff00) == 0x0a00) {
        /* supervisor call */
        HELPER_LOG("%s: svc %ld via execute\n", __func__, (insn | v1) & 0xff);
        env->psw.addr = ret - 4;
        env->int_svc_code = (insn | v1) & 0xff;
        env->int_svc_ilen = 4;
        helper_exception(env, EXCP_SVC);
    } else if ((insn & 0xff00) == 0xbf00) {
        uint32_t insn2, r1, r3, b2, d2;

        insn2 = cpu_ldl_code(env, addr + 2);
        r1 = (insn2 >> 20) & 0xf;
        r3 = (insn2 >> 16) & 0xf;
        b2 = (insn2 >> 12) & 0xf;
        d2 = insn2 & 0xfff;
        cc = helper_icm(env, r1, get_address(env, 0, b2, d2), r3);
    } else {
    abort:
        cpu_abort(CPU(cpu), "EXECUTE on instruction prefix 0x%x not implemented\n",
                  insn);
    }
    return cc;
}
/* load access registers r1 to r3 from memory at a2 */
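/* The register range wraps modulo 16: e.g. r1 = 14, r3 = 1 covers access
   registers 14, 15, 0 and 1. */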
void HELPER(lam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    int i;

    for (i = r1;; i = (i + 1) % 16) {
        env->aregs[i] = cpu_ldl_data(env, a2);
        a2 += 4;

        if (i == r3) {
            break;
        }
    }
}

/* store access registers r1 to r3 in memory at a2 */
void HELPER(stam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    int i;

    for (i = r1;; i = (i + 1) % 16) {
        cpu_stl_data(env, a2, env->aregs[i]);
        a2 += 4;

        if (i == r3) {
            break;
        }
    }
}
/* move long */
uint32_t HELPER(mvcl)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    uint64_t destlen = env->regs[r1 + 1] & 0xffffff;
    uint64_t dest = get_address_31fix(env, r1);
    uint64_t srclen = env->regs[r2 + 1] & 0xffffff;
    uint64_t src = get_address_31fix(env, r2);
    uint8_t pad = env->regs[r2 + 1] >> 24;
    uint8_t v;
    uint32_t cc;

    if (destlen == srclen) {
        cc = 0;
    } else if (destlen < srclen) {
        cc = 1;
    } else {
        cc = 2;
    }

    if (srclen > destlen) {
        srclen = destlen;
    }

    for (; destlen && srclen; src++, dest++, destlen--, srclen--) {
        v = cpu_ldub_data(env, src);
        cpu_stb_data(env, dest, v);
    }

    for (; destlen; dest++, destlen--) {
        cpu_stb_data(env, dest, pad);
    }

    env->regs[r1 + 1] = destlen;
    /* can't use srclen here, we trunc'ed it */
    env->regs[r2 + 1] -= src - env->regs[r2];
    env->regs[r1] = dest;
    env->regs[r2] = src;

    return cc;
}
/* move long extended - another memcopy insn with more bells and whistles */
uint32_t HELPER(mvcle)(CPUS390XState *env, uint32_t r1, uint64_t a2,
                       uint32_t r3)
{
    uint64_t destlen = env->regs[r1 + 1];
    uint64_t dest = env->regs[r1];
    uint64_t srclen = env->regs[r3 + 1];
    uint64_t src = env->regs[r3];
    uint8_t pad = a2 & 0xff;
    uint8_t v;
    uint32_t cc;

    if (!(env->psw.mask & PSW_MASK_64)) {
        destlen = (uint32_t)destlen;
        srclen = (uint32_t)srclen;
        dest &= 0x7fffffff;
        src &= 0x7fffffff;
    }

    if (destlen == srclen) {
        cc = 0;
    } else if (destlen < srclen) {
        cc = 1;
    } else {
        cc = 2;
    }

    if (srclen > destlen) {
        srclen = destlen;
    }

    for (; destlen && srclen; src++, dest++, destlen--, srclen--) {
        v = cpu_ldub_data(env, src);
        cpu_stb_data(env, dest, v);
    }

    for (; destlen; dest++, destlen--) {
        cpu_stb_data(env, dest, pad);
    }

    env->regs[r1 + 1] = destlen;
    /* can't use srclen here, we trunc'ed it */
    /* FIXME: 31-bit mode! */
    env->regs[r3 + 1] -= src - env->regs[r3];
    env->regs[r1] = dest;
    env->regs[r3] = src;

    return cc;
}
/* compare logical long extended - memcompare insn with padding */
uint32_t HELPER(clcle)(CPUS390XState *env, uint32_t r1, uint64_t a2,
                       uint32_t r3)
{
    uint64_t destlen = env->regs[r1 + 1];
    uint64_t dest = get_address_31fix(env, r1);
    uint64_t srclen = env->regs[r3 + 1];
    uint64_t src = get_address_31fix(env, r3);
    uint8_t pad = a2 & 0xff;
    uint8_t v1 = 0, v2 = 0;
    uint32_t cc = 0;

    if (!(destlen || srclen)) {
        return cc;
    }

    if (srclen > destlen) {
        srclen = destlen;
    }

    for (; destlen || srclen; src++, dest++, destlen--, srclen--) {
        v1 = srclen ? cpu_ldub_data(env, src) : pad;
        v2 = destlen ? cpu_ldub_data(env, dest) : pad;
        if (v1 != v2) {
            cc = (v1 < v2) ? 1 : 2;
            break;
        }
    }

    env->regs[r1 + 1] = destlen;
    /* can't use srclen here, we trunc'ed it */
    env->regs[r3 + 1] -= src - env->regs[r3];
    env->regs[r1] = dest;
    env->regs[r3] = src;

    return cc;
}
/* checksum */
uint64_t HELPER(cksm)(CPUS390XState *env, uint64_t r1,
                      uint64_t src, uint64_t src_len)
{
    uint64_t max_len, len;
    uint64_t cksm = (uint32_t)r1;

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 8k.  */
    max_len = (src_len > 0x2000 ? 0x2000 : src_len);

    /* Process full words as available.  */
    for (len = 0; len + 4 <= max_len; len += 4, src += 4) {
        cksm += (uint32_t)cpu_ldl_data(env, src);
    }

    switch (max_len - len) {
    case 1:
        cksm += cpu_ldub_data(env, src) << 24;
        len += 1;
        break;
    case 2:
        cksm += cpu_lduw_data(env, src) << 16;
        len += 2;
        break;
    case 3:
        cksm += cpu_lduw_data(env, src) << 16;
        cksm += cpu_ldub_data(env, src + 2) << 8;
        len += 3;
        break;
    }

    /* Fold the carry from the checksum.  Note that we can see carry-out
       during folding more than once (but probably not more than twice).  */
    while (cksm > 0xffffffffull) {
        cksm = (uint32_t)cksm + (cksm >> 32);
    }

    /* Indicate whether or not we've processed everything.  */
    env->cc_op = (len == src_len ? 0 : 3);

    /* Return both cksm and processed length.  */
    env->retxl = cksm;
    return len;
}
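/* unpack: convert a packed-decimal operand at src into zoned format at dest.
   Each result byte gets zone bits 0xf0 plus one source digit; the rightmost
   byte only has its sign and digit nibbles swapped. */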
void HELPER(unpk)(CPUS390XState *env, uint32_t len, uint64_t dest,
                  uint64_t src)
{
    int len_dest = len >> 4;
    int len_src = len & 0xf;
    uint8_t b;
    int second_nibble = 0;

    dest += len_dest;
    src += len_src;

    /* last byte is special, it only flips the nibbles */
    b = cpu_ldub_data(env, src);
    cpu_stb_data(env, dest, (b << 4) | (b >> 4));
    src--;
    len_src--;

    /* now pad every nibble with 0xf0 */

    while (len_dest > 0) {
        uint8_t cur_byte = 0;

        if (len_src > 0) {
            cur_byte = cpu_ldub_data(env, src);
        }

        len_dest--;
        dest--;

        /* only advance one nibble at a time */
        if (second_nibble) {
            cur_byte >>= 4;
            len_src--;
            src--;
        }
        second_nibble = !second_nibble;

        /* digit */
        cur_byte = (cur_byte & 0xf);
        /* zone bits */
        cur_byte |= 0xf0;

        cpu_stb_data(env, dest, cur_byte);
    }
}

void HELPER(tr)(CPUS390XState *env, uint32_t len, uint64_t array,
                uint64_t trans)
{
    int i;

    for (i = 0; i <= len; i++) {
        uint8_t byte = cpu_ldub_data(env, array + i);
        uint8_t new_byte = cpu_ldub_data(env, trans + byte);

        cpu_stb_data(env, array + i, new_byte);
    }
}
uint64_t HELPER(tre)(CPUS390XState *env, uint64_t array,
                     uint64_t len, uint64_t trans)
{
    uint8_t end = env->regs[0] & 0xff;
    uint64_t l = len;
    uint64_t i;

    if (!(env->psw.mask & PSW_MASK_64)) {
        array &= 0x7fffffff;
        l = (uint32_t)l;
    }

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 8k.  */
    if (l > 0x2000) {
        l = 0x2000;
        env->cc_op = 3;
    } else {
        env->cc_op = 0;
    }

    for (i = 0; i < l; i++) {
        uint8_t byte, new_byte;

        byte = cpu_ldub_data(env, array + i);

        if (byte == end) {
            env->cc_op = 1;
            break;
        }

        new_byte = cpu_ldub_data(env, trans + byte);
        cpu_stb_data(env, array + i, new_byte);
    }

    env->retxl = len - i;
    return array + i;
}
uint32_t HELPER(trt)(CPUS390XState *env, uint32_t len, uint64_t array,
                     uint64_t trans)
{
    uint32_t cc = 0;
    int i;

    for (i = 0; i <= len; i++) {
        uint8_t byte = cpu_ldub_data(env, array + i);
        uint8_t sbyte = cpu_ldub_data(env, trans + byte);

        if (sbyte != 0) {
            env->regs[1] = array + i;
            env->regs[2] = (env->regs[2] & ~0xff) | sbyte;
            cc = (i == len) ? 2 : 1;
            break;
        }
    }

    return cc;
}
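/* TRT scans the operand for the first byte whose function-table entry is
   nonzero: R1 receives that byte's address and the low byte of R2 receives
   the table entry.  cc 1 means a hit before the last byte, cc 2 a hit on the
   last byte, cc 0 no hit at all. */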
#if !defined(CONFIG_USER_ONLY)
void HELPER(lctlg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    bool PERchanged = false;
    int i;
    uint64_t src = a2;
    uint64_t val;

    for (i = r1;; i = (i + 1) % 16) {
        val = cpu_ldq_data(env, src);
        if (env->cregs[i] != val && i >= 9 && i <= 11) {
            PERchanged = true;
        }
        env->cregs[i] = val;
        HELPER_LOG("load ctl %d from 0x%" PRIx64 " == 0x%" PRIx64 "\n",
                   i, src, env->cregs[i]);
        src += sizeof(uint64_t);

        if (i == r3) {
            break;
        }
    }

    if (PERchanged && env->psw.mask & PSW_MASK_PER) {
        s390_cpu_recompute_watchpoints(CPU(cpu));
    }
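    /* Reloading control registers can change the address-translation
       controls, so any cached translations may now be stale. */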
    tlb_flush(CPU(cpu), 1);
}
void HELPER(lctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    bool PERchanged = false;
    int i;
    uint64_t src = a2;
    uint32_t val;

    for (i = r1;; i = (i + 1) % 16) {
        val = cpu_ldl_data(env, src);
        if ((uint32_t)env->cregs[i] != val && i >= 9 && i <= 11) {
            PERchanged = true;
        }
        env->cregs[i] = (env->cregs[i] & 0xFFFFFFFF00000000ULL) | val;
        src += sizeof(uint32_t);

        if (i == r3) {
            break;
        }
    }

    if (PERchanged && env->psw.mask & PSW_MASK_PER) {
        s390_cpu_recompute_watchpoints(CPU(cpu));
    }

    tlb_flush(CPU(cpu), 1);
}

void HELPER(stctg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    int i;
    uint64_t dest = a2;

    for (i = r1;; i = (i + 1) % 16) {
        cpu_stq_data(env, dest, env->cregs[i]);
        dest += sizeof(uint64_t);

        if (i == r3) {
            break;
        }
    }
}

void HELPER(stctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    int i;
    uint64_t dest = a2;

    for (i = r1;; i = (i + 1) % 16) {
        cpu_stl_data(env, dest, env->cregs[i]);
        dest += sizeof(uint32_t);

        if (i == r3) {
            break;
        }
    }
}
uint32_t HELPER(tprot)(uint64_t a1, uint64_t a2)
{
    /* XXX implement */

    return 0;
}

/* insert storage key extended */
uint64_t HELPER(iske)(CPUS390XState *env, uint64_t r2)
{
    static S390SKeysState *ss;
    static S390SKeysClass *skeyclass;
    uint64_t addr = get_address(env, 0, 0, r2);
    uint8_t key;

    if (addr > ram_size) {
        return 0;
    }

    if (unlikely(!ss)) {
        ss = s390_get_skeys_device();
        skeyclass = S390_SKEYS_GET_CLASS(ss);
    }

    if (skeyclass->get_skeys(ss, addr / TARGET_PAGE_SIZE, 1, &key)) {
        return 0;
    }
    return key;
}

/* set storage key extended */
void HELPER(sske)(CPUS390XState *env, uint64_t r1, uint64_t r2)
{
    static S390SKeysState *ss;
    static S390SKeysClass *skeyclass;
    uint64_t addr = get_address(env, 0, 0, r2);
    uint8_t key;

    if (addr > ram_size) {
        return;
    }

    if (unlikely(!ss)) {
        ss = s390_get_skeys_device();
        skeyclass = S390_SKEYS_GET_CLASS(ss);
    }

    key = (uint8_t) r1;
    skeyclass->set_skeys(ss, addr / TARGET_PAGE_SIZE, 1, &key);
}
/* reset reference bit extended */
uint32_t HELPER(rrbe)(CPUS390XState *env, uint64_t r2)
{
    static S390SKeysState *ss;
    static S390SKeysClass *skeyclass;
    uint8_t re, key;

    if (r2 > ram_size) {
        return 0;
    }

    if (unlikely(!ss)) {
        ss = s390_get_skeys_device();
        skeyclass = S390_SKEYS_GET_CLASS(ss);
    }

    if (skeyclass->get_skeys(ss, r2 / TARGET_PAGE_SIZE, 1, &key)) {
        return 0;
    }

    re = key & (SK_R | SK_C);
    key &= ~SK_R;

    if (skeyclass->set_skeys(ss, r2 / TARGET_PAGE_SIZE, 1, &key)) {
        return 0;
    }

    /*
     * cc
     *
     * 0  Reference bit zero; change bit zero
     * 1  Reference bit zero; change bit one
     * 2  Reference bit one; change bit zero
     * 3  Reference bit one; change bit one
     */

    return re >> 1;
}
/* compare and swap and purge */
uint32_t HELPER(csp)(CPUS390XState *env, uint32_t r1, uint64_t r2)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint32_t cc;
    uint32_t o1 = env->regs[r1];
    uint64_t a2 = r2 & ~3ULL;
    uint32_t o2 = cpu_ldl_data(env, a2);

    if (o1 == o2) {
        cpu_stl_data(env, a2, env->regs[(r1 + 1) & 15]);
        if (r2 & 0x3) {
            /* flush TLB / ALB */
            tlb_flush(CPU(cpu), 1);
        }
        cc = 0;
    } else {
        env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | o2;
        cc = 1;
    }

    return cc;
}
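/* MVCS and MVCP move data between address spaces: MVCS stores into the
   secondary space what it fetches from the primary space, MVCP does the
   reverse.  At most 256 bytes are moved per execution; cc 3 reports that
   the request was truncated. */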
uint32_t HELPER(mvcs)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2)
{
    int cc = 0, i;

    HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
               __func__, l, a1, a2);

    if (l > 256) {
        /* max 256 */
        l = 256;
        cc = 3;
    }

    /* XXX replace w/ memcpy */
    for (i = 0; i < l; i++) {
        cpu_stb_secondary(env, a1 + i, cpu_ldub_primary(env, a2 + i));
    }

    return cc;
}

uint32_t HELPER(mvcp)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2)
{
    int cc = 0, i;

    HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
               __func__, l, a1, a2);

    if (l > 256) {
        /* max 256 */
        l = 256;
        cc = 3;
    }

    /* XXX replace w/ memcpy */
    for (i = 0; i < l; i++) {
        cpu_stb_primary(env, a1 + i, cpu_ldub_secondary(env, a2 + i));
    }

    return cc;
}
/* invalidate pte */
void HELPER(ipte)(CPUS390XState *env, uint64_t pte_addr, uint64_t vaddr)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));
    uint64_t page = vaddr & TARGET_PAGE_MASK;
    uint64_t pte = 0;

    /* XXX broadcast to other CPUs */

    /* XXX Linux is nice enough to give us the exact pte address.
       According to spec we'd have to find it out ourselves */
    /* XXX Linux is fine with overwriting the pte, the spec requires
       us to only set the invalid bit */
    stq_phys(cs->as, pte_addr, pte | _PAGE_INVALID);

    /* XXX we exploit the fact that Linux passes the exact virtual
       address here - it's not obliged to! */
    tlb_flush_page(cs, page);

    /* XXX 31-bit hack */
    if (page & 0x80000000) {
        tlb_flush_page(cs, page & ~0x80000000);
    } else {
        tlb_flush_page(cs, page | 0x80000000);
    }
}
/* flush local tlb */
void HELPER(ptlb)(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);

    tlb_flush(CPU(cpu), 1);
}

/* load using real address */
uint64_t HELPER(lura)(CPUS390XState *env, uint64_t addr)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));

    return (uint32_t)ldl_phys(cs->as, get_address(env, 0, 0, addr));
}

uint64_t HELPER(lurag)(CPUS390XState *env, uint64_t addr)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));

    return ldq_phys(cs->as, get_address(env, 0, 0, addr));
}

/* store using real address */
void HELPER(stura)(CPUS390XState *env, uint64_t addr, uint64_t v1)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));

    stl_phys(cs->as, get_address(env, 0, 0, addr), (uint32_t)v1);

    if ((env->psw.mask & PSW_MASK_PER) &&
        (env->cregs[9] & PER_CR9_EVENT_STORE) &&
        (env->cregs[9] & PER_CR9_EVENT_STORE_REAL)) {
        /* PSW is saved just before calling the helper.  */
        env->per_address = env->psw.addr;
        env->per_perc_atmid = PER_CODE_EVENT_STORE_REAL | get_per_atmid(env);
    }
}
void HELPER(sturg)(CPUS390XState *env, uint64_t addr, uint64_t v1)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));

    stq_phys(cs->as, get_address(env, 0, 0, addr), v1);

    if ((env->psw.mask & PSW_MASK_PER) &&
        (env->cregs[9] & PER_CR9_EVENT_STORE) &&
        (env->cregs[9] & PER_CR9_EVENT_STORE_REAL)) {
        /* PSW is saved just before calling the helper.  */
        env->per_address = env->psw.addr;
        env->per_perc_atmid = PER_CODE_EVENT_STORE_REAL | get_per_atmid(env);
    }
}
/* load real address */
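/* LRA translates the operand virtual address through the current ASC mode.
   On success the real address is returned and cc stays 0; if translation
   fails, cc is set to 3 and the program-interruption code (with the high bit
   of the low word set) is returned instead. */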
uint64_t HELPER(lra)(CPUS390XState *env, uint64_t addr)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));
    uint32_t cc = 0;
    int old_exc = cs->exception_index;
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;
    uint64_t ret;
    int flags;

    /* XXX incomplete - has more corner cases */
    if (!(env->psw.mask & PSW_MASK_64) && (addr >> 32)) {
        program_interrupt(env, PGM_SPECIAL_OP, 2);
    }

    cs->exception_index = old_exc;
    if (mmu_translate(env, addr, 0, asc, &ret, &flags, true)) {
        cc = 3;
    }
    if (cs->exception_index == EXCP_PGM) {
        ret = env->int_pgm_code | 0x80000000;
    } else {
        ret |= addr & ~TARGET_PAGE_MASK;
    }
    cs->exception_index = old_exc;

    env->cc_op = cc;
    return ret;
}
#endif