qemu/ar7.git: target-s390x/mem_helper.c
blob 372334b3c8ffb9256b80b453add36f4b90f15d59
/*
 *  S/390 memory access helper routines
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2009 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "cpu.h"
#include "helper.h"
/*****************************************************************************/
/* Softmmu support */
#if !defined(CONFIG_USER_ONLY)
#include "exec/softmmu_exec.h"

#define MMUSUFFIX _mmu

#define SHIFT 0
#include "exec/softmmu_template.h"

#define SHIFT 1
#include "exec/softmmu_template.h"

#define SHIFT 2
#include "exec/softmmu_template.h"

#define SHIFT 3
#include "exec/softmmu_template.h"
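
/* Each inclusion of softmmu_template.h instantiates the out-of-line MMU
   load/store helpers for an access size of (1 << SHIFT) bytes, so the four
   inclusions above cover byte, halfword, word and doubleword accesses.  */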
/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(CPUS390XState *env, target_ulong addr, int is_write, int mmu_idx,
              uintptr_t retaddr)
{
    int ret;

    ret = cpu_s390x_handle_mmu_fault(env, addr, is_write, mmu_idx);
    if (unlikely(ret != 0)) {
        if (likely(retaddr)) {
            /* now we have a real cpu fault */
            cpu_restore_state(env, retaddr);
        }
        cpu_loop_exit(env);
    }
}

#endif
/* #define DEBUG_HELPER */
#ifdef DEBUG_HELPER
#define HELPER_LOG(x...) qemu_log(x)
#else
#define HELPER_LOG(x...)
#endif
#ifndef CONFIG_USER_ONLY
static void mvc_fast_memset(CPUS390XState *env, uint32_t l, uint64_t dest,
                            uint8_t byte)
{
    hwaddr dest_phys;
    hwaddr len = l;
    void *dest_p;
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;
    int flags;

    if (mmu_translate(env, dest, 1, asc, &dest_phys, &flags)) {
        cpu_stb_data(env, dest, byte);
        cpu_abort(env, "should never reach here");
    }
    dest_phys |= dest & ~TARGET_PAGE_MASK;

    dest_p = cpu_physical_memory_map(dest_phys, &len, 1);

    memset(dest_p, byte, len);

    cpu_physical_memory_unmap(dest_p, 1, len, len);
}
static void mvc_fast_memmove(CPUS390XState *env, uint32_t l, uint64_t dest,
                             uint64_t src)
{
    hwaddr dest_phys;
    hwaddr src_phys;
    hwaddr len = l;
    void *dest_p;
    void *src_p;
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;
    int flags;

    if (mmu_translate(env, dest, 1, asc, &dest_phys, &flags)) {
        cpu_stb_data(env, dest, 0);
        cpu_abort(env, "should never reach here");
    }
    dest_phys |= dest & ~TARGET_PAGE_MASK;

    if (mmu_translate(env, src, 0, asc, &src_phys, &flags)) {
        cpu_ldub_data(env, src);
        cpu_abort(env, "should never reach here");
    }
    src_phys |= src & ~TARGET_PAGE_MASK;

    dest_p = cpu_physical_memory_map(dest_phys, &len, 1);
    src_p = cpu_physical_memory_map(src_phys, &len, 0);

    memmove(dest_p, src_p, len);

    cpu_physical_memory_unmap(dest_p, 1, len, len);
    cpu_physical_memory_unmap(src_p, 0, len, len);
}
#endif
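
/* The NC, XC, OC, MVC and CLC helpers below implement SS-format
   storage-to-storage instructions: the length operand "l" is the
   architectural L field, i.e. one less than the number of bytes, which is
   why the loops run with "i <= l".  For the logical operations the
   condition code is 0 when the result is all zeros and 1 otherwise.  */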
/* and on array */
uint32_t HELPER(nc)(CPUS390XState *env, uint32_t l, uint64_t dest,
                    uint64_t src)
{
    int i;
    unsigned char x;
    uint32_t cc = 0;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __func__, l, dest, src);

    for (i = 0; i <= l; i++) {
        x = cpu_ldub_data(env, dest + i) & cpu_ldub_data(env, src + i);
        if (x) {
            cc = 1;
        }
        cpu_stb_data(env, dest + i, x);
    }
    return cc;
}
/* xor on array */
uint32_t HELPER(xc)(CPUS390XState *env, uint32_t l, uint64_t dest,
                    uint64_t src)
{
    int i;
    unsigned char x;
    uint32_t cc = 0;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __func__, l, dest, src);

#ifndef CONFIG_USER_ONLY
    /* xor with itself is the same as memset(0) */
    if ((l > 32) && (src == dest) &&
        (src & TARGET_PAGE_MASK) == ((src + l) & TARGET_PAGE_MASK)) {
        mvc_fast_memset(env, l + 1, dest, 0);
        return 0;
    }
#else
    if (src == dest) {
        memset(g2h(dest), 0, l + 1);
        return 0;
    }
#endif

    for (i = 0; i <= l; i++) {
        x = cpu_ldub_data(env, dest + i) ^ cpu_ldub_data(env, src + i);
        if (x) {
            cc = 1;
        }
        cpu_stb_data(env, dest + i, x);
    }
    return cc;
}
/* or on array */
uint32_t HELPER(oc)(CPUS390XState *env, uint32_t l, uint64_t dest,
                    uint64_t src)
{
    int i;
    unsigned char x;
    uint32_t cc = 0;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __func__, l, dest, src);

    for (i = 0; i <= l; i++) {
        x = cpu_ldub_data(env, dest + i) | cpu_ldub_data(env, src + i);
        if (x) {
            cc = 1;
        }
        cpu_stb_data(env, dest + i, x);
    }
    return cc;
}
/* memmove */
void HELPER(mvc)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
{
    int i = 0;
    int x = 0;
    uint32_t l_64 = (l + 1) / 8;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __func__, l, dest, src);

#ifndef CONFIG_USER_ONLY
    if ((l > 32) &&
        (src & TARGET_PAGE_MASK) == ((src + l) & TARGET_PAGE_MASK) &&
        (dest & TARGET_PAGE_MASK) == ((dest + l) & TARGET_PAGE_MASK)) {
        if (dest == (src + 1)) {
            mvc_fast_memset(env, l + 1, dest, cpu_ldub_data(env, src));
            return;
        } else if ((src & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) {
            mvc_fast_memmove(env, l + 1, dest, src);
            return;
        }
    }
#else
    if (dest == (src + 1)) {
        memset(g2h(dest), cpu_ldub_data(env, src), l + 1);
        return;
    } else {
        memmove(g2h(dest), g2h(src), l + 1);
        return;
    }
#endif

    /* handle the parts that fit into 8-byte loads/stores */
    if (dest != (src + 1)) {
        for (i = 0; i < l_64; i++) {
            cpu_stq_data(env, dest + x, cpu_ldq_data(env, src + x));
            x += 8;
        }
    }

    /* slow version crossing pages with byte accesses */
    for (i = x; i <= l; i++) {
        cpu_stb_data(env, dest + i, cpu_ldub_data(env, src + i));
    }
}
/* compare unsigned byte arrays */
uint32_t HELPER(clc)(CPUS390XState *env, uint32_t l, uint64_t s1, uint64_t s2)
{
    int i;
    unsigned char x, y;
    uint32_t cc;

    HELPER_LOG("%s l %d s1 %" PRIx64 " s2 %" PRIx64 "\n",
               __func__, l, s1, s2);
    for (i = 0; i <= l; i++) {
        x = cpu_ldub_data(env, s1 + i);
        y = cpu_ldub_data(env, s2 + i);
        HELPER_LOG("%02x (%c)/%02x (%c) ", x, x, y, y);
        if (x < y) {
            cc = 1;
            goto done;
        } else if (x > y) {
            cc = 2;
            goto done;
        }
    }
    cc = 0;
 done:
    HELPER_LOG("\n");
    return cc;
}
/* compare logical under mask */
uint32_t HELPER(clm)(CPUS390XState *env, uint32_t r1, uint32_t mask,
                     uint64_t addr)
{
    uint8_t r, d;
    uint32_t cc;

    HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%" PRIx64 "\n", __func__, r1,
               mask, addr);
    cc = 0;
    while (mask) {
        if (mask & 8) {
            d = cpu_ldub_data(env, addr);
            r = (r1 & 0xff000000UL) >> 24;
            HELPER_LOG("mask 0x%x %02x/%02x (0x%" PRIx64 ") ", mask, r, d,
                       addr);
            if (r < d) {
                cc = 1;
                break;
            } else if (r > d) {
                cc = 2;
                break;
            }
            addr++;
        }
        mask = (mask << 1) & 0xf;
        r1 <<= 8;
    }
    HELPER_LOG("\n");
    return cc;
}
static inline uint64_t fix_address(CPUS390XState *env, uint64_t a)
{
    /* 31-Bit mode */
    if (!(env->psw.mask & PSW_MASK_64)) {
        a &= 0x7fffffff;
    }
    return a;
}

static inline uint64_t get_address(CPUS390XState *env, int x2, int b2, int d2)
{
    uint64_t r = d2;

    if (x2) {
        r += env->regs[x2];
    }
    if (b2) {
        r += env->regs[b2];
    }

    return fix_address(env, r);
}
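
/* In s390 address generation an X2 or B2 field of zero means "no index" or
   "no base", not general register 0, which is why the zero checks above
   skip the register contents.  */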
static inline uint64_t get_address_31fix(CPUS390XState *env, int reg)
{
    return fix_address(env, env->regs[reg]);
}
/* search string (c is byte to search, r2 is string, r1 end of string) */
uint64_t HELPER(srst)(CPUS390XState *env, uint64_t r0, uint64_t end,
                      uint64_t str)
{
    uint32_t len;
    uint8_t v, c = r0;

    str = fix_address(env, str);
    end = fix_address(env, end);

    /* Assume for now that R2 is unmodified.  */
    env->retxl = str;

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, lets cap at 8k.  */
    for (len = 0; len < 0x2000; ++len) {
        if (str + len == end) {
            /* Character not found.  R1 & R2 are unmodified.  */
            env->cc_op = 2;
            return end;
        }
        v = cpu_ldub_data(env, str + len);
        if (v == c) {
            /* Character found.  Set R1 to the location; R2 is unmodified.  */
            env->cc_op = 1;
            return str + len;
        }
    }

    /* CPU-determined bytes processed.  Advance R2 to next byte to process.  */
    env->retxl = str + len;
    env->cc_op = 3;
    return end;
}
/* unsigned string compare (c is string terminator) */
uint64_t HELPER(clst)(CPUS390XState *env, uint64_t c, uint64_t s1, uint64_t s2)
{
    uint32_t len;

    c = c & 0xff;
    s1 = fix_address(env, s1);
    s2 = fix_address(env, s2);

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, lets cap at 8k.  */
    for (len = 0; len < 0x2000; ++len) {
        uint8_t v1 = cpu_ldub_data(env, s1 + len);
        uint8_t v2 = cpu_ldub_data(env, s2 + len);
        if (v1 == v2) {
            if (v1 == c) {
                /* Equal.  CC=0, and don't advance the registers.  */
                env->cc_op = 0;
                env->retxl = s2;
                return s1;
            }
        } else {
            /* Unequal.  CC={1,2}, and advance the registers.  Note that
               the terminator need not be zero, but the string that contains
               the terminator is by definition "low".  */
            env->cc_op = (v1 == c ? 1 : v2 == c ? 2 : v1 < v2 ? 1 : 2);
            env->retxl = s2 + len;
            return s1 + len;
        }
    }

    /* CPU-determined bytes equal; advance the registers.  */
    env->cc_op = 3;
    env->retxl = s2 + len;
    return s1 + len;
}
/* move page */
void HELPER(mvpg)(CPUS390XState *env, uint64_t r0, uint64_t r1, uint64_t r2)
{
    /* XXX missing r0 handling */
    env->cc_op = 0;
#ifdef CONFIG_USER_ONLY
    memmove(g2h(r1), g2h(r2), TARGET_PAGE_SIZE);
#else
    mvc_fast_memmove(env, TARGET_PAGE_SIZE, r1, r2);
#endif
}
/* string copy (c is string terminator) */
uint64_t HELPER(mvst)(CPUS390XState *env, uint64_t c, uint64_t d, uint64_t s)
{
    uint32_t len;

    c = c & 0xff;
    d = fix_address(env, d);
    s = fix_address(env, s);

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, lets cap at 8k.  */
    for (len = 0; len < 0x2000; ++len) {
        uint8_t v = cpu_ldub_data(env, s + len);
        cpu_stb_data(env, d + len, v);
        if (v == c) {
            /* Complete.  Set CC=1 and advance R1.  */
            env->cc_op = 1;
            env->retxl = s;
            return d + len;
        }
    }

    /* Incomplete.  Set CC=3 and signal to advance R1 and R2.  */
    env->cc_op = 3;
    env->retxl = s + len;
    return d + len;
}
static uint32_t helper_icm(CPUS390XState *env, uint32_t r1, uint64_t address,
                           uint32_t mask)
{
    int pos = 24; /* top of the lower half of r1 */
    uint64_t rmask = 0xff000000ULL;
    uint8_t val = 0;
    int ccd = 0;
    uint32_t cc = 0;

    while (mask) {
        if (mask & 8) {
            env->regs[r1] &= ~rmask;
            val = cpu_ldub_data(env, address);
            if ((val & 0x80) && !ccd) {
                cc = 1;
            }
            ccd = 1;
            if (val && cc == 0) {
                cc = 2;
            }
            env->regs[r1] |= (uint64_t)val << pos;
            address++;
        }
        mask = (mask << 1) & 0xf;
        pos -= 8;
        rmask >>= 8;
    }

    return cc;
}
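
/* ICM condition code summary: 0 when all inserted bits are zeros (or the
   mask is zero), 1 when the leftmost inserted bit is one, 2 when the
   leftmost inserted bit is zero but the inserted field is nonzero.  The
   "ccd" flag above latches the first-byte sign test.  */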
/* execute instruction
   this instruction executes an insn modified with the contents of r1
   it does not change the executed instruction in memory
   it does not change the program counter
   in other words: tricky...
   currently implemented by interpreting the cases it is most commonly used in
*/
uint32_t HELPER(ex)(CPUS390XState *env, uint32_t cc, uint64_t v1,
                    uint64_t addr, uint64_t ret)
{
    uint16_t insn = cpu_lduw_code(env, addr);

    HELPER_LOG("%s: v1 0x%lx addr 0x%lx insn 0x%x\n", __func__, v1, addr,
               insn);
    if ((insn & 0xf0ff) == 0xd000) {
        uint32_t l, insn2, b1, b2, d1, d2;

        l = v1 & 0xff;
        insn2 = cpu_ldl_code(env, addr + 2);
        b1 = (insn2 >> 28) & 0xf;
        b2 = (insn2 >> 12) & 0xf;
        d1 = (insn2 >> 16) & 0xfff;
        d2 = insn2 & 0xfff;
        switch (insn & 0xf00) {
        case 0x200:
            helper_mvc(env, l, get_address(env, 0, b1, d1),
                       get_address(env, 0, b2, d2));
            break;
        case 0x500:
            cc = helper_clc(env, l, get_address(env, 0, b1, d1),
                            get_address(env, 0, b2, d2));
            break;
        case 0x700:
            cc = helper_xc(env, l, get_address(env, 0, b1, d1),
                           get_address(env, 0, b2, d2));
            break;
        case 0xc00:
            helper_tr(env, l, get_address(env, 0, b1, d1),
                      get_address(env, 0, b2, d2));
            break;
        default:
            goto abort;
            break;
        }
    } else if ((insn & 0xff00) == 0x0a00) {
        /* supervisor call */
        HELPER_LOG("%s: svc %ld via execute\n", __func__, (insn | v1) & 0xff);
        env->psw.addr = ret - 4;
        env->int_svc_code = (insn | v1) & 0xff;
        env->int_svc_ilen = 4;
        helper_exception(env, EXCP_SVC);
    } else if ((insn & 0xff00) == 0xbf00) {
        uint32_t insn2, r1, r3, b2, d2;

        insn2 = cpu_ldl_code(env, addr + 2);
        r1 = (insn2 >> 20) & 0xf;
        r3 = (insn2 >> 16) & 0xf;
        b2 = (insn2 >> 12) & 0xf;
        d2 = insn2 & 0xfff;
        cc = helper_icm(env, r1, get_address(env, 0, b2, d2), r3);
    } else {
    abort:
        cpu_abort(env, "EXECUTE on instruction prefix 0x%x not implemented\n",
                  insn);
    }
    return cc;
}
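
/* Architecturally, EXECUTE ORs bits 56-63 of general register r1 (unless
   the R1 field is zero) into bits 8-15 of the target instruction before
   executing it.  The 0xf0ff mask above therefore only matches SS-format
   targets whose encoded length byte is zero, so the effective length comes
   entirely from v1; likewise (insn | v1) & 0xff forms the SVC number for
   the supervisor-call case.  */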
/* load access registers r1 to r3 from memory at a2 */
void HELPER(lam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    int i;

    for (i = r1;; i = (i + 1) % 16) {
        env->aregs[i] = cpu_ldl_data(env, a2);
        a2 += 4;

        if (i == r3) {
            break;
        }
    }
}
/* store access registers r1 to r3 in memory at a2 */
void HELPER(stam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    int i;

    for (i = r1;; i = (i + 1) % 16) {
        cpu_stl_data(env, a2, env->aregs[i]);
        a2 += 4;

        if (i == r3) {
            break;
        }
    }
}
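
/* MOVE LONG operates on even-odd register pairs: r1/r2 hold the operand
   addresses while r1+1/r2+1 hold the 24-bit lengths, and bits 32-39 of
   r2+1 supply the padding byte used once the source is exhausted.  */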
/* move long */
uint32_t HELPER(mvcl)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    uint64_t destlen = env->regs[r1 + 1] & 0xffffff;
    uint64_t dest = get_address_31fix(env, r1);
    uint64_t srclen = env->regs[r2 + 1] & 0xffffff;
    uint64_t src = get_address_31fix(env, r2);
    /* padding byte lives in bits 32-39 of r2 + 1 */
    uint8_t pad = env->regs[r2 + 1] >> 24;
    uint8_t v;
    uint32_t cc;

    if (destlen == srclen) {
        cc = 0;
    } else if (destlen < srclen) {
        cc = 1;
    } else {
        cc = 2;
    }

    if (srclen > destlen) {
        srclen = destlen;
    }

    for (; destlen && srclen; src++, dest++, destlen--, srclen--) {
        v = cpu_ldub_data(env, src);
        cpu_stb_data(env, dest, v);
    }

    for (; destlen; dest++, destlen--) {
        cpu_stb_data(env, dest, pad);
    }

    env->regs[r1 + 1] = destlen;
    /* can't use srclen here, we trunc'ed it */
    env->regs[r2 + 1] -= src - env->regs[r2];
    env->regs[r1] = dest;
    env->regs[r2] = src;

    return cc;
}
/* move long extended: another memcopy insn with more bells and whistles */
uint32_t HELPER(mvcle)(CPUS390XState *env, uint32_t r1, uint64_t a2,
                       uint32_t r3)
{
    uint64_t destlen = env->regs[r1 + 1];
    uint64_t dest = env->regs[r1];
    uint64_t srclen = env->regs[r3 + 1];
    uint64_t src = env->regs[r3];
    uint8_t pad = a2 & 0xff;
    uint8_t v;
    uint32_t cc;

    if (!(env->psw.mask & PSW_MASK_64)) {
        destlen = (uint32_t)destlen;
        srclen = (uint32_t)srclen;
        dest &= 0x7fffffff;
        src &= 0x7fffffff;
    }

    if (destlen == srclen) {
        cc = 0;
    } else if (destlen < srclen) {
        cc = 1;
    } else {
        cc = 2;
    }

    if (srclen > destlen) {
        srclen = destlen;
    }

    for (; destlen && srclen; src++, dest++, destlen--, srclen--) {
        v = cpu_ldub_data(env, src);
        cpu_stb_data(env, dest, v);
    }

    for (; destlen; dest++, destlen--) {
        cpu_stb_data(env, dest, pad);
    }

    env->regs[r1 + 1] = destlen;
    /* can't use srclen here, we trunc'ed it */
    /* FIXME: 31-bit mode! */
    env->regs[r3 + 1] -= src - env->regs[r3];
    env->regs[r1] = dest;
    env->regs[r3] = src;

    return cc;
}
/* compare logical long extended: memcompare insn with padding */
uint32_t HELPER(clcle)(CPUS390XState *env, uint32_t r1, uint64_t a2,
                       uint32_t r3)
{
    uint64_t destlen = env->regs[r1 + 1];
    uint64_t dest = get_address_31fix(env, r1);
    uint64_t srclen = env->regs[r3 + 1];
    uint64_t src = get_address_31fix(env, r3);
    uint8_t pad = a2 & 0xff;
    uint8_t v1 = 0, v2 = 0;
    uint32_t cc = 0;

    if (!(destlen || srclen)) {
        return cc;
    }

    if (srclen > destlen) {
        srclen = destlen;
    }

    for (; destlen || srclen; src++, dest++, destlen--, srclen--) {
        v1 = srclen ? cpu_ldub_data(env, src) : pad;
        v2 = destlen ? cpu_ldub_data(env, dest) : pad;
        if (v1 != v2) {
            cc = (v1 < v2) ? 1 : 2;
            break;
        }
    }

    env->regs[r1 + 1] = destlen;
    /* can't use srclen here, we trunc'ed it */
    env->regs[r3 + 1] -= src - env->regs[r3];
    env->regs[r1] = dest;
    env->regs[r3] = src;

    return cc;
}
/* checksum */
uint64_t HELPER(cksm)(CPUS390XState *env, uint64_t r1,
                      uint64_t src, uint64_t src_len)
{
    uint64_t max_len, len;
    uint64_t cksm = (uint32_t)r1;

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, lets cap at 8k.  */
    max_len = (src_len > 0x2000 ? 0x2000 : src_len);

    /* Process full words as available.  */
    for (len = 0; len + 4 <= max_len; len += 4, src += 4) {
        cksm += (uint32_t)cpu_ldl_data(env, src);
    }

    switch (max_len - len) {
    case 1:
        cksm += cpu_ldub_data(env, src) << 24;
        len += 1;
        break;
    case 2:
        cksm += cpu_lduw_data(env, src) << 16;
        len += 2;
        break;
    case 3:
        cksm += cpu_lduw_data(env, src) << 16;
        cksm += cpu_ldub_data(env, src + 2) << 8;
        len += 3;
        break;
    }

    /* Fold the carry from the checksum.  Note that we can see carry-out
       during folding more than once (but probably not more than twice).  */
    while (cksm > 0xffffffffull) {
        cksm = (uint32_t)cksm + (cksm >> 32);
    }

    /* Indicate whether or not we've processed everything.  */
    env->cc_op = (len == src_len ? 0 : 3);

    /* Return both cksm and processed length.  */
    env->retxl = cksm;
    return len;
}
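
/* Fold example: an accumulated sum of 0x100000001 (one carry out of bit 32)
   folds to 0x00000001 + 0x1 = 0x00000002, the end-around-carry result that
   CKSM accumulates into the low 32 bits.  */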
void HELPER(unpk)(CPUS390XState *env, uint32_t len, uint64_t dest,
                  uint64_t src)
{
    int len_dest = len >> 4;
    int len_src = len & 0xf;
    uint8_t b;
    int second_nibble = 0;

    dest += len_dest;
    src += len_src;

    /* last byte is special, it only flips the nibbles */
    b = cpu_ldub_data(env, src);
    cpu_stb_data(env, dest, (b << 4) | (b >> 4));
    src--;
    len_src--;

    /* now pad every nibble with 0xf0 */

    while (len_dest > 0) {
        uint8_t cur_byte = 0;

        /* len_src counts remaining source bytes minus one, so the last
           source byte is still available while len_src == 0 */
        if (len_src >= 0) {
            cur_byte = cpu_ldub_data(env, src);
        }

        len_dest--;
        dest--;

        /* only advance one nibble at a time */
        if (second_nibble) {
            cur_byte >>= 4;
            len_src--;
            src--;
        }
        second_nibble = !second_nibble;

        /* digit */
        cur_byte = (cur_byte & 0xf);
        /* zone bits */
        cur_byte |= 0xf0;

        cpu_stb_data(env, dest, cur_byte);
    }
}
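
/* UNPK turns packed decimal into zoned decimal.  Illustrative case: with a
   2-byte source 0x12 0x3c and a 3-byte destination, the nibble swap above
   yields the sign byte 0xc3, and the remaining digits (processed right to
   left) become 0xf2 and 0xf1, giving the zoned result F1 F2 C3.  */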
void HELPER(tr)(CPUS390XState *env, uint32_t len, uint64_t array,
                uint64_t trans)
{
    int i;

    for (i = 0; i <= len; i++) {
        uint8_t byte = cpu_ldub_data(env, array + i);
        uint8_t new_byte = cpu_ldub_data(env, trans + byte);

        cpu_stb_data(env, array + i, new_byte);
    }
}
#if !defined(CONFIG_USER_ONLY)
void HELPER(lctlg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    int i;
    uint64_t src = a2;

    for (i = r1;; i = (i + 1) % 16) {
        env->cregs[i] = cpu_ldq_data(env, src);
        HELPER_LOG("load ctl %d from 0x%" PRIx64 " == 0x%" PRIx64 "\n",
                   i, src, env->cregs[i]);
        src += sizeof(uint64_t);

        if (i == r3) {
            break;
        }
    }

    tlb_flush(env, 1);
}
void HELPER(lctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    int i;
    uint64_t src = a2;

    for (i = r1;; i = (i + 1) % 16) {
        env->cregs[i] = (env->cregs[i] & 0xFFFFFFFF00000000ULL) |
            cpu_ldl_data(env, src);
        src += sizeof(uint32_t);

        if (i == r3) {
            break;
        }
    }

    tlb_flush(env, 1);
}
void HELPER(stctg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    int i;
    uint64_t dest = a2;

    for (i = r1;; i = (i + 1) % 16) {
        cpu_stq_data(env, dest, env->cregs[i]);
        dest += sizeof(uint64_t);

        if (i == r3) {
            break;
        }
    }
}
void HELPER(stctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    int i;
    uint64_t dest = a2;

    for (i = r1;; i = (i + 1) % 16) {
        cpu_stl_data(env, dest, env->cregs[i]);
        dest += sizeof(uint32_t);

        if (i == r3) {
            break;
        }
    }
}
uint32_t HELPER(tprot)(uint64_t a1, uint64_t a2)
{
    /* XXX implement */

    return 0;
}
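
/* Condition code 0 from TEST PROTECTION means both fetching and storing
   are permitted, so this stub reports every address as fully accessible.  */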
/* insert storage key extended */
uint64_t HELPER(iske)(CPUS390XState *env, uint64_t r2)
{
    uint64_t addr = get_address(env, 0, 0, r2);

    if (addr > ram_size) {
        return 0;
    }

    return env->storage_keys[addr / TARGET_PAGE_SIZE];
}
/* set storage key extended */
void HELPER(sske)(CPUS390XState *env, uint64_t r1, uint64_t r2)
{
    uint64_t addr = get_address(env, 0, 0, r2);

    if (addr > ram_size) {
        return;
    }

    env->storage_keys[addr / TARGET_PAGE_SIZE] = r1;
}
/* reset reference bit extended */
uint32_t HELPER(rrbe)(CPUS390XState *env, uint64_t r2)
{
    uint8_t re;
    uint8_t key;

    if (r2 > ram_size) {
        return 0;
    }

    key = env->storage_keys[r2 / TARGET_PAGE_SIZE];
    re = key & (SK_R | SK_C);
    env->storage_keys[r2 / TARGET_PAGE_SIZE] = (key & ~SK_R);

    /*
     * cc
     *
     * 0  Reference bit zero; change bit zero
     * 1  Reference bit zero; change bit one
     * 2  Reference bit one; change bit zero
     * 3  Reference bit one; change bit one
     */

    return re >> 1;
}
/* compare and swap and purge */
uint32_t HELPER(csp)(CPUS390XState *env, uint32_t r1, uint64_t r2)
{
    uint32_t cc;
    uint32_t o1 = env->regs[r1];
    uint64_t a2 = r2 & ~3ULL;
    uint32_t o2 = cpu_ldl_data(env, a2);

    if (o1 == o2) {
        cpu_stl_data(env, a2, env->regs[(r1 + 1) & 15]);
        if (r2 & 0x3) {
            /* flush TLB / ALB */
            tlb_flush(env, 1);
        }
        cc = 0;
    } else {
        env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | o2;
        cc = 1;
    }

    return cc;
}
static uint32_t mvc_asc(CPUS390XState *env, int64_t l, uint64_t a1,
                        uint64_t mode1, uint64_t a2, uint64_t mode2)
{
    target_ulong src, dest;
    int flags, cc = 0, i;

    if (!l) {
        return 0;
    } else if (l > 256) {
        /* max 256 */
        l = 256;
        cc = 3;
    }

    if (mmu_translate(env, a1 & TARGET_PAGE_MASK, 1, mode1, &dest, &flags)) {
        cpu_loop_exit(env);
    }
    dest |= a1 & ~TARGET_PAGE_MASK;

    if (mmu_translate(env, a2 & TARGET_PAGE_MASK, 0, mode2, &src, &flags)) {
        cpu_loop_exit(env);
    }
    src |= a2 & ~TARGET_PAGE_MASK;

    /* XXX replace w/ memcpy */
    for (i = 0; i < l; i++) {
        /* XXX be more clever */
        if ((((dest + i) & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) ||
            (((src + i) & TARGET_PAGE_MASK) != (src & TARGET_PAGE_MASK))) {
            mvc_asc(env, l - i, a1 + i, mode1, a2 + i, mode2);
            break;
        }
        stb_phys(dest + i, ldub_phys(src + i));
    }

    return cc;
}
uint32_t HELPER(mvcs)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2)
{
    HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
               __func__, l, a1, a2);

    return mvc_asc(env, l, a1, PSW_ASC_SECONDARY, a2, PSW_ASC_PRIMARY);
}
uint32_t HELPER(mvcp)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2)
{
    HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
               __func__, l, a1, a2);

    return mvc_asc(env, l, a1, PSW_ASC_PRIMARY, a2, PSW_ASC_SECONDARY);
}
/* invalidate pte */
void HELPER(ipte)(CPUS390XState *env, uint64_t pte_addr, uint64_t vaddr)
{
    uint64_t page = vaddr & TARGET_PAGE_MASK;
    uint64_t pte = 0;

    /* XXX broadcast to other CPUs */

    /* XXX Linux is nice enough to give us the exact pte address.
       According to spec we'd have to find it out ourselves */
    /* XXX Linux is fine with overwriting the pte, the spec requires
       us to only set the invalid bit */
    stq_phys(pte_addr, pte | _PAGE_INVALID);

    /* XXX we exploit the fact that Linux passes the exact virtual
       address here - it's not obliged to! */
    tlb_flush_page(env, page);

    /* XXX 31-bit hack */
    if (page & 0x80000000) {
        tlb_flush_page(env, page & ~0x80000000);
    } else {
        tlb_flush_page(env, page | 0x80000000);
    }
}
/* flush local tlb */
void HELPER(ptlb)(CPUS390XState *env)
{
    tlb_flush(env, 1);
}
/* store using real address */
void HELPER(stura)(CPUS390XState *env, uint64_t addr, uint64_t v1)
{
    /* STORE USING REAL ADDRESS stores a 4-byte word (bits 32-63 of r1).  */
    stl_phys(get_address(env, 0, 0, addr), (uint32_t)v1);
}
/* load real address */
uint64_t HELPER(lra)(CPUS390XState *env, uint64_t addr)
{
    uint32_t cc = 0;
    int old_exc = env->exception_index;
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;
    uint64_t ret;
    int flags;

    /* XXX incomplete - has more corner cases */
    if (!(env->psw.mask & PSW_MASK_64) && (addr >> 32)) {
        program_interrupt(env, PGM_SPECIAL_OP, 2);
    }

    env->exception_index = old_exc;
    if (mmu_translate(env, addr, 0, asc, &ret, &flags)) {
        cc = 3;
    }
    if (env->exception_index == EXCP_PGM) {
        ret = env->int_pgm_code | 0x80000000;
    } else {
        ret |= addr & ~TARGET_PAGE_MASK;
    }
    env->exception_index = old_exc;

    env->cc_op = cc;
    return ret;
}
#endif