[qemu.git] / target-s390x / mem_helper.c

/*
 *  S/390 memory access helper routines
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2009 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "helper.h"

/*****************************************************************************/
/* Softmmu support */
#if !defined(CONFIG_USER_ONLY)
#include "exec/softmmu_exec.h"

#define MMUSUFFIX _mmu
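
/* Each inclusion of "exec/softmmu_template.h" below instantiates the
   slow-path MMU load/store helpers for one access size: SHIFT 0, 1, 2
   and 3 correspond to 1-, 2-, 4- and 8-byte accesses respectively. */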

#define SHIFT 0
#include "exec/softmmu_template.h"

#define SHIFT 1
#include "exec/softmmu_template.h"

#define SHIFT 2
#include "exec/softmmu_template.h"

#define SHIFT 3
#include "exec/softmmu_template.h"

/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx,
              uintptr_t retaddr)
{
    int ret;

    ret = s390_cpu_handle_mmu_fault(cs, addr, is_write, mmu_idx);
    if (unlikely(ret != 0)) {
        if (likely(retaddr)) {
            /* now we have a real cpu fault */
            cpu_restore_state(cs, retaddr);
        }
        cpu_loop_exit(cs);
    }
}

#endif

/* #define DEBUG_HELPER */
#ifdef DEBUG_HELPER
#define HELPER_LOG(x...) qemu_log(x)
#else
#define HELPER_LOG(x...)
#endif

#ifndef CONFIG_USER_ONLY
static void mvc_fast_memset(CPUS390XState *env, uint32_t l, uint64_t dest,
                            uint8_t byte)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    hwaddr dest_phys;
    hwaddr len = l;
    void *dest_p;
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;
    int flags;

    if (mmu_translate(env, dest, 1, asc, &dest_phys, &flags)) {
        cpu_stb_data(env, dest, byte);
        cpu_abort(CPU(cpu), "should never reach here");
    }
    dest_phys |= dest & ~TARGET_PAGE_MASK;

    dest_p = cpu_physical_memory_map(dest_phys, &len, 1);

    memset(dest_p, byte, len);

    cpu_physical_memory_unmap(dest_p, 1, len, len);
}

static void mvc_fast_memmove(CPUS390XState *env, uint32_t l, uint64_t dest,
                             uint64_t src)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    hwaddr dest_phys;
    hwaddr src_phys;
    hwaddr len = l;
    void *dest_p;
    void *src_p;
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;
    int flags;

    if (mmu_translate(env, dest, 1, asc, &dest_phys, &flags)) {
        cpu_stb_data(env, dest, 0);
        cpu_abort(CPU(cpu), "should never reach here");
    }
    dest_phys |= dest & ~TARGET_PAGE_MASK;

    if (mmu_translate(env, src, 0, asc, &src_phys, &flags)) {
        cpu_ldub_data(env, src);
        cpu_abort(CPU(cpu), "should never reach here");
    }
    src_phys |= src & ~TARGET_PAGE_MASK;

    dest_p = cpu_physical_memory_map(dest_phys, &len, 1);
    src_p = cpu_physical_memory_map(src_phys, &len, 0);

    memmove(dest_p, src_p, len);

    cpu_physical_memory_unmap(dest_p, 1, len, len);
    cpu_physical_memory_unmap(src_p, 0, len, len);
}
#endif

/* and on array */
uint32_t HELPER(nc)(CPUS390XState *env, uint32_t l, uint64_t dest,
                    uint64_t src)
{
    int i;
    unsigned char x;
    uint32_t cc = 0;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __func__, l, dest, src);
    for (i = 0; i <= l; i++) {
        x = cpu_ldub_data(env, dest + i) & cpu_ldub_data(env, src + i);
        if (x) {
            cc = 1;
        }
        cpu_stb_data(env, dest + i, x);
    }
    return cc;
}

/* xor on array */
uint32_t HELPER(xc)(CPUS390XState *env, uint32_t l, uint64_t dest,
                    uint64_t src)
{
    int i;
    unsigned char x;
    uint32_t cc = 0;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __func__, l, dest, src);

#ifndef CONFIG_USER_ONLY
    /* xor with itself is the same as memset(0) */
    if ((l > 32) && (src == dest) &&
        (src & TARGET_PAGE_MASK) == ((src + l) & TARGET_PAGE_MASK)) {
        mvc_fast_memset(env, l + 1, dest, 0);
        return 0;
    }
#else
    if (src == dest) {
        memset(g2h(dest), 0, l + 1);
        return 0;
    }
#endif

    for (i = 0; i <= l; i++) {
        x = cpu_ldub_data(env, dest + i) ^ cpu_ldub_data(env, src + i);
        if (x) {
            cc = 1;
        }
        cpu_stb_data(env, dest + i, x);
    }
    return cc;
}

/* or on array */
uint32_t HELPER(oc)(CPUS390XState *env, uint32_t l, uint64_t dest,
                    uint64_t src)
{
    int i;
    unsigned char x;
    uint32_t cc = 0;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __func__, l, dest, src);
    for (i = 0; i <= l; i++) {
        x = cpu_ldub_data(env, dest + i) | cpu_ldub_data(env, src + i);
        if (x) {
            cc = 1;
        }
        cpu_stb_data(env, dest + i, x);
    }
    return cc;
}
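
/* MVC moves l + 1 bytes from left to right.  A destination that starts one
   byte past the source is the classic idiom for propagating a single byte
   through the destination, which is why that case is handled as a memset
   below. */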

/* memmove */
void HELPER(mvc)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
{
    int i = 0;
    int x = 0;
    uint32_t l_64 = (l + 1) / 8;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __func__, l, dest, src);

#ifndef CONFIG_USER_ONLY
    if ((l > 32) &&
        (src & TARGET_PAGE_MASK) == ((src + l) & TARGET_PAGE_MASK) &&
        (dest & TARGET_PAGE_MASK) == ((dest + l) & TARGET_PAGE_MASK)) {
        if (dest == (src + 1)) {
            mvc_fast_memset(env, l + 1, dest, cpu_ldub_data(env, src));
            return;
        } else if ((src & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) {
            mvc_fast_memmove(env, l + 1, dest, src);
            return;
        }
    }
#else
    if (dest == (src + 1)) {
        memset(g2h(dest), cpu_ldub_data(env, src), l + 1);
        return;
    } else {
        memmove(g2h(dest), g2h(src), l + 1);
        return;
    }
#endif

    /* handle the parts that fit into 8-byte loads/stores */
    if (dest != (src + 1)) {
        for (i = 0; i < l_64; i++) {
            cpu_stq_data(env, dest + x, cpu_ldq_data(env, src + x));
            x += 8;
        }
    }

    /* slow version crossing pages with byte accesses */
    for (i = x; i <= l; i++) {
        cpu_stb_data(env, dest + i, cpu_ldub_data(env, src + i));
    }
}

/* compare unsigned byte arrays */
uint32_t HELPER(clc)(CPUS390XState *env, uint32_t l, uint64_t s1, uint64_t s2)
{
    int i;
    unsigned char x, y;
    uint32_t cc;

    HELPER_LOG("%s l %d s1 %" PRIx64 " s2 %" PRIx64 "\n",
               __func__, l, s1, s2);
    for (i = 0; i <= l; i++) {
        x = cpu_ldub_data(env, s1 + i);
        y = cpu_ldub_data(env, s2 + i);
        HELPER_LOG("%02x (%c)/%02x (%c) ", x, x, y, y);
        if (x < y) {
            cc = 1;
            goto done;
        } else if (x > y) {
            cc = 2;
            goto done;
        }
    }
    cc = 0;
 done:
    HELPER_LOG("\n");
    return cc;
}

/* compare logical under mask */
uint32_t HELPER(clm)(CPUS390XState *env, uint32_t r1, uint32_t mask,
                     uint64_t addr)
{
    uint8_t r, d;
    uint32_t cc;

    HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%" PRIx64 "\n", __func__, r1,
               mask, addr);
    cc = 0;
    while (mask) {
        if (mask & 8) {
            d = cpu_ldub_data(env, addr);
            r = (r1 & 0xff000000UL) >> 24;
            HELPER_LOG("mask 0x%x %02x/%02x (0x%" PRIx64 ") ", mask, r, d,
                       addr);
            if (r < d) {
                cc = 1;
                break;
            } else if (r > d) {
                cc = 2;
                break;
            }
            addr++;
        }
        mask = (mask << 1) & 0xf;
        r1 <<= 8;
    }
    HELPER_LOG("\n");
    return cc;
}

static inline uint64_t fix_address(CPUS390XState *env, uint64_t a)
{
    /* 31-Bit mode */
    if (!(env->psw.mask & PSW_MASK_64)) {
        a &= 0x7fffffff;
    }
    return a;
}

static inline uint64_t get_address(CPUS390XState *env, int x2, int b2, int d2)
{
    uint64_t r = d2;
    if (x2) {
        r += env->regs[x2];
    }
    if (b2) {
        r += env->regs[b2];
    }
    return fix_address(env, r);
}

static inline uint64_t get_address_31fix(CPUS390XState *env, int reg)
{
    return fix_address(env, env->regs[reg]);
}

/* search string (c is byte to search, r2 is string, r1 end of string) */
uint64_t HELPER(srst)(CPUS390XState *env, uint64_t r0, uint64_t end,
                      uint64_t str)
{
    uint32_t len;
    uint8_t v, c = r0;

    str = fix_address(env, str);
    end = fix_address(env, end);

    /* Assume for now that R2 is unmodified.  */
    env->retxl = str;

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 8k.  */
    for (len = 0; len < 0x2000; ++len) {
        if (str + len == end) {
            /* Character not found.  R1 & R2 are unmodified.  */
            env->cc_op = 2;
            return end;
        }
        v = cpu_ldub_data(env, str + len);
        if (v == c) {
            /* Character found.  Set R1 to the location; R2 is unmodified.  */
            env->cc_op = 1;
            return str + len;
        }
    }

    /* CPU-determined bytes processed.  Advance R2 to next byte to process.  */
    env->retxl = str + len;
    env->cc_op = 3;
    return end;
}

/* unsigned string compare (c is string terminator) */
uint64_t HELPER(clst)(CPUS390XState *env, uint64_t c, uint64_t s1, uint64_t s2)
{
    uint32_t len;

    c = c & 0xff;
    s1 = fix_address(env, s1);
    s2 = fix_address(env, s2);

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 8k.  */
    for (len = 0; len < 0x2000; ++len) {
        uint8_t v1 = cpu_ldub_data(env, s1 + len);
        uint8_t v2 = cpu_ldub_data(env, s2 + len);
        if (v1 == v2) {
            if (v1 == c) {
                /* Equal.  CC=0, and don't advance the registers.  */
                env->cc_op = 0;
                env->retxl = s2;
                return s1;
            }
        } else {
            /* Unequal.  CC={1,2}, and advance the registers.  Note that
               the terminator need not be zero, but the string that contains
               the terminator is by definition "low".  */
            env->cc_op = (v1 == c ? 1 : v2 == c ? 2 : v1 < v2 ? 1 : 2);
            env->retxl = s2 + len;
            return s1 + len;
        }
    }

    /* CPU-determined bytes equal; advance the registers.  */
    env->cc_op = 3;
    env->retxl = s2 + len;
    return s1 + len;
}

/* move page */
void HELPER(mvpg)(CPUS390XState *env, uint64_t r0, uint64_t r1, uint64_t r2)
{
    /* XXX missing r0 handling */
    env->cc_op = 0;
#ifdef CONFIG_USER_ONLY
    memmove(g2h(r1), g2h(r2), TARGET_PAGE_SIZE);
#else
    mvc_fast_memmove(env, TARGET_PAGE_SIZE, r1, r2);
#endif
}

/* string copy (c is string terminator) */
uint64_t HELPER(mvst)(CPUS390XState *env, uint64_t c, uint64_t d, uint64_t s)
{
    uint32_t len;

    c = c & 0xff;
    d = fix_address(env, d);
    s = fix_address(env, s);

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 8k.  */
    for (len = 0; len < 0x2000; ++len) {
        uint8_t v = cpu_ldub_data(env, s + len);
        cpu_stb_data(env, d + len, v);
        if (v == c) {
            /* Complete.  Set CC=1 and advance R1.  */
            env->cc_op = 1;
            env->retxl = s;
            return d + len;
        }
    }

    /* Incomplete.  Set CC=3 and signal to advance R1 and R2.  */
    env->cc_op = 3;
    env->retxl = s + len;
    return d + len;
}
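
/* insert characters under mask: copy bytes from memory into the fields of
   the low word of r1 selected by the 4-bit mask; used by EXECUTE below for
   the 0xbf (ICM) case.  The returned condition code follows ICM: 0 if all
   inserted bits are zero (or the mask is zero), 1 if the leftmost inserted
   bit is one, 2 if the leftmost inserted bit is zero but not all are. */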

static uint32_t helper_icm(CPUS390XState *env, uint32_t r1, uint64_t address,
                           uint32_t mask)
{
    int pos = 24; /* top of the lower half of r1 */
    uint64_t rmask = 0xff000000ULL;
    uint8_t val = 0;
    int ccd = 0;
    uint32_t cc = 0;

    while (mask) {
        if (mask & 8) {
            env->regs[r1] &= ~rmask;
            val = cpu_ldub_data(env, address);
            if ((val & 0x80) && !ccd) {
                cc = 1;
            }
            ccd = 1;
            if (val && cc == 0) {
                cc = 2;
            }
            env->regs[r1] |= (uint64_t)val << pos;
            address++;
        }
        mask = (mask << 1) & 0xf;
        pos -= 8;
        rmask >>= 8;
    }

    return cc;
}

/* execute instruction
   this instruction executes an insn modified with the contents of r1
   it does not change the executed instruction in memory
   it does not change the program counter
   in other words: tricky...
   currently implemented by interpreting the cases it is most commonly used in
*/
uint32_t HELPER(ex)(CPUS390XState *env, uint32_t cc, uint64_t v1,
                    uint64_t addr, uint64_t ret)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint16_t insn = cpu_lduw_code(env, addr);

    HELPER_LOG("%s: v1 0x%lx addr 0x%lx insn 0x%x\n", __func__, v1, addr,
               insn);
    if ((insn & 0xf0ff) == 0xd000) {
        uint32_t l, insn2, b1, b2, d1, d2;

        l = v1 & 0xff;
        insn2 = cpu_ldl_code(env, addr + 2);
        b1 = (insn2 >> 28) & 0xf;
        b2 = (insn2 >> 12) & 0xf;
        d1 = (insn2 >> 16) & 0xfff;
        d2 = insn2 & 0xfff;
        switch (insn & 0xf00) {
        case 0x200:
            helper_mvc(env, l, get_address(env, 0, b1, d1),
                       get_address(env, 0, b2, d2));
            break;
        case 0x500:
            cc = helper_clc(env, l, get_address(env, 0, b1, d1),
                            get_address(env, 0, b2, d2));
            break;
        case 0x700:
            cc = helper_xc(env, l, get_address(env, 0, b1, d1),
                           get_address(env, 0, b2, d2));
            break;
        case 0xc00:
            helper_tr(env, l, get_address(env, 0, b1, d1),
                      get_address(env, 0, b2, d2));
            break;
        default:
            goto abort;
        }
    } else if ((insn & 0xff00) == 0x0a00) {
        /* supervisor call */
        HELPER_LOG("%s: svc %ld via execute\n", __func__, (insn | v1) & 0xff);
        env->psw.addr = ret - 4;
        env->int_svc_code = (insn | v1) & 0xff;
        env->int_svc_ilen = 4;
        helper_exception(env, EXCP_SVC);
    } else if ((insn & 0xff00) == 0xbf00) {
        uint32_t insn2, r1, r3, b2, d2;

        insn2 = cpu_ldl_code(env, addr + 2);
        r1 = (insn2 >> 20) & 0xf;
        r3 = (insn2 >> 16) & 0xf;
        b2 = (insn2 >> 12) & 0xf;
        d2 = insn2 & 0xfff;
        cc = helper_icm(env, r1, get_address(env, 0, b2, d2), r3);
    } else {
    abort:
        cpu_abort(CPU(cpu), "EXECUTE on instruction prefix 0x%x not implemented\n",
                  insn);
    }
    return cc;
}

/* load access registers r1 to r3 from memory at a2 */
void HELPER(lam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    int i;

    for (i = r1;; i = (i + 1) % 16) {
        env->aregs[i] = cpu_ldl_data(env, a2);
        a2 += 4;

        if (i == r3) {
            break;
        }
    }
}

/* store access registers r1 to r3 in memory at a2 */
void HELPER(stam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    int i;

    for (i = r1;; i = (i + 1) % 16) {
        cpu_stl_data(env, a2, env->aregs[i]);
        a2 += 4;

        if (i == r3) {
            break;
        }
    }
}
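
/* MVCL copies up to 2^24 - 1 bytes (the lengths are taken modulo 2^24); when
   the source is exhausted before the destination, the remainder of the
   destination is filled with the pad byte.  The address/length register
   pairs are updated on completion, and cc reflects the original lengths:
   0 equal, 1 first operand shorter, 2 first operand longer. */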

/* move long */
uint32_t HELPER(mvcl)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    uint64_t destlen = env->regs[r1 + 1] & 0xffffff;
    uint64_t dest = get_address_31fix(env, r1);
    uint64_t srclen = env->regs[r2 + 1] & 0xffffff;
    uint64_t src = get_address_31fix(env, r2);
    uint8_t pad = src >> 24;
    uint8_t v;
    uint32_t cc;

    if (destlen == srclen) {
        cc = 0;
    } else if (destlen < srclen) {
        cc = 1;
    } else {
        cc = 2;
    }

    if (srclen > destlen) {
        srclen = destlen;
    }

    for (; destlen && srclen; src++, dest++, destlen--, srclen--) {
        v = cpu_ldub_data(env, src);
        cpu_stb_data(env, dest, v);
    }

    for (; destlen; dest++, destlen--) {
        cpu_stb_data(env, dest, pad);
    }

    env->regs[r1 + 1] = destlen;
    /* can't use srclen here, we trunc'ed it */
    env->regs[r2 + 1] -= src - env->regs[r2];
    env->regs[r1] = dest;
    env->regs[r2] = src;

    return cc;
}

/* move long extended   another memcopy insn with more bells and whistles */
uint32_t HELPER(mvcle)(CPUS390XState *env, uint32_t r1, uint64_t a2,
                       uint32_t r3)
{
    uint64_t destlen = env->regs[r1 + 1];
    uint64_t dest = env->regs[r1];
    uint64_t srclen = env->regs[r3 + 1];
    uint64_t src = env->regs[r3];
    uint8_t pad = a2 & 0xff;
    uint8_t v;
    uint32_t cc;

    if (!(env->psw.mask & PSW_MASK_64)) {
        destlen = (uint32_t)destlen;
        srclen = (uint32_t)srclen;
        dest &= 0x7fffffff;
        src &= 0x7fffffff;
    }

    if (destlen == srclen) {
        cc = 0;
    } else if (destlen < srclen) {
        cc = 1;
    } else {
        cc = 2;
    }

    if (srclen > destlen) {
        srclen = destlen;
    }

    for (; destlen && srclen; src++, dest++, destlen--, srclen--) {
        v = cpu_ldub_data(env, src);
        cpu_stb_data(env, dest, v);
    }

    for (; destlen; dest++, destlen--) {
        cpu_stb_data(env, dest, pad);
    }

    env->regs[r1 + 1] = destlen;
    /* can't use srclen here, we trunc'ed it */
    /* FIXME: 31-bit mode! */
    env->regs[r3 + 1] -= src - env->regs[r3];
    env->regs[r1] = dest;
    env->regs[r3] = src;

    return cc;
}

/* compare logical long extended   memcompare insn with padding */
uint32_t HELPER(clcle)(CPUS390XState *env, uint32_t r1, uint64_t a2,
                       uint32_t r3)
{
    uint64_t destlen = env->regs[r1 + 1];
    uint64_t dest = get_address_31fix(env, r1);
    uint64_t srclen = env->regs[r3 + 1];
    uint64_t src = get_address_31fix(env, r3);
    uint8_t pad = a2 & 0xff;
    uint8_t v1 = 0, v2 = 0;
    uint32_t cc = 0;

    if (!(destlen || srclen)) {
        return cc;
    }

    if (srclen > destlen) {
        srclen = destlen;
    }

    for (; destlen || srclen; src++, dest++, destlen--, srclen--) {
        v1 = srclen ? cpu_ldub_data(env, src) : pad;
        v2 = destlen ? cpu_ldub_data(env, dest) : pad;
        if (v1 != v2) {
            cc = (v1 < v2) ? 1 : 2;
            break;
        }
    }

    env->regs[r1 + 1] = destlen;
    /* can't use srclen here, we trunc'ed it */
    env->regs[r3 + 1] -= src - env->regs[r3];
    env->regs[r1] = dest;
    env->regs[r3] = src;

    return cc;
}

/* checksum */
uint64_t HELPER(cksm)(CPUS390XState *env, uint64_t r1,
                      uint64_t src, uint64_t src_len)
{
    uint64_t max_len, len;
    uint64_t cksm = (uint32_t)r1;

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 8k.  */
    max_len = (src_len > 0x2000 ? 0x2000 : src_len);

    /* Process full words as available.  */
    for (len = 0; len + 4 <= max_len; len += 4, src += 4) {
        cksm += (uint32_t)cpu_ldl_data(env, src);
    }

    switch (max_len - len) {
    case 1:
        cksm += cpu_ldub_data(env, src) << 24;
        len += 1;
        break;
    case 2:
        cksm += cpu_lduw_data(env, src) << 16;
        len += 2;
        break;
    case 3:
        cksm += cpu_lduw_data(env, src) << 16;
        cksm += cpu_ldub_data(env, src + 2) << 8;
        len += 3;
        break;
    }

    /* Fold the carry from the checksum.  Note that we can see carry-out
       during folding more than once (but probably not more than twice).  */
    while (cksm > 0xffffffffull) {
        cksm = (uint32_t)cksm + (cksm >> 32);
    }

    /* Indicate whether or not we've processed everything.  */
    env->cc_op = (len == src_len ? 0 : 3);

    /* Return both cksm and processed length.  */
    env->retxl = cksm;
    return len;
}
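
/* UNPK expands packed-decimal data at src into zoned format at dest, working
   right to left: the rightmost byte only has its nibbles swapped, every other
   digit nibble is widened to a byte with the zone bits set to 0xf0.  The two
   operand lengths are packed into the nibbles of 'len', as in the SS
   instruction format. */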

void HELPER(unpk)(CPUS390XState *env, uint32_t len, uint64_t dest,
                  uint64_t src)
{
    int len_dest = len >> 4;
    int len_src = len & 0xf;
    uint8_t b;
    int second_nibble = 0;

    dest += len_dest;
    src += len_src;

    /* last byte is special, it only flips the nibbles */
    b = cpu_ldub_data(env, src);
    cpu_stb_data(env, dest, (b << 4) | (b >> 4));
    src--;
    len_src--;

    /* now pad every nibble with 0xf0 */

    while (len_dest > 0) {
        uint8_t cur_byte = 0;

        if (len_src > 0) {
            cur_byte = cpu_ldub_data(env, src);
        }

        len_dest--;
        dest--;

        /* only advance one nibble at a time */
        if (second_nibble) {
            cur_byte >>= 4;
            len_src--;
            src--;
        }
        second_nibble = !second_nibble;

        /* digit */
        cur_byte = (cur_byte & 0xf);
        /* zone bits */
        cur_byte |= 0xf0;

        cpu_stb_data(env, dest, cur_byte);
    }
}

void HELPER(tr)(CPUS390XState *env, uint32_t len, uint64_t array,
                uint64_t trans)
{
    int i;

    for (i = 0; i <= len; i++) {
        uint8_t byte = cpu_ldub_data(env, array + i);
        uint8_t new_byte = cpu_ldub_data(env, trans + byte);

        cpu_stb_data(env, array + i, new_byte);
    }
}
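
/* The remaining helpers implement privileged operations (control registers,
   storage keys, TLB management, real-address accesses) and are only built
   for the system-emulation targets. */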

#if !defined(CONFIG_USER_ONLY)
void HELPER(lctlg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    int i;
    uint64_t src = a2;

    for (i = r1;; i = (i + 1) % 16) {
        env->cregs[i] = cpu_ldq_data(env, src);
        HELPER_LOG("load ctl %d from 0x%" PRIx64 " == 0x%" PRIx64 "\n",
                   i, src, env->cregs[i]);
        src += sizeof(uint64_t);

        if (i == r3) {
            break;
        }
    }

    tlb_flush(CPU(cpu), 1);
}

void HELPER(lctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    int i;
    uint64_t src = a2;

    for (i = r1;; i = (i + 1) % 16) {
        env->cregs[i] = (env->cregs[i] & 0xFFFFFFFF00000000ULL) |
            cpu_ldl_data(env, src);
        src += sizeof(uint32_t);

        if (i == r3) {
            break;
        }
    }

    tlb_flush(CPU(cpu), 1);
}

void HELPER(stctg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    int i;
    uint64_t dest = a2;

    for (i = r1;; i = (i + 1) % 16) {
        cpu_stq_data(env, dest, env->cregs[i]);
        dest += sizeof(uint64_t);

        if (i == r3) {
            break;
        }
    }
}

void HELPER(stctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    int i;
    uint64_t dest = a2;

    for (i = r1;; i = (i + 1) % 16) {
        cpu_stl_data(env, dest, env->cregs[i]);
        dest += sizeof(uint32_t);

        if (i == r3) {
            break;
        }
    }
}

uint32_t HELPER(tprot)(uint64_t a1, uint64_t a2)
{
    /* XXX implement */

    return 0;
}

/* insert storage key extended */
uint64_t HELPER(iske)(CPUS390XState *env, uint64_t r2)
{
    uint64_t addr = get_address(env, 0, 0, r2);

    if (addr > ram_size) {
        return 0;
    }

    return env->storage_keys[addr / TARGET_PAGE_SIZE];
}

/* set storage key extended */
void HELPER(sske)(CPUS390XState *env, uint64_t r1, uint64_t r2)
{
    uint64_t addr = get_address(env, 0, 0, r2);

    if (addr > ram_size) {
        return;
    }

    env->storage_keys[addr / TARGET_PAGE_SIZE] = r1;
}

/* reset reference bit extended */
uint32_t HELPER(rrbe)(CPUS390XState *env, uint64_t r2)
{
    uint8_t re;
    uint8_t key;

    if (r2 > ram_size) {
        return 0;
    }

    key = env->storage_keys[r2 / TARGET_PAGE_SIZE];
    re = key & (SK_R | SK_C);
    env->storage_keys[r2 / TARGET_PAGE_SIZE] = (key & ~SK_R);

    /*
     * cc
     *
     * 0  Reference bit zero; change bit zero
     * 1  Reference bit zero; change bit one
     * 2  Reference bit one; change bit zero
     * 3  Reference bit one; change bit one
     */

    return re >> 1;
}

/* compare and swap and purge */
uint32_t HELPER(csp)(CPUS390XState *env, uint32_t r1, uint64_t r2)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint32_t cc;
    uint32_t o1 = env->regs[r1];
    uint64_t a2 = r2 & ~3ULL;
    uint32_t o2 = cpu_ldl_data(env, a2);

    if (o1 == o2) {
        cpu_stl_data(env, a2, env->regs[(r1 + 1) & 15]);
        if (r2 & 0x3) {
            /* flush TLB / ALB */
            tlb_flush(CPU(cpu), 1);
        }
        cc = 0;
    } else {
        env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | o2;
        cc = 1;
    }

    return cc;
}
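
/* Copy up to 256 bytes between two address spaces, used by MVCS and MVCP
   below.  Both operands are translated with the given ASC modes and copied
   through their physical addresses; the recursive call restarts the copy
   whenever either operand crosses a page boundary. */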

static uint32_t mvc_asc(CPUS390XState *env, int64_t l, uint64_t a1,
                        uint64_t mode1, uint64_t a2, uint64_t mode2)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));
    target_ulong src, dest;
    int flags, cc = 0, i;

    if (!l) {
        return 0;
    } else if (l > 256) {
        /* max 256 */
        l = 256;
        cc = 3;
    }

    if (mmu_translate(env, a1 & TARGET_PAGE_MASK, 1, mode1, &dest, &flags)) {
        cpu_loop_exit(CPU(s390_env_get_cpu(env)));
    }
    dest |= a1 & ~TARGET_PAGE_MASK;

    if (mmu_translate(env, a2 & TARGET_PAGE_MASK, 0, mode2, &src, &flags)) {
        cpu_loop_exit(CPU(s390_env_get_cpu(env)));
    }
    src |= a2 & ~TARGET_PAGE_MASK;

    /* XXX replace w/ memcpy */
    for (i = 0; i < l; i++) {
        /* XXX be more clever */
        if ((((dest + i) & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) ||
            (((src + i) & TARGET_PAGE_MASK) != (src & TARGET_PAGE_MASK))) {
            mvc_asc(env, l - i, a1 + i, mode1, a2 + i, mode2);
            break;
        }
        stb_phys(cs->as, dest + i, ldub_phys(cs->as, src + i));
    }

    return cc;
}

uint32_t HELPER(mvcs)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2)
{
    HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
               __func__, l, a1, a2);

    return mvc_asc(env, l, a1, PSW_ASC_SECONDARY, a2, PSW_ASC_PRIMARY);
}

uint32_t HELPER(mvcp)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2)
{
    HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
               __func__, l, a1, a2);

    return mvc_asc(env, l, a1, PSW_ASC_PRIMARY, a2, PSW_ASC_SECONDARY);
}

/* invalidate pte */
void HELPER(ipte)(CPUS390XState *env, uint64_t pte_addr, uint64_t vaddr)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));
    uint64_t page = vaddr & TARGET_PAGE_MASK;
    uint64_t pte = 0;

    /* XXX broadcast to other CPUs */

    /* XXX Linux is nice enough to give us the exact pte address.
       According to spec we'd have to find it out ourselves */
    /* XXX Linux is fine with overwriting the pte, the spec requires
       us to only set the invalid bit */
    stq_phys(cs->as, pte_addr, pte | _PAGE_INVALID);

    /* XXX we exploit the fact that Linux passes the exact virtual
       address here - it's not obliged to! */
    tlb_flush_page(cs, page);

    /* XXX 31-bit hack */
    if (page & 0x80000000) {
        tlb_flush_page(cs, page & ~0x80000000);
    } else {
        tlb_flush_page(cs, page | 0x80000000);
    }
}

/* flush local tlb */
void HELPER(ptlb)(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);

    tlb_flush(CPU(cpu), 1);
}

/* store using real address */
void HELPER(stura)(CPUS390XState *env, uint64_t addr, uint64_t v1)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));

    stw_phys(cs->as, get_address(env, 0, 0, addr), (uint32_t)v1);
}

/* load real address */
uint64_t HELPER(lra)(CPUS390XState *env, uint64_t addr)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));
    uint32_t cc = 0;
    int old_exc = cs->exception_index;
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;
    uint64_t ret;
    int flags;

    /* XXX incomplete - has more corner cases */
    if (!(env->psw.mask & PSW_MASK_64) && (addr >> 32)) {
        program_interrupt(env, PGM_SPECIAL_OP, 2);
    }

    cs->exception_index = old_exc;
    if (mmu_translate(env, addr, 0, asc, &ret, &flags)) {
        cc = 3;
    }
    if (cs->exception_index == EXCP_PGM) {
        ret = env->int_pgm_code | 0x80000000;
    } else {
        ret |= addr & ~TARGET_PAGE_MASK;
    }
    cs->exception_index = old_exc;

    env->cc_op = cc;
    return ret;
}
#endif