2 * S/390 memory access helper routines
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2009 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "qemu/osdep.h"
23 #include "exec/helper-proto.h"
24 #include "exec/exec-all.h"
25 #include "exec/cpu_ldst.h"
27 #if !defined(CONFIG_USER_ONLY)
28 #include "hw/s390x/storage-keys.h"
31 /*****************************************************************************/
33 #if !defined(CONFIG_USER_ONLY)
35 /* try to fill the TLB and return an exception if error. If retaddr is
36 NULL, it means that the function was called in C code (i.e. not
37 from generated code or from helper.c) */
38 /* XXX: fix it to restore all registers */
39 void tlb_fill(CPUState
*cs
, target_ulong addr
, MMUAccessType access_type
,
40 int mmu_idx
, uintptr_t retaddr
)
44 ret
= s390_cpu_handle_mmu_fault(cs
, addr
, access_type
, mmu_idx
);
45 if (unlikely(ret
!= 0)) {
46 if (likely(retaddr
)) {
47 /* now we have a real cpu fault */
48 cpu_restore_state(cs
, retaddr
);
/* #define DEBUG_HELPER */
#ifdef DEBUG_HELPER
#define HELPER_LOG(x...) qemu_log(x)
#else
#define HELPER_LOG(x...)
#endif
63 /* Reduce the length so that addr + len doesn't cross a page boundary. */
64 static inline uint64_t adj_len_to_page(uint64_t len
, uint64_t addr
)
66 #ifndef CONFIG_USER_ONLY
67 if ((addr
& ~TARGET_PAGE_MASK
) + len
- 1 >= TARGET_PAGE_SIZE
) {
68 return -addr
& ~TARGET_PAGE_MASK
;
74 static void fast_memset(CPUS390XState
*env
, uint64_t dest
, uint8_t byte
,
77 int mmu_idx
= cpu_mmu_index(env
, false);
80 void *p
= tlb_vaddr_to_host(env
, dest
, MMU_DATA_STORE
, mmu_idx
);
82 /* Access to the whole page in write mode granted. */
83 int l_adj
= adj_len_to_page(l
, dest
);
84 memset(p
, byte
, l_adj
);
88 /* We failed to get access to the whole page. The next write
89 access will likely fill the QEMU TLB for the next iteration. */
90 cpu_stb_data(env
, dest
, byte
);
97 static void fast_memmove(CPUS390XState
*env
, uint64_t dest
, uint64_t src
,
100 int mmu_idx
= cpu_mmu_index(env
, false);
103 void *src_p
= tlb_vaddr_to_host(env
, src
, MMU_DATA_LOAD
, mmu_idx
);
104 void *dest_p
= tlb_vaddr_to_host(env
, dest
, MMU_DATA_STORE
, mmu_idx
);
105 if (src_p
&& dest_p
) {
106 /* Access to both whole pages granted. */
107 int l_adj
= adj_len_to_page(l
, src
);
108 l_adj
= adj_len_to_page(l_adj
, dest
);
109 memmove(dest_p
, src_p
, l_adj
);
114 /* We failed to get access to one or both whole pages. The next
115 read or write access will likely fill the QEMU TLB for the
117 cpu_stb_data(env
, dest
, cpu_ldub_data(env
, src
));
126 uint32_t HELPER(nc
)(CPUS390XState
*env
, uint32_t l
, uint64_t dest
,
133 HELPER_LOG("%s l %d dest %" PRIx64
" src %" PRIx64
"\n",
134 __func__
, l
, dest
, src
);
135 for (i
= 0; i
<= l
; i
++) {
136 x
= cpu_ldub_data(env
, dest
+ i
) & cpu_ldub_data(env
, src
+ i
);
140 cpu_stb_data(env
, dest
+ i
, x
);
146 uint32_t HELPER(xc
)(CPUS390XState
*env
, uint32_t l
, uint64_t dest
,
153 HELPER_LOG("%s l %d dest %" PRIx64
" src %" PRIx64
"\n",
154 __func__
, l
, dest
, src
);
156 /* xor with itself is the same as memset(0) */
158 fast_memset(env
, dest
, 0, l
+ 1);
162 for (i
= 0; i
<= l
; i
++) {
163 x
= cpu_ldub_data(env
, dest
+ i
) ^ cpu_ldub_data(env
, src
+ i
);
167 cpu_stb_data(env
, dest
+ i
, x
);
173 uint32_t HELPER(oc
)(CPUS390XState
*env
, uint32_t l
, uint64_t dest
,
180 HELPER_LOG("%s l %d dest %" PRIx64
" src %" PRIx64
"\n",
181 __func__
, l
, dest
, src
);
182 for (i
= 0; i
<= l
; i
++) {
183 x
= cpu_ldub_data(env
, dest
+ i
) | cpu_ldub_data(env
, src
+ i
);
187 cpu_stb_data(env
, dest
+ i
, x
);
193 void HELPER(mvc
)(CPUS390XState
*env
, uint32_t l
, uint64_t dest
, uint64_t src
)
197 HELPER_LOG("%s l %d dest %" PRIx64
" src %" PRIx64
"\n",
198 __func__
, l
, dest
, src
);
200 /* mvc with source pointing to the byte after the destination is the
201 same as memset with the first source byte */
202 if (dest
== (src
+ 1)) {
203 fast_memset(env
, dest
, cpu_ldub_data(env
, src
), l
+ 1);
207 /* mvc and memmove do not behave the same when areas overlap! */
208 if ((dest
< src
) || (src
+ l
< dest
)) {
209 fast_memmove(env
, dest
, src
, l
+ 1);
213 /* slow version with byte accesses which always work */
214 for (i
= 0; i
<= l
; i
++) {
215 cpu_stb_data(env
, dest
+ i
, cpu_ldub_data(env
, src
+ i
));
219 /* compare unsigned byte arrays */
220 uint32_t HELPER(clc
)(CPUS390XState
*env
, uint32_t l
, uint64_t s1
, uint64_t s2
)
226 HELPER_LOG("%s l %d s1 %" PRIx64
" s2 %" PRIx64
"\n",
227 __func__
, l
, s1
, s2
);
228 for (i
= 0; i
<= l
; i
++) {
229 x
= cpu_ldub_data(env
, s1
+ i
);
230 y
= cpu_ldub_data(env
, s2
+ i
);
231 HELPER_LOG("%02x (%c)/%02x (%c) ", x
, x
, y
, y
);
246 /* compare logical under mask */
247 uint32_t HELPER(clm
)(CPUS390XState
*env
, uint32_t r1
, uint32_t mask
,
253 HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%" PRIx64
"\n", __func__
, r1
,
258 d
= cpu_ldub_data(env
, addr
);
259 r
= (r1
& 0xff000000UL
) >> 24;
260 HELPER_LOG("mask 0x%x %02x/%02x (0x%" PRIx64
") ", mask
, r
, d
,
271 mask
= (mask
<< 1) & 0xf;
278 static inline uint64_t fix_address(CPUS390XState
*env
, uint64_t a
)
281 if (!(env
->psw
.mask
& PSW_MASK_64
)) {
287 static inline uint64_t get_address(CPUS390XState
*env
, int x2
, int b2
, int d2
)
296 return fix_address(env
, r
);
299 static inline uint64_t get_address_31fix(CPUS390XState
*env
, int reg
)
301 return fix_address(env
, env
->regs
[reg
]);
304 /* search string (c is byte to search, r2 is string, r1 end of string) */
305 uint64_t HELPER(srst
)(CPUS390XState
*env
, uint64_t r0
, uint64_t end
,
311 str
= fix_address(env
, str
);
312 end
= fix_address(env
, end
);
314 /* Assume for now that R2 is unmodified. */
317 /* Lest we fail to service interrupts in a timely manner, limit the
318 amount of work we're willing to do. For now, let's cap at 8k. */
319 for (len
= 0; len
< 0x2000; ++len
) {
320 if (str
+ len
== end
) {
321 /* Character not found. R1 & R2 are unmodified. */
325 v
= cpu_ldub_data(env
, str
+ len
);
327 /* Character found. Set R1 to the location; R2 is unmodified. */
333 /* CPU-determined bytes processed. Advance R2 to next byte to process. */
334 env
->retxl
= str
+ len
;
339 /* unsigned string compare (c is string terminator) */
340 uint64_t HELPER(clst
)(CPUS390XState
*env
, uint64_t c
, uint64_t s1
, uint64_t s2
)
345 s1
= fix_address(env
, s1
);
346 s2
= fix_address(env
, s2
);
348 /* Lest we fail to service interrupts in a timely manner, limit the
349 amount of work we're willing to do. For now, let's cap at 8k. */
350 for (len
= 0; len
< 0x2000; ++len
) {
351 uint8_t v1
= cpu_ldub_data(env
, s1
+ len
);
352 uint8_t v2
= cpu_ldub_data(env
, s2
+ len
);
355 /* Equal. CC=0, and don't advance the registers. */
361 /* Unequal. CC={1,2}, and advance the registers. Note that
362 the terminator need not be zero, but the string that contains
363 the terminator is by definition "low". */
364 env
->cc_op
= (v1
== c
? 1 : v2
== c
? 2 : v1
< v2
? 1 : 2);
365 env
->retxl
= s2
+ len
;
370 /* CPU-determined bytes equal; advance the registers. */
372 env
->retxl
= s2
+ len
;
377 void HELPER(mvpg
)(CPUS390XState
*env
, uint64_t r0
, uint64_t r1
, uint64_t r2
)
379 /* XXX missing r0 handling */
381 fast_memmove(env
, r1
, r2
, TARGET_PAGE_SIZE
);
384 /* string copy (c is string terminator) */
385 uint64_t HELPER(mvst
)(CPUS390XState
*env
, uint64_t c
, uint64_t d
, uint64_t s
)
390 d
= fix_address(env
, d
);
391 s
= fix_address(env
, s
);
393 /* Lest we fail to service interrupts in a timely manner, limit the
394 amount of work we're willing to do. For now, let's cap at 8k. */
395 for (len
= 0; len
< 0x2000; ++len
) {
396 uint8_t v
= cpu_ldub_data(env
, s
+ len
);
397 cpu_stb_data(env
, d
+ len
, v
);
399 /* Complete. Set CC=1 and advance R1. */
406 /* Incomplete. Set CC=3 and signal to advance R1 and R2. */
408 env
->retxl
= s
+ len
;
412 static uint32_t helper_icm(CPUS390XState
*env
, uint32_t r1
, uint64_t address
,
415 int pos
= 24; /* top of the lower half of r1 */
416 uint64_t rmask
= 0xff000000ULL
;
423 env
->regs
[r1
] &= ~rmask
;
424 val
= cpu_ldub_data(env
, address
);
425 if ((val
& 0x80) && !ccd
) {
429 if (val
&& cc
== 0) {
432 env
->regs
[r1
] |= (uint64_t)val
<< pos
;
435 mask
= (mask
<< 1) & 0xf;
443 /* execute instruction
444 this instruction executes an insn modified with the contents of r1
445 it does not change the executed instruction in memory
446 it does not change the program counter
447 in other words: tricky...
448 currently implemented by interpreting the cases it is most commonly used in
450 uint32_t HELPER(ex
)(CPUS390XState
*env
, uint32_t cc
, uint64_t v1
,
451 uint64_t addr
, uint64_t ret
)
453 S390CPU
*cpu
= s390_env_get_cpu(env
);
454 uint16_t insn
= cpu_lduw_code(env
, addr
);
456 HELPER_LOG("%s: v1 0x%lx addr 0x%lx insn 0x%x\n", __func__
, v1
, addr
,
458 if ((insn
& 0xf0ff) == 0xd000) {
459 uint32_t l
, insn2
, b1
, b2
, d1
, d2
;
462 insn2
= cpu_ldl_code(env
, addr
+ 2);
463 b1
= (insn2
>> 28) & 0xf;
464 b2
= (insn2
>> 12) & 0xf;
465 d1
= (insn2
>> 16) & 0xfff;
467 switch (insn
& 0xf00) {
469 helper_mvc(env
, l
, get_address(env
, 0, b1
, d1
),
470 get_address(env
, 0, b2
, d2
));
473 cc
= helper_nc(env
, l
, get_address(env
, 0, b1
, d1
),
474 get_address(env
, 0, b2
, d2
));
477 cc
= helper_clc(env
, l
, get_address(env
, 0, b1
, d1
),
478 get_address(env
, 0, b2
, d2
));
481 cc
= helper_oc(env
, l
, get_address(env
, 0, b1
, d1
),
482 get_address(env
, 0, b2
, d2
));
485 cc
= helper_xc(env
, l
, get_address(env
, 0, b1
, d1
),
486 get_address(env
, 0, b2
, d2
));
489 helper_tr(env
, l
, get_address(env
, 0, b1
, d1
),
490 get_address(env
, 0, b2
, d2
));
493 cc
= helper_trt(env
, l
, get_address(env
, 0, b1
, d1
),
494 get_address(env
, 0, b2
, d2
));
499 } else if ((insn
& 0xff00) == 0x0a00) {
500 /* supervisor call */
501 HELPER_LOG("%s: svc %ld via execute\n", __func__
, (insn
| v1
) & 0xff);
502 env
->psw
.addr
= ret
- 4;
503 env
->int_svc_code
= (insn
| v1
) & 0xff;
504 env
->int_svc_ilen
= 4;
505 helper_exception(env
, EXCP_SVC
);
506 } else if ((insn
& 0xff00) == 0xbf00) {
507 uint32_t insn2
, r1
, r3
, b2
, d2
;
509 insn2
= cpu_ldl_code(env
, addr
+ 2);
510 r1
= (insn2
>> 20) & 0xf;
511 r3
= (insn2
>> 16) & 0xf;
512 b2
= (insn2
>> 12) & 0xf;
514 cc
= helper_icm(env
, r1
, get_address(env
, 0, b2
, d2
), r3
);
517 cpu_abort(CPU(cpu
), "EXECUTE on instruction prefix 0x%x not implemented\n",
523 /* load access registers r1 to r3 from memory at a2 */
524 void HELPER(lam
)(CPUS390XState
*env
, uint32_t r1
, uint64_t a2
, uint32_t r3
)
528 for (i
= r1
;; i
= (i
+ 1) % 16) {
529 env
->aregs
[i
] = cpu_ldl_data(env
, a2
);
538 /* store access registers r1 to r3 in memory at a2 */
539 void HELPER(stam
)(CPUS390XState
*env
, uint32_t r1
, uint64_t a2
, uint32_t r3
)
543 for (i
= r1
;; i
= (i
+ 1) % 16) {
544 cpu_stl_data(env
, a2
, env
->aregs
[i
]);
554 uint32_t HELPER(mvcl
)(CPUS390XState
*env
, uint32_t r1
, uint32_t r2
)
556 uint64_t destlen
= env
->regs
[r1
+ 1] & 0xffffff;
557 uint64_t dest
= get_address_31fix(env
, r1
);
558 uint64_t srclen
= env
->regs
[r2
+ 1] & 0xffffff;
559 uint64_t src
= get_address_31fix(env
, r2
);
560 uint8_t pad
= env
->regs
[r2
+ 1] >> 24;
564 if (destlen
== srclen
) {
566 } else if (destlen
< srclen
) {
572 if (srclen
> destlen
) {
576 for (; destlen
&& srclen
; src
++, dest
++, destlen
--, srclen
--) {
577 v
= cpu_ldub_data(env
, src
);
578 cpu_stb_data(env
, dest
, v
);
581 for (; destlen
; dest
++, destlen
--) {
582 cpu_stb_data(env
, dest
, pad
);
585 env
->regs
[r1
+ 1] = destlen
;
586 /* can't use srclen here, we trunc'ed it */
587 env
->regs
[r2
+ 1] -= src
- env
->regs
[r2
];
588 env
->regs
[r1
] = dest
;
594 /* move long extended another memcopy insn with more bells and whistles */
595 uint32_t HELPER(mvcle
)(CPUS390XState
*env
, uint32_t r1
, uint64_t a2
,
598 uint64_t destlen
= env
->regs
[r1
+ 1];
599 uint64_t dest
= env
->regs
[r1
];
600 uint64_t srclen
= env
->regs
[r3
+ 1];
601 uint64_t src
= env
->regs
[r3
];
602 uint8_t pad
= a2
& 0xff;
606 if (!(env
->psw
.mask
& PSW_MASK_64
)) {
607 destlen
= (uint32_t)destlen
;
608 srclen
= (uint32_t)srclen
;
613 if (destlen
== srclen
) {
615 } else if (destlen
< srclen
) {
621 if (srclen
> destlen
) {
625 for (; destlen
&& srclen
; src
++, dest
++, destlen
--, srclen
--) {
626 v
= cpu_ldub_data(env
, src
);
627 cpu_stb_data(env
, dest
, v
);
630 for (; destlen
; dest
++, destlen
--) {
631 cpu_stb_data(env
, dest
, pad
);
634 env
->regs
[r1
+ 1] = destlen
;
635 /* can't use srclen here, we trunc'ed it */
636 /* FIXME: 31-bit mode! */
637 env
->regs
[r3
+ 1] -= src
- env
->regs
[r3
];
638 env
->regs
[r1
] = dest
;
644 /* compare logical long extended memcompare insn with padding */
645 uint32_t HELPER(clcle
)(CPUS390XState
*env
, uint32_t r1
, uint64_t a2
,
648 uint64_t destlen
= env
->regs
[r1
+ 1];
649 uint64_t dest
= get_address_31fix(env
, r1
);
650 uint64_t srclen
= env
->regs
[r3
+ 1];
651 uint64_t src
= get_address_31fix(env
, r3
);
652 uint8_t pad
= a2
& 0xff;
653 uint8_t v1
= 0, v2
= 0;
656 if (!(destlen
|| srclen
)) {
660 if (srclen
> destlen
) {
664 for (; destlen
|| srclen
; src
++, dest
++, destlen
--, srclen
--) {
665 v1
= srclen
? cpu_ldub_data(env
, src
) : pad
;
666 v2
= destlen
? cpu_ldub_data(env
, dest
) : pad
;
668 cc
= (v1
< v2
) ? 1 : 2;
673 env
->regs
[r1
+ 1] = destlen
;
674 /* can't use srclen here, we trunc'ed it */
675 env
->regs
[r3
+ 1] -= src
- env
->regs
[r3
];
676 env
->regs
[r1
] = dest
;
683 uint64_t HELPER(cksm
)(CPUS390XState
*env
, uint64_t r1
,
684 uint64_t src
, uint64_t src_len
)
686 uint64_t max_len
, len
;
687 uint64_t cksm
= (uint32_t)r1
;
689 /* Lest we fail to service interrupts in a timely manner, limit the
690 amount of work we're willing to do. For now, let's cap at 8k. */
691 max_len
= (src_len
> 0x2000 ? 0x2000 : src_len
);
693 /* Process full words as available. */
694 for (len
= 0; len
+ 4 <= max_len
; len
+= 4, src
+= 4) {
695 cksm
+= (uint32_t)cpu_ldl_data(env
, src
);
698 switch (max_len
- len
) {
700 cksm
+= cpu_ldub_data(env
, src
) << 24;
704 cksm
+= cpu_lduw_data(env
, src
) << 16;
708 cksm
+= cpu_lduw_data(env
, src
) << 16;
709 cksm
+= cpu_ldub_data(env
, src
+ 2) << 8;
714 /* Fold the carry from the checksum. Note that we can see carry-out
715 during folding more than once (but probably not more than twice). */
716 while (cksm
> 0xffffffffull
) {
717 cksm
= (uint32_t)cksm
+ (cksm
>> 32);
720 /* Indicate whether or not we've processed everything. */
721 env
->cc_op
= (len
== src_len
? 0 : 3);
723 /* Return both cksm and processed length. */
728 void HELPER(unpk
)(CPUS390XState
*env
, uint32_t len
, uint64_t dest
,
731 int len_dest
= len
>> 4;
732 int len_src
= len
& 0xf;
734 int second_nibble
= 0;
739 /* last byte is special, it only flips the nibbles */
740 b
= cpu_ldub_data(env
, src
);
741 cpu_stb_data(env
, dest
, (b
<< 4) | (b
>> 4));
745 /* now pad every nibble with 0xf0 */
747 while (len_dest
> 0) {
748 uint8_t cur_byte
= 0;
751 cur_byte
= cpu_ldub_data(env
, src
);
757 /* only advance one nibble at a time */
763 second_nibble
= !second_nibble
;
766 cur_byte
= (cur_byte
& 0xf);
770 cpu_stb_data(env
, dest
, cur_byte
);
774 void HELPER(tr
)(CPUS390XState
*env
, uint32_t len
, uint64_t array
,
779 for (i
= 0; i
<= len
; i
++) {
780 uint8_t byte
= cpu_ldub_data(env
, array
+ i
);
781 uint8_t new_byte
= cpu_ldub_data(env
, trans
+ byte
);
783 cpu_stb_data(env
, array
+ i
, new_byte
);
787 uint64_t HELPER(tre
)(CPUS390XState
*env
, uint64_t array
,
788 uint64_t len
, uint64_t trans
)
790 uint8_t end
= env
->regs
[0] & 0xff;
794 if (!(env
->psw
.mask
& PSW_MASK_64
)) {
799 /* Lest we fail to service interrupts in a timely manner, limit the
800 amount of work we're willing to do. For now, let's cap at 8k. */
808 for (i
= 0; i
< l
; i
++) {
809 uint8_t byte
, new_byte
;
811 byte
= cpu_ldub_data(env
, array
+ i
);
818 new_byte
= cpu_ldub_data(env
, trans
+ byte
);
819 cpu_stb_data(env
, array
+ i
, new_byte
);
822 env
->retxl
= len
- i
;
826 uint32_t HELPER(trt
)(CPUS390XState
*env
, uint32_t len
, uint64_t array
,
832 for (i
= 0; i
<= len
; i
++) {
833 uint8_t byte
= cpu_ldub_data(env
, array
+ i
);
834 uint8_t sbyte
= cpu_ldub_data(env
, trans
+ byte
);
837 env
->regs
[1] = array
+ i
;
838 env
->regs
[2] = (env
->regs
[2] & ~0xff) | sbyte
;
839 cc
= (i
== len
) ? 2 : 1;
847 #if !defined(CONFIG_USER_ONLY)
848 void HELPER(lctlg
)(CPUS390XState
*env
, uint32_t r1
, uint64_t a2
, uint32_t r3
)
850 S390CPU
*cpu
= s390_env_get_cpu(env
);
851 bool PERchanged
= false;
856 for (i
= r1
;; i
= (i
+ 1) % 16) {
857 val
= cpu_ldq_data(env
, src
);
858 if (env
->cregs
[i
] != val
&& i
>= 9 && i
<= 11) {
862 HELPER_LOG("load ctl %d from 0x%" PRIx64
" == 0x%" PRIx64
"\n",
863 i
, src
, env
->cregs
[i
]);
864 src
+= sizeof(uint64_t);
871 if (PERchanged
&& env
->psw
.mask
& PSW_MASK_PER
) {
872 s390_cpu_recompute_watchpoints(CPU(cpu
));
875 tlb_flush(CPU(cpu
), 1);
878 void HELPER(lctl
)(CPUS390XState
*env
, uint32_t r1
, uint64_t a2
, uint32_t r3
)
880 S390CPU
*cpu
= s390_env_get_cpu(env
);
881 bool PERchanged
= false;
886 for (i
= r1
;; i
= (i
+ 1) % 16) {
887 val
= cpu_ldl_data(env
, src
);
888 if ((uint32_t)env
->cregs
[i
] != val
&& i
>= 9 && i
<= 11) {
891 env
->cregs
[i
] = (env
->cregs
[i
] & 0xFFFFFFFF00000000ULL
) | val
;
892 src
+= sizeof(uint32_t);
899 if (PERchanged
&& env
->psw
.mask
& PSW_MASK_PER
) {
900 s390_cpu_recompute_watchpoints(CPU(cpu
));
903 tlb_flush(CPU(cpu
), 1);
906 void HELPER(stctg
)(CPUS390XState
*env
, uint32_t r1
, uint64_t a2
, uint32_t r3
)
911 for (i
= r1
;; i
= (i
+ 1) % 16) {
912 cpu_stq_data(env
, dest
, env
->cregs
[i
]);
913 dest
+= sizeof(uint64_t);
921 void HELPER(stctl
)(CPUS390XState
*env
, uint32_t r1
, uint64_t a2
, uint32_t r3
)
926 for (i
= r1
;; i
= (i
+ 1) % 16) {
927 cpu_stl_data(env
, dest
, env
->cregs
[i
]);
928 dest
+= sizeof(uint32_t);
936 uint32_t HELPER(tprot
)(uint64_t a1
, uint64_t a2
)
943 /* insert storage key extended */
944 uint64_t HELPER(iske
)(CPUS390XState
*env
, uint64_t r2
)
946 static S390SKeysState
*ss
;
947 static S390SKeysClass
*skeyclass
;
948 uint64_t addr
= get_address(env
, 0, 0, r2
);
951 if (addr
> ram_size
) {
956 ss
= s390_get_skeys_device();
957 skeyclass
= S390_SKEYS_GET_CLASS(ss
);
960 if (skeyclass
->get_skeys(ss
, addr
/ TARGET_PAGE_SIZE
, 1, &key
)) {
966 /* set storage key extended */
967 void HELPER(sske
)(CPUS390XState
*env
, uint64_t r1
, uint64_t r2
)
969 static S390SKeysState
*ss
;
970 static S390SKeysClass
*skeyclass
;
971 uint64_t addr
= get_address(env
, 0, 0, r2
);
974 if (addr
> ram_size
) {
979 ss
= s390_get_skeys_device();
980 skeyclass
= S390_SKEYS_GET_CLASS(ss
);
984 skeyclass
->set_skeys(ss
, addr
/ TARGET_PAGE_SIZE
, 1, &key
);
987 /* reset reference bit extended */
988 uint32_t HELPER(rrbe
)(CPUS390XState
*env
, uint64_t r2
)
990 static S390SKeysState
*ss
;
991 static S390SKeysClass
*skeyclass
;
999 ss
= s390_get_skeys_device();
1000 skeyclass
= S390_SKEYS_GET_CLASS(ss
);
1003 if (skeyclass
->get_skeys(ss
, r2
/ TARGET_PAGE_SIZE
, 1, &key
)) {
1007 re
= key
& (SK_R
| SK_C
);
1010 if (skeyclass
->set_skeys(ss
, r2
/ TARGET_PAGE_SIZE
, 1, &key
)) {
1017 * 0 Reference bit zero; change bit zero
1018 * 1 Reference bit zero; change bit one
1019 * 2 Reference bit one; change bit zero
1020 * 3 Reference bit one; change bit one
1026 /* compare and swap and purge */
1027 uint32_t HELPER(csp
)(CPUS390XState
*env
, uint32_t r1
, uint64_t r2
)
1029 S390CPU
*cpu
= s390_env_get_cpu(env
);
1031 uint32_t o1
= env
->regs
[r1
];
1032 uint64_t a2
= r2
& ~3ULL;
1033 uint32_t o2
= cpu_ldl_data(env
, a2
);
1036 cpu_stl_data(env
, a2
, env
->regs
[(r1
+ 1) & 15]);
1038 /* flush TLB / ALB */
1039 tlb_flush(CPU(cpu
), 1);
1043 env
->regs
[r1
] = (env
->regs
[r1
] & 0xffffffff00000000ULL
) | o2
;
1050 uint32_t HELPER(mvcs
)(CPUS390XState
*env
, uint64_t l
, uint64_t a1
, uint64_t a2
)
1054 HELPER_LOG("%s: %16" PRIx64
" %16" PRIx64
" %16" PRIx64
"\n",
1055 __func__
, l
, a1
, a2
);
1063 /* XXX replace w/ memcpy */
1064 for (i
= 0; i
< l
; i
++) {
1065 cpu_stb_secondary(env
, a1
+ i
, cpu_ldub_primary(env
, a2
+ i
));
1071 uint32_t HELPER(mvcp
)(CPUS390XState
*env
, uint64_t l
, uint64_t a1
, uint64_t a2
)
1075 HELPER_LOG("%s: %16" PRIx64
" %16" PRIx64
" %16" PRIx64
"\n",
1076 __func__
, l
, a1
, a2
);
1084 /* XXX replace w/ memcpy */
1085 for (i
= 0; i
< l
; i
++) {
1086 cpu_stb_primary(env
, a1
+ i
, cpu_ldub_secondary(env
, a2
+ i
));
1092 /* invalidate pte */
1093 void HELPER(ipte
)(CPUS390XState
*env
, uint64_t pte_addr
, uint64_t vaddr
)
1095 CPUState
*cs
= CPU(s390_env_get_cpu(env
));
1096 uint64_t page
= vaddr
& TARGET_PAGE_MASK
;
1099 /* XXX broadcast to other CPUs */
1101 /* XXX Linux is nice enough to give us the exact pte address.
1102 According to spec we'd have to find it out ourselves */
1103 /* XXX Linux is fine with overwriting the pte, the spec requires
1104 us to only set the invalid bit */
1105 stq_phys(cs
->as
, pte_addr
, pte
| _PAGE_INVALID
);
1107 /* XXX we exploit the fact that Linux passes the exact virtual
1108 address here - it's not obliged to! */
1109 tlb_flush_page(cs
, page
);
1111 /* XXX 31-bit hack */
1112 if (page
& 0x80000000) {
1113 tlb_flush_page(cs
, page
& ~0x80000000);
1115 tlb_flush_page(cs
, page
| 0x80000000);
1119 /* flush local tlb */
1120 void HELPER(ptlb
)(CPUS390XState
*env
)
1122 S390CPU
*cpu
= s390_env_get_cpu(env
);
1124 tlb_flush(CPU(cpu
), 1);
1127 /* load using real address */
1128 uint64_t HELPER(lura
)(CPUS390XState
*env
, uint64_t addr
)
1130 CPUState
*cs
= CPU(s390_env_get_cpu(env
));
1132 return (uint32_t)ldl_phys(cs
->as
, get_address(env
, 0, 0, addr
));
1135 uint64_t HELPER(lurag
)(CPUS390XState
*env
, uint64_t addr
)
1137 CPUState
*cs
= CPU(s390_env_get_cpu(env
));
1139 return ldq_phys(cs
->as
, get_address(env
, 0, 0, addr
));
1142 /* store using real address */
1143 void HELPER(stura
)(CPUS390XState
*env
, uint64_t addr
, uint64_t v1
)
1145 CPUState
*cs
= CPU(s390_env_get_cpu(env
));
1147 stl_phys(cs
->as
, get_address(env
, 0, 0, addr
), (uint32_t)v1
);
1149 if ((env
->psw
.mask
& PSW_MASK_PER
) &&
1150 (env
->cregs
[9] & PER_CR9_EVENT_STORE
) &&
1151 (env
->cregs
[9] & PER_CR9_EVENT_STORE_REAL
)) {
1152 /* PSW is saved just before calling the helper. */
1153 env
->per_address
= env
->psw
.addr
;
1154 env
->per_perc_atmid
= PER_CODE_EVENT_STORE_REAL
| get_per_atmid(env
);
1158 void HELPER(sturg
)(CPUS390XState
*env
, uint64_t addr
, uint64_t v1
)
1160 CPUState
*cs
= CPU(s390_env_get_cpu(env
));
1162 stq_phys(cs
->as
, get_address(env
, 0, 0, addr
), v1
);
1164 if ((env
->psw
.mask
& PSW_MASK_PER
) &&
1165 (env
->cregs
[9] & PER_CR9_EVENT_STORE
) &&
1166 (env
->cregs
[9] & PER_CR9_EVENT_STORE_REAL
)) {
1167 /* PSW is saved just before calling the helper. */
1168 env
->per_address
= env
->psw
.addr
;
1169 env
->per_perc_atmid
= PER_CODE_EVENT_STORE_REAL
| get_per_atmid(env
);
1173 /* load real address */
1174 uint64_t HELPER(lra
)(CPUS390XState
*env
, uint64_t addr
)
1176 CPUState
*cs
= CPU(s390_env_get_cpu(env
));
1178 int old_exc
= cs
->exception_index
;
1179 uint64_t asc
= env
->psw
.mask
& PSW_MASK_ASC
;
1183 /* XXX incomplete - has more corner cases */
1184 if (!(env
->psw
.mask
& PSW_MASK_64
) && (addr
>> 32)) {
1185 program_interrupt(env
, PGM_SPECIAL_OP
, 2);
1188 cs
->exception_index
= old_exc
;
1189 if (mmu_translate(env
, addr
, 0, asc
, &ret
, &flags
, true)) {
1192 if (cs
->exception_index
== EXCP_PGM
) {
1193 ret
= env
->int_pgm_code
| 0x80000000;
1195 ret
|= addr
& ~TARGET_PAGE_MASK
;
1197 cs
->exception_index
= old_exc
;