2 * S/390 memory access helper routines
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2009 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "qemu/osdep.h"
23 #include "exec/helper-proto.h"
24 #include "exec/exec-all.h"
25 #include "exec/cpu_ldst.h"
26 #include "qemu/int128.h"
28 #if !defined(CONFIG_USER_ONLY)
29 #include "hw/s390x/storage-keys.h"
32 /*****************************************************************************/
34 #if !defined(CONFIG_USER_ONLY)
36 /* try to fill the TLB and return an exception if error. If retaddr is
37 NULL, it means that the function was called in C code (i.e. not
38 from generated code or from helper.c) */
39 /* XXX: fix it to restore all registers */
40 void tlb_fill(CPUState
*cs
, target_ulong addr
, MMUAccessType access_type
,
41 int mmu_idx
, uintptr_t retaddr
)
45 ret
= s390_cpu_handle_mmu_fault(cs
, addr
, access_type
, mmu_idx
);
46 if (unlikely(ret
!= 0)) {
47 if (likely(retaddr
)) {
48 /* now we have a real cpu fault */
49 cpu_restore_state(cs
, retaddr
);
/* #define DEBUG_HELPER */
#ifdef DEBUG_HELPER
#define HELPER_LOG(x...) qemu_log(x)
#else
#define HELPER_LOG(x...)
#endif
64 /* Reduce the length so that addr + len doesn't cross a page boundary. */
65 static inline uint64_t adj_len_to_page(uint64_t len
, uint64_t addr
)
67 #ifndef CONFIG_USER_ONLY
68 if ((addr
& ~TARGET_PAGE_MASK
) + len
- 1 >= TARGET_PAGE_SIZE
) {
69 return -addr
& ~TARGET_PAGE_MASK
;
75 static void fast_memset(CPUS390XState
*env
, uint64_t dest
, uint8_t byte
,
78 int mmu_idx
= cpu_mmu_index(env
, false);
81 void *p
= tlb_vaddr_to_host(env
, dest
, MMU_DATA_STORE
, mmu_idx
);
83 /* Access to the whole page in write mode granted. */
84 int l_adj
= adj_len_to_page(l
, dest
);
85 memset(p
, byte
, l_adj
);
89 /* We failed to get access to the whole page. The next write
90 access will likely fill the QEMU TLB for the next iteration. */
91 cpu_stb_data(env
, dest
, byte
);
98 static void fast_memmove(CPUS390XState
*env
, uint64_t dest
, uint64_t src
,
101 int mmu_idx
= cpu_mmu_index(env
, false);
104 void *src_p
= tlb_vaddr_to_host(env
, src
, MMU_DATA_LOAD
, mmu_idx
);
105 void *dest_p
= tlb_vaddr_to_host(env
, dest
, MMU_DATA_STORE
, mmu_idx
);
106 if (src_p
&& dest_p
) {
107 /* Access to both whole pages granted. */
108 int l_adj
= adj_len_to_page(l
, src
);
109 l_adj
= adj_len_to_page(l_adj
, dest
);
110 memmove(dest_p
, src_p
, l_adj
);
115 /* We failed to get access to one or both whole pages. The next
116 read or write access will likely fill the QEMU TLB for the
118 cpu_stb_data(env
, dest
, cpu_ldub_data(env
, src
));
127 uint32_t HELPER(nc
)(CPUS390XState
*env
, uint32_t l
, uint64_t dest
,
134 HELPER_LOG("%s l %d dest %" PRIx64
" src %" PRIx64
"\n",
135 __func__
, l
, dest
, src
);
136 for (i
= 0; i
<= l
; i
++) {
137 x
= cpu_ldub_data(env
, dest
+ i
) & cpu_ldub_data(env
, src
+ i
);
141 cpu_stb_data(env
, dest
+ i
, x
);
147 uint32_t HELPER(xc
)(CPUS390XState
*env
, uint32_t l
, uint64_t dest
,
154 HELPER_LOG("%s l %d dest %" PRIx64
" src %" PRIx64
"\n",
155 __func__
, l
, dest
, src
);
157 /* xor with itself is the same as memset(0) */
159 fast_memset(env
, dest
, 0, l
+ 1);
163 for (i
= 0; i
<= l
; i
++) {
164 x
= cpu_ldub_data(env
, dest
+ i
) ^ cpu_ldub_data(env
, src
+ i
);
168 cpu_stb_data(env
, dest
+ i
, x
);
174 uint32_t HELPER(oc
)(CPUS390XState
*env
, uint32_t l
, uint64_t dest
,
181 HELPER_LOG("%s l %d dest %" PRIx64
" src %" PRIx64
"\n",
182 __func__
, l
, dest
, src
);
183 for (i
= 0; i
<= l
; i
++) {
184 x
= cpu_ldub_data(env
, dest
+ i
) | cpu_ldub_data(env
, src
+ i
);
188 cpu_stb_data(env
, dest
+ i
, x
);
194 void HELPER(mvc
)(CPUS390XState
*env
, uint32_t l
, uint64_t dest
, uint64_t src
)
198 HELPER_LOG("%s l %d dest %" PRIx64
" src %" PRIx64
"\n",
199 __func__
, l
, dest
, src
);
201 /* mvc with source pointing to the byte after the destination is the
202 same as memset with the first source byte */
203 if (dest
== (src
+ 1)) {
204 fast_memset(env
, dest
, cpu_ldub_data(env
, src
), l
+ 1);
208 /* mvc and memmove do not behave the same when areas overlap! */
209 if ((dest
< src
) || (src
+ l
< dest
)) {
210 fast_memmove(env
, dest
, src
, l
+ 1);
214 /* slow version with byte accesses which always work */
215 for (i
= 0; i
<= l
; i
++) {
216 cpu_stb_data(env
, dest
+ i
, cpu_ldub_data(env
, src
+ i
));
220 /* compare unsigned byte arrays */
221 uint32_t HELPER(clc
)(CPUS390XState
*env
, uint32_t l
, uint64_t s1
, uint64_t s2
)
227 HELPER_LOG("%s l %d s1 %" PRIx64
" s2 %" PRIx64
"\n",
228 __func__
, l
, s1
, s2
);
229 for (i
= 0; i
<= l
; i
++) {
230 x
= cpu_ldub_data(env
, s1
+ i
);
231 y
= cpu_ldub_data(env
, s2
+ i
);
232 HELPER_LOG("%02x (%c)/%02x (%c) ", x
, x
, y
, y
);
247 /* compare logical under mask */
248 uint32_t HELPER(clm
)(CPUS390XState
*env
, uint32_t r1
, uint32_t mask
,
254 HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%" PRIx64
"\n", __func__
, r1
,
259 d
= cpu_ldub_data(env
, addr
);
260 r
= (r1
& 0xff000000UL
) >> 24;
261 HELPER_LOG("mask 0x%x %02x/%02x (0x%" PRIx64
") ", mask
, r
, d
,
272 mask
= (mask
<< 1) & 0xf;
279 static inline uint64_t fix_address(CPUS390XState
*env
, uint64_t a
)
282 if (!(env
->psw
.mask
& PSW_MASK_64
)) {
288 static inline uint64_t get_address(CPUS390XState
*env
, int x2
, int b2
, int d2
)
297 return fix_address(env
, r
);
300 static inline uint64_t get_address_31fix(CPUS390XState
*env
, int reg
)
302 return fix_address(env
, env
->regs
[reg
]);
305 /* search string (c is byte to search, r2 is string, r1 end of string) */
306 uint64_t HELPER(srst
)(CPUS390XState
*env
, uint64_t r0
, uint64_t end
,
312 str
= fix_address(env
, str
);
313 end
= fix_address(env
, end
);
315 /* Assume for now that R2 is unmodified. */
318 /* Lest we fail to service interrupts in a timely manner, limit the
319 amount of work we're willing to do. For now, let's cap at 8k. */
320 for (len
= 0; len
< 0x2000; ++len
) {
321 if (str
+ len
== end
) {
322 /* Character not found. R1 & R2 are unmodified. */
326 v
= cpu_ldub_data(env
, str
+ len
);
328 /* Character found. Set R1 to the location; R2 is unmodified. */
334 /* CPU-determined bytes processed. Advance R2 to next byte to process. */
335 env
->retxl
= str
+ len
;
340 /* unsigned string compare (c is string terminator) */
341 uint64_t HELPER(clst
)(CPUS390XState
*env
, uint64_t c
, uint64_t s1
, uint64_t s2
)
346 s1
= fix_address(env
, s1
);
347 s2
= fix_address(env
, s2
);
349 /* Lest we fail to service interrupts in a timely manner, limit the
350 amount of work we're willing to do. For now, let's cap at 8k. */
351 for (len
= 0; len
< 0x2000; ++len
) {
352 uint8_t v1
= cpu_ldub_data(env
, s1
+ len
);
353 uint8_t v2
= cpu_ldub_data(env
, s2
+ len
);
356 /* Equal. CC=0, and don't advance the registers. */
362 /* Unequal. CC={1,2}, and advance the registers. Note that
363 the terminator need not be zero, but the string that contains
364 the terminator is by definition "low". */
365 env
->cc_op
= (v1
== c
? 1 : v2
== c
? 2 : v1
< v2
? 1 : 2);
366 env
->retxl
= s2
+ len
;
371 /* CPU-determined bytes equal; advance the registers. */
373 env
->retxl
= s2
+ len
;
378 void HELPER(mvpg
)(CPUS390XState
*env
, uint64_t r0
, uint64_t r1
, uint64_t r2
)
380 /* XXX missing r0 handling */
382 fast_memmove(env
, r1
, r2
, TARGET_PAGE_SIZE
);
385 /* string copy (c is string terminator) */
386 uint64_t HELPER(mvst
)(CPUS390XState
*env
, uint64_t c
, uint64_t d
, uint64_t s
)
391 d
= fix_address(env
, d
);
392 s
= fix_address(env
, s
);
394 /* Lest we fail to service interrupts in a timely manner, limit the
395 amount of work we're willing to do. For now, let's cap at 8k. */
396 for (len
= 0; len
< 0x2000; ++len
) {
397 uint8_t v
= cpu_ldub_data(env
, s
+ len
);
398 cpu_stb_data(env
, d
+ len
, v
);
400 /* Complete. Set CC=1 and advance R1. */
407 /* Incomplete. Set CC=3 and signal to advance R1 and R2. */
409 env
->retxl
= s
+ len
;
413 static uint32_t helper_icm(CPUS390XState
*env
, uint32_t r1
, uint64_t address
,
416 int pos
= 24; /* top of the lower half of r1 */
417 uint64_t rmask
= 0xff000000ULL
;
424 env
->regs
[r1
] &= ~rmask
;
425 val
= cpu_ldub_data(env
, address
);
426 if ((val
& 0x80) && !ccd
) {
430 if (val
&& cc
== 0) {
433 env
->regs
[r1
] |= (uint64_t)val
<< pos
;
436 mask
= (mask
<< 1) & 0xf;
444 /* execute instruction
445 this instruction executes an insn modified with the contents of r1
446 it does not change the executed instruction in memory
447 it does not change the program counter
448 in other words: tricky...
449 currently implemented by interpreting the cases it is most commonly used in
451 uint32_t HELPER(ex
)(CPUS390XState
*env
, uint32_t cc
, uint64_t v1
,
452 uint64_t addr
, uint64_t ret
)
454 S390CPU
*cpu
= s390_env_get_cpu(env
);
455 uint16_t insn
= cpu_lduw_code(env
, addr
);
457 HELPER_LOG("%s: v1 0x%lx addr 0x%lx insn 0x%x\n", __func__
, v1
, addr
,
459 if ((insn
& 0xf0ff) == 0xd000) {
460 uint32_t l
, insn2
, b1
, b2
, d1
, d2
;
463 insn2
= cpu_ldl_code(env
, addr
+ 2);
464 b1
= (insn2
>> 28) & 0xf;
465 b2
= (insn2
>> 12) & 0xf;
466 d1
= (insn2
>> 16) & 0xfff;
468 switch (insn
& 0xf00) {
470 helper_mvc(env
, l
, get_address(env
, 0, b1
, d1
),
471 get_address(env
, 0, b2
, d2
));
474 cc
= helper_nc(env
, l
, get_address(env
, 0, b1
, d1
),
475 get_address(env
, 0, b2
, d2
));
478 cc
= helper_clc(env
, l
, get_address(env
, 0, b1
, d1
),
479 get_address(env
, 0, b2
, d2
));
482 cc
= helper_oc(env
, l
, get_address(env
, 0, b1
, d1
),
483 get_address(env
, 0, b2
, d2
));
486 cc
= helper_xc(env
, l
, get_address(env
, 0, b1
, d1
),
487 get_address(env
, 0, b2
, d2
));
490 helper_tr(env
, l
, get_address(env
, 0, b1
, d1
),
491 get_address(env
, 0, b2
, d2
));
494 cc
= helper_trt(env
, l
, get_address(env
, 0, b1
, d1
),
495 get_address(env
, 0, b2
, d2
));
500 } else if ((insn
& 0xff00) == 0x0a00) {
501 /* supervisor call */
502 HELPER_LOG("%s: svc %ld via execute\n", __func__
, (insn
| v1
) & 0xff);
503 env
->psw
.addr
= ret
- 4;
504 env
->int_svc_code
= (insn
| v1
) & 0xff;
505 env
->int_svc_ilen
= 4;
506 helper_exception(env
, EXCP_SVC
);
507 } else if ((insn
& 0xff00) == 0xbf00) {
508 uint32_t insn2
, r1
, r3
, b2
, d2
;
510 insn2
= cpu_ldl_code(env
, addr
+ 2);
511 r1
= (insn2
>> 20) & 0xf;
512 r3
= (insn2
>> 16) & 0xf;
513 b2
= (insn2
>> 12) & 0xf;
515 cc
= helper_icm(env
, r1
, get_address(env
, 0, b2
, d2
), r3
);
518 cpu_abort(CPU(cpu
), "EXECUTE on instruction prefix 0x%x not implemented\n",
524 /* load access registers r1 to r3 from memory at a2 */
525 void HELPER(lam
)(CPUS390XState
*env
, uint32_t r1
, uint64_t a2
, uint32_t r3
)
529 for (i
= r1
;; i
= (i
+ 1) % 16) {
530 env
->aregs
[i
] = cpu_ldl_data(env
, a2
);
539 /* store access registers r1 to r3 in memory at a2 */
540 void HELPER(stam
)(CPUS390XState
*env
, uint32_t r1
, uint64_t a2
, uint32_t r3
)
544 for (i
= r1
;; i
= (i
+ 1) % 16) {
545 cpu_stl_data(env
, a2
, env
->aregs
[i
]);
555 uint32_t HELPER(mvcl
)(CPUS390XState
*env
, uint32_t r1
, uint32_t r2
)
557 uint64_t destlen
= env
->regs
[r1
+ 1] & 0xffffff;
558 uint64_t dest
= get_address_31fix(env
, r1
);
559 uint64_t srclen
= env
->regs
[r2
+ 1] & 0xffffff;
560 uint64_t src
= get_address_31fix(env
, r2
);
561 uint8_t pad
= env
->regs
[r2
+ 1] >> 24;
565 if (destlen
== srclen
) {
567 } else if (destlen
< srclen
) {
573 if (srclen
> destlen
) {
577 for (; destlen
&& srclen
; src
++, dest
++, destlen
--, srclen
--) {
578 v
= cpu_ldub_data(env
, src
);
579 cpu_stb_data(env
, dest
, v
);
582 for (; destlen
; dest
++, destlen
--) {
583 cpu_stb_data(env
, dest
, pad
);
586 env
->regs
[r1
+ 1] = destlen
;
587 /* can't use srclen here, we trunc'ed it */
588 env
->regs
[r2
+ 1] -= src
- env
->regs
[r2
];
589 env
->regs
[r1
] = dest
;
595 /* move long extended another memcopy insn with more bells and whistles */
596 uint32_t HELPER(mvcle
)(CPUS390XState
*env
, uint32_t r1
, uint64_t a2
,
599 uint64_t destlen
= env
->regs
[r1
+ 1];
600 uint64_t dest
= env
->regs
[r1
];
601 uint64_t srclen
= env
->regs
[r3
+ 1];
602 uint64_t src
= env
->regs
[r3
];
603 uint8_t pad
= a2
& 0xff;
607 if (!(env
->psw
.mask
& PSW_MASK_64
)) {
608 destlen
= (uint32_t)destlen
;
609 srclen
= (uint32_t)srclen
;
614 if (destlen
== srclen
) {
616 } else if (destlen
< srclen
) {
622 if (srclen
> destlen
) {
626 for (; destlen
&& srclen
; src
++, dest
++, destlen
--, srclen
--) {
627 v
= cpu_ldub_data(env
, src
);
628 cpu_stb_data(env
, dest
, v
);
631 for (; destlen
; dest
++, destlen
--) {
632 cpu_stb_data(env
, dest
, pad
);
635 env
->regs
[r1
+ 1] = destlen
;
636 /* can't use srclen here, we trunc'ed it */
637 /* FIXME: 31-bit mode! */
638 env
->regs
[r3
+ 1] -= src
- env
->regs
[r3
];
639 env
->regs
[r1
] = dest
;
645 /* compare logical long extended memcompare insn with padding */
646 uint32_t HELPER(clcle
)(CPUS390XState
*env
, uint32_t r1
, uint64_t a2
,
649 uint64_t destlen
= env
->regs
[r1
+ 1];
650 uint64_t dest
= get_address_31fix(env
, r1
);
651 uint64_t srclen
= env
->regs
[r3
+ 1];
652 uint64_t src
= get_address_31fix(env
, r3
);
653 uint8_t pad
= a2
& 0xff;
654 uint8_t v1
= 0, v2
= 0;
657 if (!(destlen
|| srclen
)) {
661 if (srclen
> destlen
) {
665 for (; destlen
|| srclen
; src
++, dest
++, destlen
--, srclen
--) {
666 v1
= srclen
? cpu_ldub_data(env
, src
) : pad
;
667 v2
= destlen
? cpu_ldub_data(env
, dest
) : pad
;
669 cc
= (v1
< v2
) ? 1 : 2;
674 env
->regs
[r1
+ 1] = destlen
;
675 /* can't use srclen here, we trunc'ed it */
676 env
->regs
[r3
+ 1] -= src
- env
->regs
[r3
];
677 env
->regs
[r1
] = dest
;
684 uint64_t HELPER(cksm
)(CPUS390XState
*env
, uint64_t r1
,
685 uint64_t src
, uint64_t src_len
)
687 uint64_t max_len
, len
;
688 uint64_t cksm
= (uint32_t)r1
;
690 /* Lest we fail to service interrupts in a timely manner, limit the
691 amount of work we're willing to do. For now, let's cap at 8k. */
692 max_len
= (src_len
> 0x2000 ? 0x2000 : src_len
);
694 /* Process full words as available. */
695 for (len
= 0; len
+ 4 <= max_len
; len
+= 4, src
+= 4) {
696 cksm
+= (uint32_t)cpu_ldl_data(env
, src
);
699 switch (max_len
- len
) {
701 cksm
+= cpu_ldub_data(env
, src
) << 24;
705 cksm
+= cpu_lduw_data(env
, src
) << 16;
709 cksm
+= cpu_lduw_data(env
, src
) << 16;
710 cksm
+= cpu_ldub_data(env
, src
+ 2) << 8;
715 /* Fold the carry from the checksum. Note that we can see carry-out
716 during folding more than once (but probably not more than twice). */
717 while (cksm
> 0xffffffffull
) {
718 cksm
= (uint32_t)cksm
+ (cksm
>> 32);
721 /* Indicate whether or not we've processed everything. */
722 env
->cc_op
= (len
== src_len
? 0 : 3);
724 /* Return both cksm and processed length. */
729 void HELPER(unpk
)(CPUS390XState
*env
, uint32_t len
, uint64_t dest
,
732 int len_dest
= len
>> 4;
733 int len_src
= len
& 0xf;
735 int second_nibble
= 0;
740 /* last byte is special, it only flips the nibbles */
741 b
= cpu_ldub_data(env
, src
);
742 cpu_stb_data(env
, dest
, (b
<< 4) | (b
>> 4));
746 /* now pad every nibble with 0xf0 */
748 while (len_dest
> 0) {
749 uint8_t cur_byte
= 0;
752 cur_byte
= cpu_ldub_data(env
, src
);
758 /* only advance one nibble at a time */
764 second_nibble
= !second_nibble
;
767 cur_byte
= (cur_byte
& 0xf);
771 cpu_stb_data(env
, dest
, cur_byte
);
775 void HELPER(tr
)(CPUS390XState
*env
, uint32_t len
, uint64_t array
,
780 for (i
= 0; i
<= len
; i
++) {
781 uint8_t byte
= cpu_ldub_data(env
, array
+ i
);
782 uint8_t new_byte
= cpu_ldub_data(env
, trans
+ byte
);
784 cpu_stb_data(env
, array
+ i
, new_byte
);
788 uint64_t HELPER(tre
)(CPUS390XState
*env
, uint64_t array
,
789 uint64_t len
, uint64_t trans
)
791 uint8_t end
= env
->regs
[0] & 0xff;
795 if (!(env
->psw
.mask
& PSW_MASK_64
)) {
800 /* Lest we fail to service interrupts in a timely manner, limit the
801 amount of work we're willing to do. For now, let's cap at 8k. */
809 for (i
= 0; i
< l
; i
++) {
810 uint8_t byte
, new_byte
;
812 byte
= cpu_ldub_data(env
, array
+ i
);
819 new_byte
= cpu_ldub_data(env
, trans
+ byte
);
820 cpu_stb_data(env
, array
+ i
, new_byte
);
823 env
->retxl
= len
- i
;
827 uint32_t HELPER(trt
)(CPUS390XState
*env
, uint32_t len
, uint64_t array
,
833 for (i
= 0; i
<= len
; i
++) {
834 uint8_t byte
= cpu_ldub_data(env
, array
+ i
);
835 uint8_t sbyte
= cpu_ldub_data(env
, trans
+ byte
);
838 env
->regs
[1] = array
+ i
;
839 env
->regs
[2] = (env
->regs
[2] & ~0xff) | sbyte
;
840 cc
= (i
== len
) ? 2 : 1;
848 void HELPER(cdsg
)(CPUS390XState
*env
, uint64_t addr
,
849 uint32_t r1
, uint32_t r3
)
851 uintptr_t ra
= GETPC();
852 Int128 cmpv
= int128_make128(env
->regs
[r1
+ 1], env
->regs
[r1
]);
853 Int128 newv
= int128_make128(env
->regs
[r3
+ 1], env
->regs
[r3
]);
858 #ifndef CONFIG_ATOMIC128
859 cpu_loop_exit_atomic(ENV_GET_CPU(env
), ra
);
861 int mem_idx
= cpu_mmu_index(env
, false);
862 TCGMemOpIdx oi
= make_memop_idx(MO_TEQ
| MO_ALIGN_16
, mem_idx
);
863 oldv
= helper_atomic_cmpxchgo_be_mmu(env
, addr
, cmpv
, newv
, oi
, ra
);
864 fail
= !int128_eq(oldv
, cmpv
);
869 oldh
= cpu_ldq_data_ra(env
, addr
+ 0, ra
);
870 oldl
= cpu_ldq_data_ra(env
, addr
+ 8, ra
);
872 oldv
= int128_make128(oldl
, oldh
);
873 fail
= !int128_eq(oldv
, cmpv
);
878 cpu_stq_data_ra(env
, addr
+ 0, int128_gethi(newv
), ra
);
879 cpu_stq_data_ra(env
, addr
+ 8, int128_getlo(newv
), ra
);
883 env
->regs
[r1
] = int128_gethi(oldv
);
884 env
->regs
[r1
+ 1] = int128_getlo(oldv
);
887 #if !defined(CONFIG_USER_ONLY)
888 void HELPER(lctlg
)(CPUS390XState
*env
, uint32_t r1
, uint64_t a2
, uint32_t r3
)
890 S390CPU
*cpu
= s390_env_get_cpu(env
);
891 bool PERchanged
= false;
896 for (i
= r1
;; i
= (i
+ 1) % 16) {
897 val
= cpu_ldq_data(env
, src
);
898 if (env
->cregs
[i
] != val
&& i
>= 9 && i
<= 11) {
902 HELPER_LOG("load ctl %d from 0x%" PRIx64
" == 0x%" PRIx64
"\n",
903 i
, src
, env
->cregs
[i
]);
904 src
+= sizeof(uint64_t);
911 if (PERchanged
&& env
->psw
.mask
& PSW_MASK_PER
) {
912 s390_cpu_recompute_watchpoints(CPU(cpu
));
918 void HELPER(lctl
)(CPUS390XState
*env
, uint32_t r1
, uint64_t a2
, uint32_t r3
)
920 S390CPU
*cpu
= s390_env_get_cpu(env
);
921 bool PERchanged
= false;
926 for (i
= r1
;; i
= (i
+ 1) % 16) {
927 val
= cpu_ldl_data(env
, src
);
928 if ((uint32_t)env
->cregs
[i
] != val
&& i
>= 9 && i
<= 11) {
931 env
->cregs
[i
] = (env
->cregs
[i
] & 0xFFFFFFFF00000000ULL
) | val
;
932 src
+= sizeof(uint32_t);
939 if (PERchanged
&& env
->psw
.mask
& PSW_MASK_PER
) {
940 s390_cpu_recompute_watchpoints(CPU(cpu
));
946 void HELPER(stctg
)(CPUS390XState
*env
, uint32_t r1
, uint64_t a2
, uint32_t r3
)
951 for (i
= r1
;; i
= (i
+ 1) % 16) {
952 cpu_stq_data(env
, dest
, env
->cregs
[i
]);
953 dest
+= sizeof(uint64_t);
961 void HELPER(stctl
)(CPUS390XState
*env
, uint32_t r1
, uint64_t a2
, uint32_t r3
)
966 for (i
= r1
;; i
= (i
+ 1) % 16) {
967 cpu_stl_data(env
, dest
, env
->cregs
[i
]);
968 dest
+= sizeof(uint32_t);
976 uint32_t HELPER(tprot
)(uint64_t a1
, uint64_t a2
)
983 /* insert storage key extended */
984 uint64_t HELPER(iske
)(CPUS390XState
*env
, uint64_t r2
)
986 static S390SKeysState
*ss
;
987 static S390SKeysClass
*skeyclass
;
988 uint64_t addr
= get_address(env
, 0, 0, r2
);
991 if (addr
> ram_size
) {
996 ss
= s390_get_skeys_device();
997 skeyclass
= S390_SKEYS_GET_CLASS(ss
);
1000 if (skeyclass
->get_skeys(ss
, addr
/ TARGET_PAGE_SIZE
, 1, &key
)) {
1006 /* set storage key extended */
1007 void HELPER(sske
)(CPUS390XState
*env
, uint64_t r1
, uint64_t r2
)
1009 static S390SKeysState
*ss
;
1010 static S390SKeysClass
*skeyclass
;
1011 uint64_t addr
= get_address(env
, 0, 0, r2
);
1014 if (addr
> ram_size
) {
1018 if (unlikely(!ss
)) {
1019 ss
= s390_get_skeys_device();
1020 skeyclass
= S390_SKEYS_GET_CLASS(ss
);
1024 skeyclass
->set_skeys(ss
, addr
/ TARGET_PAGE_SIZE
, 1, &key
);
1027 /* reset reference bit extended */
1028 uint32_t HELPER(rrbe
)(CPUS390XState
*env
, uint64_t r2
)
1030 static S390SKeysState
*ss
;
1031 static S390SKeysClass
*skeyclass
;
1034 if (r2
> ram_size
) {
1038 if (unlikely(!ss
)) {
1039 ss
= s390_get_skeys_device();
1040 skeyclass
= S390_SKEYS_GET_CLASS(ss
);
1043 if (skeyclass
->get_skeys(ss
, r2
/ TARGET_PAGE_SIZE
, 1, &key
)) {
1047 re
= key
& (SK_R
| SK_C
);
1050 if (skeyclass
->set_skeys(ss
, r2
/ TARGET_PAGE_SIZE
, 1, &key
)) {
1057 * 0 Reference bit zero; change bit zero
1058 * 1 Reference bit zero; change bit one
1059 * 2 Reference bit one; change bit zero
1060 * 3 Reference bit one; change bit one
1066 /* compare and swap and purge */
1067 uint32_t HELPER(csp
)(CPUS390XState
*env
, uint32_t r1
, uint64_t r2
)
1069 S390CPU
*cpu
= s390_env_get_cpu(env
);
1071 uint32_t o1
= env
->regs
[r1
];
1072 uint64_t a2
= r2
& ~3ULL;
1073 uint32_t o2
= cpu_ldl_data(env
, a2
);
1076 cpu_stl_data(env
, a2
, env
->regs
[(r1
+ 1) & 15]);
1078 /* flush TLB / ALB */
1079 tlb_flush(CPU(cpu
));
1083 env
->regs
[r1
] = (env
->regs
[r1
] & 0xffffffff00000000ULL
) | o2
;
1090 uint32_t HELPER(mvcs
)(CPUS390XState
*env
, uint64_t l
, uint64_t a1
, uint64_t a2
)
1094 HELPER_LOG("%s: %16" PRIx64
" %16" PRIx64
" %16" PRIx64
"\n",
1095 __func__
, l
, a1
, a2
);
1103 /* XXX replace w/ memcpy */
1104 for (i
= 0; i
< l
; i
++) {
1105 cpu_stb_secondary(env
, a1
+ i
, cpu_ldub_primary(env
, a2
+ i
));
1111 uint32_t HELPER(mvcp
)(CPUS390XState
*env
, uint64_t l
, uint64_t a1
, uint64_t a2
)
1115 HELPER_LOG("%s: %16" PRIx64
" %16" PRIx64
" %16" PRIx64
"\n",
1116 __func__
, l
, a1
, a2
);
1124 /* XXX replace w/ memcpy */
1125 for (i
= 0; i
< l
; i
++) {
1126 cpu_stb_primary(env
, a1
+ i
, cpu_ldub_secondary(env
, a2
+ i
));
1132 /* invalidate pte */
1133 void HELPER(ipte
)(CPUS390XState
*env
, uint64_t pte_addr
, uint64_t vaddr
)
1135 CPUState
*cs
= CPU(s390_env_get_cpu(env
));
1136 uint64_t page
= vaddr
& TARGET_PAGE_MASK
;
1139 /* XXX broadcast to other CPUs */
1141 /* XXX Linux is nice enough to give us the exact pte address.
1142 According to spec we'd have to find it out ourselves */
1143 /* XXX Linux is fine with overwriting the pte, the spec requires
1144 us to only set the invalid bit */
1145 stq_phys(cs
->as
, pte_addr
, pte
| _PAGE_INVALID
);
1147 /* XXX we exploit the fact that Linux passes the exact virtual
1148 address here - it's not obliged to! */
1149 tlb_flush_page(cs
, page
);
1151 /* XXX 31-bit hack */
1152 if (page
& 0x80000000) {
1153 tlb_flush_page(cs
, page
& ~0x80000000);
1155 tlb_flush_page(cs
, page
| 0x80000000);
1159 /* flush local tlb */
1160 void HELPER(ptlb
)(CPUS390XState
*env
)
1162 S390CPU
*cpu
= s390_env_get_cpu(env
);
1164 tlb_flush(CPU(cpu
));
1167 /* load using real address */
1168 uint64_t HELPER(lura
)(CPUS390XState
*env
, uint64_t addr
)
1170 CPUState
*cs
= CPU(s390_env_get_cpu(env
));
1172 return (uint32_t)ldl_phys(cs
->as
, get_address(env
, 0, 0, addr
));
1175 uint64_t HELPER(lurag
)(CPUS390XState
*env
, uint64_t addr
)
1177 CPUState
*cs
= CPU(s390_env_get_cpu(env
));
1179 return ldq_phys(cs
->as
, get_address(env
, 0, 0, addr
));
1182 /* store using real address */
1183 void HELPER(stura
)(CPUS390XState
*env
, uint64_t addr
, uint64_t v1
)
1185 CPUState
*cs
= CPU(s390_env_get_cpu(env
));
1187 stl_phys(cs
->as
, get_address(env
, 0, 0, addr
), (uint32_t)v1
);
1189 if ((env
->psw
.mask
& PSW_MASK_PER
) &&
1190 (env
->cregs
[9] & PER_CR9_EVENT_STORE
) &&
1191 (env
->cregs
[9] & PER_CR9_EVENT_STORE_REAL
)) {
1192 /* PSW is saved just before calling the helper. */
1193 env
->per_address
= env
->psw
.addr
;
1194 env
->per_perc_atmid
= PER_CODE_EVENT_STORE_REAL
| get_per_atmid(env
);
1198 void HELPER(sturg
)(CPUS390XState
*env
, uint64_t addr
, uint64_t v1
)
1200 CPUState
*cs
= CPU(s390_env_get_cpu(env
));
1202 stq_phys(cs
->as
, get_address(env
, 0, 0, addr
), v1
);
1204 if ((env
->psw
.mask
& PSW_MASK_PER
) &&
1205 (env
->cregs
[9] & PER_CR9_EVENT_STORE
) &&
1206 (env
->cregs
[9] & PER_CR9_EVENT_STORE_REAL
)) {
1207 /* PSW is saved just before calling the helper. */
1208 env
->per_address
= env
->psw
.addr
;
1209 env
->per_perc_atmid
= PER_CODE_EVENT_STORE_REAL
| get_per_atmid(env
);
1213 /* load real address */
1214 uint64_t HELPER(lra
)(CPUS390XState
*env
, uint64_t addr
)
1216 CPUState
*cs
= CPU(s390_env_get_cpu(env
));
1218 int old_exc
= cs
->exception_index
;
1219 uint64_t asc
= env
->psw
.mask
& PSW_MASK_ASC
;
1223 /* XXX incomplete - has more corner cases */
1224 if (!(env
->psw
.mask
& PSW_MASK_64
) && (addr
>> 32)) {
1225 program_interrupt(env
, PGM_SPECIAL_OP
, 2);
1228 cs
->exception_index
= old_exc
;
1229 if (mmu_translate(env
, addr
, 0, asc
, &ret
, &flags
, true)) {
1232 if (cs
->exception_index
== EXCP_PGM
) {
1233 ret
= env
->int_pgm_code
| 0x80000000;
1235 ret
|= addr
& ~TARGET_PAGE_MASK
;
1237 cs
->exception_index
= old_exc
;