/*
 *  S/390 memory access helper routines
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2009 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"

/*****************************************************************************/
#if !defined(CONFIG_USER_ONLY)

/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx,
              uintptr_t retaddr)
{
    int ret;

    ret = s390_cpu_handle_mmu_fault(cs, addr, is_write, mmu_idx);
    if (unlikely(ret != 0)) {
        if (likely(retaddr)) {
            /* now we have a real cpu fault */
            cpu_restore_state(cs, retaddr);
        }
        cpu_loop_exit(cs);
    }
}

#endif

/* #define DEBUG_HELPER */
#ifdef DEBUG_HELPER
#define HELPER_LOG(x...) qemu_log(x)
#else
#define HELPER_LOG(x...)
#endif

/* Reduce the length so that addr + len doesn't cross a page boundary.  */
static inline uint64_t adj_len_to_page(uint64_t len, uint64_t addr)
{
#ifndef CONFIG_USER_ONLY
    if ((addr & ~TARGET_PAGE_MASK) + len - 1 >= TARGET_PAGE_SIZE) {
        return -addr & ~TARGET_PAGE_MASK;
    }
#endif
    return len;
}
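/*
 * Illustrative note (not part of the upstream code): with 4K pages, a
 * request of len = 0x30 at an address whose in-page offset is 0xff0 would
 * cross into the next page, so adj_len_to_page() returns
 * -addr & ~TARGET_PAGE_MASK = 0x10, i.e. only the bytes up to the page
 * boundary are handled in one step.
 */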
static void fast_memset(CPUS390XState *env, uint64_t dest, uint8_t byte,
                        uint32_t l)
{
    int mmu_idx = cpu_mmu_index(env);

    while (l > 0) {
        void *p = tlb_vaddr_to_host(env, dest, MMU_DATA_STORE, mmu_idx);
        if (p) {
            /* Access to the whole page in write mode granted.  */
            int l_adj = adj_len_to_page(l, dest);
            memset(p, byte, l_adj);
            dest += l_adj;
            l -= l_adj;
        } else {
            /* We failed to get access to the whole page. The next write
               access will likely fill the QEMU TLB for the next iteration.  */
            cpu_stb_data(env, dest, byte);
            dest++;
            l--;
        }
    }
}

static void fast_memmove(CPUS390XState *env, uint64_t dest, uint64_t src,
                         uint32_t l)
{
    int mmu_idx = cpu_mmu_index(env);

    while (l > 0) {
        void *src_p = tlb_vaddr_to_host(env, src, MMU_DATA_LOAD, mmu_idx);
        void *dest_p = tlb_vaddr_to_host(env, dest, MMU_DATA_STORE, mmu_idx);
        if (src_p && dest_p) {
            /* Access to both whole pages granted.  */
            int l_adj = adj_len_to_page(l, src);
            l_adj = adj_len_to_page(l_adj, dest);
            memmove(dest_p, src_p, l_adj);
            src += l_adj;
            dest += l_adj;
            l -= l_adj;
        } else {
            /* We failed to get access to one or both whole pages. The next
               read or write access will likely fill the QEMU TLB for the
               next iteration.  */
            cpu_stb_data(env, dest, cpu_ldub_data(env, src));
            src++;
            dest++;
            l--;
        }
    }
}
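/*
 * Illustrative note (not part of the upstream code): tlb_vaddr_to_host()
 * only returns a host pointer when the page is already mapped with the
 * required access in the QEMU TLB.  When it returns NULL, the byte-wise
 * fallback above goes through the slow path, which faults the page in, so
 * the next iteration can usually take the memset()/memmove() fast path.
 */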
/* and on array */
uint32_t HELPER(nc)(CPUS390XState *env, uint32_t l, uint64_t dest,
                    uint64_t src)
{
    int i;
    unsigned char x;
    uint32_t cc = 0;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __func__, l, dest, src);
    for (i = 0; i <= l; i++) {
        x = cpu_ldub_data(env, dest + i) & cpu_ldub_data(env, src + i);
        if (x) {
            cc = 1;
        }
        cpu_stb_data(env, dest + i, x);
    }
    return cc;
}

uint32_t HELPER(xc)(CPUS390XState *env, uint32_t l, uint64_t dest,
                    uint64_t src)
{
    int i;
    unsigned char x;
    uint32_t cc = 0;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __func__, l, dest, src);

    /* xor with itself is the same as memset(0) */
    if (src == dest) {
        fast_memset(env, dest, 0, l + 1);
        return 0;
    }

    for (i = 0; i <= l; i++) {
        x = cpu_ldub_data(env, dest + i) ^ cpu_ldub_data(env, src + i);
        if (x) {
            cc = 1;
        }
        cpu_stb_data(env, dest + i, x);
    }
    return cc;
}

uint32_t HELPER(oc)(CPUS390XState *env, uint32_t l, uint64_t dest,
                    uint64_t src)
{
    int i;
    unsigned char x;
    uint32_t cc = 0;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __func__, l, dest, src);
    for (i = 0; i <= l; i++) {
        x = cpu_ldub_data(env, dest + i) | cpu_ldub_data(env, src + i);
        if (x) {
            cc = 1;
        }
        cpu_stb_data(env, dest + i, x);
    }
    return cc;
}

void HELPER(mvc)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
{
    int i;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __func__, l, dest, src);

    /* mvc with source pointing to the byte after the destination is the
       same as memset with the first source byte */
    if (dest == (src + 1)) {
        fast_memset(env, dest, cpu_ldub_data(env, src), l + 1);
        return;
    }

    /* mvc and memmove do not behave the same when areas overlap! */
    if ((dest < src) || (src + l < dest)) {
        fast_memmove(env, dest, src, l + 1);
        return;
    }

    /* slow version with byte accesses which always work */
    for (i = 0; i <= l; i++) {
        cpu_stb_data(env, dest + i, cpu_ldub_data(env, src + i));
    }
}
/* compare unsigned byte arrays */
uint32_t HELPER(clc)(CPUS390XState *env, uint32_t l, uint64_t s1, uint64_t s2)
{
    int i;
    unsigned char x, y;
    uint32_t cc;

    HELPER_LOG("%s l %d s1 %" PRIx64 " s2 %" PRIx64 "\n",
               __func__, l, s1, s2);
    for (i = 0; i <= l; i++) {
        x = cpu_ldub_data(env, s1 + i);
        y = cpu_ldub_data(env, s2 + i);
        HELPER_LOG("%02x (%c)/%02x (%c) ", x, x, y, y);
        if (x < y) {
            cc = 1;
            goto done;
        } else if (x > y) {
            cc = 2;
            goto done;
        }
    }
    cc = 0;
 done:
    HELPER_LOG("\n");
    return cc;
}

/* compare logical under mask */
uint32_t HELPER(clm)(CPUS390XState *env, uint32_t r1, uint32_t mask,
                     uint64_t addr)
{
    uint8_t r, d;
    uint32_t cc;

    HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%" PRIx64 "\n", __func__, r1,
               mask, addr);
    cc = 0;
    while (mask) {
        if (mask & 8) {
            d = cpu_ldub_data(env, addr);
            r = (r1 & 0xff000000UL) >> 24;
            HELPER_LOG("mask 0x%x %02x/%02x (0x%" PRIx64 ") ", mask, r, d,
                       addr);
            if (r < d) {
                cc = 1;
                break;
            } else if (r > d) {
                cc = 2;
                break;
            }
            addr++;
        }
        mask = (mask << 1) & 0xf;
        r1 <<= 8;
    }
    HELPER_LOG("\n");
    return cc;
}
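/*
 * Illustrative example (not part of the upstream code): CLM with mask 0xa
 * (binary 1010) compares the 1st and 3rd byte of the low 32 bits of r1
 * (most significant byte first) against two consecutive bytes at addr;
 * testing mask & 8 picks the current byte, while r1 <<= 8 and the mask
 * shift step to the next candidate.
 */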
static inline uint64_t fix_address(CPUS390XState *env, uint64_t a)
{
    /* 31-Bit mode */
    if (!(env->psw.mask & PSW_MASK_64)) {
        a &= 0x7fffffff;
    }
    return a;
}

static inline uint64_t get_address(CPUS390XState *env, int x2, int b2, int d2)
{
    uint64_t r = d2;
    if (x2) {
        r += env->regs[x2];
    }
    if (b2) {
        r += env->regs[b2];
    }
    return fix_address(env, r);
}

static inline uint64_t get_address_31fix(CPUS390XState *env, int reg)
{
    return fix_address(env, env->regs[reg]);
}
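/*
 * Illustrative note (not part of the upstream code): with PSW_MASK_64
 * clear, these helpers keep only the low 31 bits of an address, so e.g.
 * fix_address(env, 0xfffffffff0001000) yields 0x70001000.
 */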
/* search string (c is byte to search, r2 is string, r1 end of string) */
uint64_t HELPER(srst)(CPUS390XState *env, uint64_t r0, uint64_t end,
                      uint64_t str)
{
    uint32_t len;
    uint8_t v, c = r0;

    str = fix_address(env, str);
    end = fix_address(env, end);

    /* Assume for now that R2 is unmodified.  */
    env->retxl = str;

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 8k.  */
    for (len = 0; len < 0x2000; ++len) {
        if (str + len == end) {
            /* Character not found.  R1 & R2 are unmodified.  */
            env->cc_op = 2;
            return end;
        }
        v = cpu_ldub_data(env, str + len);
        if (v == c) {
            /* Character found.  Set R1 to the location; R2 is unmodified.  */
            env->cc_op = 1;
            return str + len;
        }
    }

    /* CPU-determined bytes processed.  Advance R2 to next byte to process.  */
    env->retxl = str + len;
    env->cc_op = 3;
    return end;
}

/* unsigned string compare (c is string terminator) */
uint64_t HELPER(clst)(CPUS390XState *env, uint64_t c, uint64_t s1, uint64_t s2)
{
    uint32_t len;

    c = c & 0xff;
    s1 = fix_address(env, s1);
    s2 = fix_address(env, s2);

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 8k.  */
    for (len = 0; len < 0x2000; ++len) {
        uint8_t v1 = cpu_ldub_data(env, s1 + len);
        uint8_t v2 = cpu_ldub_data(env, s2 + len);
        if (v1 == v2) {
            if (v1 == c) {
                /* Equal.  CC=0, and don't advance the registers.  */
                env->cc_op = 0;
                env->retxl = s2;
                return s1;
            }
        } else {
            /* Unequal.  CC={1,2}, and advance the registers.  Note that
               the terminator need not be zero, but the string that contains
               the terminator is by definition "low".  */
            env->cc_op = (v1 == c ? 1 : v2 == c ? 2 : v1 < v2 ? 1 : 2);
            env->retxl = s2 + len;
            return s1 + len;
        }
    }

    /* CPU-determined bytes equal; advance the registers.  */
    env->cc_op = 3;
    env->retxl = s2 + len;
    return s1 + len;
}
void HELPER(mvpg)(CPUS390XState *env, uint64_t r0, uint64_t r1, uint64_t r2)
{
    /* XXX missing r0 handling */
    env->cc_op = 0;
    fast_memmove(env, r1, r2, TARGET_PAGE_SIZE);
}

/* string copy (c is string terminator) */
uint64_t HELPER(mvst)(CPUS390XState *env, uint64_t c, uint64_t d, uint64_t s)
{
    uint32_t len;

    c = c & 0xff;
    d = fix_address(env, d);
    s = fix_address(env, s);

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 8k.  */
    for (len = 0; len < 0x2000; ++len) {
        uint8_t v = cpu_ldub_data(env, s + len);
        cpu_stb_data(env, d + len, v);
        if (v == c) {
            /* Complete.  Set CC=1 and advance R1.  */
            env->cc_op = 1;
            env->retxl = s;
            return d + len;
        }
    }

    /* Incomplete.  Set CC=3 and signal to advance R1 and R2.  */
    env->cc_op = 3;
    env->retxl = s + len;
    return d + len;
}

static uint32_t helper_icm(CPUS390XState *env, uint32_t r1, uint64_t address,
                           uint32_t mask)
{
    int pos = 24; /* top of the lower half of r1 */
    uint64_t rmask = 0xff000000ULL;
    uint8_t val = 0;
    int ccd = 0;
    uint32_t cc = 0;

    while (mask) {
        if (mask & 8) {
            env->regs[r1] &= ~rmask;
            val = cpu_ldub_data(env, address);
            if ((val & 0x80) && !ccd) {
                cc = 1;
            }
            ccd = 1;
            if (val && cc == 0) {
                cc = 2;
            }
            env->regs[r1] |= (uint64_t)val << pos;
            address++;
        }
        mask = (mask << 1) & 0xf;
        pos -= 8;
        rmask >>= 8;
    }

    return cc;
}
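/*
 * Illustrative example (not part of the upstream code): ICM with mask 0x9
 * (binary 1001) inserts two memory bytes into bits 24-31 and 0-7 of the
 * low word of r1 and leaves the unselected bytes alone.  CC=1 if the first
 * inserted byte has its leftmost bit set, CC=2 for any other non-zero
 * insertion, CC=0 when all inserted bytes are zero.
 */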
/* execute instruction
   this instruction executes an insn modified with the contents of r1
   it does not change the executed instruction in memory
   it does not change the program counter
   in other words: tricky...
   currently implemented by interpreting the cases it is most commonly used in
*/
uint32_t HELPER(ex)(CPUS390XState *env, uint32_t cc, uint64_t v1,
                    uint64_t addr, uint64_t ret)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint16_t insn = cpu_lduw_code(env, addr);

    HELPER_LOG("%s: v1 0x%lx addr 0x%lx insn 0x%x\n", __func__, v1, addr,
               insn);
    if ((insn & 0xf0ff) == 0xd000) {
        uint32_t l, insn2, b1, b2, d1, d2;

        l = v1 & 0xff;
        insn2 = cpu_ldl_code(env, addr + 2);
        b1 = (insn2 >> 28) & 0xf;
        b2 = (insn2 >> 12) & 0xf;
        d1 = (insn2 >> 16) & 0xfff;
        d2 = insn2 & 0xfff;
        switch (insn & 0xf00) {
        case 0x200:
            helper_mvc(env, l, get_address(env, 0, b1, d1),
                       get_address(env, 0, b2, d2));
            break;
        case 0x400:
            cc = helper_nc(env, l, get_address(env, 0, b1, d1),
                           get_address(env, 0, b2, d2));
            break;
        case 0x500:
            cc = helper_clc(env, l, get_address(env, 0, b1, d1),
                            get_address(env, 0, b2, d2));
            break;
        case 0x600:
            cc = helper_oc(env, l, get_address(env, 0, b1, d1),
                           get_address(env, 0, b2, d2));
            break;
        case 0x700:
            cc = helper_xc(env, l, get_address(env, 0, b1, d1),
                           get_address(env, 0, b2, d2));
            break;
        case 0xc00:
            helper_tr(env, l, get_address(env, 0, b1, d1),
                      get_address(env, 0, b2, d2));
            break;
        case 0xd00:
            cc = helper_trt(env, l, get_address(env, 0, b1, d1),
                            get_address(env, 0, b2, d2));
            break;
        default:
            goto abort;
        }
    } else if ((insn & 0xff00) == 0x0a00) {
        /* supervisor call */
        HELPER_LOG("%s: svc %ld via execute\n", __func__, (insn | v1) & 0xff);
        env->psw.addr = ret - 4;
        env->int_svc_code = (insn | v1) & 0xff;
        env->int_svc_ilen = 4;
        helper_exception(env, EXCP_SVC);
    } else if ((insn & 0xff00) == 0xbf00) {
        uint32_t insn2, r1, r3, b2, d2;

        insn2 = cpu_ldl_code(env, addr + 2);
        r1 = (insn2 >> 20) & 0xf;
        r3 = (insn2 >> 16) & 0xf;
        b2 = (insn2 >> 12) & 0xf;
        d2 = insn2 & 0xfff;
        cc = helper_icm(env, r1, get_address(env, 0, b2, d2), r3);
    } else {
 abort:
        cpu_abort(CPU(cpu), "EXECUTE on instruction prefix 0x%x not implemented\n",
                  insn);
    }
    return cc;
}
/* load access registers r1 to r3 from memory at a2 */
void HELPER(lam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    int i;

    for (i = r1;; i = (i + 1) % 16) {
        env->aregs[i] = cpu_ldl_data(env, a2);
        a2 += 4;

        if (i == r3) {
            break;
        }
    }
}

/* store access registers r1 to r3 in memory at a2 */
void HELPER(stam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    int i;

    for (i = r1;; i = (i + 1) % 16) {
        cpu_stl_data(env, a2, env->aregs[i]);
        a2 += 4;

        if (i == r3) {
            break;
        }
    }
}
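/*
 * Illustrative note (not part of the upstream code): as in LAM above, the
 * register range wraps modulo 16, so STAM with r1 = 14 and r3 = 1 stores
 * access registers 14, 15, 0 and 1 into four consecutive words at a2.
 */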
/* move long */
uint32_t HELPER(mvcl)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    uint64_t destlen = env->regs[r1 + 1] & 0xffffff;
    uint64_t dest = get_address_31fix(env, r1);
    uint64_t srclen = env->regs[r2 + 1] & 0xffffff;
    uint64_t src = get_address_31fix(env, r2);
    uint8_t pad = env->regs[r2 + 1] >> 24;
    uint8_t v;
    uint32_t cc;

    if (destlen == srclen) {
        cc = 0;
    } else if (destlen < srclen) {
        cc = 1;
    } else {
        cc = 2;
    }

    if (srclen > destlen) {
        srclen = destlen;
    }

    for (; destlen && srclen; src++, dest++, destlen--, srclen--) {
        v = cpu_ldub_data(env, src);
        cpu_stb_data(env, dest, v);
    }

    for (; destlen; dest++, destlen--) {
        cpu_stb_data(env, dest, pad);
    }

    env->regs[r1 + 1] = destlen;
    /* can't use srclen here, we trunc'ed it */
    env->regs[r2 + 1] -= src - env->regs[r2];
    env->regs[r1] = dest;
    env->regs[r2] = src;

    return cc;
}
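/*
 * Illustrative note (not part of the upstream code): for MVCL the operand
 * lengths live in the low 24 bits of R1+1 / R2+1 and the padding byte in
 * bits 24-31 of R2+1; e.g. regs[r2 + 1] = 0x40000010 describes a 16-byte
 * source padded with 0x40 (EBCDIC blank) once it is exhausted.
 */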
/* move long extended   another memcopy insn with more bells and whistles */
uint32_t HELPER(mvcle)(CPUS390XState *env, uint32_t r1, uint64_t a2,
                       uint32_t r3)
{
    uint64_t destlen = env->regs[r1 + 1];
    uint64_t dest = env->regs[r1];
    uint64_t srclen = env->regs[r3 + 1];
    uint64_t src = env->regs[r3];
    uint8_t pad = a2 & 0xff;
    uint8_t v;
    uint32_t cc;

    if (!(env->psw.mask & PSW_MASK_64)) {
        destlen = (uint32_t)destlen;
        srclen = (uint32_t)srclen;
        dest &= 0x7fffffff;
        src &= 0x7fffffff;
    }

    if (destlen == srclen) {
        cc = 0;
    } else if (destlen < srclen) {
        cc = 1;
    } else {
        cc = 2;
    }

    if (srclen > destlen) {
        srclen = destlen;
    }

    for (; destlen && srclen; src++, dest++, destlen--, srclen--) {
        v = cpu_ldub_data(env, src);
        cpu_stb_data(env, dest, v);
    }

    for (; destlen; dest++, destlen--) {
        cpu_stb_data(env, dest, pad);
    }

    env->regs[r1 + 1] = destlen;
    /* can't use srclen here, we trunc'ed it */
    /* FIXME: 31-bit mode! */
    env->regs[r3 + 1] -= src - env->regs[r3];
    env->regs[r1] = dest;
    env->regs[r3] = src;

    return cc;
}

/* compare logical long extended memcompare insn with padding */
uint32_t HELPER(clcle)(CPUS390XState *env, uint32_t r1, uint64_t a2,
                       uint32_t r3)
{
    uint64_t destlen = env->regs[r1 + 1];
    uint64_t dest = get_address_31fix(env, r1);
    uint64_t srclen = env->regs[r3 + 1];
    uint64_t src = get_address_31fix(env, r3);
    uint8_t pad = a2 & 0xff;
    uint8_t v1 = 0, v2 = 0;
    uint32_t cc = 0;

    if (!(destlen || srclen)) {
        return cc;
    }

    if (srclen > destlen) {
        srclen = destlen;
    }

    for (; destlen || srclen; src++, dest++, destlen--, srclen--) {
        v1 = srclen ? cpu_ldub_data(env, src) : pad;
        v2 = destlen ? cpu_ldub_data(env, dest) : pad;
        if (v1 != v2) {
            cc = (v1 < v2) ? 1 : 2;
            break;
        }
    }

    env->regs[r1 + 1] = destlen;
    /* can't use srclen here, we trunc'ed it */
    env->regs[r3 + 1] -= src - env->regs[r3];
    env->regs[r1] = dest;
    env->regs[r3] = src;

    return cc;
}

uint64_t HELPER(cksm)(CPUS390XState *env, uint64_t r1,
                      uint64_t src, uint64_t src_len)
{
    uint64_t max_len, len;
    uint64_t cksm = (uint32_t)r1;

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 8k.  */
    max_len = (src_len > 0x2000 ? 0x2000 : src_len);

    /* Process full words as available.  */
    for (len = 0; len + 4 <= max_len; len += 4, src += 4) {
        cksm += (uint32_t)cpu_ldl_data(env, src);
    }

    switch (max_len - len) {
    case 1:
        cksm += cpu_ldub_data(env, src) << 24;
        len += 1;
        break;
    case 2:
        cksm += cpu_lduw_data(env, src) << 16;
        len += 2;
        break;
    case 3:
        cksm += cpu_lduw_data(env, src) << 16;
        cksm += cpu_ldub_data(env, src + 2) << 8;
        len += 3;
        break;
    }

    /* Fold the carry from the checksum.  Note that we can see carry-out
       during folding more than once (but probably not more than twice).  */
    while (cksm > 0xffffffffull) {
        cksm = (uint32_t)cksm + (cksm >> 32);
    }

    /* Indicate whether or not we've processed everything.  */
    env->cc_op = (len == src_len ? 0 : 3);

    /* Return both cksm and processed length.  */
    env->retxl = cksm;
    return len;
}
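/*
 * Worked example of the carry folding above (an illustration, not part of
 * the upstream code): a running sum of 0x1ffffffff folds to
 * 0xffffffff + 0x1 = 0x100000000, which still exceeds 32 bits, so a second
 * fold yields 0x00000001; hence the note about folding more than once.
 */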
void HELPER(unpk)(CPUS390XState *env, uint32_t len, uint64_t dest,
                  uint64_t src)
{
    int len_dest = len >> 4;
    int len_src = len & 0xf;
    uint8_t b;
    int second_nibble = 0;

    dest += len_dest;
    src += len_src;

    /* last byte is special, it only flips the nibbles */
    b = cpu_ldub_data(env, src);
    cpu_stb_data(env, dest, (b << 4) | (b >> 4));
    src--;
    len_src--;

    /* now pad every nibble with 0xf0 */

    while (len_dest > 0) {
        uint8_t cur_byte = 0;

        if (len_src > 0) {
            cur_byte = cpu_ldub_data(env, src);
        }

        len_dest--;
        dest--;

        /* only advance one nibble at a time */
        if (second_nibble) {
            cur_byte >>= 4;
            len_src--;
            src--;
        }
        second_nibble = !second_nibble;

        /* digit */
        cur_byte = (cur_byte & 0xf);
        /* zone bits */
        cur_byte |= 0xf0;

        cpu_stb_data(env, dest, cur_byte);
    }
}
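/*
 * Illustrative example (not part of the upstream code): the "special" last
 * byte only swaps nibbles, so a packed sign byte 0x3c becomes the zoned
 * byte 0xc3 (sign 0xc in the zone position, digit 3 below it), while every
 * other digit is given the 0xf0 zone.
 */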
void HELPER(tr)(CPUS390XState *env, uint32_t len, uint64_t array,
                uint64_t trans)
{
    int i;

    for (i = 0; i <= len; i++) {
        uint8_t byte = cpu_ldub_data(env, array + i);
        uint8_t new_byte = cpu_ldub_data(env, trans + byte);

        cpu_stb_data(env, array + i, new_byte);
    }
}

uint64_t HELPER(tre)(CPUS390XState *env, uint64_t array,
                     uint64_t len, uint64_t trans)
{
    uint8_t end = env->regs[0] & 0xff;
    uint64_t l = len;
    uint64_t i;

    if (!(env->psw.mask & PSW_MASK_64)) {
        array &= 0x7fffffff;
        l = (uint32_t)l;
    }

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 8k.  */
    if (l > 0x2000) {
        l = 0x2000;
        env->cc_op = 3;
    } else {
        env->cc_op = 0;
    }

    for (i = 0; i < l; i++) {
        uint8_t byte, new_byte;

        byte = cpu_ldub_data(env, array + i);

        if (byte == end) {
            env->cc_op = 1;
            break;
        }

        new_byte = cpu_ldub_data(env, trans + byte);
        cpu_stb_data(env, array + i, new_byte);
    }

    env->retxl = len - i;
    return array + i;
}

uint32_t HELPER(trt)(CPUS390XState *env, uint32_t len, uint64_t array,
                     uint64_t trans)
{
    uint32_t cc = 0;
    int i;

    for (i = 0; i <= len; i++) {
        uint8_t byte = cpu_ldub_data(env, array + i);
        uint8_t sbyte = cpu_ldub_data(env, trans + byte);

        if (sbyte != 0) {
            env->regs[1] = array + i;
            env->regs[2] = (env->regs[2] & ~0xff) | sbyte;
            cc = (i == len) ? 2 : 1;
            break;
        }
    }

    return cc;
}

#if !defined(CONFIG_USER_ONLY)
void HELPER(lctlg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    bool PERchanged = false;
    int i;
    uint64_t src = a2;
    uint64_t val;

    for (i = r1;; i = (i + 1) % 16) {
        val = cpu_ldq_data(env, src);
        if (env->cregs[i] != val && i >= 9 && i <= 11) {
            PERchanged = true;
        }
        env->cregs[i] = val;
        HELPER_LOG("load ctl %d from 0x%" PRIx64 " == 0x%" PRIx64 "\n",
                   i, src, env->cregs[i]);
        src += sizeof(uint64_t);

        if (i == r3) {
            break;
        }
    }

    if (PERchanged && env->psw.mask & PSW_MASK_PER) {
        s390_cpu_recompute_watchpoints(CPU(cpu));
    }

    tlb_flush(CPU(cpu), 1);
}

void HELPER(lctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    bool PERchanged = false;
    int i;
    uint64_t src = a2;
    uint32_t val;

    for (i = r1;; i = (i + 1) % 16) {
        val = cpu_ldl_data(env, src);
        if ((uint32_t)env->cregs[i] != val && i >= 9 && i <= 11) {
            PERchanged = true;
        }
        env->cregs[i] = (env->cregs[i] & 0xFFFFFFFF00000000ULL) | val;
        src += sizeof(uint32_t);

        if (i == r3) {
            break;
        }
    }

    if (PERchanged && env->psw.mask & PSW_MASK_PER) {
        s390_cpu_recompute_watchpoints(CPU(cpu));
    }

    tlb_flush(CPU(cpu), 1);
}

void HELPER(stctg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    int i;
    uint64_t dest = a2;

    for (i = r1;; i = (i + 1) % 16) {
        cpu_stq_data(env, dest, env->cregs[i]);
        dest += sizeof(uint64_t);

        if (i == r3) {
            break;
        }
    }
}

void HELPER(stctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    int i;
    uint64_t dest = a2;

    for (i = r1;; i = (i + 1) % 16) {
        cpu_stl_data(env, dest, env->cregs[i]);
        dest += sizeof(uint32_t);

        if (i == r3) {
            break;
        }
    }
}

uint32_t HELPER(tprot)(uint64_t a1, uint64_t a2)
{
    /* XXX implement */

    return 0;
}

/* insert storage key extended */
uint64_t HELPER(iske)(CPUS390XState *env, uint64_t r2)
{
    uint64_t addr = get_address(env, 0, 0, r2);

    if (addr > ram_size) {
        return 0;
    }

    return env->storage_keys[addr / TARGET_PAGE_SIZE];
}

/* set storage key extended */
void HELPER(sske)(CPUS390XState *env, uint64_t r1, uint64_t r2)
{
    uint64_t addr = get_address(env, 0, 0, r2);

    if (addr > ram_size) {
        return;
    }

    env->storage_keys[addr / TARGET_PAGE_SIZE] = r1;
}

/* reset reference bit extended */
uint32_t HELPER(rrbe)(CPUS390XState *env, uint64_t r2)
{
    uint8_t re;
    uint8_t key;

    if (r2 > ram_size) {
        return 0;
    }

    key = env->storage_keys[r2 / TARGET_PAGE_SIZE];
    re = key & (SK_R | SK_C);
    env->storage_keys[r2 / TARGET_PAGE_SIZE] = (key & ~SK_R);

    /*
     * cc
     *
     * 0  Reference bit zero; change bit zero
     * 1  Reference bit zero; change bit one
     * 2  Reference bit one; change bit zero
     * 3  Reference bit one; change bit one
     */

    return re >> 1;
}

/* compare and swap and purge */
uint32_t HELPER(csp)(CPUS390XState *env, uint32_t r1, uint64_t r2)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint32_t cc;
    uint32_t o1 = env->regs[r1];
    uint64_t a2 = r2 & ~3ULL;
    uint32_t o2 = cpu_ldl_data(env, a2);

    if (o1 == o2) {
        cpu_stl_data(env, a2, env->regs[(r1 + 1) & 15]);
        if (r2 & 0x3) {
            /* flush TLB / ALB */
            tlb_flush(CPU(cpu), 1);
        }
        cc = 0;
    } else {
        env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | o2;
        cc = 1;
    }

    return cc;
}

uint32_t HELPER(mvcs)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2)
{
    int cc = 0, i;

    HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
               __func__, l, a1, a2);

    if (l > 256) {
        /* max 256 */
        l = 256;
        cc = 3;
    }

    /* XXX replace w/ memcpy */
    for (i = 0; i < l; i++) {
        cpu_stb_secondary(env, a1 + i, cpu_ldub_primary(env, a2 + i));
    }

    return cc;
}

uint32_t HELPER(mvcp)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2)
{
    int cc = 0, i;

    HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
               __func__, l, a1, a2);

    if (l > 256) {
        /* max 256 */
        l = 256;
        cc = 3;
    }

    /* XXX replace w/ memcpy */
    for (i = 0; i < l; i++) {
        cpu_stb_primary(env, a1 + i, cpu_ldub_secondary(env, a2 + i));
    }

    return cc;
}

/* invalidate pte */
void HELPER(ipte)(CPUS390XState *env, uint64_t pte_addr, uint64_t vaddr)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));
    uint64_t page = vaddr & TARGET_PAGE_MASK;
    uint64_t pte = 0;

    /* XXX broadcast to other CPUs */

    /* XXX Linux is nice enough to give us the exact pte address.
       According to spec we'd have to find it out ourselves */
    /* XXX Linux is fine with overwriting the pte, the spec requires
       us to only set the invalid bit */
    stq_phys(cs->as, pte_addr, pte | _PAGE_INVALID);

    /* XXX we exploit the fact that Linux passes the exact virtual
       address here - it's not obliged to! */
    tlb_flush_page(cs, page);

    /* XXX 31-bit hack */
    if (page & 0x80000000) {
        tlb_flush_page(cs, page & ~0x80000000);
    } else {
        tlb_flush_page(cs, page | 0x80000000);
    }
}

/* flush local tlb */
void HELPER(ptlb)(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);

    tlb_flush(CPU(cpu), 1);
}

/* load using real address */
uint64_t HELPER(lura)(CPUS390XState *env, uint64_t addr)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));

    return (uint32_t)ldl_phys(cs->as, get_address(env, 0, 0, addr));
}

uint64_t HELPER(lurag)(CPUS390XState *env, uint64_t addr)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));

    return ldq_phys(cs->as, get_address(env, 0, 0, addr));
}

/* store using real address */
void HELPER(stura)(CPUS390XState *env, uint64_t addr, uint64_t v1)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));

    stl_phys(cs->as, get_address(env, 0, 0, addr), (uint32_t)v1);

    if ((env->psw.mask & PSW_MASK_PER) &&
        (env->cregs[9] & PER_CR9_EVENT_STORE) &&
        (env->cregs[9] & PER_CR9_EVENT_STORE_REAL)) {
        /* PSW is saved just before calling the helper.  */
        env->per_address = env->psw.addr;
        env->per_perc_atmid = PER_CODE_EVENT_STORE_REAL | get_per_atmid(env);
    }
}

void HELPER(sturg)(CPUS390XState *env, uint64_t addr, uint64_t v1)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));

    stq_phys(cs->as, get_address(env, 0, 0, addr), v1);

    if ((env->psw.mask & PSW_MASK_PER) &&
        (env->cregs[9] & PER_CR9_EVENT_STORE) &&
        (env->cregs[9] & PER_CR9_EVENT_STORE_REAL)) {
        /* PSW is saved just before calling the helper.  */
        env->per_address = env->psw.addr;
        env->per_perc_atmid = PER_CODE_EVENT_STORE_REAL | get_per_atmid(env);
    }
}

/* load real address */
uint64_t HELPER(lra)(CPUS390XState *env, uint64_t addr)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));
    uint32_t cc = 0;
    int old_exc = cs->exception_index;
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;
    uint64_t ret;
    int flags;

    /* XXX incomplete - has more corner cases */
    if (!(env->psw.mask & PSW_MASK_64) && (addr >> 32)) {
        program_interrupt(env, PGM_SPECIAL_OP, 2);
    }

    cs->exception_index = old_exc;
    if (mmu_translate(env, addr, 0, asc, &ret, &flags, true)) {
        cc = 3;
    }
    if (cs->exception_index == EXCP_PGM) {
        ret = env->int_pgm_code | 0x80000000;
    } else {
        ret |= addr & ~TARGET_PAGE_MASK;
    }
    cs->exception_index = old_exc;

    env->cc_op = cc;
    return ret;
}
#endif