/*
 *  S/390 memory access helper routines
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2009 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "helper.h"
/*****************************************************************************/
/* Softmmu support */
#if !defined(CONFIG_USER_ONLY)
#include "exec/softmmu_exec.h"

#define MMUSUFFIX _mmu

#define SHIFT 0
#include "exec/softmmu_template.h"

#define SHIFT 1
#include "exec/softmmu_template.h"

#define SHIFT 2
#include "exec/softmmu_template.h"

#define SHIFT 3
#include "exec/softmmu_template.h"
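/* Each inclusion of softmmu_template.h above instantiates the slow-path MMU
   load/store helpers for one access size (SHIFT 0..3: byte, halfword, word,
   doubleword); this is the generic QEMU softmmu pattern, nothing
   s390x-specific happens in it. */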
/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(CPUS390XState *env, target_ulong addr, int is_write, int mmu_idx,
              uintptr_t retaddr)
{
    int ret;

    ret = cpu_s390x_handle_mmu_fault(env, addr, is_write, mmu_idx);
    if (unlikely(ret != 0)) {
        if (likely(retaddr)) {
            /* now we have a real cpu fault */
            cpu_restore_state(env, retaddr);
        }
        cpu_loop_exit(env);
    }
}

#endif
/* #define DEBUG_HELPER */
#ifdef DEBUG_HELPER
#define HELPER_LOG(x...) qemu_log(x)
#else
#define HELPER_LOG(x...)
#endif
#ifndef CONFIG_USER_ONLY
static void mvc_fast_memset(CPUS390XState *env, uint32_t l, uint64_t dest,
                            uint8_t byte)
{
    hwaddr dest_phys;
    hwaddr len = l;
    void *dest_p;
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;
    int flags;

    if (mmu_translate(env, dest, 1, asc, &dest_phys, &flags)) {
        cpu_stb_data(env, dest, byte);
        cpu_abort(env, "should never reach here");
    }
    dest_phys |= dest & ~TARGET_PAGE_MASK;

    dest_p = cpu_physical_memory_map(dest_phys, &len, 1);

    memset(dest_p, byte, len);

    cpu_physical_memory_unmap(dest_p, 1, len, len);
}
static void mvc_fast_memmove(CPUS390XState *env, uint32_t l, uint64_t dest,
                             uint64_t src)
{
    hwaddr dest_phys;
    hwaddr src_phys;
    hwaddr len = l;
    void *dest_p;
    void *src_p;
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;
    int flags;

    if (mmu_translate(env, dest, 1, asc, &dest_phys, &flags)) {
        cpu_stb_data(env, dest, 0);
        cpu_abort(env, "should never reach here");
    }
    dest_phys |= dest & ~TARGET_PAGE_MASK;

    if (mmu_translate(env, src, 0, asc, &src_phys, &flags)) {
        cpu_ldub_data(env, src);
        cpu_abort(env, "should never reach here");
    }
    src_phys |= src & ~TARGET_PAGE_MASK;

    dest_p = cpu_physical_memory_map(dest_phys, &len, 1);
    src_p = cpu_physical_memory_map(src_phys, &len, 0);

    memmove(dest_p, src_p, len);

    cpu_physical_memory_unmap(dest_p, 1, len, len);
    cpu_physical_memory_unmap(src_p, 0, len, len);
}
#endif
/* and on array */
uint32_t HELPER(nc)(CPUS390XState *env, uint32_t l, uint64_t dest,
                    uint64_t src)
{
    int i;
    unsigned char x;
    uint32_t cc = 0;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __func__, l, dest, src);
    for (i = 0; i <= l; i++) {
        x = cpu_ldub_data(env, dest + i) & cpu_ldub_data(env, src + i);
        if (x) {
            cc = 1;
        }
        cpu_stb_data(env, dest + i, x);
    }
    return cc;
}
/* xor on array */
uint32_t HELPER(xc)(CPUS390XState *env, uint32_t l, uint64_t dest,
                    uint64_t src)
{
    int i;
    unsigned char x;
    uint32_t cc = 0;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __func__, l, dest, src);

#ifndef CONFIG_USER_ONLY
    /* xor with itself is the same as memset(0) */
    if ((l > 32) && (src == dest) &&
        (src & TARGET_PAGE_MASK) == ((src + l) & TARGET_PAGE_MASK)) {
        mvc_fast_memset(env, l + 1, dest, 0);
        return 0;
    }
#else
    if (src == dest) {
        memset(g2h(dest), 0, l + 1);
        return 0;
    }
#endif

    for (i = 0; i <= l; i++) {
        x = cpu_ldub_data(env, dest + i) ^ cpu_ldub_data(env, src + i);
        if (x) {
            cc = 1;
        }
        cpu_stb_data(env, dest + i, x);
    }
    return cc;
}
/* or on array */
uint32_t HELPER(oc)(CPUS390XState *env, uint32_t l, uint64_t dest,
                    uint64_t src)
{
    int i;
    unsigned char x;
    uint32_t cc = 0;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __func__, l, dest, src);
    for (i = 0; i <= l; i++) {
        x = cpu_ldub_data(env, dest + i) | cpu_ldub_data(env, src + i);
        if (x) {
            cc = 1;
        }
        cpu_stb_data(env, dest + i, x);
    }
    return cc;
}
/* memcopy */
void HELPER(mvc)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
{
    int i = 0;
    int x = 0;
    uint32_t l_64 = (l + 1) / 8;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __func__, l, dest, src);

#ifndef CONFIG_USER_ONLY
    if ((l > 32) &&
        (src & TARGET_PAGE_MASK) == ((src + l) & TARGET_PAGE_MASK) &&
        (dest & TARGET_PAGE_MASK) == ((dest + l) & TARGET_PAGE_MASK)) {
        if (dest == (src + 1)) {
            mvc_fast_memset(env, l + 1, dest, cpu_ldub_data(env, src));
            return;
        } else if ((src & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) {
            mvc_fast_memmove(env, l + 1, dest, src);
            return;
        }
    }
#else
    if (dest == (src + 1)) {
        memset(g2h(dest), cpu_ldub_data(env, src), l + 1);
        return;
    } else {
        memmove(g2h(dest), g2h(src), l + 1);
        return;
    }
#endif

    /* handle the parts that fit into 8-byte loads/stores */
    if (dest != (src + 1)) {
        for (i = 0; i < l_64; i++) {
            cpu_stq_data(env, dest + x, cpu_ldq_data(env, src + x));
            x += 8;
        }
    }

    /* slow version crossing pages with byte accesses */
    for (i = x; i <= l; i++) {
        cpu_stb_data(env, dest + i, cpu_ldub_data(env, src + i));
    }
}
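/* Note on the dest == src + 1 special case in HELPER(mvc) above: MVC with
   overlapping operands copies byte by byte, so a destination one byte past
   the source propagates the byte at src through the whole field.  That is
   the classic mainframe idiom for clearing or filling a buffer, which is
   why it is treated as a memset here. */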
/* compare unsigned byte arrays */
uint32_t HELPER(clc)(CPUS390XState *env, uint32_t l, uint64_t s1, uint64_t s2)
{
    int i;
    unsigned char x, y;
    uint32_t cc;

    HELPER_LOG("%s l %d s1 %" PRIx64 " s2 %" PRIx64 "\n",
               __func__, l, s1, s2);
    for (i = 0; i <= l; i++) {
        x = cpu_ldub_data(env, s1 + i);
        y = cpu_ldub_data(env, s2 + i);
        HELPER_LOG("%02x (%c)/%02x (%c) ", x, x, y, y);
        if (x < y) {
            cc = 1;
            goto done;
        } else if (x > y) {
            cc = 2;
            goto done;
        }
    }
    cc = 0;
 done:
    HELPER_LOG("\n");
    env->cc_op = cc;
    return cc;
}
/* compare logical under mask */
uint32_t HELPER(clm)(CPUS390XState *env, uint32_t r1, uint32_t mask,
                     uint64_t addr)
{
    uint8_t r, d;
    uint32_t cc;

    HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%" PRIx64 "\n", __func__, r1,
               mask, addr);
    cc = 0;
    while (mask) {
        if (mask & 8) {
            d = cpu_ldub_data(env, addr);
            r = (r1 & 0xff000000UL) >> 24;
            HELPER_LOG("mask 0x%x %02x/%02x (0x%" PRIx64 ") ", mask, r, d,
                       addr);
            if (r < d) {
                cc = 1;
                break;
            } else if (r > d) {
                cc = 2;
                break;
            }
            addr++;
        }
        mask = (mask << 1) & 0xf;
        r1 <<= 8;
    }
    env->cc_op = cc;
    return cc;
}
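/* Worked example for HELPER(clm) above (illustrative): mask 0x5 (binary
   0101) selects byte 1 and byte 3 of the low word of r1, i.e.
   (r1 >> 16) & 0xff and r1 & 0xff, and compares them with two consecutive
   bytes at addr.  cc is 0 if all selected bytes are equal, 1 if the first
   unequal r1 byte is lower than the memory byte, and 2 if it is higher. */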
static inline uint64_t fix_address(CPUS390XState *env, uint64_t a)
{
    /* 31-Bit mode */
    if (!(env->psw.mask & PSW_MASK_64)) {
        a &= 0x7fffffff;
    }
    return a;
}

static inline uint64_t get_address(CPUS390XState *env, int x2, int b2, int d2)
{
    uint64_t r = d2;

    if (x2) {
        r += env->regs[x2];
    }

    if (b2) {
        r += env->regs[b2];
    }

    return fix_address(env, r);
}

static inline uint64_t get_address_31fix(CPUS390XState *env, int reg)
{
    return fix_address(env, env->regs[reg]);
}
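/* Example (illustrative): with PSW_MASK_64 clear (31-bit addressing mode),
   fix_address() masks the address to 31 bits, so an operand address of
   0x0000000080001000 is treated as 0x00001000.  24-bit mode is not
   modelled separately by these helpers. */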
/* search string (c is byte to search, r2 is string, r1 end of string) */
uint64_t HELPER(srst)(CPUS390XState *env, uint64_t r0, uint64_t end,
                      uint64_t str)
{
    uint32_t len;
    uint8_t v, c = r0;

    str = fix_address(env, str);
    end = fix_address(env, end);

    /* Assume for now that R2 is unmodified.  */
    env->retxl = str;

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 8k.  */
    for (len = 0; len < 0x2000; ++len) {
        if (str + len == end) {
            /* Character not found.  R1 & R2 are unmodified.  */
            env->cc_op = 2;
            return end;
        }
        v = cpu_ldub_data(env, str + len);
        if (v == c) {
            /* Character found.  Set R1 to the location; R2 is unmodified.  */
            env->cc_op = 1;
            return str + len;
        }
    }

    /* CPU-determined bytes processed.  Advance R2 to next byte to process.  */
    env->retxl = str + len;
    env->cc_op = 3;
    return end;
}
/* unsigned string compare (c is string terminator) */
uint64_t HELPER(clst)(CPUS390XState *env, uint64_t c, uint64_t s1, uint64_t s2)
{
    uint32_t len;

    c = c & 0xff;
    s1 = fix_address(env, s1);
    s2 = fix_address(env, s2);

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 8k.  */
    for (len = 0; len < 0x2000; ++len) {
        uint8_t v1 = cpu_ldub_data(env, s1 + len);
        uint8_t v2 = cpu_ldub_data(env, s2 + len);
        if (v1 == v2) {
            if (v1 == c) {
                /* Equal.  CC=0, and don't advance the registers.  */
                env->cc_op = 0;
                env->retxl = s2;
                return s1;
            }
        } else {
            /* Unequal.  CC={1,2}, and advance the registers.  Note that
               the terminator need not be zero, but the string that contains
               the terminator is by definition "low".  */
            env->cc_op = (v1 == c ? 1 : v2 == c ? 2 : v1 < v2 ? 1 : 2);
            env->retxl = s2 + len;
            return s1 + len;
        }
    }

    /* CPU-determined bytes equal; advance the registers.  */
    env->cc_op = 3;
    env->retxl = s2 + len;
    return s1 + len;
}
/* move page */
void HELPER(mvpg)(CPUS390XState *env, uint64_t r0, uint64_t r1, uint64_t r2)
{
    /* XXX missing r0 handling */
    env->cc_op = 0;
#ifdef CONFIG_USER_ONLY
    memmove(g2h(r1), g2h(r2), TARGET_PAGE_SIZE);
#else
    mvc_fast_memmove(env, TARGET_PAGE_SIZE, r1, r2);
#endif
}
/* string copy (c is string terminator) */
uint64_t HELPER(mvst)(CPUS390XState *env, uint64_t c, uint64_t d, uint64_t s)
{
    uint32_t len;

    c = c & 0xff;
    d = fix_address(env, d);
    s = fix_address(env, s);

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 8k.  */
    for (len = 0; len < 0x2000; ++len) {
        uint8_t v = cpu_ldub_data(env, s + len);
        cpu_stb_data(env, d + len, v);
        if (v == c) {
            /* Complete.  Set CC=1 and advance R1.  */
            env->cc_op = 1;
            env->retxl = s;
            return d + len;
        }
    }

    /* Incomplete.  Set CC=3 and signal to advance R1 and R2.  */
    env->cc_op = 3;
    env->retxl = s + len;
    return d + len;
}
static uint32_t helper_icm(CPUS390XState *env, uint32_t r1, uint64_t address,
                           uint32_t mask)
{
    int pos = 24; /* top of the lower half of r1 */
    uint64_t rmask = 0xff000000ULL;
    uint8_t val = 0;
    int ccd = 0;
    uint32_t cc = 0;

    while (mask) {
        if (mask & 8) {
            env->regs[r1] &= ~rmask;
            val = cpu_ldub_data(env, address);
            if ((val & 0x80) && !ccd) {
                cc = 1;
            }
            ccd = 1;
            if (val && cc == 0) {
                cc = 2;
            }
            env->regs[r1] |= (uint64_t)val << pos;
            address++;
        }
        mask = (mask << 1) & 0xf;
        pos -= 8;
        rmask >>= 8;
    }

    return cc;
}
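/* Worked example for helper_icm() above (illustrative): with mask 0xa
   (binary 1010), two consecutive bytes are loaded from address; the first
   is inserted into bits 24-31 and the second into bits 8-15 of the low
   word of r1, leaving the other byte positions untouched.  cc is 1 if the
   leftmost inserted bit is one, 2 if the inserted field is nonzero with a
   leftmost bit of zero, and 0 if every inserted byte is zero. */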
/* execute instruction
   this instruction executes an insn modified with the contents of r1
   it does not change the executed instruction in memory
   it does not change the program counter
   in other words: tricky...
   currently implemented by interpreting the cases it is most commonly used in
*/
uint32_t HELPER(ex)(CPUS390XState *env, uint32_t cc, uint64_t v1,
                    uint64_t addr, uint64_t ret)
{
    uint16_t insn = cpu_lduw_code(env, addr);

    HELPER_LOG("%s: v1 0x%lx addr 0x%lx insn 0x%x\n", __func__, v1, addr,
               insn);
    if ((insn & 0xf0ff) == 0xd000) {
        uint32_t l, insn2, b1, b2, d1, d2;

        l = v1 & 0xff;
        insn2 = cpu_ldl_code(env, addr + 2);
        b1 = (insn2 >> 28) & 0xf;
        b2 = (insn2 >> 12) & 0xf;
        d1 = (insn2 >> 16) & 0xfff;
        d2 = insn2 & 0xfff;
        switch (insn & 0xf00) {
        case 0x200:
            helper_mvc(env, l, get_address(env, 0, b1, d1),
                       get_address(env, 0, b2, d2));
            break;
        case 0x500:
            cc = helper_clc(env, l, get_address(env, 0, b1, d1),
                            get_address(env, 0, b2, d2));
            break;
        case 0x700:
            cc = helper_xc(env, l, get_address(env, 0, b1, d1),
                           get_address(env, 0, b2, d2));
            break;
        case 0xc00:
            helper_tr(env, l, get_address(env, 0, b1, d1),
                      get_address(env, 0, b2, d2));
            break;
        default:
            goto abort;
        }
    } else if ((insn & 0xff00) == 0x0a00) {
        /* supervisor call */
        HELPER_LOG("%s: svc %ld via execute\n", __func__, (insn | v1) & 0xff);
        env->psw.addr = ret - 4;
        env->int_svc_code = (insn | v1) & 0xff;
        env->int_svc_ilen = 4;
        helper_exception(env, EXCP_SVC);
    } else if ((insn & 0xff00) == 0xbf00) {
        uint32_t insn2, r1, r3, b2, d2;

        insn2 = cpu_ldl_code(env, addr + 2);
        r1 = (insn2 >> 20) & 0xf;
        r3 = (insn2 >> 16) & 0xf;
        b2 = (insn2 >> 12) & 0xf;
        d2 = insn2 & 0xfff;
        cc = helper_icm(env, r1, get_address(env, 0, b2, d2), r3);
    } else {
    abort:
        cpu_abort(env, "EXECUTE on instruction prefix 0x%x not implemented\n",
                  insn);
    }
    return cc;
}
/* load access registers r1 to r3 from memory at a2 */
void HELPER(lam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    int i;

    for (i = r1;; i = (i + 1) % 16) {
        env->aregs[i] = cpu_ldl_data(env, a2);
        a2 += 4;

        if (i == r3) {
            break;
        }
    }
}

/* store access registers r1 to r3 in memory at a2 */
void HELPER(stam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    int i;

    for (i = r1;; i = (i + 1) % 16) {
        cpu_stl_data(env, a2, env->aregs[i]);
        a2 += 4;

        if (i == r3) {
            break;
        }
    }
}
/* move long */
uint32_t HELPER(mvcl)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    uint64_t destlen = env->regs[r1 + 1] & 0xffffff;
    uint64_t dest = get_address_31fix(env, r1);
    uint64_t srclen = env->regs[r2 + 1] & 0xffffff;
    uint64_t src = get_address_31fix(env, r2);
    uint8_t pad = src >> 24;
    uint8_t v;
    uint32_t cc;

    if (destlen == srclen) {
        cc = 0;
    } else if (destlen < srclen) {
        cc = 1;
    } else {
        cc = 2;
    }

    if (srclen > destlen) {
        srclen = destlen;
    }

    for (; destlen && srclen; src++, dest++, destlen--, srclen--) {
        v = cpu_ldub_data(env, src);
        cpu_stb_data(env, dest, v);
    }

    for (; destlen; dest++, destlen--) {
        cpu_stb_data(env, dest, pad);
    }

    env->regs[r1 + 1] = destlen;
    /* can't use srclen here, we trunc'ed it */
    env->regs[r2 + 1] -= src - env->regs[r2];
    env->regs[r1] = dest;
    env->regs[r2] = src;

    return cc;
}
/* move long extended   another memcopy insn with more bells and whistles */
uint32_t HELPER(mvcle)(CPUS390XState *env, uint32_t r1, uint64_t a2,
                       uint32_t r3)
{
    uint64_t destlen = env->regs[r1 + 1];
    uint64_t dest = env->regs[r1];
    uint64_t srclen = env->regs[r3 + 1];
    uint64_t src = env->regs[r3];
    uint8_t pad = a2 & 0xff;
    uint8_t v;
    uint32_t cc;

    if (!(env->psw.mask & PSW_MASK_64)) {
        destlen = (uint32_t)destlen;
        srclen = (uint32_t)srclen;
        dest &= 0x7fffffff;
        src &= 0x7fffffff;
    }

    if (destlen == srclen) {
        cc = 0;
    } else if (destlen < srclen) {
        cc = 1;
    } else {
        cc = 2;
    }

    if (srclen > destlen) {
        srclen = destlen;
    }

    for (; destlen && srclen; src++, dest++, destlen--, srclen--) {
        v = cpu_ldub_data(env, src);
        cpu_stb_data(env, dest, v);
    }

    for (; destlen; dest++, destlen--) {
        cpu_stb_data(env, dest, pad);
    }

    env->regs[r1 + 1] = destlen;
    /* can't use srclen here, we trunc'ed it */
    /* FIXME: 31-bit mode! */
    env->regs[r3 + 1] -= src - env->regs[r3];
    env->regs[r1] = dest;
    env->regs[r3] = src;

    return cc;
}
/* compare logical long extended   memcompare insn with padding */
uint32_t HELPER(clcle)(CPUS390XState *env, uint32_t r1, uint64_t a2,
                       uint32_t r3)
{
    uint64_t destlen = env->regs[r1 + 1];
    uint64_t dest = get_address_31fix(env, r1);
    uint64_t srclen = env->regs[r3 + 1];
    uint64_t src = get_address_31fix(env, r3);
    uint8_t pad = a2 & 0xff;
    uint8_t v1 = 0, v2 = 0;
    uint32_t cc = 0;

    if (!(destlen || srclen)) {
        return cc;
    }

    if (srclen > destlen) {
        srclen = destlen;
    }

    for (; destlen || srclen; src++, dest++, destlen--, srclen--) {
        v1 = srclen ? cpu_ldub_data(env, src) : pad;
        v2 = destlen ? cpu_ldub_data(env, dest) : pad;
        if (v1 != v2) {
            cc = (v1 < v2) ? 1 : 2;
            break;
        }
    }

    env->regs[r1 + 1] = destlen;
    /* can't use srclen here, we trunc'ed it */
    env->regs[r3 + 1] -= src - env->regs[r3];
    env->regs[r1] = dest;
    env->regs[r3] = src;

    return cc;
}
/* checksum */
uint64_t HELPER(cksm)(CPUS390XState *env, uint64_t r1,
                      uint64_t src, uint64_t src_len)
{
    uint64_t max_len, len;
    uint64_t cksm = (uint32_t)r1;

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 8k.  */
    max_len = (src_len > 0x2000 ? 0x2000 : src_len);

    /* Process full words as available.  */
    for (len = 0; len + 4 <= max_len; len += 4, src += 4) {
        cksm += (uint32_t)cpu_ldl_data(env, src);
    }

    switch (max_len - len) {
    case 1:
        cksm += cpu_ldub_data(env, src) << 24;
        len += 1;
        break;
    case 2:
        cksm += cpu_lduw_data(env, src) << 16;
        len += 2;
        break;
    case 3:
        cksm += cpu_lduw_data(env, src) << 16;
        cksm += cpu_ldub_data(env, src + 2) << 8;
        len += 3;
        break;
    }

    /* Fold the carry from the checksum.  Note that we can see carry-out
       during folding more than once (but probably not more than twice).  */
    while (cksm > 0xffffffffull) {
        cksm = (uint32_t)cksm + (cksm >> 32);
    }

    /* Indicate whether or not we've processed everything.  */
    env->cc_op = (len == src_len ? 0 : 3);

    /* Return both cksm and processed length.  */
    env->retxl = cksm;
    return src + len;
}
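/* Worked example for the folding step in HELPER(cksm) above: if the 32-bit
   partial sums accumulate to 0x1_2345_6789, one folding iteration yields
   0x23456789 + 0x1 = 0x2345678a; a second iteration is only needed if that
   addition itself carries out of 32 bits. */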
void HELPER(unpk)(CPUS390XState *env, uint32_t len, uint64_t dest,
                  uint64_t src)
{
    int len_dest = len >> 4;
    int len_src = len & 0xf;
    uint8_t b;
    int second_nibble = 0;

    dest += len_dest;
    src += len_src;

    /* last byte is special, it only flips the nibbles */
    b = cpu_ldub_data(env, src);
    cpu_stb_data(env, dest, (b << 4) | (b >> 4));
    src--;
    len_src--;

    /* now pad every nibble with 0xf0 */

    while (len_dest > 0) {
        uint8_t cur_byte = 0;

        if (len_src > 0) {
            cur_byte = cpu_ldub_data(env, src);
        }

        len_dest--;
        dest--;

        /* only advance one nibble at a time */
        if (second_nibble) {
            cur_byte >>= 4;
            len_src--;
            src--;
        }
        second_nibble = !second_nibble;

        /* digit */
        cur_byte = (cur_byte & 0xf);
        /* zone bits */
        cur_byte |= 0xf0;

        cpu_stb_data(env, dest, cur_byte);
    }
}
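/* Worked example for HELPER(unpk) above (illustrative): unpacking the two
   packed bytes 0x01 0x2c (digits 0, 1, 2 with sign C) produces the zoned
   bytes 0xf0 0xf1 0xc2; every digit is padded with a 0xf0 zone and the
   rightmost byte only has its nibbles swapped, so the sign ends up in the
   high nibble. */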
void HELPER(tr)(CPUS390XState *env, uint32_t len, uint64_t array,
                uint64_t trans)
{
    int i;

    for (i = 0; i <= len; i++) {
        uint8_t byte = cpu_ldub_data(env, array + i);
        uint8_t new_byte = cpu_ldub_data(env, trans + byte);

        cpu_stb_data(env, array + i, new_byte);
    }
}
#if !defined(CONFIG_USER_ONLY)
void HELPER(lctlg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    int i;
    uint64_t src = a2;

    for (i = r1;; i = (i + 1) % 16) {
        env->cregs[i] = cpu_ldq_data(env, src);
        HELPER_LOG("load ctl %d from 0x%" PRIx64 " == 0x%" PRIx64 "\n",
                   i, src, env->cregs[i]);
        src += sizeof(uint64_t);

        if (i == r3) {
            break;
        }
    }

    tlb_flush(env, 1);
}

void HELPER(lctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    int i;
    uint64_t src = a2;

    for (i = r1;; i = (i + 1) % 16) {
        env->cregs[i] = (env->cregs[i] & 0xFFFFFFFF00000000ULL) |
            cpu_ldl_data(env, src);
        src += sizeof(uint32_t);

        if (i == r3) {
            break;
        }
    }

    tlb_flush(env, 1);
}
void HELPER(stctg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    int i;
    uint64_t dest = a2;

    for (i = r1;; i = (i + 1) % 16) {
        cpu_stq_data(env, dest, env->cregs[i]);
        dest += sizeof(uint64_t);

        if (i == r3) {
            break;
        }
    }
}

void HELPER(stctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    int i;
    uint64_t dest = a2;

    for (i = r1;; i = (i + 1) % 16) {
        cpu_stl_data(env, dest, env->cregs[i]);
        dest += sizeof(uint32_t);

        if (i == r3) {
            break;
        }
    }
}
uint32_t HELPER(tprot)(uint64_t a1, uint64_t a2)
{
    /* XXX implement */

    return 0;
}
/* insert storage key extended */
uint64_t HELPER(iske)(CPUS390XState *env, uint64_t r2)
{
    uint64_t addr = get_address(env, 0, 0, r2);

    if (addr > ram_size) {
        return 0;
    }

    return env->storage_keys[addr / TARGET_PAGE_SIZE];
}

/* set storage key extended */
void HELPER(sske)(CPUS390XState *env, uint64_t r1, uint64_t r2)
{
    uint64_t addr = get_address(env, 0, 0, r2);

    if (addr > ram_size) {
        return;
    }

    env->storage_keys[addr / TARGET_PAGE_SIZE] = r1;
}

/* reset reference bit extended */
uint32_t HELPER(rrbe)(CPUS390XState *env, uint64_t r2)
{
    uint8_t re;
    uint8_t key;

    if (r2 > ram_size) {
        return 0;
    }

    key = env->storage_keys[r2 / TARGET_PAGE_SIZE];
    re = key & (SK_R | SK_C);
    env->storage_keys[r2 / TARGET_PAGE_SIZE] = (key & ~SK_R);

    /*
     * cc
     *
     * 0  Reference bit zero; change bit zero
     * 1  Reference bit zero; change bit one
     * 2  Reference bit one; change bit zero
     * 3  Reference bit one; change bit one
     */

    return re >> 1;
}
/* compare and swap and purge */
uint32_t HELPER(csp)(CPUS390XState *env, uint32_t r1, uint64_t r2)
{
    uint32_t cc;
    uint32_t o1 = env->regs[r1];
    uint64_t a2 = r2 & ~3ULL;
    uint32_t o2 = cpu_ldl_data(env, a2);

    if (o1 == o2) {
        cpu_stl_data(env, a2, env->regs[(r1 + 1) & 15]);
        if (r2 & 0x3) {
            /* flush TLB / ALB */
            tlb_flush(env, 1);
        }
        cc = 0;
    } else {
        env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | o2;
        cc = 1;
    }

    return cc;
}
static uint32_t mvc_asc(CPUS390XState *env, int64_t l, uint64_t a1,
                        uint64_t mode1, uint64_t a2, uint64_t mode2)
{
    target_ulong src, dest;
    int flags, cc = 0, i;

    if (!l) {
        return 0;
    } else if (l > 256) {
        /* max length for MVCP/MVCS is 256 */
        cc = 3;
        l = 256;
    }

    if (mmu_translate(env, a1 & TARGET_PAGE_MASK, 1, mode1, &dest, &flags)) {
        cpu_loop_exit(env);
    }
    dest |= a1 & ~TARGET_PAGE_MASK;

    if (mmu_translate(env, a2 & TARGET_PAGE_MASK, 0, mode2, &src, &flags)) {
        cpu_loop_exit(env);
    }
    src |= a2 & ~TARGET_PAGE_MASK;

    /* XXX replace w/ memcpy */
    for (i = 0; i < l; i++) {
        /* XXX be more clever */
        if ((((dest + i) & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) ||
            (((src + i) & TARGET_PAGE_MASK) != (src & TARGET_PAGE_MASK))) {
            mvc_asc(env, l - i, a1 + i, mode1, a2 + i, mode2);
            break;
        }
        stb_phys(dest + i, ldub_phys(src + i));
    }

    return cc;
}
uint32_t HELPER(mvcs)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2)
{
    HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
               __func__, l, a1, a2);

    return mvc_asc(env, l, a1, PSW_ASC_SECONDARY, a2, PSW_ASC_PRIMARY);
}

uint32_t HELPER(mvcp)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2)
{
    HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
               __func__, l, a1, a2);

    return mvc_asc(env, l, a1, PSW_ASC_PRIMARY, a2, PSW_ASC_SECONDARY);
}
/* invalidate pte */
void HELPER(ipte)(CPUS390XState *env, uint64_t pte_addr, uint64_t vaddr)
{
    uint64_t page = vaddr & TARGET_PAGE_MASK;
    uint64_t pte = 0;

    /* XXX broadcast to other CPUs */

    /* XXX Linux is nice enough to give us the exact pte address.
       According to spec we'd have to find it out ourselves */
    /* XXX Linux is fine with overwriting the pte, the spec requires
       us to only set the invalid bit */
    stq_phys(pte_addr, pte | _PAGE_INVALID);

    /* XXX we exploit the fact that Linux passes the exact virtual
       address here - it's not obliged to! */
    tlb_flush_page(env, page);

    /* XXX 31-bit hack */
    if (page & 0x80000000) {
        tlb_flush_page(env, page & ~0x80000000);
    } else {
        tlb_flush_page(env, page | 0x80000000);
    }
}
/* flush local tlb */
void HELPER(ptlb)(CPUS390XState *env)
{
    tlb_flush(env, 1);
}

/* store using real address */
void HELPER(stura)(CPUS390XState *env, uint64_t addr, uint64_t v1)
{
    stw_phys(get_address(env, 0, 0, addr), (uint32_t)v1);
}
/* load real address */
uint64_t HELPER(lra)(CPUS390XState *env, uint64_t addr)
{
    uint32_t cc = 0;
    int old_exc = env->exception_index;
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;
    uint64_t ret;
    int flags;

    /* XXX incomplete - has more corner cases */
    if (!(env->psw.mask & PSW_MASK_64) && (addr >> 32)) {
        program_interrupt(env, PGM_SPECIAL_OP, 2);
    }

    env->exception_index = old_exc;
    if (mmu_translate(env, addr, 0, asc, &ret, &flags)) {
        cc = 3;
    }
    if (env->exception_index == EXCP_PGM) {
        ret = env->int_pgm_code | 0x80000000;
    } else {
        ret |= addr & ~TARGET_PAGE_MASK;
    }
    env->exception_index = old_exc;

    env->cc_op = cc;
    return ret;
}
#endif