/*
 *  S/390 memory access helper routines
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2009 Alexander Graf
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/*****************************************************************************/
26 #if !defined(CONFIG_USER_ONLY)
27 #include "exec/softmmu_exec.h"
29 #define MMUSUFFIX _mmu
32 #include "exec/softmmu_template.h"
35 #include "exec/softmmu_template.h"
38 #include "exec/softmmu_template.h"
41 #include "exec/softmmu_template.h"
43 /* try to fill the TLB and return an exception if error. If retaddr is
44 NULL, it means that the function was called in C code (i.e. not
45 from generated code or from helper.c) */
46 /* XXX: fix it to restore all registers */
47 void tlb_fill(CPUS390XState
*env
, target_ulong addr
, int is_write
, int mmu_idx
,
52 ret
= cpu_s390x_handle_mmu_fault(env
, addr
, is_write
, mmu_idx
);
53 if (unlikely(ret
!= 0)) {
54 if (likely(retaddr
)) {
55 /* now we have a real cpu fault */
56 cpu_restore_state(env
, retaddr
);
64 /* #define DEBUG_HELPER */
66 #define HELPER_LOG(x...) qemu_log(x)
68 #define HELPER_LOG(x...)
71 #ifndef CONFIG_USER_ONLY
72 static void mvc_fast_memset(CPUS390XState
*env
, uint32_t l
, uint64_t dest
,
78 uint64_t asc
= env
->psw
.mask
& PSW_MASK_ASC
;
81 if (mmu_translate(env
, dest
, 1, asc
, &dest_phys
, &flags
)) {
82 cpu_stb_data(env
, dest
, byte
);
83 cpu_abort(env
, "should never reach here");
85 dest_phys
|= dest
& ~TARGET_PAGE_MASK
;
87 dest_p
= cpu_physical_memory_map(dest_phys
, &len
, 1);
89 memset(dest_p
, byte
, len
);
91 cpu_physical_memory_unmap(dest_p
, 1, len
, len
);
94 static void mvc_fast_memmove(CPUS390XState
*env
, uint32_t l
, uint64_t dest
,
102 uint64_t asc
= env
->psw
.mask
& PSW_MASK_ASC
;
105 if (mmu_translate(env
, dest
, 1, asc
, &dest_phys
, &flags
)) {
106 cpu_stb_data(env
, dest
, 0);
107 cpu_abort(env
, "should never reach here");
109 dest_phys
|= dest
& ~TARGET_PAGE_MASK
;
111 if (mmu_translate(env
, src
, 0, asc
, &src_phys
, &flags
)) {
112 cpu_ldub_data(env
, src
);
113 cpu_abort(env
, "should never reach here");
115 src_phys
|= src
& ~TARGET_PAGE_MASK
;
117 dest_p
= cpu_physical_memory_map(dest_phys
, &len
, 1);
118 src_p
= cpu_physical_memory_map(src_phys
, &len
, 0);
120 memmove(dest_p
, src_p
, len
);
122 cpu_physical_memory_unmap(dest_p
, 1, len
, len
);
123 cpu_physical_memory_unmap(src_p
, 0, len
, len
);
128 uint32_t HELPER(nc
)(CPUS390XState
*env
, uint32_t l
, uint64_t dest
,
135 HELPER_LOG("%s l %d dest %" PRIx64
" src %" PRIx64
"\n",
136 __func__
, l
, dest
, src
);
137 for (i
= 0; i
<= l
; i
++) {
138 x
= cpu_ldub_data(env
, dest
+ i
) & cpu_ldub_data(env
, src
+ i
);
142 cpu_stb_data(env
, dest
+ i
, x
);
148 uint32_t HELPER(xc
)(CPUS390XState
*env
, uint32_t l
, uint64_t dest
,
155 HELPER_LOG("%s l %d dest %" PRIx64
" src %" PRIx64
"\n",
156 __func__
, l
, dest
, src
);
158 #ifndef CONFIG_USER_ONLY
159 /* xor with itself is the same as memset(0) */
160 if ((l
> 32) && (src
== dest
) &&
161 (src
& TARGET_PAGE_MASK
) == ((src
+ l
) & TARGET_PAGE_MASK
)) {
162 mvc_fast_memset(env
, l
+ 1, dest
, 0);
167 memset(g2h(dest
), 0, l
+ 1);
172 for (i
= 0; i
<= l
; i
++) {
173 x
= cpu_ldub_data(env
, dest
+ i
) ^ cpu_ldub_data(env
, src
+ i
);
177 cpu_stb_data(env
, dest
+ i
, x
);
183 uint32_t HELPER(oc
)(CPUS390XState
*env
, uint32_t l
, uint64_t dest
,
190 HELPER_LOG("%s l %d dest %" PRIx64
" src %" PRIx64
"\n",
191 __func__
, l
, dest
, src
);
192 for (i
= 0; i
<= l
; i
++) {
193 x
= cpu_ldub_data(env
, dest
+ i
) | cpu_ldub_data(env
, src
+ i
);
197 cpu_stb_data(env
, dest
+ i
, x
);
203 void HELPER(mvc
)(CPUS390XState
*env
, uint32_t l
, uint64_t dest
, uint64_t src
)
207 uint32_t l_64
= (l
+ 1) / 8;
209 HELPER_LOG("%s l %d dest %" PRIx64
" src %" PRIx64
"\n",
210 __func__
, l
, dest
, src
);
212 #ifndef CONFIG_USER_ONLY
214 (src
& TARGET_PAGE_MASK
) == ((src
+ l
) & TARGET_PAGE_MASK
) &&
215 (dest
& TARGET_PAGE_MASK
) == ((dest
+ l
) & TARGET_PAGE_MASK
)) {
216 if (dest
== (src
+ 1)) {
217 mvc_fast_memset(env
, l
+ 1, dest
, cpu_ldub_data(env
, src
));
219 } else if ((src
& TARGET_PAGE_MASK
) != (dest
& TARGET_PAGE_MASK
)) {
220 mvc_fast_memmove(env
, l
+ 1, dest
, src
);
225 if (dest
== (src
+ 1)) {
226 memset(g2h(dest
), cpu_ldub_data(env
, src
), l
+ 1);
229 memmove(g2h(dest
), g2h(src
), l
+ 1);
234 /* handle the parts that fit into 8-byte loads/stores */
235 if (dest
!= (src
+ 1)) {
236 for (i
= 0; i
< l_64
; i
++) {
237 cpu_stq_data(env
, dest
+ x
, cpu_ldq_data(env
, src
+ x
));
242 /* slow version crossing pages with byte accesses */
243 for (i
= x
; i
<= l
; i
++) {
244 cpu_stb_data(env
, dest
+ i
, cpu_ldub_data(env
, src
+ i
));
248 /* compare unsigned byte arrays */
249 uint32_t HELPER(clc
)(CPUS390XState
*env
, uint32_t l
, uint64_t s1
, uint64_t s2
)
255 HELPER_LOG("%s l %d s1 %" PRIx64
" s2 %" PRIx64
"\n",
256 __func__
, l
, s1
, s2
);
257 for (i
= 0; i
<= l
; i
++) {
258 x
= cpu_ldub_data(env
, s1
+ i
);
259 y
= cpu_ldub_data(env
, s2
+ i
);
260 HELPER_LOG("%02x (%c)/%02x (%c) ", x
, x
, y
, y
);
275 /* compare logical under mask */
276 uint32_t HELPER(clm
)(CPUS390XState
*env
, uint32_t r1
, uint32_t mask
,
282 HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%" PRIx64
"\n", __func__
, r1
,
287 d
= cpu_ldub_data(env
, addr
);
288 r
= (r1
& 0xff000000UL
) >> 24;
289 HELPER_LOG("mask 0x%x %02x/%02x (0x%" PRIx64
") ", mask
, r
, d
,
300 mask
= (mask
<< 1) & 0xf;
307 static inline uint64_t fix_address(CPUS390XState
*env
, uint64_t a
)
310 if (!(env
->psw
.mask
& PSW_MASK_64
)) {
316 static inline uint64_t get_address(CPUS390XState
*env
, int x2
, int b2
, int d2
)
325 return fix_address(env
, r
);
328 static inline uint64_t get_address_31fix(CPUS390XState
*env
, int reg
)
330 return fix_address(env
, env
->regs
[reg
]);
333 /* search string (c is byte to search, r2 is string, r1 end of string) */
334 uint64_t HELPER(srst
)(CPUS390XState
*env
, uint64_t r0
, uint64_t end
,
340 str
= fix_address(env
, str
);
341 end
= fix_address(env
, end
);
343 /* Assume for now that R2 is unmodified. */
346 /* Lest we fail to service interrupts in a timely manner, limit the
347 amount of work we're willing to do. For now, let's cap at 8k. */
348 for (len
= 0; len
< 0x2000; ++len
) {
349 if (str
+ len
== end
) {
350 /* Character not found. R1 & R2 are unmodified. */
354 v
= cpu_ldub_data(env
, str
+ len
);
356 /* Character found. Set R1 to the location; R2 is unmodified. */
362 /* CPU-determined bytes processed. Advance R2 to next byte to process. */
363 env
->retxl
= str
+ len
;
368 /* unsigned string compare (c is string terminator) */
369 uint64_t HELPER(clst
)(CPUS390XState
*env
, uint64_t c
, uint64_t s1
, uint64_t s2
)
374 s1
= fix_address(env
, s1
);
375 s2
= fix_address(env
, s2
);
377 /* Lest we fail to service interrupts in a timely manner, limit the
378 amount of work we're willing to do. For now, let's cap at 8k. */
379 for (len
= 0; len
< 0x2000; ++len
) {
380 uint8_t v1
= cpu_ldub_data(env
, s1
+ len
);
381 uint8_t v2
= cpu_ldub_data(env
, s2
+ len
);
384 /* Equal. CC=0, and don't advance the registers. */
390 /* Unequal. CC={1,2}, and advance the registers. Note that
391 the terminator need not be zero, but the string that contains
392 the terminator is by definition "low". */
393 env
->cc_op
= (v1
== c
? 1 : v2
== c
? 2 : v1
< v2
? 1 : 2);
394 env
->retxl
= s2
+ len
;
399 /* CPU-determined bytes equal; advance the registers. */
401 env
->retxl
= s2
+ len
;
406 void HELPER(mvpg
)(CPUS390XState
*env
, uint64_t r0
, uint64_t r1
, uint64_t r2
)
408 /* XXX missing r0 handling */
410 #ifdef CONFIG_USER_ONLY
411 memmove(g2h(r1
), g2h(r2
), TARGET_PAGE_SIZE
);
413 mvc_fast_memmove(env
, TARGET_PAGE_SIZE
, r1
, r2
);
417 /* string copy (c is string terminator) */
418 uint64_t HELPER(mvst
)(CPUS390XState
*env
, uint64_t c
, uint64_t d
, uint64_t s
)
423 d
= fix_address(env
, d
);
424 s
= fix_address(env
, s
);
426 /* Lest we fail to service interrupts in a timely manner, limit the
427 amount of work we're willing to do. For now, let's cap at 8k. */
428 for (len
= 0; len
< 0x2000; ++len
) {
429 uint8_t v
= cpu_ldub_data(env
, s
+ len
);
430 cpu_stb_data(env
, d
+ len
, v
);
432 /* Complete. Set CC=1 and advance R1. */
439 /* Incomplete. Set CC=3 and signal to advance R1 and R2. */
441 env
->retxl
= s
+ len
;
445 static uint32_t helper_icm(CPUS390XState
*env
, uint32_t r1
, uint64_t address
,
448 int pos
= 24; /* top of the lower half of r1 */
449 uint64_t rmask
= 0xff000000ULL
;
456 env
->regs
[r1
] &= ~rmask
;
457 val
= cpu_ldub_data(env
, address
);
458 if ((val
& 0x80) && !ccd
) {
462 if (val
&& cc
== 0) {
465 env
->regs
[r1
] |= (uint64_t)val
<< pos
;
468 mask
= (mask
<< 1) & 0xf;
476 /* execute instruction
477 this instruction executes an insn modified with the contents of r1
478 it does not change the executed instruction in memory
479 it does not change the program counter
480 in other words: tricky...
481 currently implemented by interpreting the cases it is most commonly used in
483 uint32_t HELPER(ex
)(CPUS390XState
*env
, uint32_t cc
, uint64_t v1
,
484 uint64_t addr
, uint64_t ret
)
486 uint16_t insn
= cpu_lduw_code(env
, addr
);
488 HELPER_LOG("%s: v1 0x%lx addr 0x%lx insn 0x%x\n", __func__
, v1
, addr
,
490 if ((insn
& 0xf0ff) == 0xd000) {
491 uint32_t l
, insn2
, b1
, b2
, d1
, d2
;
494 insn2
= cpu_ldl_code(env
, addr
+ 2);
495 b1
= (insn2
>> 28) & 0xf;
496 b2
= (insn2
>> 12) & 0xf;
497 d1
= (insn2
>> 16) & 0xfff;
499 switch (insn
& 0xf00) {
501 helper_mvc(env
, l
, get_address(env
, 0, b1
, d1
),
502 get_address(env
, 0, b2
, d2
));
505 cc
= helper_clc(env
, l
, get_address(env
, 0, b1
, d1
),
506 get_address(env
, 0, b2
, d2
));
509 cc
= helper_xc(env
, l
, get_address(env
, 0, b1
, d1
),
510 get_address(env
, 0, b2
, d2
));
513 helper_tr(env
, l
, get_address(env
, 0, b1
, d1
),
514 get_address(env
, 0, b2
, d2
));
520 } else if ((insn
& 0xff00) == 0x0a00) {
521 /* supervisor call */
522 HELPER_LOG("%s: svc %ld via execute\n", __func__
, (insn
| v1
) & 0xff);
523 env
->psw
.addr
= ret
- 4;
524 env
->int_svc_code
= (insn
| v1
) & 0xff;
525 env
->int_svc_ilen
= 4;
526 helper_exception(env
, EXCP_SVC
);
527 } else if ((insn
& 0xff00) == 0xbf00) {
528 uint32_t insn2
, r1
, r3
, b2
, d2
;
530 insn2
= cpu_ldl_code(env
, addr
+ 2);
531 r1
= (insn2
>> 20) & 0xf;
532 r3
= (insn2
>> 16) & 0xf;
533 b2
= (insn2
>> 12) & 0xf;
535 cc
= helper_icm(env
, r1
, get_address(env
, 0, b2
, d2
), r3
);
538 cpu_abort(env
, "EXECUTE on instruction prefix 0x%x not implemented\n",
544 /* load access registers r1 to r3 from memory at a2 */
545 void HELPER(lam
)(CPUS390XState
*env
, uint32_t r1
, uint64_t a2
, uint32_t r3
)
549 for (i
= r1
;; i
= (i
+ 1) % 16) {
550 env
->aregs
[i
] = cpu_ldl_data(env
, a2
);
559 /* store access registers r1 to r3 in memory at a2 */
560 void HELPER(stam
)(CPUS390XState
*env
, uint32_t r1
, uint64_t a2
, uint32_t r3
)
564 for (i
= r1
;; i
= (i
+ 1) % 16) {
565 cpu_stl_data(env
, a2
, env
->aregs
[i
]);
575 uint32_t HELPER(mvcl
)(CPUS390XState
*env
, uint32_t r1
, uint32_t r2
)
577 uint64_t destlen
= env
->regs
[r1
+ 1] & 0xffffff;
578 uint64_t dest
= get_address_31fix(env
, r1
);
579 uint64_t srclen
= env
->regs
[r2
+ 1] & 0xffffff;
580 uint64_t src
= get_address_31fix(env
, r2
);
581 uint8_t pad
= src
>> 24;
585 if (destlen
== srclen
) {
587 } else if (destlen
< srclen
) {
593 if (srclen
> destlen
) {
597 for (; destlen
&& srclen
; src
++, dest
++, destlen
--, srclen
--) {
598 v
= cpu_ldub_data(env
, src
);
599 cpu_stb_data(env
, dest
, v
);
602 for (; destlen
; dest
++, destlen
--) {
603 cpu_stb_data(env
, dest
, pad
);
606 env
->regs
[r1
+ 1] = destlen
;
607 /* can't use srclen here, we trunc'ed it */
608 env
->regs
[r2
+ 1] -= src
- env
->regs
[r2
];
609 env
->regs
[r1
] = dest
;
615 /* move long extended another memcopy insn with more bells and whistles */
616 uint32_t HELPER(mvcle
)(CPUS390XState
*env
, uint32_t r1
, uint64_t a2
,
619 uint64_t destlen
= env
->regs
[r1
+ 1];
620 uint64_t dest
= env
->regs
[r1
];
621 uint64_t srclen
= env
->regs
[r3
+ 1];
622 uint64_t src
= env
->regs
[r3
];
623 uint8_t pad
= a2
& 0xff;
627 if (!(env
->psw
.mask
& PSW_MASK_64
)) {
628 destlen
= (uint32_t)destlen
;
629 srclen
= (uint32_t)srclen
;
634 if (destlen
== srclen
) {
636 } else if (destlen
< srclen
) {
642 if (srclen
> destlen
) {
646 for (; destlen
&& srclen
; src
++, dest
++, destlen
--, srclen
--) {
647 v
= cpu_ldub_data(env
, src
);
648 cpu_stb_data(env
, dest
, v
);
651 for (; destlen
; dest
++, destlen
--) {
652 cpu_stb_data(env
, dest
, pad
);
655 env
->regs
[r1
+ 1] = destlen
;
656 /* can't use srclen here, we trunc'ed it */
657 /* FIXME: 31-bit mode! */
658 env
->regs
[r3
+ 1] -= src
- env
->regs
[r3
];
659 env
->regs
[r1
] = dest
;
665 /* compare logical long extended memcompare insn with padding */
666 uint32_t HELPER(clcle
)(CPUS390XState
*env
, uint32_t r1
, uint64_t a2
,
669 uint64_t destlen
= env
->regs
[r1
+ 1];
670 uint64_t dest
= get_address_31fix(env
, r1
);
671 uint64_t srclen
= env
->regs
[r3
+ 1];
672 uint64_t src
= get_address_31fix(env
, r3
);
673 uint8_t pad
= a2
& 0xff;
674 uint8_t v1
= 0, v2
= 0;
677 if (!(destlen
|| srclen
)) {
681 if (srclen
> destlen
) {
685 for (; destlen
|| srclen
; src
++, dest
++, destlen
--, srclen
--) {
686 v1
= srclen
? cpu_ldub_data(env
, src
) : pad
;
687 v2
= destlen
? cpu_ldub_data(env
, dest
) : pad
;
689 cc
= (v1
< v2
) ? 1 : 2;
694 env
->regs
[r1
+ 1] = destlen
;
695 /* can't use srclen here, we trunc'ed it */
696 env
->regs
[r3
+ 1] -= src
- env
->regs
[r3
];
697 env
->regs
[r1
] = dest
;
704 uint64_t HELPER(cksm
)(CPUS390XState
*env
, uint64_t r1
,
705 uint64_t src
, uint64_t src_len
)
707 uint64_t max_len
, len
;
708 uint64_t cksm
= (uint32_t)r1
;
710 /* Lest we fail to service interrupts in a timely manner, limit the
711 amount of work we're willing to do. For now, let's cap at 8k. */
712 max_len
= (src_len
> 0x2000 ? 0x2000 : src_len
);
714 /* Process full words as available. */
715 for (len
= 0; len
+ 4 <= max_len
; len
+= 4, src
+= 4) {
716 cksm
+= (uint32_t)cpu_ldl_data(env
, src
);
719 switch (max_len
- len
) {
721 cksm
+= cpu_ldub_data(env
, src
) << 24;
725 cksm
+= cpu_lduw_data(env
, src
) << 16;
729 cksm
+= cpu_lduw_data(env
, src
) << 16;
730 cksm
+= cpu_ldub_data(env
, src
+ 2) << 8;
735 /* Fold the carry from the checksum. Note that we can see carry-out
736 during folding more than once (but probably not more than twice). */
737 while (cksm
> 0xffffffffull
) {
738 cksm
= (uint32_t)cksm
+ (cksm
>> 32);
741 /* Indicate whether or not we've processed everything. */
742 env
->cc_op
= (len
== src_len
? 0 : 3);
744 /* Return both cksm and processed length. */
749 void HELPER(unpk
)(CPUS390XState
*env
, uint32_t len
, uint64_t dest
,
752 int len_dest
= len
>> 4;
753 int len_src
= len
& 0xf;
755 int second_nibble
= 0;
760 /* last byte is special, it only flips the nibbles */
761 b
= cpu_ldub_data(env
, src
);
762 cpu_stb_data(env
, dest
, (b
<< 4) | (b
>> 4));
766 /* now pad every nibble with 0xf0 */
768 while (len_dest
> 0) {
769 uint8_t cur_byte
= 0;
772 cur_byte
= cpu_ldub_data(env
, src
);
778 /* only advance one nibble at a time */
784 second_nibble
= !second_nibble
;
787 cur_byte
= (cur_byte
& 0xf);
791 cpu_stb_data(env
, dest
, cur_byte
);
795 void HELPER(tr
)(CPUS390XState
*env
, uint32_t len
, uint64_t array
,
800 for (i
= 0; i
<= len
; i
++) {
801 uint8_t byte
= cpu_ldub_data(env
, array
+ i
);
802 uint8_t new_byte
= cpu_ldub_data(env
, trans
+ byte
);
804 cpu_stb_data(env
, array
+ i
, new_byte
);
808 #if !defined(CONFIG_USER_ONLY)
809 void HELPER(lctlg
)(CPUS390XState
*env
, uint32_t r1
, uint64_t a2
, uint32_t r3
)
814 for (i
= r1
;; i
= (i
+ 1) % 16) {
815 env
->cregs
[i
] = cpu_ldq_data(env
, src
);
816 HELPER_LOG("load ctl %d from 0x%" PRIx64
" == 0x%" PRIx64
"\n",
817 i
, src
, env
->cregs
[i
]);
818 src
+= sizeof(uint64_t);
828 void HELPER(lctl
)(CPUS390XState
*env
, uint32_t r1
, uint64_t a2
, uint32_t r3
)
833 for (i
= r1
;; i
= (i
+ 1) % 16) {
834 env
->cregs
[i
] = (env
->cregs
[i
] & 0xFFFFFFFF00000000ULL
) |
835 cpu_ldl_data(env
, src
);
836 src
+= sizeof(uint32_t);
846 void HELPER(stctg
)(CPUS390XState
*env
, uint32_t r1
, uint64_t a2
, uint32_t r3
)
851 for (i
= r1
;; i
= (i
+ 1) % 16) {
852 cpu_stq_data(env
, dest
, env
->cregs
[i
]);
853 dest
+= sizeof(uint64_t);
861 void HELPER(stctl
)(CPUS390XState
*env
, uint32_t r1
, uint64_t a2
, uint32_t r3
)
866 for (i
= r1
;; i
= (i
+ 1) % 16) {
867 cpu_stl_data(env
, dest
, env
->cregs
[i
]);
868 dest
+= sizeof(uint32_t);
876 uint32_t HELPER(tprot
)(uint64_t a1
, uint64_t a2
)
883 /* insert storage key extended */
884 uint64_t HELPER(iske
)(CPUS390XState
*env
, uint64_t r2
)
886 uint64_t addr
= get_address(env
, 0, 0, r2
);
888 if (addr
> ram_size
) {
892 return env
->storage_keys
[addr
/ TARGET_PAGE_SIZE
];
895 /* set storage key extended */
896 void HELPER(sske
)(CPUS390XState
*env
, uint64_t r1
, uint64_t r2
)
898 uint64_t addr
= get_address(env
, 0, 0, r2
);
900 if (addr
> ram_size
) {
904 env
->storage_keys
[addr
/ TARGET_PAGE_SIZE
] = r1
;
907 /* reset reference bit extended */
908 uint32_t HELPER(rrbe
)(CPUS390XState
*env
, uint64_t r2
)
917 key
= env
->storage_keys
[r2
/ TARGET_PAGE_SIZE
];
918 re
= key
& (SK_R
| SK_C
);
919 env
->storage_keys
[r2
/ TARGET_PAGE_SIZE
] = (key
& ~SK_R
);
924 * 0 Reference bit zero; change bit zero
925 * 1 Reference bit zero; change bit one
926 * 2 Reference bit one; change bit zero
927 * 3 Reference bit one; change bit one
933 /* compare and swap and purge */
934 uint32_t HELPER(csp
)(CPUS390XState
*env
, uint32_t r1
, uint64_t r2
)
937 uint32_t o1
= env
->regs
[r1
];
938 uint64_t a2
= r2
& ~3ULL;
939 uint32_t o2
= cpu_ldl_data(env
, a2
);
942 cpu_stl_data(env
, a2
, env
->regs
[(r1
+ 1) & 15]);
944 /* flush TLB / ALB */
949 env
->regs
[r1
] = (env
->regs
[r1
] & 0xffffffff00000000ULL
) | o2
;
956 static uint32_t mvc_asc(CPUS390XState
*env
, int64_t l
, uint64_t a1
,
957 uint64_t mode1
, uint64_t a2
, uint64_t mode2
)
959 target_ulong src
, dest
;
960 int flags
, cc
= 0, i
;
964 } else if (l
> 256) {
970 if (mmu_translate(env
, a1
& TARGET_PAGE_MASK
, 1, mode1
, &dest
, &flags
)) {
973 dest
|= a1
& ~TARGET_PAGE_MASK
;
975 if (mmu_translate(env
, a2
& TARGET_PAGE_MASK
, 0, mode2
, &src
, &flags
)) {
978 src
|= a2
& ~TARGET_PAGE_MASK
;
980 /* XXX replace w/ memcpy */
981 for (i
= 0; i
< l
; i
++) {
982 /* XXX be more clever */
983 if ((((dest
+ i
) & TARGET_PAGE_MASK
) != (dest
& TARGET_PAGE_MASK
)) ||
984 (((src
+ i
) & TARGET_PAGE_MASK
) != (src
& TARGET_PAGE_MASK
))) {
985 mvc_asc(env
, l
- i
, a1
+ i
, mode1
, a2
+ i
, mode2
);
988 stb_phys(dest
+ i
, ldub_phys(src
+ i
));
994 uint32_t HELPER(mvcs
)(CPUS390XState
*env
, uint64_t l
, uint64_t a1
, uint64_t a2
)
996 HELPER_LOG("%s: %16" PRIx64
" %16" PRIx64
" %16" PRIx64
"\n",
997 __func__
, l
, a1
, a2
);
999 return mvc_asc(env
, l
, a1
, PSW_ASC_SECONDARY
, a2
, PSW_ASC_PRIMARY
);
1002 uint32_t HELPER(mvcp
)(CPUS390XState
*env
, uint64_t l
, uint64_t a1
, uint64_t a2
)
1004 HELPER_LOG("%s: %16" PRIx64
" %16" PRIx64
" %16" PRIx64
"\n",
1005 __func__
, l
, a1
, a2
);
1007 return mvc_asc(env
, l
, a1
, PSW_ASC_PRIMARY
, a2
, PSW_ASC_SECONDARY
);
1010 /* invalidate pte */
1011 void HELPER(ipte
)(CPUS390XState
*env
, uint64_t pte_addr
, uint64_t vaddr
)
1013 uint64_t page
= vaddr
& TARGET_PAGE_MASK
;
1016 /* XXX broadcast to other CPUs */
1018 /* XXX Linux is nice enough to give us the exact pte address.
1019 According to spec we'd have to find it out ourselves */
1020 /* XXX Linux is fine with overwriting the pte, the spec requires
1021 us to only set the invalid bit */
1022 stq_phys(pte_addr
, pte
| _PAGE_INVALID
);
1024 /* XXX we exploit the fact that Linux passes the exact virtual
1025 address here - it's not obliged to! */
1026 tlb_flush_page(env
, page
);
1028 /* XXX 31-bit hack */
1029 if (page
& 0x80000000) {
1030 tlb_flush_page(env
, page
& ~0x80000000);
1032 tlb_flush_page(env
, page
| 0x80000000);
1036 /* flush local tlb */
1037 void HELPER(ptlb
)(CPUS390XState
*env
)
1042 /* store using real address */
1043 void HELPER(stura
)(CPUS390XState
*env
, uint64_t addr
, uint64_t v1
)
1045 stw_phys(get_address(env
, 0, 0, addr
), (uint32_t)v1
);
1048 /* load real address */
1049 uint64_t HELPER(lra
)(CPUS390XState
*env
, uint64_t addr
)
1052 int old_exc
= env
->exception_index
;
1053 uint64_t asc
= env
->psw
.mask
& PSW_MASK_ASC
;
1057 /* XXX incomplete - has more corner cases */
1058 if (!(env
->psw
.mask
& PSW_MASK_64
) && (addr
>> 32)) {
1059 program_interrupt(env
, PGM_SPECIAL_OP
, 2);
1062 env
->exception_index
= old_exc
;
1063 if (mmu_translate(env
, addr
, 0, asc
, &ret
, &flags
)) {
1066 if (env
->exception_index
== EXCP_PGM
) {
1067 ret
= env
->int_pgm_code
| 0x80000000;
1069 ret
|= addr
& ~TARGET_PAGE_MASK
;
1071 env
->exception_index
= old_exc
;