/*
 * S/390 memory access helper routines
 *
 * Copyright (c) 2009 Ulrich Hecht
 * Copyright (c) 2009 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
/*****************************************************************************/
#if !defined(CONFIG_USER_ONLY)
/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx,
              uintptr_t retaddr)
{
    int ret;

    ret = s390_cpu_handle_mmu_fault(cs, addr, is_write, mmu_idx);
    if (unlikely(ret != 0)) {
        if (likely(retaddr)) {
            /* now we have a real cpu fault */
            cpu_restore_state(cs, retaddr);
        }
        cpu_loop_exit(cs);
    }
}

#endif
/* #define DEBUG_HELPER */
#ifdef DEBUG_HELPER
#define HELPER_LOG(x...) qemu_log(x)
#else
#define HELPER_LOG(x...)
#endif
/* Reduce the length so that addr + len doesn't cross a page boundary.  */
static inline uint64_t adj_len_to_page(uint64_t len, uint64_t addr)
{
#ifndef CONFIG_USER_ONLY
    if ((addr & ~TARGET_PAGE_MASK) + len - 1 >= TARGET_PAGE_SIZE) {
        return -addr & ~TARGET_PAGE_MASK;
    }
#endif
    return len;
}
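/* Example (assuming a 4 KiB TARGET_PAGE_SIZE): adj_len_to_page(0x200, 0xfff)
   yields 1, because only one byte remains before the page boundary; the
   caller then processes the rest of the range on the following page. */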
static void fast_memset(CPUS390XState *env, uint64_t dest, uint8_t byte,
                        uint32_t l)
{
    int mmu_idx = cpu_mmu_index(env);

    while (l > 0) {
        void *p = tlb_vaddr_to_host(env, dest, MMU_DATA_STORE, mmu_idx);
        if (p) {
            /* Access to the whole page in write mode granted.  */
            int l_adj = adj_len_to_page(l, dest);
            memset(p, byte, l_adj);
            dest += l_adj;
            l -= l_adj;
        } else {
            /* We failed to get access to the whole page. The next write
               access will likely fill the QEMU TLB for the next iteration.  */
            cpu_stb_data(env, dest, byte);
            dest++;
            l--;
        }
    }
}
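/* Note: tlb_vaddr_to_host() only hands back a host pointer when the page is
   already resident in the QEMU TLB for the requested access type; otherwise
   the byte-wise fallback above goes through the normal slow path (and thus
   tlb_fill), so the next loop iteration can usually take the fast path. */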
static void fast_memmove(CPUS390XState *env, uint64_t dest, uint64_t src,
                         uint32_t l)
{
    int mmu_idx = cpu_mmu_index(env);

    while (l > 0) {
        void *src_p = tlb_vaddr_to_host(env, src, MMU_DATA_LOAD, mmu_idx);
        void *dest_p = tlb_vaddr_to_host(env, dest, MMU_DATA_STORE, mmu_idx);
        if (src_p && dest_p) {
            /* Access to both whole pages granted.  */
            int l_adj = adj_len_to_page(l, src);
            l_adj = adj_len_to_page(l_adj, dest);
            memmove(dest_p, src_p, l_adj);
            src += l_adj;
            dest += l_adj;
            l -= l_adj;
        } else {
            /* We failed to get access to one or both whole pages. The next
               read or write access will likely fill the QEMU TLB for the
               next iteration.  */
            cpu_stb_data(env, dest, cpu_ldub_data(env, src));
            src++;
            dest++;
            l--;
        }
    }
}
uint32_t HELPER(nc)(CPUS390XState *env, uint32_t l, uint64_t dest,
                    uint64_t src)
    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __func__, l, dest, src);
    for (i = 0; i <= l; i++) {
        x = cpu_ldub_data(env, dest + i) & cpu_ldub_data(env, src + i);
        cpu_stb_data(env, dest + i, x);
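/* Note: in these storage-to-storage helpers l holds the machine length code,
   i.e. the operand length minus one, which is why the loops run "i <= l" and
   the fast paths pass "l + 1" bytes. */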
uint32_t HELPER(xc)(CPUS390XState *env, uint32_t l, uint64_t dest,
                    uint64_t src)
    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __func__, l, dest, src);

    /* xor with itself is the same as memset(0) */
    if (src == dest) {
        fast_memset(env, dest, 0, l + 1);
        return 0;
    }

    for (i = 0; i <= l; i++) {
        x = cpu_ldub_data(env, dest + i) ^ cpu_ldub_data(env, src + i);
        cpu_stb_data(env, dest + i, x);
uint32_t HELPER(oc)(CPUS390XState *env, uint32_t l, uint64_t dest,
                    uint64_t src)
    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __func__, l, dest, src);
    for (i = 0; i <= l; i++) {
        x = cpu_ldub_data(env, dest + i) | cpu_ldub_data(env, src + i);
        cpu_stb_data(env, dest + i, x);
void HELPER(mvc)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
{
    int i;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __func__, l, dest, src);

    /* mvc with source pointing to the byte after the destination is the
       same as memset with the first source byte */
    if (dest == (src + 1)) {
        fast_memset(env, dest, cpu_ldub_data(env, src), l + 1);
        return;
    }

    /* mvc and memmove do not behave the same when areas overlap! */
    if ((dest < src) || (src + l < dest)) {
        fast_memmove(env, dest, src, l + 1);
        return;
    }

    /* slow version with byte accesses which always work */
    for (i = 0; i <= l; i++) {
        cpu_stb_data(env, dest + i, cpu_ldub_data(env, src + i));
    }
}
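/* The dest == src + 1 case above matters in practice: an overlapping MVC is
   defined to copy byte by byte, so moving from dest - 1 to dest replicates a
   single byte across the destination, which is how memset-style clears are
   commonly coded on S/390. */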
/* compare unsigned byte arrays */
uint32_t HELPER(clc)(CPUS390XState *env, uint32_t l, uint64_t s1, uint64_t s2)
    HELPER_LOG("%s l %d s1 %" PRIx64 " s2 %" PRIx64 "\n",
               __func__, l, s1, s2);
    for (i = 0; i <= l; i++) {
        x = cpu_ldub_data(env, s1 + i);
        y = cpu_ldub_data(env, s2 + i);
        HELPER_LOG("%02x (%c)/%02x (%c) ", x, x, y, y);
/* compare logical under mask */
uint32_t HELPER(clm)(CPUS390XState *env, uint32_t r1, uint32_t mask,
                     uint64_t addr)
    HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%" PRIx64 "\n", __func__, r1,
               mask, addr);
            d = cpu_ldub_data(env, addr);
            r = (r1 & 0xff000000UL) >> 24;
            HELPER_LOG("mask 0x%x %02x/%02x (0x%" PRIx64 ") ", mask, r, d,
                       addr);
        mask = (mask << 1) & 0xf;
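/* CLM walks the 4-bit mask from its leftmost bit: each selected byte of r1,
   high byte first (see the 0xff000000 extraction above), is compared with the
   next byte at addr, and the mask is shifted left until it is exhausted or an
   inequality has determined the condition code. */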
static inline uint64_t fix_address(CPUS390XState *env, uint64_t a)
{
    /* 31-Bit mode */
    if (!(env->psw.mask & PSW_MASK_64)) {
        a &= 0x7fffffff;
    }
    return a;
}
static inline uint64_t get_address(CPUS390XState *env, int x2, int b2, int d2)
{
    uint64_t r = d2;

    if (x2) {
        r += env->regs[x2];
    }
    if (b2) {
        r += env->regs[b2];
    }
    return fix_address(env, r);
}
static inline uint64_t get_address_31fix(CPUS390XState *env, int reg)
{
    return fix_address(env, env->regs[reg]);
}
/* search string (c is byte to search, r2 is string, r1 end of string) */
uint64_t HELPER(srst)(CPUS390XState *env, uint64_t r0, uint64_t end,
                      uint64_t str)
    str = fix_address(env, str);
    end = fix_address(env, end);

    /* Assume for now that R2 is unmodified.  */

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 8k.  */
    for (len = 0; len < 0x2000; ++len) {
        if (str + len == end) {
            /* Character not found.  R1 & R2 are unmodified.  */
        v = cpu_ldub_data(env, str + len);
            /* Character found.  Set R1 to the location; R2 is unmodified.  */

    /* CPU-determined bytes processed.  Advance R2 to next byte to process.  */
    env->retxl = str + len;
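/* env->retxl is used throughout this file to pass a second 64-bit result back
   to the translator alongside the helper's return value (here, the updated
   string position for R2). */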
/* unsigned string compare (c is string terminator) */
uint64_t HELPER(clst)(CPUS390XState *env, uint64_t c, uint64_t s1, uint64_t s2)
    s1 = fix_address(env, s1);
    s2 = fix_address(env, s2);

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 8k.  */
    for (len = 0; len < 0x2000; ++len) {
        uint8_t v1 = cpu_ldub_data(env, s1 + len);
        uint8_t v2 = cpu_ldub_data(env, s2 + len);
            /* Equal.  CC=0, and don't advance the registers.  */
            /* Unequal.  CC={1,2}, and advance the registers.  Note that
               the terminator need not be zero, but the string that contains
               the terminator is by definition "low".  */
            env->cc_op = (v1 == c ? 1 : v2 == c ? 2 : v1 < v2 ? 1 : 2);
            env->retxl = s2 + len;

    /* CPU-determined bytes equal; advance the registers.  */
    env->retxl = s2 + len;
void HELPER(mvpg)(CPUS390XState *env, uint64_t r0, uint64_t r1, uint64_t r2)
{
    /* XXX missing r0 handling */
    fast_memmove(env, r1, r2, TARGET_PAGE_SIZE);
}
/* string copy (c is string terminator) */
uint64_t HELPER(mvst)(CPUS390XState *env, uint64_t c, uint64_t d, uint64_t s)
    d = fix_address(env, d);
    s = fix_address(env, s);

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 8k.  */
    for (len = 0; len < 0x2000; ++len) {
        uint8_t v = cpu_ldub_data(env, s + len);
        cpu_stb_data(env, d + len, v);
            /* Complete.  Set CC=1 and advance R1.  */

    /* Incomplete.  Set CC=3 and signal to advance R1 and R2.  */
    env->retxl = s + len;
static uint32_t helper_icm(CPUS390XState *env, uint32_t r1, uint64_t address,
                           uint32_t mask)
    int pos = 24; /* top of the lower half of r1 */
    uint64_t rmask = 0xff000000ULL;
            env->regs[r1] &= ~rmask;
            val = cpu_ldub_data(env, address);
            if ((val & 0x80) && !ccd) {
            if (val && cc == 0) {
            env->regs[r1] |= (uint64_t)val << pos;
        mask = (mask << 1) & 0xf;
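/* ICM semantics: walk the 4-bit mask from its leftmost bit; for every bit
   that is set, fetch the next byte from memory and insert it into the
   corresponding byte of the low word of r1 (pos and rmask track that byte).
   The condition code reflects the sign bit of the first inserted byte and
   whether any inserted byte is nonzero. */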
/* execute instruction
   this instruction executes an insn modified with the contents of r1
   it does not change the executed instruction in memory
   it does not change the program counter
   in other words: tricky...
   currently implemented by interpreting the cases it is most commonly used in
 */
uint32_t HELPER(ex)(CPUS390XState *env, uint32_t cc, uint64_t v1,
                    uint64_t addr, uint64_t ret)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint16_t insn = cpu_lduw_code(env, addr);

    HELPER_LOG("%s: v1 0x%lx addr 0x%lx insn 0x%x\n", __func__, v1, addr,
               insn);
    if ((insn & 0xf0ff) == 0xd000) {
        uint32_t l, insn2, b1, b2, d1, d2;

        l = v1 & 0xff;
        insn2 = cpu_ldl_code(env, addr + 2);
        b1 = (insn2 >> 28) & 0xf;
        b2 = (insn2 >> 12) & 0xf;
        d1 = (insn2 >> 16) & 0xfff;
        d2 = insn2 & 0xfff;
        switch (insn & 0xf00) {
        case 0x200:
            helper_mvc(env, l, get_address(env, 0, b1, d1),
                       get_address(env, 0, b2, d2));
            break;
        case 0x400:
            cc = helper_nc(env, l, get_address(env, 0, b1, d1),
                           get_address(env, 0, b2, d2));
            break;
        case 0x500:
            cc = helper_clc(env, l, get_address(env, 0, b1, d1),
                            get_address(env, 0, b2, d2));
            break;
        case 0x600:
            cc = helper_oc(env, l, get_address(env, 0, b1, d1),
                           get_address(env, 0, b2, d2));
            break;
        case 0x700:
            cc = helper_xc(env, l, get_address(env, 0, b1, d1),
                           get_address(env, 0, b2, d2));
            break;
        case 0xc00:
            helper_tr(env, l, get_address(env, 0, b1, d1),
                      get_address(env, 0, b2, d2));
            break;
        case 0xd00:
            cc = helper_trt(env, l, get_address(env, 0, b1, d1),
                            get_address(env, 0, b2, d2));
            break;
        default:
            goto abort;
        }
    } else if ((insn & 0xff00) == 0x0a00) {
        /* supervisor call */
        HELPER_LOG("%s: svc %ld via execute\n", __func__, (insn | v1) & 0xff);
        env->psw.addr = ret - 4;
        env->int_svc_code = (insn | v1) & 0xff;
        env->int_svc_ilen = 4;
        helper_exception(env, EXCP_SVC);
    } else if ((insn & 0xff00) == 0xbf00) {
        uint32_t insn2, r1, r3, b2, d2;

        insn2 = cpu_ldl_code(env, addr + 2);
        r1 = (insn2 >> 20) & 0xf;
        r3 = (insn2 >> 16) & 0xf;
        b2 = (insn2 >> 12) & 0xf;
        d2 = insn2 & 0xfff;
        cc = helper_icm(env, r1, get_address(env, 0, b2, d2), r3);
    } else {
    abort:
        cpu_abort(CPU(cpu), "EXECUTE on instruction prefix 0x%x not implemented\n",
                  insn);
    }
    return cc;
}
/* load access registers r1 to r3 from memory at a2 */
void HELPER(lam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
    for (i = r1;; i = (i + 1) % 16) {
        env->aregs[i] = cpu_ldl_data(env, a2);
/* store access registers r1 to r3 in memory at a2 */
void HELPER(stam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
    for (i = r1;; i = (i + 1) % 16) {
        cpu_stl_data(env, a2, env->aregs[i]);
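/* Both LAM and STAM treat r1..r3 as a wrap-around range: the index advances
   modulo 16, so e.g. r1=14, r3=1 covers access registers 14, 15, 0 and 1. */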
uint32_t HELPER(mvcl)(CPUS390XState *env, uint32_t r1, uint32_t r2)
    uint64_t destlen = env->regs[r1 + 1] & 0xffffff;
    uint64_t dest = get_address_31fix(env, r1);
    uint64_t srclen = env->regs[r2 + 1] & 0xffffff;
    uint64_t src = get_address_31fix(env, r2);
    uint8_t pad = src >> 24;

    if (destlen == srclen) {
        cc = 0;
    } else if (destlen < srclen) {
        cc = 1;
    } else {
        cc = 2;
    }

    if (srclen > destlen) {
        srclen = destlen;
    }

    for (; destlen && srclen; src++, dest++, destlen--, srclen--) {
        v = cpu_ldub_data(env, src);
        cpu_stb_data(env, dest, v);
    }

    for (; destlen; dest++, destlen--) {
        cpu_stb_data(env, dest, pad);
    }

    env->regs[r1 + 1] = destlen;
    /* can't use srclen here, we trunc'ed it */
    env->regs[r2 + 1] -= src - env->regs[r2];
    env->regs[r1] = dest;
/* move long extended another memcopy insn with more bells and whistles */
uint32_t HELPER(mvcle)(CPUS390XState *env, uint32_t r1, uint64_t a2,
                       uint32_t r3)
    uint64_t destlen = env->regs[r1 + 1];
    uint64_t dest = env->regs[r1];
    uint64_t srclen = env->regs[r3 + 1];
    uint64_t src = env->regs[r3];
    uint8_t pad = a2 & 0xff;

    if (!(env->psw.mask & PSW_MASK_64)) {
        destlen = (uint32_t)destlen;
        srclen = (uint32_t)srclen;
    }

    if (destlen == srclen) {
        cc = 0;
    } else if (destlen < srclen) {
        cc = 1;
    } else {
        cc = 2;
    }

    if (srclen > destlen) {
        srclen = destlen;
    }

    for (; destlen && srclen; src++, dest++, destlen--, srclen--) {
        v = cpu_ldub_data(env, src);
        cpu_stb_data(env, dest, v);
    }

    for (; destlen; dest++, destlen--) {
        cpu_stb_data(env, dest, pad);
    }

    env->regs[r1 + 1] = destlen;
    /* can't use srclen here, we trunc'ed it */
    /* FIXME: 31-bit mode! */
    env->regs[r3 + 1] -= src - env->regs[r3];
    env->regs[r1] = dest;
/* compare logical long extended memcompare insn with padding */
uint32_t HELPER(clcle)(CPUS390XState *env, uint32_t r1, uint64_t a2,
                       uint32_t r3)
    uint64_t destlen = env->regs[r1 + 1];
    uint64_t dest = get_address_31fix(env, r1);
    uint64_t srclen = env->regs[r3 + 1];
    uint64_t src = get_address_31fix(env, r3);
    uint8_t pad = a2 & 0xff;
    uint8_t v1 = 0, v2 = 0;

    if (!(destlen || srclen)) {
        return cc;
    }

    if (srclen > destlen) {
        srclen = destlen;
    }

    for (; destlen || srclen; src++, dest++, destlen--, srclen--) {
        v1 = srclen ? cpu_ldub_data(env, src) : pad;
        v2 = destlen ? cpu_ldub_data(env, dest) : pad;
        if (v1 != v2) {
            cc = (v1 < v2) ? 1 : 2;
            break;
        }
    }

    env->regs[r1 + 1] = destlen;
    /* can't use srclen here, we trunc'ed it */
    env->regs[r3 + 1] -= src - env->regs[r3];
    env->regs[r1] = dest;
uint64_t HELPER(cksm)(CPUS390XState *env, uint64_t r1,
                      uint64_t src, uint64_t src_len)
{
    uint64_t max_len, len;
    uint64_t cksm = (uint32_t)r1;

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 8k.  */
    max_len = (src_len > 0x2000 ? 0x2000 : src_len);

    /* Process full words as available.  */
    for (len = 0; len + 4 <= max_len; len += 4, src += 4) {
        cksm += (uint32_t)cpu_ldl_data(env, src);
    }

    switch (max_len - len) {
    case 1:
        cksm += cpu_ldub_data(env, src) << 24;
        len += 1;
        break;
    case 2:
        cksm += cpu_lduw_data(env, src) << 16;
        len += 2;
        break;
    case 3:
        cksm += cpu_lduw_data(env, src) << 16;
        cksm += cpu_ldub_data(env, src + 2) << 8;
        len += 3;
        break;
    }

    /* Fold the carry from the checksum.  Note that we can see carry-out
       during folding more than once (but probably not more than twice).  */
    while (cksm > 0xffffffffull) {
        cksm = (uint32_t)cksm + (cksm >> 32);
    }
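    /* Folding example: 0x1_0000_0003 becomes 0x00000004 after one pass
       (low word 3 plus carry 1), while a value such as 0x2_ffff_ffff needs a
       second pass because the first addition itself carries out of 32 bits. */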
    /* Indicate whether or not we've processed everything.  */
    env->cc_op = (len == src_len ? 0 : 3);

    /* Return both cksm and processed length.  */
    env->retxl = cksm;
    return len;
}
void HELPER(unpk)(CPUS390XState *env, uint32_t len, uint64_t dest,
                  uint64_t src)
    int len_dest = len >> 4;
    int len_src = len & 0xf;
    int second_nibble = 0;

    /* last byte is special, it only flips the nibbles */
    b = cpu_ldub_data(env, src);
    cpu_stb_data(env, dest, (b << 4) | (b >> 4));

    /* now pad every nibble with 0xf0 */

    while (len_dest > 0) {
        uint8_t cur_byte = 0;

            cur_byte = cpu_ldub_data(env, src);

        /* only advance one nibble at a time */
        second_nibble = !second_nibble;

        cur_byte = (cur_byte & 0xf);

        cpu_stb_data(env, dest, cur_byte);
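/* UNPK works from the rightmost byte towards the left: each destination byte
   receives one digit nibble of the packed source (second_nibble toggles which
   half of the current source byte is used) padded with the 0xf0 zone bits,
   while the last byte, handled before the loop, simply has its nibbles
   swapped so the sign ends up in the zone position. */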
void HELPER(tr)(CPUS390XState *env, uint32_t len, uint64_t array,
                uint64_t trans)
{
    int i;

    for (i = 0; i <= len; i++) {
        uint8_t byte = cpu_ldub_data(env, array + i);
        uint8_t new_byte = cpu_ldub_data(env, trans + byte);

        cpu_stb_data(env, array + i, new_byte);
    }
}
uint64_t HELPER(tre)(CPUS390XState *env, uint64_t array,
                     uint64_t len, uint64_t trans)
    uint8_t end = env->regs[0] & 0xff;

    if (!(env->psw.mask & PSW_MASK_64)) {

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 8k.  */

    for (i = 0; i < l; i++) {
        uint8_t byte, new_byte;

        byte = cpu_ldub_data(env, array + i);

        new_byte = cpu_ldub_data(env, trans + byte);
        cpu_stb_data(env, array + i, new_byte);

    env->retxl = len - i;
uint32_t HELPER(trt)(CPUS390XState *env, uint32_t len, uint64_t array,
                     uint64_t trans)
{
    uint32_t cc = 0;
    int i;

    for (i = 0; i <= len; i++) {
        uint8_t byte = cpu_ldub_data(env, array + i);
        uint8_t sbyte = cpu_ldub_data(env, trans + byte);

        if (sbyte != 0) {
            env->regs[1] = array + i;
            env->regs[2] = (env->regs[2] & ~0xff) | sbyte;
            cc = (i == len) ? 2 : 1;
            break;
        }
    }

    return cc;
}
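/* TRT scans the operand, looking each byte up in the 256-byte function table
   at trans; at the first nonzero function byte it records the argument
   address in R1 and the function byte in the low byte of R2, with cc 1
   (found before the end) or cc 2 (found at the last byte). */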
#if !defined(CONFIG_USER_ONLY)
void HELPER(lctlg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
    S390CPU *cpu = s390_env_get_cpu(env);
    bool PERchanged = false;

    for (i = r1;; i = (i + 1) % 16) {
        val = cpu_ldq_data(env, src);
        if (env->cregs[i] != val && i >= 9 && i <= 11) {
            PERchanged = true;
        }
        env->cregs[i] = val;
        HELPER_LOG("load ctl %d from 0x%" PRIx64 " == 0x%" PRIx64 "\n",
                   i, src, env->cregs[i]);
        src += sizeof(uint64_t);

    if (PERchanged && env->psw.mask & PSW_MASK_PER) {
        s390_cpu_recompute_watchpoints(CPU(cpu));
    }

    tlb_flush(CPU(cpu), 1);
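/* Control registers 9-11 hold the PER control fields, which is why a change
   to any of them triggers s390_cpu_recompute_watchpoints(); and since other
   control registers participate in address translation, the whole TLB is
   flushed after every load. */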
void HELPER(lctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
    S390CPU *cpu = s390_env_get_cpu(env);
    bool PERchanged = false;

    for (i = r1;; i = (i + 1) % 16) {
        val = cpu_ldl_data(env, src);
        if ((uint32_t)env->cregs[i] != val && i >= 9 && i <= 11) {
            PERchanged = true;
        }
        env->cregs[i] = (env->cregs[i] & 0xFFFFFFFF00000000ULL) | val;
        src += sizeof(uint32_t);

    if (PERchanged && env->psw.mask & PSW_MASK_PER) {
        s390_cpu_recompute_watchpoints(CPU(cpu));
    }

    tlb_flush(CPU(cpu), 1);
void HELPER(stctg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
    for (i = r1;; i = (i + 1) % 16) {
        cpu_stq_data(env, dest, env->cregs[i]);
        dest += sizeof(uint64_t);
void HELPER(stctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
    for (i = r1;; i = (i + 1) % 16) {
        cpu_stl_data(env, dest, env->cregs[i]);
        dest += sizeof(uint32_t);
uint32_t HELPER(tprot)(uint64_t a1, uint64_t a2)
{
    /* XXX implement */
    return 0;
}
/* insert storage key extended */
uint64_t HELPER(iske)(CPUS390XState *env, uint64_t r2)
{
    uint64_t addr = get_address(env, 0, 0, r2);

    if (addr > ram_size) {
        return 0;
    }

    return env->storage_keys[addr / TARGET_PAGE_SIZE];
}
/* set storage key extended */
void HELPER(sske)(CPUS390XState *env, uint64_t r1, uint64_t r2)
{
    uint64_t addr = get_address(env, 0, 0, r2);

    if (addr > ram_size) {
        return;
    }

    env->storage_keys[addr / TARGET_PAGE_SIZE] = r1;
}
/* reset reference bit extended */
uint32_t HELPER(rrbe)(CPUS390XState *env, uint64_t r2)
    key = env->storage_keys[r2 / TARGET_PAGE_SIZE];
    re = key & (SK_R | SK_C);
    env->storage_keys[r2 / TARGET_PAGE_SIZE] = (key & ~SK_R);

    /*
     * cc
     *
     * 0  Reference bit zero; change bit zero
     * 1  Reference bit zero; change bit one
     * 2  Reference bit one; change bit zero
     * 3  Reference bit one; change bit one
     */
/* compare and swap and purge */
uint32_t HELPER(csp)(CPUS390XState *env, uint32_t r1, uint64_t r2)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint32_t cc;
    uint32_t o1 = env->regs[r1];
    uint64_t a2 = r2 & ~3ULL;
    uint32_t o2 = cpu_ldl_data(env, a2);

    if (o1 == o2) {
        cpu_stl_data(env, a2, env->regs[(r1 + 1) & 15]);
        if (r2 & 0x3) {
            /* flush TLB / ALB */
            tlb_flush(CPU(cpu), 1);
        }
        cc = 0;
    } else {
        env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | o2;
        cc = 1;
    }

    return cc;
}
uint32_t HELPER(mvcs)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2)
    HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
               __func__, l, a1, a2);

    /* XXX replace w/ memcpy */
    for (i = 0; i < l; i++) {
        cpu_stb_secondary(env, a1 + i, cpu_ldub_primary(env, a2 + i));
    }
uint32_t HELPER(mvcp)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2)
    HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
               __func__, l, a1, a2);

    /* XXX replace w/ memcpy */
    for (i = 0; i < l; i++) {
        cpu_stb_primary(env, a1 + i, cpu_ldub_secondary(env, a2 + i));
    }
/* invalidate pte */
void HELPER(ipte)(CPUS390XState *env, uint64_t pte_addr, uint64_t vaddr)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));
    uint64_t page = vaddr & TARGET_PAGE_MASK;
    uint64_t pte = 0;

    /* XXX broadcast to other CPUs */

    /* XXX Linux is nice enough to give us the exact pte address.
       According to spec we'd have to find it out ourselves */
    /* XXX Linux is fine with overwriting the pte, the spec requires
       us to only set the invalid bit */
    stq_phys(cs->as, pte_addr, pte | _PAGE_INVALID);

    /* XXX we exploit the fact that Linux passes the exact virtual
       address here - it's not obliged to! */
    tlb_flush_page(cs, page);

    /* XXX 31-bit hack */
    if (page & 0x80000000) {
        tlb_flush_page(cs, page & ~0x80000000);
    } else {
        tlb_flush_page(cs, page | 0x80000000);
    }
}
/* flush local tlb */
void HELPER(ptlb)(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);

    tlb_flush(CPU(cpu), 1);
}
/* load using real address */
uint64_t HELPER(lura)(CPUS390XState *env, uint64_t addr)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));

    return (uint32_t)ldl_phys(cs->as, get_address(env, 0, 0, addr));
}
uint64_t HELPER(lurag)(CPUS390XState *env, uint64_t addr)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));

    return ldq_phys(cs->as, get_address(env, 0, 0, addr));
}
/* store using real address */
void HELPER(stura)(CPUS390XState *env, uint64_t addr, uint64_t v1)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));

    stl_phys(cs->as, get_address(env, 0, 0, addr), (uint32_t)v1);

    if ((env->psw.mask & PSW_MASK_PER) &&
        (env->cregs[9] & PER_CR9_EVENT_STORE) &&
        (env->cregs[9] & PER_CR9_EVENT_STORE_REAL)) {
        /* PSW is saved just before calling the helper.  */
        env->per_address = env->psw.addr;
        env->per_perc_atmid = PER_CODE_EVENT_STORE_REAL | get_per_atmid(env);
    }
}
void HELPER(sturg)(CPUS390XState *env, uint64_t addr, uint64_t v1)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));

    stq_phys(cs->as, get_address(env, 0, 0, addr), v1);

    if ((env->psw.mask & PSW_MASK_PER) &&
        (env->cregs[9] & PER_CR9_EVENT_STORE) &&
        (env->cregs[9] & PER_CR9_EVENT_STORE_REAL)) {
        /* PSW is saved just before calling the helper.  */
        env->per_address = env->psw.addr;
        env->per_perc_atmid = PER_CODE_EVENT_STORE_REAL | get_per_atmid(env);
    }
}
/* load real address */
uint64_t HELPER(lra)(CPUS390XState *env, uint64_t addr)
    CPUState *cs = CPU(s390_env_get_cpu(env));
    int old_exc = cs->exception_index;
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;

    /* XXX incomplete - has more corner cases */
    if (!(env->psw.mask & PSW_MASK_64) && (addr >> 32)) {
        program_interrupt(env, PGM_SPECIAL_OP, 2);
    }

    cs->exception_index = old_exc;
    if (mmu_translate(env, addr, 0, asc, &ret, &flags, true)) {
        cc = 3;
    }
    if (cs->exception_index == EXCP_PGM) {
        ret = env->int_pgm_code | 0x80000000;
    } else {
        ret |= addr & ~TARGET_PAGE_MASK;
    }
    cs->exception_index = old_exc;