/*
 *  S/390 helper routines
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2009 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "dyngen-exec.h"
#include "host-utils.h"
#include "qemu-timer.h"
#include <linux/kvm.h>

#if !defined(CONFIG_USER_ONLY)
#include "sysemu.h"
#endif

/*****************************************************************************/
/* Softmmu support */
#if !defined(CONFIG_USER_ONLY)
#include "softmmu_exec.h"

#define MMUSUFFIX _mmu

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"
/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(CPUS390XState *env1, target_ulong addr, int is_write, int mmu_idx,
              void *retaddr)
{
    TranslationBlock *tb;
    CPUS390XState *saved_env;
    int ret;

    saved_env = env;
    env = env1;
    ret = cpu_s390x_handle_mmu_fault(env, addr, is_write, mmu_idx);
    if (unlikely(ret != 0)) {
        if (likely(retaddr)) {
            /* now we have a real cpu fault */
            tb = tb_find_pc((unsigned long)retaddr);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, (unsigned long)retaddr);
            }
        }
        cpu_loop_exit(env);
    }
    env = saved_env;
}
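
/*
 * Illustrative sketch (not part of the original file): with CONFIG_USER_ONLY
 * unset, the guest accessors used throughout these helpers (ldub(), stb(),
 * ldq(), stq(), ...) come from the softmmu templates included above.  A miss
 * in the softmmu TLB ends up in tlb_fill(), which either installs a mapping
 * or raises a program exception.  The hypothetical function below only shows
 * that calling pattern; it is not an s390x instruction helper.
 */
#if 0   /* example only, kept out of the build */
static uint8_t example_guest_load(uint64_t guest_addr)
{
    /* May long-jump out via tlb_fill() -> cpu_loop_exit() on a fault. */
    return ldub(guest_addr);
}
#endif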
/* #define DEBUG_HELPER */
#ifdef DEBUG_HELPER
#define HELPER_LOG(x...) qemu_log(x)
#else
#define HELPER_LOG(x...)
#endif

/* raise an exception */
void HELPER(exception)(uint32_t excp)
{
    HELPER_LOG("%s: exception %d\n", __FUNCTION__, excp);
    env->exception_index = excp;
    cpu_loop_exit(env);
}
#ifndef CONFIG_USER_ONLY
static void mvc_fast_memset(CPUS390XState *env, uint32_t l, uint64_t dest,
                            uint8_t byte)
{
    target_phys_addr_t dest_phys;
    target_phys_addr_t len = l;
    void *dest_p;
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;
    int flags;

    if (mmu_translate(env, dest, 1, asc, &dest_phys, &flags)) {
        cpu_abort(env, "should never reach here");
    }
    dest_phys |= dest & ~TARGET_PAGE_MASK;

    dest_p = cpu_physical_memory_map(dest_phys, &len, 1);

    memset(dest_p, byte, len);

    cpu_physical_memory_unmap(dest_p, len, 1, len);
}

static void mvc_fast_memmove(CPUS390XState *env, uint32_t l, uint64_t dest,
                             uint64_t src)
{
    target_phys_addr_t dest_phys;
    target_phys_addr_t src_phys;
    target_phys_addr_t len = l;
    void *dest_p;
    void *src_p;
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;
    int flags;

    if (mmu_translate(env, dest, 1, asc, &dest_phys, &flags)) {
        cpu_abort(env, "should never reach here");
    }
    dest_phys |= dest & ~TARGET_PAGE_MASK;

    if (mmu_translate(env, src, 0, asc, &src_phys, &flags)) {
        cpu_abort(env, "should never reach here");
    }
    src_phys |= src & ~TARGET_PAGE_MASK;

    dest_p = cpu_physical_memory_map(dest_phys, &len, 1);
    src_p = cpu_physical_memory_map(src_phys, &len, 0);

    memmove(dest_p, src_p, len);

    cpu_physical_memory_unmap(dest_p, len, 1, len);
    cpu_physical_memory_unmap(src_p, len, 0, len);
}
#endif
/* and on array */
uint32_t HELPER(nc)(uint32_t l, uint64_t dest, uint64_t src)
{
    int i;
    unsigned char x;
    uint32_t cc = 0;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __FUNCTION__, l, dest, src);
    for (i = 0; i <= l; i++) {
        x = ldub(dest + i) & ldub(src + i);
        if (x) {
            cc = 1;
        }
        stb(dest + i, x);
    }
    return cc;
}

/* xor on array */
uint32_t HELPER(xc)(uint32_t l, uint64_t dest, uint64_t src)
{
    int i;
    unsigned char x;
    uint32_t cc = 0;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __FUNCTION__, l, dest, src);

#ifndef CONFIG_USER_ONLY
    /* xor with itself is the same as memset(0) */
    if ((l > 32) && (src == dest) &&
        (src & TARGET_PAGE_MASK) == ((src + l) & TARGET_PAGE_MASK)) {
        mvc_fast_memset(env, l + 1, dest, 0);
        return 0;
    }
#else
    if (src == dest) {
        memset(g2h(dest), 0, l + 1);
        return 0;
    }
#endif
    for (i = 0; i <= l; i++) {
        x = ldub(dest + i) ^ ldub(src + i);
        if (x) {
            cc = 1;
        }
        stb(dest + i, x);
    }
    return cc;
}

/* or on array */
uint32_t HELPER(oc)(uint32_t l, uint64_t dest, uint64_t src)
{
    int i;
    unsigned char x;
    uint32_t cc = 0;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __FUNCTION__, l, dest, src);
    for (i = 0; i <= l; i++) {
        x = ldub(dest + i) | ldub(src + i);
        if (x) {
            cc = 1;
        }
        stb(dest + i, x);
    }
    return cc;
}
/* memmove */
void HELPER(mvc)(uint32_t l, uint64_t dest, uint64_t src)
{
    int i = 0;
    int x = 0;
    uint32_t l_64 = (l + 1) / 8;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __FUNCTION__, l, dest, src);

#ifndef CONFIG_USER_ONLY
    if ((l > 32) &&
        (src & TARGET_PAGE_MASK) == ((src + l) & TARGET_PAGE_MASK) &&
        (dest & TARGET_PAGE_MASK) == ((dest + l) & TARGET_PAGE_MASK)) {
        if (dest == (src + 1)) {
            mvc_fast_memset(env, l + 1, dest, ldub(src));
            return;
        } else if ((src & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) {
            mvc_fast_memmove(env, l + 1, dest, src);
            return;
        }
    }
#else
    if (dest == (src + 1)) {
        memset(g2h(dest), ldub(src), l + 1);
        return;
    } else {
        memmove(g2h(dest), g2h(src), l + 1);
        return;
    }
#endif

    /* handle the parts that fit into 8-byte loads/stores */
    if (dest != (src + 1)) {
        for (i = 0; i < l_64; i++) {
            stq(dest + x, ldq(src + x));
            x += 8;
        }
    }

    /* slow version crossing pages with byte accesses */
    for (i = x; i <= l; i++) {
        stb(dest + i, ldub(src + i));
    }
}
/* compare unsigned byte arrays */
uint32_t HELPER(clc)(uint32_t l, uint64_t s1, uint64_t s2)
{
    int i;
    unsigned char x, y;
    uint32_t cc;

    HELPER_LOG("%s l %d s1 %" PRIx64 " s2 %" PRIx64 "\n",
               __FUNCTION__, l, s1, s2);
    for (i = 0; i <= l; i++) {
        x = ldub(s1 + i);
        y = ldub(s2 + i);
        HELPER_LOG("%02x (%c)/%02x (%c) ", x, x, y, y);
        if (x < y) {
            cc = 1;
            goto done;
        } else if (x > y) {
            cc = 2;
            goto done;
        }
    }
    cc = 0;
done:
    HELPER_LOG("\n");
    return cc;
}

/* compare logical under mask */
uint32_t HELPER(clm)(uint32_t r1, uint32_t mask, uint64_t addr)
{
    uint8_t r, d;
    uint32_t cc;

    HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%" PRIx64 "\n", __FUNCTION__, r1,
               mask, addr);
    cc = 0;
    while (mask) {
        if (mask & 8) {
            d = ldub(addr);
            r = (r1 & 0xff000000UL) >> 24;
            HELPER_LOG("mask 0x%x %02x/%02x (0x%" PRIx64 ") ", mask, r, d,
                       addr);
            if (r < d) {
                cc = 1;
                break;
            } else if (r > d) {
                cc = 2;
                break;
            }
            addr++;
        }
        mask = (mask << 1) & 0xf;
        r1 <<= 8;
    }
    HELPER_LOG("\n");
    return cc;
}

/* store character under mask */
void HELPER(stcm)(uint32_t r1, uint32_t mask, uint64_t addr)
{
    uint8_t r;

    HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%lx\n", __FUNCTION__, r1, mask,
               addr);
    while (mask) {
        if (mask & 8) {
            r = (r1 & 0xff000000UL) >> 24;
            stb(addr, r);
            HELPER_LOG("mask 0x%x %02x (0x%lx) ", mask, r, addr);
            addr++;
        }
        mask = (mask << 1) & 0xf;
        r1 <<= 8;
    }
    HELPER_LOG("\n");
}
/* 64/64 -> 128 unsigned multiplication */
void HELPER(mlg)(uint32_t r1, uint64_t v2)
{
#if HOST_LONG_BITS == 64 && defined(__GNUC__)
    /* assuming 64-bit hosts have __uint128_t */
    __uint128_t res = (__uint128_t)env->regs[r1 + 1];
    res *= (__uint128_t)v2;
    env->regs[r1] = (uint64_t)(res >> 64);
    env->regs[r1 + 1] = (uint64_t)res;
#else
    mulu64(&env->regs[r1 + 1], &env->regs[r1], env->regs[r1 + 1], v2);
#endif
}
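
/*
 * Illustrative sketch (not part of the original file): the portable mulu64()
 * fallback above produces a 128-bit product from four 32-bit partial
 * products.  The hypothetical function below spells out that decomposition
 * for two arbitrary 64-bit values; all names are made up for the example.
 */
#if 0   /* example only */
static void example_mul64_to_128(uint64_t a, uint64_t b,
                                 uint64_t *hi, uint64_t *lo)
{
    uint64_t a_lo = (uint32_t)a, a_hi = a >> 32;
    uint64_t b_lo = (uint32_t)b, b_hi = b >> 32;

    uint64_t p0 = a_lo * b_lo;              /* contributes bits   0..63  */
    uint64_t p1 = a_lo * b_hi;              /* contributes bits  32..95  */
    uint64_t p2 = a_hi * b_lo;              /* contributes bits  32..95  */
    uint64_t p3 = a_hi * b_hi;              /* contributes bits  64..127 */

    uint64_t mid = (p0 >> 32) + (uint32_t)p1 + (uint32_t)p2;

    *lo = (mid << 32) | (uint32_t)p0;
    *hi = p3 + (p1 >> 32) + (p2 >> 32) + (mid >> 32);
}
#endif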
/* 128 -> 64/64 unsigned division */
void HELPER(dlg)(uint32_t r1, uint64_t v2)
{
    uint64_t divisor = v2;

    if (!env->regs[r1]) {
        /* 64 -> 64/64 case */
        env->regs[r1] = env->regs[r1 + 1] % divisor;
        env->regs[r1 + 1] = env->regs[r1 + 1] / divisor;
    } else {
#if HOST_LONG_BITS == 64 && defined(__GNUC__)
        /* assuming 64-bit hosts have __uint128_t */
        __uint128_t dividend = (((__uint128_t)env->regs[r1]) << 64) |
                               env->regs[r1 + 1];
        __uint128_t quotient = dividend / divisor;
        __uint128_t remainder = dividend % divisor;
        env->regs[r1 + 1] = quotient;
        env->regs[r1] = remainder;
#else
        /* 32-bit hosts would need special wrapper functionality - just abort if
           we encounter such a case; it's very unlikely anyways. */
        cpu_abort(env, "128 -> 64/64 division not implemented\n");
#endif
    }
}
static inline uint64_t get_address(int x2, int b2, int d2)
{
    uint64_t r = d2;

    if (x2) {
        r += env->regs[x2];
    }
    if (b2) {
        r += env->regs[b2];
    }

    /* 31bitify the address if the psw is not 64-bit */
    if (!(env->psw.mask & PSW_MASK_64)) {
        r &= 0x7fffffffUL;
    }

    return r;
}

static inline uint64_t get_address_31fix(int reg)
{
    uint64_t r = env->regs[reg];

    /* 31bitify the address if the psw is not 64-bit */
    if (!(env->psw.mask & PSW_MASK_64)) {
        r &= 0x7fffffffUL;
    }

    return r;
}
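
/*
 * Illustrative sketch (not part of the original file): in 31-bit addressing
 * mode only the low 31 bits of an address are significant, which is what the
 * masking above implements.  The hypothetical function below shows the same
 * truncation for the three addressing modes; the enum names are made up for
 * the example.
 */
#if 0   /* example only */
enum example_amode { EXAMPLE_AMODE_24, EXAMPLE_AMODE_31, EXAMPLE_AMODE_64 };

static uint64_t example_wrap_address(uint64_t addr, enum example_amode mode)
{
    switch (mode) {
    case EXAMPLE_AMODE_24:
        return addr & 0x00ffffffULL;        /* 24-bit: low 3 bytes only */
    case EXAMPLE_AMODE_31:
        return addr & 0x7fffffffULL;        /* 31-bit: the top bit is ignored */
    default:
        return addr;                        /* 64-bit: used unchanged */
    }
}
#endif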
/* search string (c is byte to search, r2 is string, r1 end of string) */
uint32_t HELPER(srst)(uint32_t c, uint32_t r1, uint32_t r2)
{
    uint64_t i;
    uint32_t cc = 2;
    uint64_t str = get_address_31fix(r2);
    uint64_t end = get_address_31fix(r1);

    HELPER_LOG("%s: c %d *r1 0x%" PRIx64 " *r2 0x%" PRIx64 "\n", __FUNCTION__,
               c, env->regs[r1], env->regs[r2]);

    for (i = str; i != end; i++) {
        if (ldub(i) == c) {
            env->regs[r1] = i;
            cc = 1;
            break;
        }
    }

    return cc;
}

/* unsigned string compare (c is string terminator) */
uint32_t HELPER(clst)(uint32_t c, uint32_t r1, uint32_t r2)
{
    uint64_t s1 = get_address_31fix(r1);
    uint64_t s2 = get_address_31fix(r2);
    uint8_t v1, v2;
    uint32_t cc;

    c = c & 0xff;
#ifdef CONFIG_USER_ONLY
    if (!c) {
        HELPER_LOG("%s: comparing '%s' and '%s'\n",
                   __FUNCTION__, (char *)g2h(s1), (char *)g2h(s2));
    }
#endif
    for (;;) {
        v1 = ldub(s1);
        v2 = ldub(s2);
        if ((v1 == c || v2 == c) || (v1 != v2)) {
            break;
        }
        s1++;
        s2++;
    }

    if (v1 == v2) {
        cc = 0;
    } else {
        cc = (v1 < v2) ? 1 : 2;
        /* FIXME: 31-bit mode! */
        env->regs[r1] = s1;
        env->regs[r2] = s2;
    }

    return cc;
}

/* move page */
void HELPER(mvpg)(uint64_t r0, uint64_t r1, uint64_t r2)
{
    /* XXX missing r0 handling */
#ifdef CONFIG_USER_ONLY
    int i;

    for (i = 0; i < TARGET_PAGE_SIZE; i++) {
        stb(r1 + i, ldub(r2 + i));
    }
#else
    mvc_fast_memmove(env, TARGET_PAGE_SIZE, r1, r2);
#endif
}

/* string copy (c is string terminator) */
void HELPER(mvst)(uint32_t c, uint32_t r1, uint32_t r2)
{
    uint64_t dest = get_address_31fix(r1);
    uint64_t src = get_address_31fix(r2);
    uint8_t v;

    c = c & 0xff;
#ifdef CONFIG_USER_ONLY
    if (!c) {
        HELPER_LOG("%s: copy '%s' to 0x%lx\n", __FUNCTION__, (char *)g2h(src),
                   dest);
    }
#endif
    for (;;) {
        v = ldub(src);
        stb(dest, v);
        if (v == c) {
            break;
        }
        src++;
        dest++;
    }
    env->regs[r1] = dest; /* FIXME: 31-bit mode! */
}
/* compare and swap 64-bit */
uint32_t HELPER(csg)(uint32_t r1, uint64_t a2, uint32_t r3)
{
    /* FIXME: locking? */
    uint32_t cc;
    uint64_t v2 = ldq(a2);

    if (env->regs[r1] == v2) {
        cc = 0;
        stq(a2, env->regs[r3]);
    } else {
        cc = 1;
        env->regs[r1] = v2;
    }
    return cc;
}

/* compare double and swap 64-bit */
uint32_t HELPER(cdsg)(uint32_t r1, uint64_t a2, uint32_t r3)
{
    /* FIXME: locking? */
    uint32_t cc;
    uint64_t v2_hi = ldq(a2);
    uint64_t v2_lo = ldq(a2 + 8);
    uint64_t v1_hi = env->regs[r1];
    uint64_t v1_lo = env->regs[r1 + 1];

    if ((v1_hi == v2_hi) && (v1_lo == v2_lo)) {
        cc = 0;
        stq(a2, env->regs[r3]);
        stq(a2 + 8, env->regs[r3 + 1]);
    } else {
        cc = 1;
        env->regs[r1] = v2_hi;
        env->regs[r1 + 1] = v2_lo;
    }
    return cc;
}

/* compare and swap 32-bit */
uint32_t HELPER(cs)(uint32_t r1, uint64_t a2, uint32_t r3)
{
    /* FIXME: locking? */
    uint32_t cc;

    HELPER_LOG("%s: r1 %d a2 0x%lx r3 %d\n", __FUNCTION__, r1, a2, r3);
    uint32_t v2 = ldl(a2);
    if (((uint32_t)env->regs[r1]) == v2) {
        cc = 0;
        stl(a2, (uint32_t)env->regs[r3]);
    } else {
        cc = 1;
        env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | v2;
    }
    return cc;
}
static uint32_t helper_icm(uint32_t r1, uint64_t address, uint32_t mask)
{
    int pos = 24; /* top of the lower half of r1 */
    uint64_t rmask = 0xff000000ULL;
    uint8_t val = 0;
    int ccd = 0;
    uint32_t cc = 0;

    while (mask) {
        if (mask & 8) {
            env->regs[r1] &= ~rmask;
            val = ldub(address);
            if ((val & 0x80) && !ccd) {
                cc = 1;
            }
            ccd = 1;
            if (val && cc == 0) {
                cc = 2;
            }
            env->regs[r1] |= (uint64_t)val << pos;
            address++;
        }
        mask = (mask << 1) & 0xf;
        pos -= 8;
        rmask >>= 8;
    }

    return cc;
}
/* execute instruction
   this instruction executes an insn modified with the contents of r1
   it does not change the executed instruction in memory
   it does not change the program counter
   in other words: tricky...
   currently implemented by interpreting the cases it is most commonly used in
*/
uint32_t HELPER(ex)(uint32_t cc, uint64_t v1, uint64_t addr, uint64_t ret)
{
    uint16_t insn = lduw_code(addr);

    HELPER_LOG("%s: v1 0x%lx addr 0x%lx insn 0x%x\n", __FUNCTION__, v1, addr,
               insn);
    if ((insn & 0xf0ff) == 0xd000) {
        uint32_t l, insn2, b1, b2, d1, d2;

        l = v1 & 0xff;
        insn2 = ldl_code(addr + 2);
        b1 = (insn2 >> 28) & 0xf;
        b2 = (insn2 >> 12) & 0xf;
        d1 = (insn2 >> 16) & 0xfff;
        d2 = insn2 & 0xfff;
        switch (insn & 0xf00) {
        case 0x200:
            helper_mvc(l, get_address(0, b1, d1), get_address(0, b2, d2));
            break;
        case 0x500:
            cc = helper_clc(l, get_address(0, b1, d1), get_address(0, b2, d2));
            break;
        case 0x700:
            cc = helper_xc(l, get_address(0, b1, d1), get_address(0, b2, d2));
            break;
        case 0xc00:
            helper_tr(l, get_address(0, b1, d1), get_address(0, b2, d2));
            break;
        default:
            goto abort;
        }
    } else if ((insn & 0xff00) == 0x0a00) {
        /* supervisor call */
        HELPER_LOG("%s: svc %ld via execute\n", __FUNCTION__, (insn | v1) & 0xff);
        env->psw.addr = ret - 4;
        env->int_svc_code = (insn | v1) & 0xff;
        env->int_svc_ilc = 4;
        helper_exception(EXCP_SVC);
    } else if ((insn & 0xff00) == 0xbf00) {
        uint32_t insn2, r1, r3, b2, d2;

        insn2 = ldl_code(addr + 2);
        r1 = (insn2 >> 20) & 0xf;
        r3 = (insn2 >> 16) & 0xf;
        b2 = (insn2 >> 12) & 0xf;
        d2 = insn2 & 0xfff;
        cc = helper_icm(r1, get_address(0, b2, d2), r3);
    } else {
abort:
        cpu_abort(env, "EXECUTE on instruction prefix 0x%x not implemented\n",
                  insn);
    }
    return cc;
}
/* absolute value 32-bit */
uint32_t HELPER(abs_i32)(int32_t val)
{
    if (val < 0) {
        return -val;
    } else {
        return val;
    }
}

/* negative absolute value 32-bit */
int32_t HELPER(nabs_i32)(int32_t val)
{
    if (val < 0) {
        return val;
    } else {
        return -val;
    }
}

/* absolute value 64-bit */
uint64_t HELPER(abs_i64)(int64_t val)
{
    HELPER_LOG("%s: val 0x%" PRIx64 "\n", __FUNCTION__, val);

    if (val < 0) {
        return -val;
    } else {
        return val;
    }
}

/* negative absolute value 64-bit */
int64_t HELPER(nabs_i64)(int64_t val)
{
    if (val < 0) {
        return val;
    } else {
        return -val;
    }
}

/* add with carry 32-bit unsigned */
uint32_t HELPER(addc_u32)(uint32_t cc, uint32_t v1, uint32_t v2)
{
    uint32_t res;

    res = v1 + v2;
    if (cc & 2) {
        res++;
    }

    return res;
}

/* store character under mask high operates on the upper half of r1 */
void HELPER(stcmh)(uint32_t r1, uint64_t address, uint32_t mask)
{
    int pos = 56; /* top of the upper half of r1 */

    while (mask) {
        if (mask & 8) {
            stb(address, (env->regs[r1] >> pos) & 0xff);
            address++;
        }
        mask = (mask << 1) & 0xf;
        pos -= 8;
    }
}
/* insert character under mask high; same as icm, but operates on the
   upper half of r1 */
uint32_t HELPER(icmh)(uint32_t r1, uint64_t address, uint32_t mask)
{
    int pos = 56; /* top of the upper half of r1 */
    uint64_t rmask = 0xff00000000000000ULL;
    uint8_t val = 0;
    int ccd = 0;
    uint32_t cc = 0;

    while (mask) {
        if (mask & 8) {
            env->regs[r1] &= ~rmask;
            val = ldub(address);
            if ((val & 0x80) && !ccd) {
                cc = 1;
            }
            ccd = 1;
            if (val && cc == 0) {
                cc = 2;
            }
            env->regs[r1] |= (uint64_t)val << pos;
            address++;
        }
        mask = (mask << 1) & 0xf;
        pos -= 8;
        rmask >>= 8;
    }

    return cc;
}

/* insert psw mask and condition code into r1 */
void HELPER(ipm)(uint32_t cc, uint32_t r1)
{
    uint64_t r = env->regs[r1];

    r &= 0xffffffff00ffffffULL;
    r |= (cc << 28) | ((env->psw.mask >> 40) & 0xf);
    env->regs[r1] = r;
    HELPER_LOG("%s: cc %d psw.mask 0x%lx r1 0x%lx\n", __FUNCTION__,
               cc, env->psw.mask, r);
}
/* load access registers r1 to r3 from memory at a2 */
void HELPER(lam)(uint32_t r1, uint64_t a2, uint32_t r3)
{
    int i;

    for (i = r1;; i = (i + 1) % 16) {
        env->aregs[i] = ldl(a2);
        a2 += 4;

        if (i == r3) {
            break;
        }
    }
}

/* store access registers r1 to r3 in memory at a2 */
void HELPER(stam)(uint32_t r1, uint64_t a2, uint32_t r3)
{
    int i;

    for (i = r1;; i = (i + 1) % 16) {
        stl(a2, env->aregs[i]);
        a2 += 4;

        if (i == r3) {
            break;
        }
    }
}
/* move long */
uint32_t HELPER(mvcl)(uint32_t r1, uint32_t r2)
{
    uint64_t destlen = env->regs[r1 + 1] & 0xffffff;
    uint64_t dest = get_address_31fix(r1);
    uint64_t srclen = env->regs[r2 + 1] & 0xffffff;
    uint64_t src = get_address_31fix(r2);
    uint8_t pad = src >> 24;
    uint8_t v;
    uint32_t cc;

    if (destlen == srclen) {
        cc = 0;
    } else if (destlen < srclen) {
        cc = 1;
    } else {
        cc = 2;
    }

    if (srclen > destlen) {
        srclen = destlen;
    }

    for (; destlen && srclen; src++, dest++, destlen--, srclen--) {
        v = ldub(src);
        stb(dest, v);
    }

    for (; destlen; dest++, destlen--) {
        stb(dest, pad);
    }

    env->regs[r1 + 1] = destlen;
    /* can't use srclen here, we trunc'ed it */
    env->regs[r2 + 1] -= src - env->regs[r2];
    env->regs[r1] = dest;
    env->regs[r2] = src;

    return cc;
}

/* move long extended   another memcopy insn with more bells and whistles */
uint32_t HELPER(mvcle)(uint32_t r1, uint64_t a2, uint32_t r3)
{
    uint64_t destlen = env->regs[r1 + 1];
    uint64_t dest = env->regs[r1];
    uint64_t srclen = env->regs[r3 + 1];
    uint64_t src = env->regs[r3];
    uint8_t pad = a2 & 0xff;
    uint8_t v;
    uint32_t cc;

    if (!(env->psw.mask & PSW_MASK_64)) {
        destlen = (uint32_t)destlen;
        srclen = (uint32_t)srclen;
        dest &= 0x7fffffff;
        src &= 0x7fffffff;
    }

    if (destlen == srclen) {
        cc = 0;
    } else if (destlen < srclen) {
        cc = 1;
    } else {
        cc = 2;
    }

    if (srclen > destlen) {
        srclen = destlen;
    }

    for (; destlen && srclen; src++, dest++, destlen--, srclen--) {
        v = ldub(src);
        stb(dest, v);
    }

    for (; destlen; dest++, destlen--) {
        stb(dest, pad);
    }

    env->regs[r1 + 1] = destlen;
    /* can't use srclen here, we trunc'ed it */
    /* FIXME: 31-bit mode! */
    env->regs[r3 + 1] -= src - env->regs[r3];
    env->regs[r1] = dest;
    env->regs[r3] = src;

    return cc;
}
/* compare logical long extended   memcompare insn with padding */
uint32_t HELPER(clcle)(uint32_t r1, uint64_t a2, uint32_t r3)
{
    uint64_t destlen = env->regs[r1 + 1];
    uint64_t dest = get_address_31fix(r1);
    uint64_t srclen = env->regs[r3 + 1];
    uint64_t src = get_address_31fix(r3);
    uint8_t pad = a2 & 0xff;
    uint8_t v1 = 0, v2 = 0;
    uint32_t cc = 0;

    if (!(destlen || srclen)) {
        return cc;
    }

    if (srclen > destlen) {
        srclen = destlen;
    }

    for (; destlen || srclen; src++, dest++, destlen--, srclen--) {
        v1 = srclen ? ldub(src) : pad;
        v2 = destlen ? ldub(dest) : pad;
        if (v1 != v2) {
            cc = (v1 < v2) ? 1 : 2;
            break;
        }
    }

    env->regs[r1 + 1] = destlen;
    /* can't use srclen here, we trunc'ed it */
    env->regs[r3 + 1] -= src - env->regs[r3];
    env->regs[r1] = dest;
    env->regs[r3] = src;

    return cc;
}
/* subtract unsigned v2 from v1 with borrow */
uint32_t HELPER(slb)(uint32_t cc, uint32_t r1, uint32_t v2)
{
    uint32_t v1 = env->regs[r1];
    uint32_t res = v1 + (~v2) + (cc >> 1);

    env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | res;
    if (cc & 2) {
        /* borrow */
        return v1 ? 1 : 0;
    } else {
        return v1 ? 3 : 2;
    }
}

/* subtract unsigned v2 from v1 with borrow */
uint32_t HELPER(slbg)(uint32_t cc, uint32_t r1, uint64_t v1, uint64_t v2)
{
    uint64_t res = v1 + (~v2) + (cc >> 1);

    env->regs[r1] = res;
    if (cc & 2) {
        /* borrow */
        return v1 ? 1 : 0;
    } else {
        return v1 ? 3 : 2;
    }
}
static inline int float_comp_to_cc(int float_compare)
{
    switch (float_compare) {
    case float_relation_equal:
        return 0;
    case float_relation_less:
        return 1;
    case float_relation_greater:
        return 2;
    case float_relation_unordered:
        return 3;
    default:
        cpu_abort(env, "unknown return value for float compare\n");
    }
}

/* condition codes for binary FP ops */
static uint32_t set_cc_f32(float32 v1, float32 v2)
{
    return float_comp_to_cc(float32_compare_quiet(v1, v2, &env->fpu_status));
}

static uint32_t set_cc_f64(float64 v1, float64 v2)
{
    return float_comp_to_cc(float64_compare_quiet(v1, v2, &env->fpu_status));
}

/* condition codes for unary FP ops */
static uint32_t set_cc_nz_f32(float32 v)
{
    if (float32_is_any_nan(v)) {
        return 3;
    } else if (float32_is_zero(v)) {
        return 0;
    } else if (float32_is_neg(v)) {
        return 1;
    } else {
        return 2;
    }
}

static uint32_t set_cc_nz_f64(float64 v)
{
    if (float64_is_any_nan(v)) {
        return 3;
    } else if (float64_is_zero(v)) {
        return 0;
    } else if (float64_is_neg(v)) {
        return 1;
    } else {
        return 2;
    }
}

static uint32_t set_cc_nz_f128(float128 v)
{
    if (float128_is_any_nan(v)) {
        return 3;
    } else if (float128_is_zero(v)) {
        return 0;
    } else if (float128_is_neg(v)) {
        return 1;
    } else {
        return 2;
    }
}
/* convert 32-bit int to 64-bit float */
void HELPER(cdfbr)(uint32_t f1, int32_t v2)
{
    HELPER_LOG("%s: converting %d to f%d\n", __FUNCTION__, v2, f1);
    env->fregs[f1].d = int32_to_float64(v2, &env->fpu_status);
}

/* convert 32-bit int to 128-bit float */
void HELPER(cxfbr)(uint32_t f1, int32_t v2)
{
    CPU_QuadU v1;

    v1.q = int32_to_float128(v2, &env->fpu_status);
    env->fregs[f1].ll = v1.ll.upper;
    env->fregs[f1 + 2].ll = v1.ll.lower;
}

/* convert 64-bit int to 32-bit float */
void HELPER(cegbr)(uint32_t f1, int64_t v2)
{
    HELPER_LOG("%s: converting %ld to f%d\n", __FUNCTION__, v2, f1);
    env->fregs[f1].l.upper = int64_to_float32(v2, &env->fpu_status);
}

/* convert 64-bit int to 64-bit float */
void HELPER(cdgbr)(uint32_t f1, int64_t v2)
{
    HELPER_LOG("%s: converting %ld to f%d\n", __FUNCTION__, v2, f1);
    env->fregs[f1].d = int64_to_float64(v2, &env->fpu_status);
}

/* convert 64-bit int to 128-bit float */
void HELPER(cxgbr)(uint32_t f1, int64_t v2)
{
    CPU_QuadU x1;

    x1.q = int64_to_float128(v2, &env->fpu_status);
    HELPER_LOG("%s: converted %ld to 0x%lx and 0x%lx\n", __FUNCTION__, v2,
               x1.ll.upper, x1.ll.lower);
    env->fregs[f1].ll = x1.ll.upper;
    env->fregs[f1 + 2].ll = x1.ll.lower;
}

/* convert 32-bit int to 32-bit float */
void HELPER(cefbr)(uint32_t f1, int32_t v2)
{
    env->fregs[f1].l.upper = int32_to_float32(v2, &env->fpu_status);
    HELPER_LOG("%s: converting %d to 0x%d in f%d\n", __FUNCTION__, v2,
               env->fregs[f1].l.upper, f1);
}
/* 32-bit FP addition RR */
uint32_t HELPER(aebr)(uint32_t f1, uint32_t f2)
{
    env->fregs[f1].l.upper = float32_add(env->fregs[f1].l.upper,
                                         env->fregs[f2].l.upper,
                                         &env->fpu_status);
    HELPER_LOG("%s: adding 0x%d resulting in 0x%d in f%d\n", __FUNCTION__,
               env->fregs[f2].l.upper, env->fregs[f1].l.upper, f1);

    return set_cc_nz_f32(env->fregs[f1].l.upper);
}

/* 64-bit FP addition RR */
uint32_t HELPER(adbr)(uint32_t f1, uint32_t f2)
{
    env->fregs[f1].d = float64_add(env->fregs[f1].d, env->fregs[f2].d,
                                   &env->fpu_status);
    HELPER_LOG("%s: adding 0x%ld resulting in 0x%ld in f%d\n", __FUNCTION__,
               env->fregs[f2].d, env->fregs[f1].d, f1);

    return set_cc_nz_f64(env->fregs[f1].d);
}

/* 32-bit FP subtraction RR */
uint32_t HELPER(sebr)(uint32_t f1, uint32_t f2)
{
    env->fregs[f1].l.upper = float32_sub(env->fregs[f1].l.upper,
                                         env->fregs[f2].l.upper,
                                         &env->fpu_status);
    HELPER_LOG("%s: subtracting 0x%d resulting in 0x%d in f%d\n", __FUNCTION__,
               env->fregs[f2].l.upper, env->fregs[f1].l.upper, f1);

    return set_cc_nz_f32(env->fregs[f1].l.upper);
}

/* 64-bit FP subtraction RR */
uint32_t HELPER(sdbr)(uint32_t f1, uint32_t f2)
{
    env->fregs[f1].d = float64_sub(env->fregs[f1].d, env->fregs[f2].d,
                                   &env->fpu_status);
    HELPER_LOG("%s: subtracting 0x%ld resulting in 0x%ld in f%d\n",
               __FUNCTION__, env->fregs[f2].d, env->fregs[f1].d, f1);

    return set_cc_nz_f64(env->fregs[f1].d);
}
/* 32-bit FP division RR */
void HELPER(debr)(uint32_t f1, uint32_t f2)
{
    env->fregs[f1].l.upper = float32_div(env->fregs[f1].l.upper,
                                         env->fregs[f2].l.upper,
                                         &env->fpu_status);
}

/* 128-bit FP division RR */
void HELPER(dxbr)(uint32_t f1, uint32_t f2)
{
    CPU_QuadU v1;
    CPU_QuadU v2;
    CPU_QuadU res;

    v1.ll.upper = env->fregs[f1].ll;
    v1.ll.lower = env->fregs[f1 + 2].ll;
    v2.ll.upper = env->fregs[f2].ll;
    v2.ll.lower = env->fregs[f2 + 2].ll;
    res.q = float128_div(v1.q, v2.q, &env->fpu_status);
    env->fregs[f1].ll = res.ll.upper;
    env->fregs[f1 + 2].ll = res.ll.lower;
}

/* 64-bit FP multiplication RR */
void HELPER(mdbr)(uint32_t f1, uint32_t f2)
{
    env->fregs[f1].d = float64_mul(env->fregs[f1].d, env->fregs[f2].d,
                                   &env->fpu_status);
}

/* 128-bit FP multiplication RR */
void HELPER(mxbr)(uint32_t f1, uint32_t f2)
{
    CPU_QuadU v1;
    CPU_QuadU v2;
    CPU_QuadU res;

    v1.ll.upper = env->fregs[f1].ll;
    v1.ll.lower = env->fregs[f1 + 2].ll;
    v2.ll.upper = env->fregs[f2].ll;
    v2.ll.lower = env->fregs[f2 + 2].ll;
    res.q = float128_mul(v1.q, v2.q, &env->fpu_status);
    env->fregs[f1].ll = res.ll.upper;
    env->fregs[f1 + 2].ll = res.ll.lower;
}
/* convert 32-bit float to 64-bit float */
void HELPER(ldebr)(uint32_t r1, uint32_t r2)
{
    env->fregs[r1].d = float32_to_float64(env->fregs[r2].l.upper,
                                          &env->fpu_status);
}

/* convert 128-bit float to 64-bit float */
void HELPER(ldxbr)(uint32_t f1, uint32_t f2)
{
    CPU_QuadU x2;

    x2.ll.upper = env->fregs[f2].ll;
    x2.ll.lower = env->fregs[f2 + 2].ll;
    env->fregs[f1].d = float128_to_float64(x2.q, &env->fpu_status);
    HELPER_LOG("%s: to 0x%ld\n", __FUNCTION__, env->fregs[f1].d);
}

/* convert 64-bit float to 128-bit float */
void HELPER(lxdbr)(uint32_t f1, uint32_t f2)
{
    CPU_QuadU res;

    res.q = float64_to_float128(env->fregs[f2].d, &env->fpu_status);
    env->fregs[f1].ll = res.ll.upper;
    env->fregs[f1 + 2].ll = res.ll.lower;
}

/* convert 64-bit float to 32-bit float */
void HELPER(ledbr)(uint32_t f1, uint32_t f2)
{
    float64 d2 = env->fregs[f2].d;

    env->fregs[f1].l.upper = float64_to_float32(d2, &env->fpu_status);
}

/* convert 128-bit float to 32-bit float */
void HELPER(lexbr)(uint32_t f1, uint32_t f2)
{
    CPU_QuadU x2;

    x2.ll.upper = env->fregs[f2].ll;
    x2.ll.lower = env->fregs[f2 + 2].ll;
    env->fregs[f1].l.upper = float128_to_float32(x2.q, &env->fpu_status);
    HELPER_LOG("%s: to 0x%d\n", __FUNCTION__, env->fregs[f1].l.upper);
}
/* absolute value of 32-bit float */
uint32_t HELPER(lpebr)(uint32_t f1, uint32_t f2)
{
    float32 v1;
    float32 v2 = env->fregs[f2].d;

    v1 = float32_abs(v2);
    env->fregs[f1].d = v1;
    return set_cc_nz_f32(v1);
}

/* absolute value of 64-bit float */
uint32_t HELPER(lpdbr)(uint32_t f1, uint32_t f2)
{
    float64 v1;
    float64 v2 = env->fregs[f2].d;

    v1 = float64_abs(v2);
    env->fregs[f1].d = v1;
    return set_cc_nz_f64(v1);
}

/* absolute value of 128-bit float */
uint32_t HELPER(lpxbr)(uint32_t f1, uint32_t f2)
{
    CPU_QuadU v1;
    CPU_QuadU v2;

    v2.ll.upper = env->fregs[f2].ll;
    v2.ll.lower = env->fregs[f2 + 2].ll;
    v1.q = float128_abs(v2.q);
    env->fregs[f1].ll = v1.ll.upper;
    env->fregs[f1 + 2].ll = v1.ll.lower;
    return set_cc_nz_f128(v1.q);
}

/* load and test 64-bit float */
uint32_t HELPER(ltdbr)(uint32_t f1, uint32_t f2)
{
    env->fregs[f1].d = env->fregs[f2].d;
    return set_cc_nz_f64(env->fregs[f1].d);
}

/* load and test 32-bit float */
uint32_t HELPER(ltebr)(uint32_t f1, uint32_t f2)
{
    env->fregs[f1].l.upper = env->fregs[f2].l.upper;
    return set_cc_nz_f32(env->fregs[f1].l.upper);
}

/* load and test 128-bit float */
uint32_t HELPER(ltxbr)(uint32_t f1, uint32_t f2)
{
    CPU_QuadU x;

    x.ll.upper = env->fregs[f2].ll;
    x.ll.lower = env->fregs[f2 + 2].ll;
    env->fregs[f1].ll = x.ll.upper;
    env->fregs[f1 + 2].ll = x.ll.lower;
    return set_cc_nz_f128(x.q);
}
/* load complement of 32-bit float */
uint32_t HELPER(lcebr)(uint32_t f1, uint32_t f2)
{
    env->fregs[f1].l.upper = float32_chs(env->fregs[f2].l.upper);

    return set_cc_nz_f32(env->fregs[f1].l.upper);
}

/* load complement of 64-bit float */
uint32_t HELPER(lcdbr)(uint32_t f1, uint32_t f2)
{
    env->fregs[f1].d = float64_chs(env->fregs[f2].d);

    return set_cc_nz_f64(env->fregs[f1].d);
}

/* load complement of 128-bit float */
uint32_t HELPER(lcxbr)(uint32_t f1, uint32_t f2)
{
    CPU_QuadU x1, x2;

    x2.ll.upper = env->fregs[f2].ll;
    x2.ll.lower = env->fregs[f2 + 2].ll;
    x1.q = float128_chs(x2.q);
    env->fregs[f1].ll = x1.ll.upper;
    env->fregs[f1 + 2].ll = x1.ll.lower;
    return set_cc_nz_f128(x1.q);
}
/* 32-bit FP addition RM */
void HELPER(aeb)(uint32_t f1, uint32_t val)
{
    float32 v1 = env->fregs[f1].l.upper;
    CPU_FloatU v2;

    v2.l = val;
    HELPER_LOG("%s: adding 0x%d from f%d and 0x%d\n", __FUNCTION__,
               v1, f1, v2.f);
    env->fregs[f1].l.upper = float32_add(v1, v2.f, &env->fpu_status);
}

/* 32-bit FP division RM */
void HELPER(deb)(uint32_t f1, uint32_t val)
{
    float32 v1 = env->fregs[f1].l.upper;
    CPU_FloatU v2;

    v2.l = val;
    HELPER_LOG("%s: dividing 0x%d from f%d by 0x%d\n", __FUNCTION__,
               v1, f1, v2.f);
    env->fregs[f1].l.upper = float32_div(v1, v2.f, &env->fpu_status);
}

/* 32-bit FP multiplication RM */
void HELPER(meeb)(uint32_t f1, uint32_t val)
{
    float32 v1 = env->fregs[f1].l.upper;
    CPU_FloatU v2;

    v2.l = val;
    HELPER_LOG("%s: multiplying 0x%d from f%d and 0x%d\n", __FUNCTION__,
               v1, f1, v2.f);
    env->fregs[f1].l.upper = float32_mul(v1, v2.f, &env->fpu_status);
}
/* 32-bit FP compare RR */
uint32_t HELPER(cebr)(uint32_t f1, uint32_t f2)
{
    float32 v1 = env->fregs[f1].l.upper;
    float32 v2 = env->fregs[f2].l.upper;

    HELPER_LOG("%s: comparing 0x%d from f%d and 0x%d\n", __FUNCTION__,
               v1, f1, v2);
    return set_cc_f32(v1, v2);
}

/* 64-bit FP compare RR */
uint32_t HELPER(cdbr)(uint32_t f1, uint32_t f2)
{
    float64 v1 = env->fregs[f1].d;
    float64 v2 = env->fregs[f2].d;

    HELPER_LOG("%s: comparing 0x%ld from f%d and 0x%ld\n", __FUNCTION__,
               v1, f1, v2);
    return set_cc_f64(v1, v2);
}

/* 128-bit FP compare RR */
uint32_t HELPER(cxbr)(uint32_t f1, uint32_t f2)
{
    CPU_QuadU v1;
    CPU_QuadU v2;

    v1.ll.upper = env->fregs[f1].ll;
    v1.ll.lower = env->fregs[f1 + 2].ll;
    v2.ll.upper = env->fregs[f2].ll;
    v2.ll.lower = env->fregs[f2 + 2].ll;

    return float_comp_to_cc(float128_compare_quiet(v1.q, v2.q,
                                                   &env->fpu_status));
}

/* 64-bit FP compare RM */
uint32_t HELPER(cdb)(uint32_t f1, uint64_t a2)
{
    float64 v1 = env->fregs[f1].d;
    CPU_DoubleU v2;

    v2.ll = ldq(a2);
    HELPER_LOG("%s: comparing 0x%ld from f%d and 0x%lx\n", __FUNCTION__, v1,
               f1, v2.d);
    return set_cc_f64(v1, v2.d);
}
/* 64-bit FP addition RM */
uint32_t HELPER(adb)(uint32_t f1, uint64_t a2)
{
    float64 v1 = env->fregs[f1].d;
    CPU_DoubleU v2;

    v2.ll = ldq(a2);
    HELPER_LOG("%s: adding 0x%lx from f%d and 0x%lx\n", __FUNCTION__,
               v1, f1, v2.d);
    env->fregs[f1].d = v1 = float64_add(v1, v2.d, &env->fpu_status);
    return set_cc_nz_f64(v1);
}

/* 32-bit FP subtraction RM */
void HELPER(seb)(uint32_t f1, uint32_t val)
{
    float32 v1 = env->fregs[f1].l.upper;
    CPU_FloatU v2;

    v2.l = val;
    env->fregs[f1].l.upper = float32_sub(v1, v2.f, &env->fpu_status);
}

/* 64-bit FP subtraction RM */
uint32_t HELPER(sdb)(uint32_t f1, uint64_t a2)
{
    float64 v1 = env->fregs[f1].d;
    CPU_DoubleU v2;

    v2.ll = ldq(a2);
    env->fregs[f1].d = v1 = float64_sub(v1, v2.d, &env->fpu_status);
    return set_cc_nz_f64(v1);
}

/* 64-bit FP multiplication RM */
void HELPER(mdb)(uint32_t f1, uint64_t a2)
{
    float64 v1 = env->fregs[f1].d;
    CPU_DoubleU v2;

    v2.ll = ldq(a2);
    HELPER_LOG("%s: multiplying 0x%lx from f%d and 0x%ld\n", __FUNCTION__,
               v1, f1, v2.d);
    env->fregs[f1].d = float64_mul(v1, v2.d, &env->fpu_status);
}

/* 64-bit FP division RM */
void HELPER(ddb)(uint32_t f1, uint64_t a2)
{
    float64 v1 = env->fregs[f1].d;
    CPU_DoubleU v2;

    v2.ll = ldq(a2);
    HELPER_LOG("%s: dividing 0x%lx from f%d by 0x%ld\n", __FUNCTION__,
               v1, f1, v2.d);
    env->fregs[f1].d = float64_div(v1, v2.d, &env->fpu_status);
}
static void set_round_mode(int m3)
{
    switch (m3) {
    case 0:
        /* current mode */
        break;
    case 1:
        /* biased round no nearest */
    case 4:
        /* round to nearest */
        set_float_rounding_mode(float_round_nearest_even, &env->fpu_status);
        break;
    case 5:
        /* round to zero */
        set_float_rounding_mode(float_round_to_zero, &env->fpu_status);
        break;
    case 6:
        /* round to +inf */
        set_float_rounding_mode(float_round_up, &env->fpu_status);
        break;
    case 7:
        /* round to -inf */
        set_float_rounding_mode(float_round_down, &env->fpu_status);
        break;
    }
}
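
/*
 * Illustrative sketch (not part of the original file): the m3 field of the
 * convert-to-fixed instructions below selects the rounding mode installed by
 * set_round_mode() above.  The snippet is only a usage example with a
 * made-up value.
 */
#if 0   /* example only */
static void example_rounding(void)
{
    set_round_mode(5);   /* m3 == 5: round toward zero */
    /* With this mode a conversion of 2.5 yields 2, whereas m3 == 4
       (round to nearest, ties to even) also yields 2 and m3 == 6
       (round toward +inf) yields 3. */
}
#endif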
/* convert 32-bit float to 64-bit int */
uint32_t HELPER(cgebr)(uint32_t r1, uint32_t f2, uint32_t m3)
{
    float32 v2 = env->fregs[f2].l.upper;

    set_round_mode(m3);
    env->regs[r1] = float32_to_int64(v2, &env->fpu_status);
    return set_cc_nz_f32(v2);
}

/* convert 64-bit float to 64-bit int */
uint32_t HELPER(cgdbr)(uint32_t r1, uint32_t f2, uint32_t m3)
{
    float64 v2 = env->fregs[f2].d;

    set_round_mode(m3);
    env->regs[r1] = float64_to_int64(v2, &env->fpu_status);
    return set_cc_nz_f64(v2);
}

/* convert 128-bit float to 64-bit int */
uint32_t HELPER(cgxbr)(uint32_t r1, uint32_t f2, uint32_t m3)
{
    CPU_QuadU v2;

    v2.ll.upper = env->fregs[f2].ll;
    v2.ll.lower = env->fregs[f2 + 2].ll;
    set_round_mode(m3);
    env->regs[r1] = float128_to_int64(v2.q, &env->fpu_status);
    if (float128_is_any_nan(v2.q)) {
        return 3;
    } else if (float128_is_zero(v2.q)) {
        return 0;
    } else if (float128_is_neg(v2.q)) {
        return 1;
    } else {
        return 2;
    }
}

/* convert 32-bit float to 32-bit int */
uint32_t HELPER(cfebr)(uint32_t r1, uint32_t f2, uint32_t m3)
{
    float32 v2 = env->fregs[f2].l.upper;

    set_round_mode(m3);
    env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) |
                    float32_to_int32(v2, &env->fpu_status);
    return set_cc_nz_f32(v2);
}

/* convert 64-bit float to 32-bit int */
uint32_t HELPER(cfdbr)(uint32_t r1, uint32_t f2, uint32_t m3)
{
    float64 v2 = env->fregs[f2].d;

    set_round_mode(m3);
    env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) |
                    float64_to_int32(v2, &env->fpu_status);
    return set_cc_nz_f64(v2);
}

/* convert 128-bit float to 32-bit int */
uint32_t HELPER(cfxbr)(uint32_t r1, uint32_t f2, uint32_t m3)
{
    CPU_QuadU v2;

    v2.ll.upper = env->fregs[f2].ll;
    v2.ll.lower = env->fregs[f2 + 2].ll;
    set_round_mode(m3);
    env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) |
                    float128_to_int32(v2.q, &env->fpu_status);
    return set_cc_nz_f128(v2.q);
}
/* load 32-bit FP zero */
void HELPER(lzer)(uint32_t f1)
{
    env->fregs[f1].l.upper = float32_zero;
}

/* load 64-bit FP zero */
void HELPER(lzdr)(uint32_t f1)
{
    env->fregs[f1].d = float64_zero;
}

/* load 128-bit FP zero */
void HELPER(lzxr)(uint32_t f1)
{
    CPU_QuadU x;

    x.q = float64_to_float128(float64_zero, &env->fpu_status);
    env->fregs[f1].ll = x.ll.upper;
    env->fregs[f1 + 1].ll = x.ll.lower;
}
/* 128-bit FP subtraction RR */
uint32_t HELPER(sxbr)(uint32_t f1, uint32_t f2)
{
    CPU_QuadU v1;
    CPU_QuadU v2;
    CPU_QuadU res;

    v1.ll.upper = env->fregs[f1].ll;
    v1.ll.lower = env->fregs[f1 + 2].ll;
    v2.ll.upper = env->fregs[f2].ll;
    v2.ll.lower = env->fregs[f2 + 2].ll;
    res.q = float128_sub(v1.q, v2.q, &env->fpu_status);
    env->fregs[f1].ll = res.ll.upper;
    env->fregs[f1 + 2].ll = res.ll.lower;
    return set_cc_nz_f128(res.q);
}

/* 128-bit FP addition RR */
uint32_t HELPER(axbr)(uint32_t f1, uint32_t f2)
{
    CPU_QuadU v1;
    CPU_QuadU v2;
    CPU_QuadU res;

    v1.ll.upper = env->fregs[f1].ll;
    v1.ll.lower = env->fregs[f1 + 2].ll;
    v2.ll.upper = env->fregs[f2].ll;
    v2.ll.lower = env->fregs[f2 + 2].ll;
    res.q = float128_add(v1.q, v2.q, &env->fpu_status);
    env->fregs[f1].ll = res.ll.upper;
    env->fregs[f1 + 2].ll = res.ll.lower;
    return set_cc_nz_f128(res.q);
}

/* 32-bit FP multiplication RR */
void HELPER(meebr)(uint32_t f1, uint32_t f2)
{
    env->fregs[f1].l.upper = float32_mul(env->fregs[f1].l.upper,
                                         env->fregs[f2].l.upper,
                                         &env->fpu_status);
}

/* 64-bit FP division RR */
void HELPER(ddbr)(uint32_t f1, uint32_t f2)
{
    env->fregs[f1].d = float64_div(env->fregs[f1].d, env->fregs[f2].d,
                                   &env->fpu_status);
}
/* 64-bit FP multiply and add RM */
void HELPER(madb)(uint32_t f1, uint64_t a2, uint32_t f3)
{
    CPU_DoubleU v2;

    HELPER_LOG("%s: f1 %d a2 0x%lx f3 %d\n", __FUNCTION__, f1, a2, f3);
    v2.ll = ldq(a2);
    env->fregs[f1].d = float64_add(env->fregs[f1].d,
                                   float64_mul(v2.d, env->fregs[f3].d,
                                               &env->fpu_status),
                                   &env->fpu_status);
}

/* 64-bit FP multiply and add RR */
void HELPER(madbr)(uint32_t f1, uint32_t f3, uint32_t f2)
{
    HELPER_LOG("%s: f1 %d f2 %d f3 %d\n", __FUNCTION__, f1, f2, f3);
    env->fregs[f1].d = float64_add(float64_mul(env->fregs[f2].d,
                                               env->fregs[f3].d,
                                               &env->fpu_status),
                                   env->fregs[f1].d, &env->fpu_status);
}

/* 64-bit FP multiply and subtract RR */
void HELPER(msdbr)(uint32_t f1, uint32_t f3, uint32_t f2)
{
    HELPER_LOG("%s: f1 %d f2 %d f3 %d\n", __FUNCTION__, f1, f2, f3);
    env->fregs[f1].d = float64_sub(float64_mul(env->fregs[f2].d,
                                               env->fregs[f3].d,
                                               &env->fpu_status),
                                   env->fregs[f1].d, &env->fpu_status);
}

/* 32-bit FP multiply and add RR */
void HELPER(maebr)(uint32_t f1, uint32_t f3, uint32_t f2)
{
    env->fregs[f1].l.upper = float32_add(env->fregs[f1].l.upper,
                                         float32_mul(env->fregs[f2].l.upper,
                                                     env->fregs[f3].l.upper,
                                                     &env->fpu_status),
                                         &env->fpu_status);
}
/* convert 32-bit float to 64-bit float */
void HELPER(ldeb)(uint32_t f1, uint64_t a2)
{
    uint32_t v2;

    v2 = ldl(a2);
    env->fregs[f1].d = float32_to_float64(v2,
                                          &env->fpu_status);
}

/* convert 64-bit float to 128-bit float */
void HELPER(lxdb)(uint32_t f1, uint64_t a2)
{
    CPU_DoubleU v2;
    CPU_QuadU v1;

    v2.ll = ldq(a2);
    v1.q = float64_to_float128(v2.d, &env->fpu_status);
    env->fregs[f1].ll = v1.ll.upper;
    env->fregs[f1 + 2].ll = v1.ll.lower;
}
/* test data class 32-bit */
uint32_t HELPER(tceb)(uint32_t f1, uint64_t m2)
{
    float32 v1 = env->fregs[f1].l.upper;
    int neg = float32_is_neg(v1);
    uint32_t cc = 0;

    HELPER_LOG("%s: v1 0x%lx m2 0x%lx neg %d\n", __FUNCTION__, (long)v1, m2, neg);
    if ((float32_is_zero(v1) && (m2 & (1 << (11-neg)))) ||
        (float32_is_infinity(v1) && (m2 & (1 << (5-neg)))) ||
        (float32_is_any_nan(v1) && (m2 & (1 << (3-neg)))) ||
        (float32_is_signaling_nan(v1) && (m2 & (1 << (1-neg))))) {
        cc = 1;
    } else if (m2 & (1 << (9-neg))) {
        /* assume normalized number */
        cc = 1;
    }

    /* FIXME: denormalized? */
    return cc;
}

/* test data class 64-bit */
uint32_t HELPER(tcdb)(uint32_t f1, uint64_t m2)
{
    float64 v1 = env->fregs[f1].d;
    int neg = float64_is_neg(v1);
    uint32_t cc = 0;

    HELPER_LOG("%s: v1 0x%lx m2 0x%lx neg %d\n", __FUNCTION__, v1, m2, neg);
    if ((float64_is_zero(v1) && (m2 & (1 << (11-neg)))) ||
        (float64_is_infinity(v1) && (m2 & (1 << (5-neg)))) ||
        (float64_is_any_nan(v1) && (m2 & (1 << (3-neg)))) ||
        (float64_is_signaling_nan(v1) && (m2 & (1 << (1-neg))))) {
        cc = 1;
    } else if (m2 & (1 << (9-neg))) {
        /* assume normalized number */
        cc = 1;
    }

    /* FIXME: denormalized? */
    return cc;
}

/* test data class 128-bit */
uint32_t HELPER(tcxb)(uint32_t f1, uint64_t m2)
{
    CPU_QuadU v1;
    uint32_t cc = 0;
    int neg;

    v1.ll.upper = env->fregs[f1].ll;
    v1.ll.lower = env->fregs[f1 + 2].ll;

    neg = float128_is_neg(v1.q);
    if ((float128_is_zero(v1.q) && (m2 & (1 << (11-neg)))) ||
        (float128_is_infinity(v1.q) && (m2 & (1 << (5-neg)))) ||
        (float128_is_any_nan(v1.q) && (m2 & (1 << (3-neg)))) ||
        (float128_is_signaling_nan(v1.q) && (m2 & (1 << (1-neg))))) {
        cc = 1;
    } else if (m2 & (1 << (9-neg))) {
        /* assume normalized number */
        cc = 1;
    }

    /* FIXME: denormalized? */
    return cc;
}
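
/*
 * Illustrative sketch (not part of the original file): TEST DATA CLASS sets
 * cc 1 when the operand's class matches a bit of the 12-bit mask m2, with
 * adjacent bit pairs selecting the +/- variant of each class; the "- neg"
 * adjustments above pick the odd bit of a pair for negative values.  The
 * comment below lists the bit positions the code actually tests (denormals
 * are not handled here, as the FIXME notes), with a tiny usage example.
 */
#if 0   /* example only */
/* bit 11: +zero       bit 10: -zero
   bit  9: +normal     bit  8: -normal
   bit  5: +infinity   bit  4: -infinity
   bit  3: +quiet NaN  bit  2: -quiet NaN
   bit  1: +sNaN       bit  0: -sNaN */
static int example_tdc_matches_minus_zero(uint32_t m2)
{
    int neg = 1;                          /* operand is -0.0 */
    return !!(m2 & (1 << (11 - neg)));    /* i.e. tests bit 10 */
}
#endif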
/* find leftmost one */
uint32_t HELPER(flogr)(uint32_t r1, uint64_t v2)
{
    uint64_t res = 0;
    uint64_t ov2 = v2;

    while (!(v2 & 0x8000000000000000ULL) && v2) {
        v2 <<= 1;
        res++;
    }

    if (!v2) {
        env->regs[r1] = 64;
        env->regs[r1 + 1] = 0;
        return 0;
    } else {
        env->regs[r1] = res;
        env->regs[r1 + 1] = ov2 & ~(0x8000000000000000ULL >> res);
        return 2;
    }
}
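
/*
 * Illustrative sketch (not part of the original file): FLOGR returns the bit
 * position of the leftmost one (counted from the left, the MSB being
 * position 0) in r1, and the input with that bit cleared in r1+1.  A worked
 * example with an arbitrary input value:
 */
#if 0   /* example only */
static void example_flogr(void)
{
    uint64_t v = 0x0000080000000001ULL;  /* leftmost one is bit 20 from the MSB */

    /* The loop above shifts left 20 times before seeing the MSB set, so r1
       would receive 20 and r1+1 would receive
       0x0000080000000001 & ~(0x8000000000000000 >> 20) == 0x0000000000000001,
       with cc 2 (result not zero). */
    (void)v;
}
#endif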
/* square root 64-bit RR */
void HELPER(sqdbr)(uint32_t f1, uint32_t f2)
{
    env->fregs[f1].d = float64_sqrt(env->fregs[f2].d, &env->fpu_status);
}
void HELPER(cksm)(uint32_t r1, uint32_t r2)
{
    uint64_t src = get_address_31fix(r2);
    uint64_t src_len = env->regs[(r2 + 1) & 15];
    uint64_t cksm = (uint32_t)env->regs[r1];

    while (src_len >= 4) {
        cksm += ldl(src);

        /* move to next word */
        src_len -= 4;
        src += 4;
    }

    switch (src_len) {
    case 0:
        break;
    case 1:
        cksm += ldub(src) << 24;
        break;
    case 2:
        cksm += lduw(src) << 16;
        break;
    case 3:
        cksm += lduw(src) << 16;
        cksm += ldub(src + 2) << 8;
        break;
    }

    /* indicate we've processed everything */
    env->regs[r2] = src + src_len;
    env->regs[(r2 + 1) & 15] = 0;

    /* store result */
    env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) |
                    ((uint32_t)cksm + (cksm >> 32));
}
static inline uint32_t cc_calc_ltgt_32(CPUS390XState *env, int32_t src,
                                       int32_t dst)
{
    if (src == dst) {
        return 0;
    } else if (src < dst) {
        return 1;
    } else {
        return 2;
    }
}

static inline uint32_t cc_calc_ltgt0_32(CPUS390XState *env, int32_t dst)
{
    return cc_calc_ltgt_32(env, dst, 0);
}

static inline uint32_t cc_calc_ltgt_64(CPUS390XState *env, int64_t src,
                                       int64_t dst)
{
    if (src == dst) {
        return 0;
    } else if (src < dst) {
        return 1;
    } else {
        return 2;
    }
}

static inline uint32_t cc_calc_ltgt0_64(CPUS390XState *env, int64_t dst)
{
    return cc_calc_ltgt_64(env, dst, 0);
}

static inline uint32_t cc_calc_ltugtu_32(CPUS390XState *env, uint32_t src,
                                         uint32_t dst)
{
    if (src == dst) {
        return 0;
    } else if (src < dst) {
        return 1;
    } else {
        return 2;
    }
}

static inline uint32_t cc_calc_ltugtu_64(CPUS390XState *env, uint64_t src,
                                         uint64_t dst)
{
    if (src == dst) {
        return 0;
    } else if (src < dst) {
        return 1;
    } else {
        return 2;
    }
}

/* set condition code for test under mask */
static inline uint32_t cc_calc_tm_32(CPUS390XState *env, uint32_t val, uint32_t mask)
{
    HELPER_LOG("%s: val 0x%x mask 0x%x\n", __FUNCTION__, val, mask);
    uint16_t r = val & mask;

    if (r == 0 || mask == 0) {
        return 0;
    } else if (r == mask) {
        return 3;
    } else {
        return 1;
    }
}

/* set condition code for test under mask */
static inline uint32_t cc_calc_tm_64(CPUS390XState *env, uint64_t val, uint32_t mask)
{
    uint16_t r = val & mask;

    HELPER_LOG("%s: val 0x%lx mask 0x%x r 0x%x\n", __FUNCTION__, val, mask, r);
    if (r == 0 || mask == 0) {
        return 0;
    } else if (r == mask) {
        return 3;
    } else {
        while (!(mask & 0x8000)) {
            mask <<= 1;
            val <<= 1;
        }
        if (val & 0x8000000000000000ULL) {
            return 2;
        } else {
            return 1;
        }
    }
}

static inline uint32_t cc_calc_nz(CPUS390XState *env, uint64_t dst)
{
    return !!dst;
}

static inline uint32_t cc_calc_add_64(CPUS390XState *env, int64_t a1, int64_t a2,
                                      int64_t ar)
{
    if ((a1 > 0 && a2 > 0 && ar < 0) || (a1 < 0 && a2 < 0 && ar > 0)) {
        return 3; /* overflow */
    } else {
        if (ar < 0) {
            return 1;
        } else if (ar > 0) {
            return 2;
        } else {
            return 0;
        }
    }
}

static inline uint32_t cc_calc_addu_64(CPUS390XState *env, uint64_t a1, uint64_t a2,
                                       uint64_t ar)
{
    if (ar == 0) {
        if (a1) {
            return 2;
        } else {
            return 0;
        }
    } else {
        if (ar < a1 || ar < a2) {
            return 3;
        } else {
            return 1;
        }
    }
}

static inline uint32_t cc_calc_sub_64(CPUS390XState *env, int64_t a1, int64_t a2,
                                      int64_t ar)
{
    if ((a1 > 0 && a2 < 0 && ar < 0) || (a1 < 0 && a2 > 0 && ar > 0)) {
        return 3; /* overflow */
    } else {
        if (ar < 0) {
            return 1;
        } else if (ar > 0) {
            return 2;
        } else {
            return 0;
        }
    }
}

static inline uint32_t cc_calc_subu_64(CPUS390XState *env, uint64_t a1, uint64_t a2,
                                       uint64_t ar)
{
    if (ar == 0) {
        return 2;
    } else {
        if (a2 > a1) {
            return 1;
        } else {
            return 3;
        }
    }
}

static inline uint32_t cc_calc_abs_64(CPUS390XState *env, int64_t dst)
{
    if ((uint64_t)dst == 0x8000000000000000ULL) {
        return 3;
    } else if (dst) {
        return 1;
    } else {
        return 0;
    }
}

static inline uint32_t cc_calc_nabs_64(CPUS390XState *env, int64_t dst)
{
    return !!dst;
}

static inline uint32_t cc_calc_comp_64(CPUS390XState *env, int64_t dst)
{
    if ((uint64_t)dst == 0x8000000000000000ULL) {
        return 3;
    } else if (dst < 0) {
        return 1;
    } else if (dst > 0) {
        return 2;
    } else {
        return 0;
    }
}

static inline uint32_t cc_calc_add_32(CPUS390XState *env, int32_t a1, int32_t a2,
                                      int32_t ar)
{
    if ((a1 > 0 && a2 > 0 && ar < 0) || (a1 < 0 && a2 < 0 && ar > 0)) {
        return 3; /* overflow */
    } else {
        if (ar < 0) {
            return 1;
        } else if (ar > 0) {
            return 2;
        } else {
            return 0;
        }
    }
}

static inline uint32_t cc_calc_addu_32(CPUS390XState *env, uint32_t a1, uint32_t a2,
                                       uint32_t ar)
{
    if (ar == 0) {
        if (a1) {
            return 2;
        } else {
            return 0;
        }
    } else {
        if (ar < a1 || ar < a2) {
            return 3;
        } else {
            return 1;
        }
    }
}

static inline uint32_t cc_calc_sub_32(CPUS390XState *env, int32_t a1, int32_t a2,
                                      int32_t ar)
{
    if ((a1 > 0 && a2 < 0 && ar < 0) || (a1 < 0 && a2 > 0 && ar > 0)) {
        return 3; /* overflow */
    } else {
        if (ar < 0) {
            return 1;
        } else if (ar > 0) {
            return 2;
        } else {
            return 0;
        }
    }
}

static inline uint32_t cc_calc_subu_32(CPUS390XState *env, uint32_t a1, uint32_t a2,
                                       uint32_t ar)
{
    if (ar == 0) {
        return 2;
    } else {
        if (a2 > a1) {
            return 1;
        } else {
            return 3;
        }
    }
}

static inline uint32_t cc_calc_abs_32(CPUS390XState *env, int32_t dst)
{
    if ((uint32_t)dst == 0x80000000UL) {
        return 3;
    } else if (dst) {
        return 1;
    } else {
        return 0;
    }
}

static inline uint32_t cc_calc_nabs_32(CPUS390XState *env, int32_t dst)
{
    return !!dst;
}

static inline uint32_t cc_calc_comp_32(CPUS390XState *env, int32_t dst)
{
    if ((uint32_t)dst == 0x80000000UL) {
        return 3;
    } else if (dst < 0) {
        return 1;
    } else if (dst > 0) {
        return 2;
    } else {
        return 0;
    }
}

/* calculate condition code for insert character under mask insn */
static inline uint32_t cc_calc_icm_32(CPUS390XState *env, uint32_t mask, uint32_t val)
{
    uint32_t cc;

    HELPER_LOG("%s: mask 0x%x val %d\n", __FUNCTION__, mask, val);
    if (mask == 0xf) {
        if (!val) {
            return 0;
        } else if (val & 0x80000000) {
            return 1;
        } else {
            return 2;
        }
    }

    if (!val || !mask) {
        cc = 0;
    } else {
        while (mask != 1) {
            mask >>= 1;
            val >>= 8;
        }
        if (val & 0x80) {
            cc = 1;
        } else {
            cc = 2;
        }
    }
    return cc;
}

static inline uint32_t cc_calc_slag(CPUS390XState *env, uint64_t src, uint64_t shift)
{
    uint64_t mask = ((1ULL << shift) - 1ULL) << (64 - shift);
    uint64_t match, r;

    /* check if the sign bit stays the same */
    if (src & (1ULL << 63)) {
        match = mask;
    } else {
        match = 0;
    }

    if ((src & mask) != match) {
        /* overflow */
        return 3;
    }

    r = ((src << shift) & ((1ULL << 63) - 1)) | (src & (1ULL << 63));

    if ((int64_t)r == 0) {
        return 0;
    } else if ((int64_t)r < 0) {
        return 1;
    }

    return 2;
}
static inline uint32_t do_calc_cc(CPUS390XState *env, uint32_t cc_op, uint64_t src,
                                  uint64_t dst, uint64_t vr)
{
    uint32_t r = 0;

    switch (cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* cc_op value _is_ cc */
        r = cc_op;
        break;
    case CC_OP_LTGT0_32:
        r = cc_calc_ltgt0_32(env, dst);
        break;
    case CC_OP_LTGT0_64:
        r = cc_calc_ltgt0_64(env, dst);
        break;
    case CC_OP_LTGT_32:
        r = cc_calc_ltgt_32(env, src, dst);
        break;
    case CC_OP_LTGT_64:
        r = cc_calc_ltgt_64(env, src, dst);
        break;
    case CC_OP_LTUGTU_32:
        r = cc_calc_ltugtu_32(env, src, dst);
        break;
    case CC_OP_LTUGTU_64:
        r = cc_calc_ltugtu_64(env, src, dst);
        break;
    case CC_OP_TM_32:
        r = cc_calc_tm_32(env, src, dst);
        break;
    case CC_OP_TM_64:
        r = cc_calc_tm_64(env, src, dst);
        break;
    case CC_OP_NZ:
        r = cc_calc_nz(env, dst);
        break;
    case CC_OP_ADD_64:
        r = cc_calc_add_64(env, src, dst, vr);
        break;
    case CC_OP_ADDU_64:
        r = cc_calc_addu_64(env, src, dst, vr);
        break;
    case CC_OP_SUB_64:
        r = cc_calc_sub_64(env, src, dst, vr);
        break;
    case CC_OP_SUBU_64:
        r = cc_calc_subu_64(env, src, dst, vr);
        break;
    case CC_OP_ABS_64:
        r = cc_calc_abs_64(env, dst);
        break;
    case CC_OP_NABS_64:
        r = cc_calc_nabs_64(env, dst);
        break;
    case CC_OP_COMP_64:
        r = cc_calc_comp_64(env, dst);
        break;
    case CC_OP_ADD_32:
        r = cc_calc_add_32(env, src, dst, vr);
        break;
    case CC_OP_ADDU_32:
        r = cc_calc_addu_32(env, src, dst, vr);
        break;
    case CC_OP_SUB_32:
        r = cc_calc_sub_32(env, src, dst, vr);
        break;
    case CC_OP_SUBU_32:
        r = cc_calc_subu_32(env, src, dst, vr);
        break;
    case CC_OP_ABS_32:
        r = cc_calc_abs_64(env, dst);
        break;
    case CC_OP_NABS_32:
        r = cc_calc_nabs_64(env, dst);
        break;
    case CC_OP_COMP_32:
        r = cc_calc_comp_32(env, dst);
        break;
    case CC_OP_ICM:
        r = cc_calc_icm_32(env, src, dst);
        break;
    case CC_OP_SLAG:
        r = cc_calc_slag(env, src, dst);
        break;
    case CC_OP_LTGT_F32:
        r = set_cc_f32(src, dst);
        break;
    case CC_OP_LTGT_F64:
        r = set_cc_f64(src, dst);
        break;
    case CC_OP_NZ_F32:
        r = set_cc_nz_f32(dst);
        break;
    case CC_OP_NZ_F64:
        r = set_cc_nz_f64(dst);
        break;
    default:
        cpu_abort(env, "Unknown CC operation: %s\n", cc_name(cc_op));
    }

    HELPER_LOG("%s: %15s 0x%016lx 0x%016lx 0x%016lx = %d\n", __FUNCTION__,
               cc_name(cc_op), src, dst, vr, r);

    return r;
}
uint32_t calc_cc(CPUS390XState *env, uint32_t cc_op, uint64_t src, uint64_t dst,
                 uint64_t vr)
{
    return do_calc_cc(env, cc_op, src, dst, vr);
}

uint32_t HELPER(calc_cc)(uint32_t cc_op, uint64_t src, uint64_t dst,
                         uint64_t vr)
{
    return do_calc_cc(env, cc_op, src, dst, vr);
}
/* convert to packed decimal */
uint64_t HELPER(cvd)(int32_t bin)
{
    /* positive 0 */
    uint64_t dec = 0x0c;
    int shift;

    if (bin < 0) {
        bin = -bin;
        dec = 0x0d;
    }

    for (shift = 4; (shift < 64) && bin; shift += 4) {
        int current_number = bin % 10;

        dec |= (current_number) << shift;
        bin /= 10;
    }

    return dec;
}
;
2283 void HELPER(unpk
)(uint32_t len
, uint64_t dest
, uint64_t src
)
2285 int len_dest
= len
>> 4;
2286 int len_src
= len
& 0xf;
2288 int second_nibble
= 0;
2293 /* last byte is special, it only flips the nibbles */
2295 stb(dest
, (b
<< 4) | (b
>> 4));
2299 /* now pad every nibble with 0xf0 */
2301 while (len_dest
> 0) {
2302 uint8_t cur_byte
= 0;
2305 cur_byte
= ldub(src
);
2311 /* only advance one nibble at a time */
2312 if (second_nibble
) {
2317 second_nibble
= !second_nibble
;
2320 cur_byte
= (cur_byte
& 0xf);
2324 stb(dest
, cur_byte
);
2328 void HELPER(tr
)(uint32_t len
, uint64_t array
, uint64_t trans
)
2332 for (i
= 0; i
<= len
; i
++) {
2333 uint8_t byte
= ldub(array
+ i
);
2334 uint8_t new_byte
= ldub(trans
+ byte
);
2335 stb(array
+ i
, new_byte
);
2339 #ifndef CONFIG_USER_ONLY
2341 void HELPER(load_psw
)(uint64_t mask
, uint64_t addr
)
2343 load_psw(env
, mask
, addr
);
2347 static void program_interrupt(CPUS390XState
*env
, uint32_t code
, int ilc
)
2349 qemu_log("program interrupt at %#" PRIx64
"\n", env
->psw
.addr
);
2351 if (kvm_enabled()) {
2353 kvm_s390_interrupt(env
, KVM_S390_PROGRAM_INT
, code
);
2356 env
->int_pgm_code
= code
;
2357 env
->int_pgm_ilc
= ilc
;
2358 env
->exception_index
= EXCP_PGM
;
2363 static void ext_interrupt(CPUS390XState
*env
, int type
, uint32_t param
,
2366 cpu_inject_ext(env
, type
, param
, param64
);
2369 int sclp_service_call(CPUS390XState
*env
, uint32_t sccb
, uint64_t code
)
2375 printf("sclp(0x%x, 0x%" PRIx64
")\n", sccb
, code
);
2378 if (sccb
& ~0x7ffffff8ul
) {
2379 fprintf(stderr
, "KVM: invalid sccb address 0x%x\n", sccb
);
2385 case SCLP_CMDW_READ_SCP_INFO
:
2386 case SCLP_CMDW_READ_SCP_INFO_FORCED
:
2387 while ((ram_size
>> (20 + shift
)) > 65535) {
2390 stw_phys(sccb
+ SCP_MEM_CODE
, ram_size
>> (20 + shift
));
2391 stb_phys(sccb
+ SCP_INCREMENT
, 1 << shift
);
2392 stw_phys(sccb
+ SCP_RESPONSE_CODE
, 0x10);
2394 if (kvm_enabled()) {
2396 kvm_s390_interrupt_internal(env
, KVM_S390_INT_SERVICE
,
2401 ext_interrupt(env
, EXT_SERVICE
, sccb
& ~3, 0);
2406 printf("KVM: invalid sclp call 0x%x / 0x%" PRIx64
"x\n", sccb
, code
);
2416 /* SCLP service call */
2417 uint32_t HELPER(servc
)(uint32_t r1
, uint64_t r2
)
2419 if (sclp_service_call(env
, r1
, r2
)) {
/* DIAG */
uint64_t HELPER(diag)(uint32_t num, uint64_t mem, uint64_t code)
{
    uint64_t r;

    switch (num) {
    case 0x500:
        /* KVM hypercall */
        r = s390_virtio_hypercall(env, mem, code);
        break;
    case 0x44:
        /* yield */
        r = 0;
        break;
    default:
        r = -1;
        break;
    }

    if (r) {
        program_interrupt(env, PGM_OPERATION, ILC_LATER_INC);
    }

    return r;
}

/* Store CPU ID */
void HELPER(stidp)(uint64_t a1)
{
    stq(a1, env->cpu_num);
}

/* Set Prefix */
void HELPER(spx)(uint64_t a1)
{
    uint32_t prefix;

    prefix = ldl(a1);
    env->psa = prefix & 0xfffff000;
    qemu_log("prefix: %#x\n", prefix);
    tlb_flush_page(env, 0);
    tlb_flush_page(env, TARGET_PAGE_SIZE);
}
/* Set Clock */
uint32_t HELPER(sck)(uint64_t a1)
{
    /* XXX not implemented - is it necessary? */

    return 0;
}

static inline uint64_t clock_value(CPUS390XState *env)
{
    uint64_t time;

    time = env->tod_offset +
           time2tod(qemu_get_clock_ns(vm_clock) - env->tod_basetime);

    return time;
}

/* Store Clock */
uint32_t HELPER(stck)(uint64_t a1)
{
    stq(a1, clock_value(env));

    return 0;
}

/* Store Clock Extended */
uint32_t HELPER(stcke)(uint64_t a1)
{
    stb(a1, 0);
    /* basically the same value as stck */
    stq(a1 + 1, clock_value(env) | env->cpu_num);
    /* more fine grained than stck */
    stq(a1 + 9, 0);
    /* XXX programmable fields */
    stw(a1 + 17, 0);

    return 0;
}
/* Set Clock Comparator */
void HELPER(sckc)(uint64_t a1)
{
    uint64_t time = ldq(a1);

    if (time == -1ULL) {
        return;
    }

    /* difference between now and then */
    time -= clock_value(env);
    /* nanoseconds */
    time = (time * 125) >> 9;

    qemu_mod_timer(env->tod_timer, qemu_get_clock_ns(vm_clock) + time);
}

/* Store Clock Comparator */
void HELPER(stckc)(uint64_t a1)
{
    /* XXX implement */
    stq(a1, 0);
}

/* Set CPU Timer */
void HELPER(spt)(uint64_t a1)
{
    uint64_t time = ldq(a1);

    if (time == -1ULL) {
        return;
    }

    /* nanoseconds */
    time = (time * 125) >> 9;

    qemu_mod_timer(env->cpu_timer, qemu_get_clock_ns(vm_clock) + time);
}

/* Store CPU Timer */
void HELPER(stpt)(uint64_t a1)
{
    /* XXX implement */
    stq(a1, 0);
}
/* Store System Information */
uint32_t HELPER(stsi)(uint64_t a0, uint32_t r0, uint32_t r1)
{
    int cc = 0;
    int sel1, sel2;

    if ((r0 & STSI_LEVEL_MASK) <= STSI_LEVEL_3 &&
        ((r0 & STSI_R0_RESERVED_MASK) || (r1 & STSI_R1_RESERVED_MASK))) {
        /* valid function code, invalid reserved bits */
        program_interrupt(env, PGM_SPECIFICATION, 2);
    }

    sel1 = r0 & STSI_R0_SEL1_MASK;
    sel2 = r1 & STSI_R1_SEL2_MASK;

    /* XXX: spec exception if sysib is not 4k-aligned */

    switch (r0 & STSI_LEVEL_MASK) {
    case STSI_LEVEL_1:
        if ((sel1 == 1) && (sel2 == 1)) {
            /* Basic Machine Configuration */
            struct sysib_111 sysib;

            memset(&sysib, 0, sizeof(sysib));
            ebcdic_put(sysib.manuf, "QEMU            ", 16);
            /* same as machine type number in STORE CPU ID */
            ebcdic_put(sysib.type, "QEMU", 4);
            /* same as model number in STORE CPU ID */
            ebcdic_put(sysib.model, "QEMU            ", 16);
            ebcdic_put(sysib.sequence, "QEMU            ", 16);
            ebcdic_put(sysib.plant, "QEMU", 4);
            cpu_physical_memory_rw(a0, (uint8_t *)&sysib, sizeof(sysib), 1);
        } else if ((sel1 == 2) && (sel2 == 1)) {
            /* Basic Machine CPU */
            struct sysib_121 sysib;

            memset(&sysib, 0, sizeof(sysib));
            /* XXX make different for different CPUs? */
            ebcdic_put(sysib.sequence, "QEMUQEMUQEMUQEMU", 16);
            ebcdic_put(sysib.plant, "QEMU", 4);
            stw_p(&sysib.cpu_addr, env->cpu_num);
            cpu_physical_memory_rw(a0, (uint8_t *)&sysib, sizeof(sysib), 1);
        } else if ((sel1 == 2) && (sel2 == 2)) {
            /* Basic Machine CPUs */
            struct sysib_122 sysib;

            memset(&sysib, 0, sizeof(sysib));
            stl_p(&sysib.capability, 0x443afc29);
            /* XXX change when SMP comes */
            stw_p(&sysib.total_cpus, 1);
            stw_p(&sysib.active_cpus, 1);
            stw_p(&sysib.standby_cpus, 0);
            stw_p(&sysib.reserved_cpus, 0);
            cpu_physical_memory_rw(a0, (uint8_t *)&sysib, sizeof(sysib), 1);
        } else {
            cc = 3;
        }
        break;
    case STSI_LEVEL_2:
        if ((sel1 == 2) && (sel2 == 1)) {
            /* LPAR CPU */
            struct sysib_221 sysib;

            memset(&sysib, 0, sizeof(sysib));
            /* XXX make different for different CPUs? */
            ebcdic_put(sysib.sequence, "QEMUQEMUQEMUQEMU", 16);
            ebcdic_put(sysib.plant, "QEMU", 4);
            stw_p(&sysib.cpu_addr, env->cpu_num);
            stw_p(&sysib.cpu_id, 0);
            cpu_physical_memory_rw(a0, (uint8_t *)&sysib, sizeof(sysib), 1);
        } else if ((sel1 == 2) && (sel2 == 2)) {
            /* LPAR CPUs */
            struct sysib_222 sysib;

            memset(&sysib, 0, sizeof(sysib));
            stw_p(&sysib.lpar_num, 0);
            /* XXX change when SMP comes */
            stw_p(&sysib.total_cpus, 1);
            stw_p(&sysib.conf_cpus, 1);
            stw_p(&sysib.standby_cpus, 0);
            stw_p(&sysib.reserved_cpus, 0);
            ebcdic_put(sysib.name, "QEMU    ", 8);
            stl_p(&sysib.caf, 1000);
            stw_p(&sysib.dedicated_cpus, 0);
            stw_p(&sysib.shared_cpus, 0);
            cpu_physical_memory_rw(a0, (uint8_t *)&sysib, sizeof(sysib), 1);
        } else {
            cc = 3;
        }
        break;
    case STSI_LEVEL_3:
        if ((sel1 == 2) && (sel2 == 2)) {
            /* VM CPUs */
            struct sysib_322 sysib;

            memset(&sysib, 0, sizeof(sysib));
            /* XXX change when SMP comes */
            stw_p(&sysib.vm[0].total_cpus, 1);
            stw_p(&sysib.vm[0].conf_cpus, 1);
            stw_p(&sysib.vm[0].standby_cpus, 0);
            stw_p(&sysib.vm[0].reserved_cpus, 0);
            ebcdic_put(sysib.vm[0].name, "KVMguest", 8);
            stl_p(&sysib.vm[0].caf, 1000);
            ebcdic_put(sysib.vm[0].cpi, "KVM/Linux       ", 16);
            cpu_physical_memory_rw(a0, (uint8_t *)&sysib, sizeof(sysib), 1);
        } else {
            cc = 3;
        }
        break;
    case STSI_LEVEL_CURRENT:
        env->regs[0] = STSI_LEVEL_3;
        break;
    default:
        cc = 3;
        break;
    }

    return cc;
}
void HELPER(lctlg)(uint32_t r1, uint64_t a2, uint32_t r3)
{
    int i;
    uint64_t src = a2;

    for (i = r1;; i = (i + 1) % 16) {
        env->cregs[i] = ldq(src);
        HELPER_LOG("load ctl %d from 0x%" PRIx64 " == 0x%" PRIx64 "\n",
                   i, src, env->cregs[i]);
        src += sizeof(uint64_t);

        if (i == r3) {
            break;
        }
    }

    tlb_flush(env, 1);
}

void HELPER(lctl)(uint32_t r1, uint64_t a2, uint32_t r3)
{
    int i;
    uint64_t src = a2;

    for (i = r1;; i = (i + 1) % 16) {
        env->cregs[i] = (env->cregs[i] & 0xFFFFFFFF00000000ULL) | ldl(src);
        src += sizeof(uint32_t);

        if (i == r3) {
            break;
        }
    }

    tlb_flush(env, 1);
}

void HELPER(stctg)(uint32_t r1, uint64_t a2, uint32_t r3)
{
    int i;
    uint64_t dest = a2;

    for (i = r1;; i = (i + 1) % 16) {
        stq(dest, env->cregs[i]);
        dest += sizeof(uint64_t);

        if (i == r3) {
            break;
        }
    }
}

void HELPER(stctl)(uint32_t r1, uint64_t a2, uint32_t r3)
{
    int i;
    uint64_t dest = a2;

    for (i = r1;; i = (i + 1) % 16) {
        stl(dest, env->cregs[i]);
        dest += sizeof(uint32_t);

        if (i == r3) {
            break;
        }
    }
}
uint32_t HELPER(tprot)(uint64_t a1, uint64_t a2)
{
    /* XXX implement */

    return 0;
}

/* insert storage key extended */
uint64_t HELPER(iske)(uint64_t r2)
{
    uint64_t addr = get_address(0, 0, r2);

    if (addr > ram_size) {
        return 0;
    }

    return env->storage_keys[addr / TARGET_PAGE_SIZE];
}

/* set storage key extended */
void HELPER(sske)(uint32_t r1, uint64_t r2)
{
    uint64_t addr = get_address(0, 0, r2);

    if (addr > ram_size) {
        return;
    }

    env->storage_keys[addr / TARGET_PAGE_SIZE] = r1;
}

/* reset reference bit extended */
uint32_t HELPER(rrbe)(uint32_t r1, uint64_t r2)
{
    uint8_t re;
    uint8_t key;

    if (r2 > ram_size) {
        return 0;
    }

    key = env->storage_keys[r2 / TARGET_PAGE_SIZE];
    re = key & (SK_R | SK_C);
    env->storage_keys[r2 / TARGET_PAGE_SIZE] = (key & ~SK_R);

    /*
     * cc
     *
     * 0  Reference bit zero; change bit zero
     * 1  Reference bit zero; change bit one
     * 2  Reference bit one; change bit zero
     * 3  Reference bit one; change bit one
     */

    return re >> 1;
}
/* compare and swap and purge */
uint32_t HELPER(csp)(uint32_t r1, uint32_t r2)
{
    uint32_t cc;
    uint32_t o1 = env->regs[r1];
    uint64_t a2 = get_address_31fix(r2) & ~3ULL;
    uint32_t o2 = ldl(a2);

    if (o1 == o2) {
        stl(a2, env->regs[(r1 + 1) & 15]);
        if (env->regs[r2] & 0x3) {
            /* flush TLB / ALB */
            tlb_flush(env, 1);
        }
        cc = 0;
    } else {
        env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | o2;
        cc = 1;
    }

    return cc;
}
static uint32_t mvc_asc(int64_t l, uint64_t a1, uint64_t mode1, uint64_t a2,
                        uint64_t mode2)
{
    target_ulong src, dest;
    int flags, cc = 0, i;

    if (!l) {
        return 0;
    } else if (l > 256) {
        /* max 256 */
        l = 256;
        cc = 3;
    }

    if (mmu_translate(env, a1 & TARGET_PAGE_MASK, 1, mode1, &dest, &flags)) {
        cpu_loop_exit(env);
    }
    dest |= a1 & ~TARGET_PAGE_MASK;

    if (mmu_translate(env, a2 & TARGET_PAGE_MASK, 0, mode2, &src, &flags)) {
        cpu_loop_exit(env);
    }
    src |= a2 & ~TARGET_PAGE_MASK;

    /* XXX replace w/ memcpy */
    for (i = 0; i < l; i++) {
        /* XXX be more clever */
        if ((((dest + i) & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) ||
            (((src + i) & TARGET_PAGE_MASK) != (src & TARGET_PAGE_MASK))) {
            mvc_asc(l - i, a1 + i, mode1, a2 + i, mode2);
            break;
        }
        stb_phys(dest + i, ldub_phys(src + i));
    }

    return cc;
}
uint32_t HELPER(mvcs)(uint64_t l, uint64_t a1, uint64_t a2)
{
    HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
               __FUNCTION__, l, a1, a2);

    return mvc_asc(l, a1, PSW_ASC_SECONDARY, a2, PSW_ASC_PRIMARY);
}

uint32_t HELPER(mvcp)(uint64_t l, uint64_t a1, uint64_t a2)
{
    HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
               __FUNCTION__, l, a1, a2);

    return mvc_asc(l, a1, PSW_ASC_PRIMARY, a2, PSW_ASC_SECONDARY);
}
uint32_t HELPER(sigp)(uint64_t order_code, uint32_t r1, uint64_t cpu_addr)
{
    int cc = 0;

    HELPER_LOG("%s: %016" PRIx64 " %08x %016" PRIx64 "\n",
               __FUNCTION__, order_code, r1, cpu_addr);

    /* Remember: Use "R1 or R1+1, whichever is the odd-numbered register"
       as parameter (input). Status (output) is always R1. */

    switch (order_code) {
    case SIGP_SET_ARCH:
        /* switch arch */
        break;
    case SIGP_SENSE:
        /* enumerate CPU status */
        if (cpu_addr) {
            /* XXX implement when SMP comes */
            return 3;
        }
        env->regs[r1] &= 0xffffffff00000000ULL;
        cc = 1;
        break;
#if !defined (CONFIG_USER_ONLY)
    case SIGP_RESTART:
        qemu_system_reset_request();
        cpu_loop_exit(env);
        break;
    case SIGP_STOP:
        qemu_system_shutdown_request();
        cpu_loop_exit(env);
        break;
#endif
    default:
        /* unknown sigp */
        fprintf(stderr, "XXX unknown sigp: 0x%" PRIx64 "\n", order_code);
        cc = 3;
    }

    return cc;
}
void HELPER(sacf)(uint64_t a1)
{
    HELPER_LOG("%s: %16" PRIx64 "\n", __FUNCTION__, a1);

    switch (a1 & 0xf00) {
    case 0x000:
        env->psw.mask &= ~PSW_MASK_ASC;
        env->psw.mask |= PSW_ASC_PRIMARY;
        break;
    case 0x100:
        env->psw.mask &= ~PSW_MASK_ASC;
        env->psw.mask |= PSW_ASC_SECONDARY;
        break;
    case 0x300:
        env->psw.mask &= ~PSW_MASK_ASC;
        env->psw.mask |= PSW_ASC_HOME;
        break;
    default:
        qemu_log("unknown sacf mode: %" PRIx64 "\n", a1);
        program_interrupt(env, PGM_SPECIFICATION, 2);
        break;
    }
}
/* invalidate pte */
void HELPER(ipte)(uint64_t pte_addr, uint64_t vaddr)
{
    uint64_t page = vaddr & TARGET_PAGE_MASK;
    uint64_t pte = 0;

    /* XXX broadcast to other CPUs */

    /* XXX Linux is nice enough to give us the exact pte address.
       According to spec we'd have to find it out ourselves */
    /* XXX Linux is fine with overwriting the pte, the spec requires
       us to only set the invalid bit */
    stq_phys(pte_addr, pte | _PAGE_INVALID);

    /* XXX we exploit the fact that Linux passes the exact virtual
       address here - it's not obliged to! */
    tlb_flush_page(env, page);

    /* XXX 31-bit hack */
    if (page & 0x80000000) {
        tlb_flush_page(env, page & ~0x80000000);
    } else {
        tlb_flush_page(env, page | 0x80000000);
    }
}

/* flush local tlb */
void HELPER(ptlb)(void)
{
    tlb_flush(env, 1);
}
/* store using real address */
void HELPER(stura)(uint64_t addr, uint32_t v1)
{
    stw_phys(get_address(0, 0, addr), v1);
}

/* load real address */
uint32_t HELPER(lra)(uint64_t addr, uint32_t r1)
{
    uint32_t cc = 0;
    int old_exc = env->exception_index;
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;
    uint64_t ret;
    int flags;

    /* XXX incomplete - has more corner cases */
    if (!(env->psw.mask & PSW_MASK_64) && (addr >> 32)) {
        program_interrupt(env, PGM_SPECIAL_OP, 2);
    }

    env->exception_index = old_exc;
    if (mmu_translate(env, addr, 0, asc, &ret, &flags)) {
        cc = 3;
    }
    if (env->exception_index == EXCP_PGM) {
        ret = env->int_pgm_code | 0x80000000;
    } else {
        ret |= addr & ~TARGET_PAGE_MASK;
    }
    env->exception_index = old_exc;

    if (!(env->psw.mask & PSW_MASK_64)) {
        env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) |
                        (ret & 0xffffffffULL);
    } else {
        env->regs[r1] = ret;
    }

    return cc;
}

#endif