/*
 *  S/390 helper routines
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2009 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "dyngen-exec.h"
#include "host-utils.h"
#include "qemu-timer.h"
#include <linux/kvm.h>
#if !defined (CONFIG_USER_ONLY)
#endif

/*****************************************************************************/
#if !defined (CONFIG_USER_ONLY)
#include "softmmu_exec.h"

#define MMUSUFFIX _mmu

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"
/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(CPUState *env1, target_ulong addr, int is_write, int mmu_idx,
              void *retaddr)
    ret = cpu_s390x_handle_mmu_fault(env, addr, is_write, mmu_idx);
    if (unlikely(ret != 0)) {
        if (likely(retaddr)) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            /* the PC is inside the translated code. It means that we have
               a virtual CPU fault */
            cpu_restore_state(tb, env, pc);
/* #define DEBUG_HELPER */
#ifdef DEBUG_HELPER
#define HELPER_LOG(x...) qemu_log(x)
#else
#define HELPER_LOG(x...)
#endif

/* raise an exception */
void HELPER(exception)(uint32_t excp)
    HELPER_LOG("%s: exception %d\n", __FUNCTION__, excp);
    env->exception_index = excp;
#ifndef CONFIG_USER_ONLY
static void mvc_fast_memset(CPUState *env, uint32_t l, uint64_t dest,
                            uint8_t byte)
    target_phys_addr_t dest_phys;
    target_phys_addr_t len = l;
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;

    if (mmu_translate(env, dest, 1, asc, &dest_phys, &flags)) {
        cpu_abort(env, "should never reach here");
    dest_phys |= dest & ~TARGET_PAGE_MASK;

    dest_p = cpu_physical_memory_map(dest_phys, &len, 1);

    memset(dest_p, byte, len);

    cpu_physical_memory_unmap(dest_p, len, 1, len);

static void mvc_fast_memmove(CPUState *env, uint32_t l, uint64_t dest,
                             uint64_t src)
    target_phys_addr_t dest_phys;
    target_phys_addr_t src_phys;
    target_phys_addr_t len = l;
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;

    if (mmu_translate(env, dest, 1, asc, &dest_phys, &flags)) {
        cpu_abort(env, "should never reach here");
    dest_phys |= dest & ~TARGET_PAGE_MASK;

    if (mmu_translate(env, src, 0, asc, &src_phys, &flags)) {
        cpu_abort(env, "should never reach here");
    src_phys |= src & ~TARGET_PAGE_MASK;

    dest_p = cpu_physical_memory_map(dest_phys, &len, 1);
    src_p = cpu_physical_memory_map(src_phys, &len, 0);

    memmove(dest_p, src_p, len);

    cpu_physical_memory_unmap(dest_p, len, 1, len);
    cpu_physical_memory_unmap(src_p, len, 0, len);
uint32_t HELPER(nc)(uint32_t l, uint64_t dest, uint64_t src)
    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __FUNCTION__, l, dest, src);
    for (i = 0; i <= l; i++) {
        x = ldub(dest + i) & ldub(src + i);

uint32_t HELPER(xc)(uint32_t l, uint64_t dest, uint64_t src)
    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __FUNCTION__, l, dest, src);

#ifndef CONFIG_USER_ONLY
    /* xor with itself is the same as memset(0) */
    if ((l > 32) && (src == dest) &&
        (src & TARGET_PAGE_MASK) == ((src + l) & TARGET_PAGE_MASK)) {
        mvc_fast_memset(env, l + 1, dest, 0);
    memset(g2h(dest), 0, l + 1);
    for (i = 0; i <= l; i++) {
        x = ldub(dest + i) ^ ldub(src + i);

uint32_t HELPER(oc)(uint32_t l, uint64_t dest, uint64_t src)
    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __FUNCTION__, l, dest, src);
    for (i = 0; i <= l; i++) {
        x = ldub(dest + i) | ldub(src + i);
void HELPER(mvc)(uint32_t l, uint64_t dest, uint64_t src)
    uint32_t l_64 = (l + 1) / 8;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __FUNCTION__, l, dest, src);

#ifndef CONFIG_USER_ONLY
        (src & TARGET_PAGE_MASK) == ((src + l) & TARGET_PAGE_MASK) &&
        (dest & TARGET_PAGE_MASK) == ((dest + l) & TARGET_PAGE_MASK)) {
        if (dest == (src + 1)) {
            mvc_fast_memset(env, l + 1, dest, ldub(src));
        } else if ((src & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) {
            mvc_fast_memmove(env, l + 1, dest, src);
    if (dest == (src + 1)) {
        memset(g2h(dest), ldub(src), l + 1);
        memmove(g2h(dest), g2h(src), l + 1);

    /* handle the parts that fit into 8-byte loads/stores */
    if (dest != (src + 1)) {
        for (i = 0; i < l_64; i++) {
            stq(dest + x, ldq(src + x));

    /* slow version crossing pages with byte accesses */
    for (i = x; i <= l; i++) {
        stb(dest + i, ldub(src + i));
/* compare unsigned byte arrays */
uint32_t HELPER(clc)(uint32_t l, uint64_t s1, uint64_t s2)
    HELPER_LOG("%s l %d s1 %" PRIx64 " s2 %" PRIx64 "\n",
               __FUNCTION__, l, s1, s2);
    for (i = 0; i <= l; i++) {
        HELPER_LOG("%02x (%c)/%02x (%c) ", x, x, y, y);

/* compare logical under mask */
uint32_t HELPER(clm)(uint32_t r1, uint32_t mask, uint64_t addr)
    HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%" PRIx64 "\n", __FUNCTION__, r1,
        r = (r1 & 0xff000000UL) >> 24;
        HELPER_LOG("mask 0x%x %02x/%02x (0x%" PRIx64 ") ", mask, r, d,
        mask = (mask << 1) & 0xf;

/* store character under mask */
void HELPER(stcm)(uint32_t r1, uint32_t mask, uint64_t addr)
    HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%lx\n", __FUNCTION__, r1, mask,
        r = (r1 & 0xff000000UL) >> 24;
        HELPER_LOG("mask 0x%x %02x (0x%lx) ", mask, r, addr);
        mask = (mask << 1) & 0xf;
/* 64/64 -> 128 unsigned multiplication */
void HELPER(mlg)(uint32_t r1, uint64_t v2)
#if HOST_LONG_BITS == 64 && defined(__GNUC__)
    /* assuming 64-bit hosts have __uint128_t */
    __uint128_t res = (__uint128_t)env->regs[r1 + 1];
    res *= (__uint128_t)v2;
    env->regs[r1] = (uint64_t)(res >> 64);
    env->regs[r1 + 1] = (uint64_t)res;
#else
    mulu64(&env->regs[r1 + 1], &env->regs[r1], env->regs[r1 + 1], v2);
/* 128 -> 64/64 unsigned division */
void HELPER(dlg)(uint32_t r1, uint64_t v2)
    uint64_t divisor = v2;

    if (!env->regs[r1]) {
        /* 64 -> 64/64 case */
        env->regs[r1] = env->regs[r1+1] % divisor;
        env->regs[r1+1] = env->regs[r1+1] / divisor;
#if HOST_LONG_BITS == 64 && defined(__GNUC__)
        /* assuming 64-bit hosts have __uint128_t */
        __uint128_t dividend = (((__uint128_t)env->regs[r1]) << 64) |
                               env->regs[r1+1];
        __uint128_t quotient = dividend / divisor;
        env->regs[r1+1] = quotient;
        __uint128_t remainder = dividend % divisor;
        env->regs[r1] = remainder;
#else
        /* 32-bit hosts would need special wrapper functionality - just abort if
           we encounter such a case; it's very unlikely anyways. */
        cpu_abort(env, "128 -> 64/64 division not implemented\n");
static inline uint64_t get_address(int x2, int b2, int d2)
    if (!(env->psw.mask & PSW_MASK_64)) {

static inline uint64_t get_address_31fix(int reg)
    uint64_t r = env->regs[reg];

    if (!(env->psw.mask & PSW_MASK_64)) {
/* search string (c is byte to search, r2 is string, r1 end of string) */
uint32_t HELPER(srst)(uint32_t c, uint32_t r1, uint32_t r2)
    uint64_t str = get_address_31fix(r2);
    uint64_t end = get_address_31fix(r1);

    HELPER_LOG("%s: c %d *r1 0x%" PRIx64 " *r2 0x%" PRIx64 "\n", __FUNCTION__,
               c, env->regs[r1], env->regs[r2]);

    for (i = str; i != end; i++) {

/* unsigned string compare (c is string terminator) */
uint32_t HELPER(clst)(uint32_t c, uint32_t r1, uint32_t r2)
    uint64_t s1 = get_address_31fix(r1);
    uint64_t s2 = get_address_31fix(r2);

#ifdef CONFIG_USER_ONLY
    HELPER_LOG("%s: comparing '%s' and '%s'\n",
               __FUNCTION__, (char*)g2h(s1), (char*)g2h(s2));

        if ((v1 == c || v2 == c) || (v1 != v2)) {
            cc = (v1 < v2) ? 1 : 2;
            /* FIXME: 31-bit mode! */
void HELPER(mvpg)(uint64_t r0, uint64_t r1, uint64_t r2)
    /* XXX missing r0 handling */
#ifdef CONFIG_USER_ONLY
    for (i = 0; i < TARGET_PAGE_SIZE; i++) {
        stb(r1 + i, ldub(r2 + i));
    mvc_fast_memmove(env, TARGET_PAGE_SIZE, r1, r2);

/* string copy (c is string terminator) */
void HELPER(mvst)(uint32_t c, uint32_t r1, uint32_t r2)
    uint64_t dest = get_address_31fix(r1);
    uint64_t src = get_address_31fix(r2);

#ifdef CONFIG_USER_ONLY
    HELPER_LOG("%s: copy '%s' to 0x%lx\n", __FUNCTION__, (char*)g2h(src),
    env->regs[r1] = dest; /* FIXME: 31-bit mode! */
/* compare and swap 64-bit */
uint32_t HELPER(csg)(uint32_t r1, uint64_t a2, uint32_t r3)
    /* FIXME: locking? */
    uint64_t v2 = ldq(a2);
    if (env->regs[r1] == v2) {
        stq(a2, env->regs[r3]);

/* compare double and swap 64-bit */
uint32_t HELPER(cdsg)(uint32_t r1, uint64_t a2, uint32_t r3)
    /* FIXME: locking? */
    uint64_t v2_hi = ldq(a2);
    uint64_t v2_lo = ldq(a2 + 8);
    uint64_t v1_hi = env->regs[r1];
    uint64_t v1_lo = env->regs[r1 + 1];

    if ((v1_hi == v2_hi) && (v1_lo == v2_lo)) {
        stq(a2, env->regs[r3]);
        stq(a2 + 8, env->regs[r3 + 1]);
    env->regs[r1] = v2_hi;
    env->regs[r1 + 1] = v2_lo;

/* compare and swap 32-bit */
uint32_t HELPER(cs)(uint32_t r1, uint64_t a2, uint32_t r3)
    /* FIXME: locking? */
    HELPER_LOG("%s: r1 %d a2 0x%lx r3 %d\n", __FUNCTION__, r1, a2, r3);
    uint32_t v2 = ldl(a2);
    if (((uint32_t)env->regs[r1]) == v2) {
        stl(a2, (uint32_t)env->regs[r3]);
    env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | v2;
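/* Illustration for the ICM helper below (not in the original source): for
   ICM r1,B'1010',addr two bytes are read, from addr and addr+1, and inserted
   at bit positions 24 and 8 of the low word of r1 (the two byte positions
   selected by the mask); the bytes not selected by the mask are left
   untouched. */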
static uint32_t helper_icm(uint32_t r1, uint64_t address, uint32_t mask)
    int pos = 24; /* top of the lower half of r1 */
    uint64_t rmask = 0xff000000ULL;

            env->regs[r1] &= ~rmask;
            if ((val & 0x80) && !ccd) {
            if (val && cc == 0) {
            env->regs[r1] |= (uint64_t)val << pos;
        mask = (mask << 1) & 0xf;
/* execute instruction
   this instruction executes an insn modified with the contents of r1
   it does not change the executed instruction in memory
   it does not change the program counter
   in other words: tricky...
   currently implemented by interpreting the cases it is most commonly used in */
uint32_t HELPER(ex)(uint32_t cc, uint64_t v1, uint64_t addr, uint64_t ret)
    uint16_t insn = lduw_code(addr);

    HELPER_LOG("%s: v1 0x%lx addr 0x%lx insn 0x%x\n", __FUNCTION__, v1, addr,
    if ((insn & 0xf0ff) == 0xd000) {
        uint32_t l, insn2, b1, b2, d1, d2;

        insn2 = ldl_code(addr + 2);
        b1 = (insn2 >> 28) & 0xf;
        b2 = (insn2 >> 12) & 0xf;
        d1 = (insn2 >> 16) & 0xfff;
        switch (insn & 0xf00) {
            helper_mvc(l, get_address(0, b1, d1), get_address(0, b2, d2));
            cc = helper_clc(l, get_address(0, b1, d1), get_address(0, b2, d2));
            cc = helper_xc(l, get_address(0, b1, d1), get_address(0, b2, d2));
            helper_tr(l, get_address(0, b1, d1), get_address(0, b2, d2));
    } else if ((insn & 0xff00) == 0x0a00) {
        /* supervisor call */
        HELPER_LOG("%s: svc %ld via execute\n", __FUNCTION__, (insn|v1) & 0xff);
        env->psw.addr = ret - 4;
        env->int_svc_code = (insn|v1) & 0xff;
        env->int_svc_ilc = 4;
        helper_exception(EXCP_SVC);
    } else if ((insn & 0xff00) == 0xbf00) {
        uint32_t insn2, r1, r3, b2, d2;

        insn2 = ldl_code(addr + 2);
        r1 = (insn2 >> 20) & 0xf;
        r3 = (insn2 >> 16) & 0xf;
        b2 = (insn2 >> 12) & 0xf;
        cc = helper_icm(r1, get_address(0, b2, d2), r3);
        cpu_abort(env, "EXECUTE on instruction prefix 0x%x not implemented\n",
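/* The common guest pattern is EX r,target where target is an MVC, CLC, XC or
   TR whose length field is zero: EXECUTE ORs the low byte of r into bits 8-15
   of the target instruction, which for these SS-format instructions is the
   length field, giving a variable-length storage operation without
   self-modifying code.  Only those cases, plus SVC and ICM, are interpreted
   above; anything else hits the cpu_abort() path. */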
/* absolute value 32-bit */
uint32_t HELPER(abs_i32)(int32_t val)

/* negative absolute value 32-bit */
int32_t HELPER(nabs_i32)(int32_t val)

/* absolute value 64-bit */
uint64_t HELPER(abs_i64)(int64_t val)
    HELPER_LOG("%s: val 0x%" PRIx64 "\n", __FUNCTION__, val);

/* negative absolute value 64-bit */
int64_t HELPER(nabs_i64)(int64_t val)

/* add with carry 32-bit unsigned */
uint32_t HELPER(addc_u32)(uint32_t cc, uint32_t v1, uint32_t v2)
/* store character under mask high operates on the upper half of r1 */
void HELPER(stcmh)(uint32_t r1, uint64_t address, uint32_t mask)
    int pos = 56; /* top of the upper half of r1 */

            stb(address, (env->regs[r1] >> pos) & 0xff);
        mask = (mask << 1) & 0xf;

/* insert character under mask high; same as icm, but operates on the
   upper half of r1 */
uint32_t HELPER(icmh)(uint32_t r1, uint64_t address, uint32_t mask)
    int pos = 56; /* top of the upper half of r1 */
    uint64_t rmask = 0xff00000000000000ULL;

            env->regs[r1] &= ~rmask;
            if ((val & 0x80) && !ccd) {
            if (val && cc == 0) {
            env->regs[r1] |= (uint64_t)val << pos;
        mask = (mask << 1) & 0xf;
/* insert psw mask and condition code into r1 */
void HELPER(ipm)(uint32_t cc, uint32_t r1)
    uint64_t r = env->regs[r1];

    r &= 0xffffffff00ffffffULL;
    r |= (cc << 28) | ((env->psw.mask >> 40) & 0xf);
    HELPER_LOG("%s: cc %d psw.mask 0x%lx r1 0x%lx\n", __FUNCTION__,
               cc, env->psw.mask, r);
/* load access registers r1 to r3 from memory at a2 */
void HELPER(lam)(uint32_t r1, uint64_t a2, uint32_t r3)
    for (i = r1;; i = (i + 1) % 16) {
        env->aregs[i] = ldl(a2);

/* store access registers r1 to r3 in memory at a2 */
void HELPER(stam)(uint32_t r1, uint64_t a2, uint32_t r3)
    for (i = r1;; i = (i + 1) % 16) {
        stl(a2, env->aregs[i]);
uint32_t HELPER(mvcl)(uint32_t r1, uint32_t r2)
    uint64_t destlen = env->regs[r1 + 1] & 0xffffff;
    uint64_t dest = get_address_31fix(r1);
    uint64_t srclen = env->regs[r2 + 1] & 0xffffff;
    uint64_t src = get_address_31fix(r2);
    uint8_t pad = src >> 24;

    if (destlen == srclen) {
    } else if (destlen < srclen) {

    if (srclen > destlen) {

    for (; destlen && srclen; src++, dest++, destlen--, srclen--) {

    for (; destlen; dest++, destlen--) {

    env->regs[r1 + 1] = destlen;
    /* can't use srclen here, we trunc'ed it */
    env->regs[r2 + 1] -= src - env->regs[r2];
    env->regs[r1] = dest;
/* move long extended: another memcopy insn with more bells and whistles */
uint32_t HELPER(mvcle)(uint32_t r1, uint64_t a2, uint32_t r3)
    uint64_t destlen = env->regs[r1 + 1];
    uint64_t dest = env->regs[r1];
    uint64_t srclen = env->regs[r3 + 1];
    uint64_t src = env->regs[r3];
    uint8_t pad = a2 & 0xff;

    if (!(env->psw.mask & PSW_MASK_64)) {
        destlen = (uint32_t)destlen;
        srclen = (uint32_t)srclen;

    if (destlen == srclen) {
    } else if (destlen < srclen) {

    if (srclen > destlen) {

    for (; destlen && srclen; src++, dest++, destlen--, srclen--) {

    for (; destlen; dest++, destlen--) {

    env->regs[r1 + 1] = destlen;
    /* can't use srclen here, we trunc'ed it */
    /* FIXME: 31-bit mode! */
    env->regs[r3 + 1] -= src - env->regs[r3];
    env->regs[r1] = dest;
/* compare logical long extended: memcompare insn with padding */
uint32_t HELPER(clcle)(uint32_t r1, uint64_t a2, uint32_t r3)
    uint64_t destlen = env->regs[r1 + 1];
    uint64_t dest = get_address_31fix(r1);
    uint64_t srclen = env->regs[r3 + 1];
    uint64_t src = get_address_31fix(r3);
    uint8_t pad = a2 & 0xff;
    uint8_t v1 = 0, v2 = 0;

    if (!(destlen || srclen)) {

    if (srclen > destlen) {

    for (; destlen || srclen; src++, dest++, destlen--, srclen--) {
        v1 = srclen ? ldub(src) : pad;
        v2 = destlen ? ldub(dest) : pad;
            cc = (v1 < v2) ? 1 : 2;

    env->regs[r1 + 1] = destlen;
    /* can't use srclen here, we trunc'ed it */
    env->regs[r3 + 1] -= src - env->regs[r3];
    env->regs[r1] = dest;
/* subtract unsigned v2 from v1 with borrow */
uint32_t HELPER(slb)(uint32_t cc, uint32_t r1, uint32_t v2)
    uint32_t v1 = env->regs[r1];
    uint32_t res = v1 + (~v2) + (cc >> 1);

    env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | res;

/* subtract unsigned v2 from v1 with borrow */
uint32_t HELPER(slbg)(uint32_t cc, uint32_t r1, uint64_t v1, uint64_t v2)
    uint64_t res = v1 + (~v2) + (cc >> 1);
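/* In both helpers (cc >> 1) recovers the incoming borrow: after a logical
   subtraction CC 2 and 3 mean "no borrow" and CC 1 means "borrow", so bit 1
   of the CC is exactly the carry-in needed for the two's-complement form
   v1 + ~v2 + carry. */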
static inline int float_comp_to_cc(int float_compare)
    switch (float_compare) {
    case float_relation_equal:
    case float_relation_less:
    case float_relation_greater:
    case float_relation_unordered:
        cpu_abort(env, "unknown return value for float compare\n");

/* condition codes for binary FP ops */
static uint32_t set_cc_f32(float32 v1, float32 v2)
    return float_comp_to_cc(float32_compare_quiet(v1, v2, &env->fpu_status));

static uint32_t set_cc_f64(float64 v1, float64 v2)
    return float_comp_to_cc(float64_compare_quiet(v1, v2, &env->fpu_status));

/* condition codes for unary FP ops */
static uint32_t set_cc_nz_f32(float32 v)
    if (float32_is_any_nan(v)) {
    } else if (float32_is_zero(v)) {
    } else if (float32_is_neg(v)) {

static uint32_t set_cc_nz_f64(float64 v)
    if (float64_is_any_nan(v)) {
    } else if (float64_is_zero(v)) {
    } else if (float64_is_neg(v)) {

static uint32_t set_cc_nz_f128(float128 v)
    if (float128_is_any_nan(v)) {
    } else if (float128_is_zero(v)) {
    } else if (float128_is_neg(v)) {
/* convert 32-bit int to 64-bit float */
void HELPER(cdfbr)(uint32_t f1, int32_t v2)
    HELPER_LOG("%s: converting %d to f%d\n", __FUNCTION__, v2, f1);
    env->fregs[f1].d = int32_to_float64(v2, &env->fpu_status);

/* convert 32-bit int to 128-bit float */
void HELPER(cxfbr)(uint32_t f1, int32_t v2)
    v1.q = int32_to_float128(v2, &env->fpu_status);
    env->fregs[f1].ll = v1.ll.upper;
    env->fregs[f1 + 2].ll = v1.ll.lower;

/* convert 64-bit int to 32-bit float */
void HELPER(cegbr)(uint32_t f1, int64_t v2)
    HELPER_LOG("%s: converting %ld to f%d\n", __FUNCTION__, v2, f1);
    env->fregs[f1].l.upper = int64_to_float32(v2, &env->fpu_status);

/* convert 64-bit int to 64-bit float */
void HELPER(cdgbr)(uint32_t f1, int64_t v2)
    HELPER_LOG("%s: converting %ld to f%d\n", __FUNCTION__, v2, f1);
    env->fregs[f1].d = int64_to_float64(v2, &env->fpu_status);

/* convert 64-bit int to 128-bit float */
void HELPER(cxgbr)(uint32_t f1, int64_t v2)
    x1.q = int64_to_float128(v2, &env->fpu_status);
    HELPER_LOG("%s: converted %ld to 0x%lx and 0x%lx\n", __FUNCTION__, v2,
               x1.ll.upper, x1.ll.lower);
    env->fregs[f1].ll = x1.ll.upper;
    env->fregs[f1 + 2].ll = x1.ll.lower;

/* convert 32-bit int to 32-bit float */
void HELPER(cefbr)(uint32_t f1, int32_t v2)
    env->fregs[f1].l.upper = int32_to_float32(v2, &env->fpu_status);
    HELPER_LOG("%s: converting %d to 0x%d in f%d\n", __FUNCTION__, v2,
               env->fregs[f1].l.upper, f1);
/* 32-bit FP addition RR */
uint32_t HELPER(aebr)(uint32_t f1, uint32_t f2)
    env->fregs[f1].l.upper = float32_add(env->fregs[f1].l.upper,
                                         env->fregs[f2].l.upper,
    HELPER_LOG("%s: adding 0x%d resulting in 0x%d in f%d\n", __FUNCTION__,
               env->fregs[f2].l.upper, env->fregs[f1].l.upper, f1);
    return set_cc_nz_f32(env->fregs[f1].l.upper);

/* 64-bit FP addition RR */
uint32_t HELPER(adbr)(uint32_t f1, uint32_t f2)
    env->fregs[f1].d = float64_add(env->fregs[f1].d, env->fregs[f2].d,
    HELPER_LOG("%s: adding 0x%ld resulting in 0x%ld in f%d\n", __FUNCTION__,
               env->fregs[f2].d, env->fregs[f1].d, f1);
    return set_cc_nz_f64(env->fregs[f1].d);

/* 32-bit FP subtraction RR */
uint32_t HELPER(sebr)(uint32_t f1, uint32_t f2)
    env->fregs[f1].l.upper = float32_sub(env->fregs[f1].l.upper,
                                         env->fregs[f2].l.upper,
    HELPER_LOG("%s: subtracting 0x%d resulting in 0x%d in f%d\n", __FUNCTION__,
               env->fregs[f2].l.upper, env->fregs[f1].l.upper, f1);
    return set_cc_nz_f32(env->fregs[f1].l.upper);

/* 64-bit FP subtraction RR */
uint32_t HELPER(sdbr)(uint32_t f1, uint32_t f2)
    env->fregs[f1].d = float64_sub(env->fregs[f1].d, env->fregs[f2].d,
    HELPER_LOG("%s: subtracting 0x%ld resulting in 0x%ld in f%d\n",
               __FUNCTION__, env->fregs[f2].d, env->fregs[f1].d, f1);
    return set_cc_nz_f64(env->fregs[f1].d);
/* 32-bit FP division RR */
void HELPER(debr)(uint32_t f1, uint32_t f2)
    env->fregs[f1].l.upper = float32_div(env->fregs[f1].l.upper,
                                         env->fregs[f2].l.upper,

/* 128-bit FP division RR */
void HELPER(dxbr)(uint32_t f1, uint32_t f2)
    v1.ll.upper = env->fregs[f1].ll;
    v1.ll.lower = env->fregs[f1 + 2].ll;
    v2.ll.upper = env->fregs[f2].ll;
    v2.ll.lower = env->fregs[f2 + 2].ll;

    res.q = float128_div(v1.q, v2.q, &env->fpu_status);
    env->fregs[f1].ll = res.ll.upper;
    env->fregs[f1 + 2].ll = res.ll.lower;
/* 64-bit FP multiplication RR */
void HELPER(mdbr)(uint32_t f1, uint32_t f2)
    env->fregs[f1].d = float64_mul(env->fregs[f1].d, env->fregs[f2].d,

/* 128-bit FP multiplication RR */
void HELPER(mxbr)(uint32_t f1, uint32_t f2)
    v1.ll.upper = env->fregs[f1].ll;
    v1.ll.lower = env->fregs[f1 + 2].ll;
    v2.ll.upper = env->fregs[f2].ll;
    v2.ll.lower = env->fregs[f2 + 2].ll;

    res.q = float128_mul(v1.q, v2.q, &env->fpu_status);
    env->fregs[f1].ll = res.ll.upper;
    env->fregs[f1 + 2].ll = res.ll.lower;
/* convert 32-bit float to 64-bit float */
void HELPER(ldebr)(uint32_t r1, uint32_t r2)
    env->fregs[r1].d = float32_to_float64(env->fregs[r2].l.upper,

/* convert 128-bit float to 64-bit float */
void HELPER(ldxbr)(uint32_t f1, uint32_t f2)
    x2.ll.upper = env->fregs[f2].ll;
    x2.ll.lower = env->fregs[f2 + 2].ll;
    env->fregs[f1].d = float128_to_float64(x2.q, &env->fpu_status);
    HELPER_LOG("%s: to 0x%ld\n", __FUNCTION__, env->fregs[f1].d);

/* convert 64-bit float to 128-bit float */
void HELPER(lxdbr)(uint32_t f1, uint32_t f2)
    res.q = float64_to_float128(env->fregs[f2].d, &env->fpu_status);
    env->fregs[f1].ll = res.ll.upper;
    env->fregs[f1 + 2].ll = res.ll.lower;

/* convert 64-bit float to 32-bit float */
void HELPER(ledbr)(uint32_t f1, uint32_t f2)
    float64 d2 = env->fregs[f2].d;
    env->fregs[f1].l.upper = float64_to_float32(d2, &env->fpu_status);

/* convert 128-bit float to 32-bit float */
void HELPER(lexbr)(uint32_t f1, uint32_t f2)
    x2.ll.upper = env->fregs[f2].ll;
    x2.ll.lower = env->fregs[f2 + 2].ll;
    env->fregs[f1].l.upper = float128_to_float32(x2.q, &env->fpu_status);
    HELPER_LOG("%s: to 0x%d\n", __FUNCTION__, env->fregs[f1].l.upper);
/* absolute value of 32-bit float */
uint32_t HELPER(lpebr)(uint32_t f1, uint32_t f2)
    float32 v2 = env->fregs[f2].d;
    v1 = float32_abs(v2);
    env->fregs[f1].d = v1;
    return set_cc_nz_f32(v1);

/* absolute value of 64-bit float */
uint32_t HELPER(lpdbr)(uint32_t f1, uint32_t f2)
    float64 v2 = env->fregs[f2].d;
    v1 = float64_abs(v2);
    env->fregs[f1].d = v1;
    return set_cc_nz_f64(v1);

/* absolute value of 128-bit float */
uint32_t HELPER(lpxbr)(uint32_t f1, uint32_t f2)
    v2.ll.upper = env->fregs[f2].ll;
    v2.ll.lower = env->fregs[f2 + 2].ll;
    v1.q = float128_abs(v2.q);
    env->fregs[f1].ll = v1.ll.upper;
    env->fregs[f1 + 2].ll = v1.ll.lower;
    return set_cc_nz_f128(v1.q);
/* load and test 64-bit float */
uint32_t HELPER(ltdbr)(uint32_t f1, uint32_t f2)
    env->fregs[f1].d = env->fregs[f2].d;
    return set_cc_nz_f64(env->fregs[f1].d);

/* load and test 32-bit float */
uint32_t HELPER(ltebr)(uint32_t f1, uint32_t f2)
    env->fregs[f1].l.upper = env->fregs[f2].l.upper;
    return set_cc_nz_f32(env->fregs[f1].l.upper);

/* load and test 128-bit float */
uint32_t HELPER(ltxbr)(uint32_t f1, uint32_t f2)
    x.ll.upper = env->fregs[f2].ll;
    x.ll.lower = env->fregs[f2 + 2].ll;
    env->fregs[f1].ll = x.ll.upper;
    env->fregs[f1 + 2].ll = x.ll.lower;
    return set_cc_nz_f128(x.q);
/* load complement of 32-bit float */
uint32_t HELPER(lcebr)(uint32_t f1, uint32_t f2)
    env->fregs[f1].l.upper = float32_chs(env->fregs[f2].l.upper);
    return set_cc_nz_f32(env->fregs[f1].l.upper);

/* load complement of 64-bit float */
uint32_t HELPER(lcdbr)(uint32_t f1, uint32_t f2)
    env->fregs[f1].d = float64_chs(env->fregs[f2].d);
    return set_cc_nz_f64(env->fregs[f1].d);

/* load complement of 128-bit float */
uint32_t HELPER(lcxbr)(uint32_t f1, uint32_t f2)
    x2.ll.upper = env->fregs[f2].ll;
    x2.ll.lower = env->fregs[f2 + 2].ll;
    x1.q = float128_chs(x2.q);
    env->fregs[f1].ll = x1.ll.upper;
    env->fregs[f1 + 2].ll = x1.ll.lower;
    return set_cc_nz_f128(x1.q);
/* 32-bit FP addition RM */
void HELPER(aeb)(uint32_t f1, uint32_t val)
    float32 v1 = env->fregs[f1].l.upper;
    HELPER_LOG("%s: adding 0x%d from f%d and 0x%d\n", __FUNCTION__,
    env->fregs[f1].l.upper = float32_add(v1, v2.f, &env->fpu_status);

/* 32-bit FP division RM */
void HELPER(deb)(uint32_t f1, uint32_t val)
    float32 v1 = env->fregs[f1].l.upper;
    HELPER_LOG("%s: dividing 0x%d from f%d by 0x%d\n", __FUNCTION__,
    env->fregs[f1].l.upper = float32_div(v1, v2.f, &env->fpu_status);

/* 32-bit FP multiplication RM */
void HELPER(meeb)(uint32_t f1, uint32_t val)
    float32 v1 = env->fregs[f1].l.upper;
    HELPER_LOG("%s: multiplying 0x%d from f%d and 0x%d\n", __FUNCTION__,
    env->fregs[f1].l.upper = float32_mul(v1, v2.f, &env->fpu_status);
/* 32-bit FP compare RR */
uint32_t HELPER(cebr)(uint32_t f1, uint32_t f2)
    float32 v1 = env->fregs[f1].l.upper;
    float32 v2 = env->fregs[f2].l.upper;
    HELPER_LOG("%s: comparing 0x%d from f%d and 0x%d\n", __FUNCTION__,
    return set_cc_f32(v1, v2);

/* 64-bit FP compare RR */
uint32_t HELPER(cdbr)(uint32_t f1, uint32_t f2)
    float64 v1 = env->fregs[f1].d;
    float64 v2 = env->fregs[f2].d;
    HELPER_LOG("%s: comparing 0x%ld from f%d and 0x%ld\n", __FUNCTION__,
    return set_cc_f64(v1, v2);

/* 128-bit FP compare RR */
uint32_t HELPER(cxbr)(uint32_t f1, uint32_t f2)
    v1.ll.upper = env->fregs[f1].ll;
    v1.ll.lower = env->fregs[f1 + 2].ll;
    v2.ll.upper = env->fregs[f2].ll;
    v2.ll.lower = env->fregs[f2 + 2].ll;

    return float_comp_to_cc(float128_compare_quiet(v1.q, v2.q,
/* 64-bit FP compare RM */
uint32_t HELPER(cdb)(uint32_t f1, uint64_t a2)
    float64 v1 = env->fregs[f1].d;
    HELPER_LOG("%s: comparing 0x%ld from f%d and 0x%lx\n", __FUNCTION__, v1,
    return set_cc_f64(v1, v2.d);

/* 64-bit FP addition RM */
uint32_t HELPER(adb)(uint32_t f1, uint64_t a2)
    float64 v1 = env->fregs[f1].d;
    HELPER_LOG("%s: adding 0x%lx from f%d and 0x%lx\n", __FUNCTION__,
    env->fregs[f1].d = v1 = float64_add(v1, v2.d, &env->fpu_status);
    return set_cc_nz_f64(v1);
/* 32-bit FP subtraction RM */
void HELPER(seb)(uint32_t f1, uint32_t val)
    float32 v1 = env->fregs[f1].l.upper;
    env->fregs[f1].l.upper = float32_sub(v1, v2.f, &env->fpu_status);

/* 64-bit FP subtraction RM */
uint32_t HELPER(sdb)(uint32_t f1, uint64_t a2)
    float64 v1 = env->fregs[f1].d;
    env->fregs[f1].d = v1 = float64_sub(v1, v2.d, &env->fpu_status);
    return set_cc_nz_f64(v1);
/* 64-bit FP multiplication RM */
void HELPER(mdb)(uint32_t f1, uint64_t a2)
    float64 v1 = env->fregs[f1].d;
    HELPER_LOG("%s: multiplying 0x%lx from f%d and 0x%ld\n", __FUNCTION__,
    env->fregs[f1].d = float64_mul(v1, v2.d, &env->fpu_status);

/* 64-bit FP division RM */
void HELPER(ddb)(uint32_t f1, uint64_t a2)
    float64 v1 = env->fregs[f1].d;
    HELPER_LOG("%s: dividing 0x%lx from f%d by 0x%ld\n", __FUNCTION__,
    env->fregs[f1].d = float64_div(v1, v2.d, &env->fpu_status);
static void set_round_mode(int m3)
        /* biased round to nearest */
        /* round to nearest */
        set_float_rounding_mode(float_round_nearest_even, &env->fpu_status);
        set_float_rounding_mode(float_round_to_zero, &env->fpu_status);
        set_float_rounding_mode(float_round_up, &env->fpu_status);
        set_float_rounding_mode(float_round_down, &env->fpu_status);
/* convert 32-bit float to 64-bit int */
uint32_t HELPER(cgebr)(uint32_t r1, uint32_t f2, uint32_t m3)
    float32 v2 = env->fregs[f2].l.upper;
    env->regs[r1] = float32_to_int64(v2, &env->fpu_status);
    return set_cc_nz_f32(v2);

/* convert 64-bit float to 64-bit int */
uint32_t HELPER(cgdbr)(uint32_t r1, uint32_t f2, uint32_t m3)
    float64 v2 = env->fregs[f2].d;
    env->regs[r1] = float64_to_int64(v2, &env->fpu_status);
    return set_cc_nz_f64(v2);

/* convert 128-bit float to 64-bit int */
uint32_t HELPER(cgxbr)(uint32_t r1, uint32_t f2, uint32_t m3)
    v2.ll.upper = env->fregs[f2].ll;
    v2.ll.lower = env->fregs[f2 + 2].ll;
    env->regs[r1] = float128_to_int64(v2.q, &env->fpu_status);
    if (float128_is_any_nan(v2.q)) {
    } else if (float128_is_zero(v2.q)) {
    } else if (float128_is_neg(v2.q)) {

/* convert 32-bit float to 32-bit int */
uint32_t HELPER(cfebr)(uint32_t r1, uint32_t f2, uint32_t m3)
    float32 v2 = env->fregs[f2].l.upper;
    env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) |
                    float32_to_int32(v2, &env->fpu_status);
    return set_cc_nz_f32(v2);

/* convert 64-bit float to 32-bit int */
uint32_t HELPER(cfdbr)(uint32_t r1, uint32_t f2, uint32_t m3)
    float64 v2 = env->fregs[f2].d;
    env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) |
                    float64_to_int32(v2, &env->fpu_status);
    return set_cc_nz_f64(v2);

/* convert 128-bit float to 32-bit int */
uint32_t HELPER(cfxbr)(uint32_t r1, uint32_t f2, uint32_t m3)
    v2.ll.upper = env->fregs[f2].ll;
    v2.ll.lower = env->fregs[f2 + 2].ll;
    env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) |
                    float128_to_int32(v2.q, &env->fpu_status);
    return set_cc_nz_f128(v2.q);
/* load 32-bit FP zero */
void HELPER(lzer)(uint32_t f1)
    env->fregs[f1].l.upper = float32_zero;

/* load 64-bit FP zero */
void HELPER(lzdr)(uint32_t f1)
    env->fregs[f1].d = float64_zero;

/* load 128-bit FP zero */
void HELPER(lzxr)(uint32_t f1)
    x.q = float64_to_float128(float64_zero, &env->fpu_status);
    env->fregs[f1].ll = x.ll.upper;
    env->fregs[f1 + 2].ll = x.ll.lower;
/* 128-bit FP subtraction RR */
uint32_t HELPER(sxbr)(uint32_t f1, uint32_t f2)
    v1.ll.upper = env->fregs[f1].ll;
    v1.ll.lower = env->fregs[f1 + 2].ll;
    v2.ll.upper = env->fregs[f2].ll;
    v2.ll.lower = env->fregs[f2 + 2].ll;

    res.q = float128_sub(v1.q, v2.q, &env->fpu_status);
    env->fregs[f1].ll = res.ll.upper;
    env->fregs[f1 + 2].ll = res.ll.lower;
    return set_cc_nz_f128(res.q);

/* 128-bit FP addition RR */
uint32_t HELPER(axbr)(uint32_t f1, uint32_t f2)
    v1.ll.upper = env->fregs[f1].ll;
    v1.ll.lower = env->fregs[f1 + 2].ll;
    v2.ll.upper = env->fregs[f2].ll;
    v2.ll.lower = env->fregs[f2 + 2].ll;

    res.q = float128_add(v1.q, v2.q, &env->fpu_status);
    env->fregs[f1].ll = res.ll.upper;
    env->fregs[f1 + 2].ll = res.ll.lower;
    return set_cc_nz_f128(res.q);
/* 32-bit FP multiplication RR */
void HELPER(meebr)(uint32_t f1, uint32_t f2)
    env->fregs[f1].l.upper = float32_mul(env->fregs[f1].l.upper,
                                         env->fregs[f2].l.upper,

/* 64-bit FP division RR */
void HELPER(ddbr)(uint32_t f1, uint32_t f2)
    env->fregs[f1].d = float64_div(env->fregs[f1].d, env->fregs[f2].d,
/* 64-bit FP multiply and add RM */
void HELPER(madb)(uint32_t f1, uint64_t a2, uint32_t f3)
    HELPER_LOG("%s: f1 %d a2 0x%lx f3 %d\n", __FUNCTION__, f1, a2, f3);
    env->fregs[f1].d = float64_add(env->fregs[f1].d,
                                   float64_mul(v2.d, env->fregs[f3].d,

/* 64-bit FP multiply and add RR */
void HELPER(madbr)(uint32_t f1, uint32_t f3, uint32_t f2)
    HELPER_LOG("%s: f1 %d f2 %d f3 %d\n", __FUNCTION__, f1, f2, f3);
    env->fregs[f1].d = float64_add(float64_mul(env->fregs[f2].d,
                                   env->fregs[f1].d, &env->fpu_status);

/* 64-bit FP multiply and subtract RR */
void HELPER(msdbr)(uint32_t f1, uint32_t f3, uint32_t f2)
    HELPER_LOG("%s: f1 %d f2 %d f3 %d\n", __FUNCTION__, f1, f2, f3);
    env->fregs[f1].d = float64_sub(float64_mul(env->fregs[f2].d,
                                   env->fregs[f1].d, &env->fpu_status);

/* 32-bit FP multiply and add RR */
void HELPER(maebr)(uint32_t f1, uint32_t f3, uint32_t f2)
    env->fregs[f1].l.upper = float32_add(env->fregs[f1].l.upper,
                                         float32_mul(env->fregs[f2].l.upper,
                                                     env->fregs[f3].l.upper,
/* convert 32-bit float to 64-bit float */
void HELPER(ldeb)(uint32_t f1, uint64_t a2)
    env->fregs[f1].d = float32_to_float64(v2,

/* convert 64-bit float to 128-bit float */
void HELPER(lxdb)(uint32_t f1, uint64_t a2)
    v1.q = float64_to_float128(v2.d, &env->fpu_status);
    env->fregs[f1].ll = v1.ll.upper;
    env->fregs[f1 + 2].ll = v1.ll.lower;
/* test data class 32-bit */
uint32_t HELPER(tceb)(uint32_t f1, uint64_t m2)
    float32 v1 = env->fregs[f1].l.upper;
    int neg = float32_is_neg(v1);

    HELPER_LOG("%s: v1 0x%lx m2 0x%lx neg %d\n", __FUNCTION__, (long)v1, m2, neg);
    if ((float32_is_zero(v1) && (m2 & (1 << (11-neg)))) ||
        (float32_is_infinity(v1) && (m2 & (1 << (5-neg)))) ||
        (float32_is_any_nan(v1) && (m2 & (1 << (3-neg)))) ||
        (float32_is_signaling_nan(v1) && (m2 & (1 << (1-neg))))) {
    } else if (m2 & (1 << (9-neg))) {
        /* assume normalized number */
    /* FIXME: denormalized? */

/* test data class 64-bit */
uint32_t HELPER(tcdb)(uint32_t f1, uint64_t m2)
    float64 v1 = env->fregs[f1].d;
    int neg = float64_is_neg(v1);

    HELPER_LOG("%s: v1 0x%lx m2 0x%lx neg %d\n", __FUNCTION__, v1, m2, neg);
    if ((float64_is_zero(v1) && (m2 & (1 << (11-neg)))) ||
        (float64_is_infinity(v1) && (m2 & (1 << (5-neg)))) ||
        (float64_is_any_nan(v1) && (m2 & (1 << (3-neg)))) ||
        (float64_is_signaling_nan(v1) && (m2 & (1 << (1-neg))))) {
    } else if (m2 & (1 << (9-neg))) {
        /* assume normalized number */
    /* FIXME: denormalized? */

/* test data class 128-bit */
uint32_t HELPER(tcxb)(uint32_t f1, uint64_t m2)
    v1.ll.upper = env->fregs[f1].ll;
    v1.ll.lower = env->fregs[f1 + 2].ll;

    int neg = float128_is_neg(v1.q);
    if ((float128_is_zero(v1.q) && (m2 & (1 << (11-neg)))) ||
        (float128_is_infinity(v1.q) && (m2 & (1 << (5-neg)))) ||
        (float128_is_any_nan(v1.q) && (m2 & (1 << (3-neg)))) ||
        (float128_is_signaling_nan(v1.q) && (m2 & (1 << (1-neg))))) {
    } else if (m2 & (1 << (9-neg))) {
        /* assume normalized number */
    /* FIXME: denormalized? */
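/* The 12-bit TEST DATA CLASS mask assigns one bit per (class, sign) pair:
   bits 11/10 select +/- zero, 9/8 +/- normal, 7/6 +/- subnormal, 5/4 +/-
   infinity, 3/2 +/- quiet NaN and 1/0 +/- signaling NaN, which is why the
   checks above test (1 << (n - neg)). */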
/* find leftmost one */
uint32_t HELPER(flogr)(uint32_t r1, uint64_t v2)
    while (!(v2 & 0x8000000000000000ULL) && v2) {

        env->regs[r1 + 1] = 0;

    env->regs[r1] = res;
    env->regs[r1 + 1] = ov2 & ~(0x8000000000000000ULL >> res);
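/* Example: for v2 = 0x0000000100000000 the loop shifts 31 times, so r1 is set
   to 31 (the bit number of the leftmost one, counting from the MSB) and r1+1
   receives v2 with that bit cleared, i.e. 0.  For v2 == 0, r1 becomes 64 and
   r1+1 is set to 0. */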
/* square root 64-bit RR */
void HELPER(sqdbr)(uint32_t f1, uint32_t f2)
    env->fregs[f1].d = float64_sqrt(env->fregs[f2].d, &env->fpu_status);
void HELPER(cksm)(uint32_t r1, uint32_t r2)
    uint64_t src = get_address_31fix(r2);
    uint64_t src_len = env->regs[(r2 + 1) & 15];
    uint64_t cksm = (uint32_t)env->regs[r1];

    while (src_len >= 4) {
        /* move to next word */

        cksm += ldub(src) << 24;
        cksm += lduw(src) << 16;
        cksm += lduw(src) << 16;
        cksm += ldub(src + 2) << 8;

    /* indicate we've processed everything */
    env->regs[r2] = src + src_len;
    env->regs[(r2 + 1) & 15] = 0;

    env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) |
                    ((uint32_t)cksm + (cksm >> 32));
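/* The sum is accumulated in a 64-bit variable; the final store folds it back
   to 32 bits by adding the high word (the accumulated carries) onto the low
   word. */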
static inline uint32_t cc_calc_ltgt_32(CPUState *env, int32_t src,
    } else if (src < dst) {

static inline uint32_t cc_calc_ltgt0_32(CPUState *env, int32_t dst)
    return cc_calc_ltgt_32(env, dst, 0);

static inline uint32_t cc_calc_ltgt_64(CPUState *env, int64_t src,
    } else if (src < dst) {

static inline uint32_t cc_calc_ltgt0_64(CPUState *env, int64_t dst)
    return cc_calc_ltgt_64(env, dst, 0);

static inline uint32_t cc_calc_ltugtu_32(CPUState *env, uint32_t src,
    } else if (src < dst) {

static inline uint32_t cc_calc_ltugtu_64(CPUState *env, uint64_t src,
    } else if (src < dst) {

static inline uint32_t cc_calc_tm_32(CPUState *env, uint32_t val, uint32_t mask)
    HELPER_LOG("%s: val 0x%x mask 0x%x\n", __FUNCTION__, val, mask);
    uint16_t r = val & mask;
    if (r == 0 || mask == 0) {
    } else if (r == mask) {

/* set condition code for test under mask */
static inline uint32_t cc_calc_tm_64(CPUState *env, uint64_t val, uint32_t mask)
    uint16_t r = val & mask;
    HELPER_LOG("%s: val 0x%lx mask 0x%x r 0x%x\n", __FUNCTION__, val, mask, r);
    if (r == 0 || mask == 0) {
    } else if (r == mask) {
        while (!(mask & 0x8000)) {

static inline uint32_t cc_calc_nz(CPUState *env, uint64_t dst)
static inline uint32_t cc_calc_add_64(CPUState *env, int64_t a1, int64_t a2,
    if ((a1 > 0 && a2 > 0 && ar < 0) || (a1 < 0 && a2 < 0 && ar > 0)) {
        return 3; /* overflow */
    } else if (ar > 0) {

static inline uint32_t cc_calc_addu_64(CPUState *env, uint64_t a1, uint64_t a2,
        if (ar < a1 || ar < a2) {

static inline uint32_t cc_calc_sub_64(CPUState *env, int64_t a1, int64_t a2,
    if ((a1 > 0 && a2 < 0 && ar < 0) || (a1 < 0 && a2 > 0 && ar > 0)) {
        return 3; /* overflow */
    } else if (ar > 0) {

static inline uint32_t cc_calc_subu_64(CPUState *env, uint64_t a1, uint64_t a2,

static inline uint32_t cc_calc_abs_64(CPUState *env, int64_t dst)
    if ((uint64_t)dst == 0x8000000000000000ULL) {

static inline uint32_t cc_calc_nabs_64(CPUState *env, int64_t dst)

static inline uint32_t cc_calc_comp_64(CPUState *env, int64_t dst)
    if ((uint64_t)dst == 0x8000000000000000ULL) {
    } else if (dst < 0) {
    } else if (dst > 0) {

static inline uint32_t cc_calc_add_32(CPUState *env, int32_t a1, int32_t a2,
    if ((a1 > 0 && a2 > 0 && ar < 0) || (a1 < 0 && a2 < 0 && ar > 0)) {
        return 3; /* overflow */
    } else if (ar > 0) {

static inline uint32_t cc_calc_addu_32(CPUState *env, uint32_t a1, uint32_t a2,
        if (ar < a1 || ar < a2) {

static inline uint32_t cc_calc_sub_32(CPUState *env, int32_t a1, int32_t a2,
    if ((a1 > 0 && a2 < 0 && ar < 0) || (a1 < 0 && a2 > 0 && ar > 0)) {
        return 3; /* overflow */
    } else if (ar > 0) {

static inline uint32_t cc_calc_subu_32(CPUState *env, uint32_t a1, uint32_t a2,

static inline uint32_t cc_calc_abs_32(CPUState *env, int32_t dst)
    if ((uint32_t)dst == 0x80000000UL) {

static inline uint32_t cc_calc_nabs_32(CPUState *env, int32_t dst)

static inline uint32_t cc_calc_comp_32(CPUState *env, int32_t dst)
    if ((uint32_t)dst == 0x80000000UL) {
    } else if (dst < 0) {
    } else if (dst > 0) {

/* calculate condition code for insert character under mask insn */
static inline uint32_t cc_calc_icm_32(CPUState *env, uint32_t mask, uint32_t val)
    HELPER_LOG("%s: mask 0x%x val %d\n", __FUNCTION__, mask, val);
    } else if (val & 0x80000000) {
        if (!val || !mask) {

static inline uint32_t cc_calc_slag(CPUState *env, uint64_t src, uint64_t shift)
    uint64_t mask = ((1ULL << shift) - 1ULL) << (64 - shift);

    /* check if the sign bit stays the same */
    if (src & (1ULL << 63)) {
    if ((src & mask) != match) {

    r = ((src << shift) & ((1ULL << 63) - 1)) | (src & (1ULL << 63));

    if ((int64_t)r == 0) {
    } else if ((int64_t)r < 0) {
*env
, uint32_t cc_op
, uint64_t src
,
2140 uint64_t dst
, uint64_t vr
)
2149 /* cc_op value _is_ cc */
2152 case CC_OP_LTGT0_32
:
2153 r
= cc_calc_ltgt0_32(env
, dst
);
2155 case CC_OP_LTGT0_64
:
2156 r
= cc_calc_ltgt0_64(env
, dst
);
2159 r
= cc_calc_ltgt_32(env
, src
, dst
);
2162 r
= cc_calc_ltgt_64(env
, src
, dst
);
2164 case CC_OP_LTUGTU_32
:
2165 r
= cc_calc_ltugtu_32(env
, src
, dst
);
2167 case CC_OP_LTUGTU_64
:
2168 r
= cc_calc_ltugtu_64(env
, src
, dst
);
2171 r
= cc_calc_tm_32(env
, src
, dst
);
2174 r
= cc_calc_tm_64(env
, src
, dst
);
2177 r
= cc_calc_nz(env
, dst
);
2180 r
= cc_calc_add_64(env
, src
, dst
, vr
);
2183 r
= cc_calc_addu_64(env
, src
, dst
, vr
);
2186 r
= cc_calc_sub_64(env
, src
, dst
, vr
);
2189 r
= cc_calc_subu_64(env
, src
, dst
, vr
);
2192 r
= cc_calc_abs_64(env
, dst
);
2195 r
= cc_calc_nabs_64(env
, dst
);
2198 r
= cc_calc_comp_64(env
, dst
);
2202 r
= cc_calc_add_32(env
, src
, dst
, vr
);
2205 r
= cc_calc_addu_32(env
, src
, dst
, vr
);
2208 r
= cc_calc_sub_32(env
, src
, dst
, vr
);
2211 r
= cc_calc_subu_32(env
, src
, dst
, vr
);
2214 r
= cc_calc_abs_64(env
, dst
);
2217 r
= cc_calc_nabs_64(env
, dst
);
2220 r
= cc_calc_comp_32(env
, dst
);
2224 r
= cc_calc_icm_32(env
, src
, dst
);
2227 r
= cc_calc_slag(env
, src
, dst
);
2230 case CC_OP_LTGT_F32
:
2231 r
= set_cc_f32(src
, dst
);
2233 case CC_OP_LTGT_F64
:
2234 r
= set_cc_f64(src
, dst
);
2237 r
= set_cc_nz_f32(dst
);
2240 r
= set_cc_nz_f64(dst
);
2244 cpu_abort(env
, "Unknown CC operation: %s\n", cc_name(cc_op
));
2247 HELPER_LOG("%s: %15s 0x%016lx 0x%016lx 0x%016lx = %d\n", __FUNCTION__
,
2248 cc_name(cc_op
), src
, dst
, vr
, r
);
2252 uint32_t calc_cc(CPUState
*env
, uint32_t cc_op
, uint64_t src
, uint64_t dst
,
2255 return do_calc_cc(env
, cc_op
, src
, dst
, vr
);
2258 uint32_t HELPER(calc_cc
)(uint32_t cc_op
, uint64_t src
, uint64_t dst
,
2261 return do_calc_cc(env
, cc_op
, src
, dst
, vr
);
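/* Condition-code handling is lazy: translated code only records the cc_op and
   up to three operands (src, dst, vr) in the CPU state, and the actual 2-bit
   CC is computed here on demand, either from C code via calc_cc() or from
   generated code via the calc_cc helper. */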
uint64_t HELPER(cvd)(int32_t bin)
    uint64_t dec = 0x0c;

    for (shift = 4; (shift < 64) && bin; shift += 4) {
        int current_number = bin % 10;

        dec |= (current_number) << shift;
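/* Worked example: bin = 123 gives dec = 0x123c; the low nibble 0xc is the
   preferred positive sign of the packed-decimal format, and the decimal
   digits are packed one per nibble above the sign, least significant digit
   first. */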
void HELPER(unpk)(uint32_t len, uint64_t dest, uint64_t src)
    int len_dest = len >> 4;
    int len_src = len & 0xf;
    int second_nibble = 0;

    /* last byte is special, it only flips the nibbles */
    stb(dest, (b << 4) | (b >> 4));

    /* now pad every nibble with 0xf0 */
    while (len_dest > 0) {
        uint8_t cur_byte = 0;

            cur_byte = ldub(src);

        /* only advance one nibble at a time */
        if (second_nibble) {
        second_nibble = !second_nibble;

        cur_byte = (cur_byte & 0xf);

        stb(dest, cur_byte);
void HELPER(tr)(uint32_t len, uint64_t array, uint64_t trans)
    for (i = 0; i <= len; i++) {
        uint8_t byte = ldub(array + i);
        uint8_t new_byte = ldub(trans + byte);
        stb(array + i, new_byte);
#ifndef CONFIG_USER_ONLY

void HELPER(load_psw)(uint64_t mask, uint64_t addr)
    load_psw(env, mask, addr);

static void program_interrupt(CPUState *env, uint32_t code, int ilc)
    qemu_log("program interrupt at %#" PRIx64 "\n", env->psw.addr);

    if (kvm_enabled()) {
        kvm_s390_interrupt(env, KVM_S390_PROGRAM_INT, code);
        env->int_pgm_code = code;
        env->int_pgm_ilc = ilc;
        env->exception_index = EXCP_PGM;

static void ext_interrupt(CPUState *env, int type, uint32_t param,
    cpu_inject_ext(env, type, param, param64);
int sclp_service_call(CPUState *env, uint32_t sccb, uint64_t code)
    printf("sclp(0x%x, 0x%" PRIx64 ")\n", sccb, code);

    if (sccb & ~0x7ffffff8ul) {
        fprintf(stderr, "KVM: invalid sccb address 0x%x\n", sccb);

    case SCLP_CMDW_READ_SCP_INFO:
    case SCLP_CMDW_READ_SCP_INFO_FORCED:
        while ((ram_size >> (20 + shift)) > 65535) {
        stw_phys(sccb + SCP_MEM_CODE, ram_size >> (20 + shift));
        stb_phys(sccb + SCP_INCREMENT, 1 << shift);
        stw_phys(sccb + SCP_RESPONSE_CODE, 0x10);

        if (kvm_enabled()) {
            kvm_s390_interrupt_internal(env, KVM_S390_INT_SERVICE,
            ext_interrupt(env, EXT_SERVICE, sccb & ~3, 0);
        printf("KVM: invalid sclp call 0x%x / 0x%" PRIx64 "x\n", sccb, code);
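/* READ SCP INFO reports memory as a 16-bit count of storage increments: the
   increment size starts at 1 MB and is doubled (shift incremented) until the
   increment count fits into 16 bits; both values are then stored into the
   SCCB. */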
/* SCLP service call */
uint32_t HELPER(servc)(uint32_t r1, uint64_t r2)
    if (sclp_service_call(env, r1, r2)) {

uint64_t HELPER(diag)(uint32_t num, uint64_t mem, uint64_t code)
        r = s390_virtio_hypercall(env, mem, code);

        program_interrupt(env, PGM_OPERATION, ILC_LATER_INC);
void HELPER(stidp)(uint64_t a1)
    stq(a1, env->cpu_num);

void HELPER(spx)(uint64_t a1)
    env->psa = prefix & 0xfffff000;
    qemu_log("prefix: %#x\n", prefix);
    tlb_flush_page(env, 0);
    tlb_flush_page(env, TARGET_PAGE_SIZE);
uint32_t HELPER(sck)(uint64_t a1)
    /* XXX not implemented - is it necessary? */

static inline uint64_t clock_value(CPUState *env)
    time = env->tod_offset +
           time2tod(qemu_get_clock_ns(vm_clock) - env->tod_basetime);

uint32_t HELPER(stck)(uint64_t a1)
    stq(a1, clock_value(env));

/* Store Clock Extended */
uint32_t HELPER(stcke)(uint64_t a1)
    /* basically the same value as stck */
    stq(a1 + 1, clock_value(env) | env->cpu_num);
    /* more fine grained than stck */
    /* XXX programmable fields */
/* Set Clock Comparator */
void HELPER(sckc)(uint64_t a1)
    uint64_t time = ldq(a1);

    if (time == -1ULL) {

    /* difference between now and then */
    time -= clock_value(env);
    time = (time * 125) >> 9;

    qemu_mod_timer(env->tod_timer, qemu_get_clock_ns(vm_clock) + time);
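/* (time * 125) >> 9 converts TOD-clock units to nanoseconds: one TOD unit is
   2^-12 microsecond, i.e. 1000/4096 ns = 125/512 ns. */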
/* Store Clock Comparator */
void HELPER(stckc)(uint64_t a1)

void HELPER(spt)(uint64_t a1)
    uint64_t time = ldq(a1);

    if (time == -1ULL) {

    time = (time * 125) >> 9;
    qemu_mod_timer(env->cpu_timer, qemu_get_clock_ns(vm_clock) + time);

/* Store CPU Timer */
void HELPER(stpt)(uint64_t a1)
/* Store System Information */
uint32_t HELPER(stsi)(uint64_t a0, uint32_t r0, uint32_t r1)
    if ((r0 & STSI_LEVEL_MASK) <= STSI_LEVEL_3 &&
        ((r0 & STSI_R0_RESERVED_MASK) || (r1 & STSI_R1_RESERVED_MASK))) {
        /* valid function code, invalid reserved bits */
        program_interrupt(env, PGM_SPECIFICATION, 2);

    sel1 = r0 & STSI_R0_SEL1_MASK;
    sel2 = r1 & STSI_R1_SEL2_MASK;

    /* XXX: spec exception if sysib is not 4k-aligned */

    switch (r0 & STSI_LEVEL_MASK) {
        if ((sel1 == 1) && (sel2 == 1)) {
            /* Basic Machine Configuration */
            struct sysib_111 sysib;

            memset(&sysib, 0, sizeof(sysib));
            ebcdic_put(sysib.manuf, "QEMU ", 16);
            /* same as machine type number in STORE CPU ID */
            ebcdic_put(sysib.type, "QEMU", 4);
            /* same as model number in STORE CPU ID */
            ebcdic_put(sysib.model, "QEMU ", 16);
            ebcdic_put(sysib.sequence, "QEMU ", 16);
            ebcdic_put(sysib.plant, "QEMU", 4);
            cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1);
        } else if ((sel1 == 2) && (sel2 == 1)) {
            /* Basic Machine CPU */
            struct sysib_121 sysib;

            memset(&sysib, 0, sizeof(sysib));
            /* XXX make different for different CPUs? */
            ebcdic_put(sysib.sequence, "QEMUQEMUQEMUQEMU", 16);
            ebcdic_put(sysib.plant, "QEMU", 4);
            stw_p(&sysib.cpu_addr, env->cpu_num);
            cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1);
        } else if ((sel1 == 2) && (sel2 == 2)) {
            /* Basic Machine CPUs */
            struct sysib_122 sysib;

            memset(&sysib, 0, sizeof(sysib));
            stl_p(&sysib.capability, 0x443afc29);
            /* XXX change when SMP comes */
            stw_p(&sysib.total_cpus, 1);
            stw_p(&sysib.active_cpus, 1);
            stw_p(&sysib.standby_cpus, 0);
            stw_p(&sysib.reserved_cpus, 0);
            cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1);

        if ((sel1 == 2) && (sel2 == 1)) {
            struct sysib_221 sysib;

            memset(&sysib, 0, sizeof(sysib));
            /* XXX make different for different CPUs? */
            ebcdic_put(sysib.sequence, "QEMUQEMUQEMUQEMU", 16);
            ebcdic_put(sysib.plant, "QEMU", 4);
            stw_p(&sysib.cpu_addr, env->cpu_num);
            stw_p(&sysib.cpu_id, 0);
            cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1);
        } else if ((sel1 == 2) && (sel2 == 2)) {
            struct sysib_222 sysib;

            memset(&sysib, 0, sizeof(sysib));
            stw_p(&sysib.lpar_num, 0);
            /* XXX change when SMP comes */
            stw_p(&sysib.total_cpus, 1);
            stw_p(&sysib.conf_cpus, 1);
            stw_p(&sysib.standby_cpus, 0);
            stw_p(&sysib.reserved_cpus, 0);
            ebcdic_put(sysib.name, "QEMU ", 8);
            stl_p(&sysib.caf, 1000);
            stw_p(&sysib.dedicated_cpus, 0);
            stw_p(&sysib.shared_cpus, 0);
            cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1);

        if ((sel1 == 2) && (sel2 == 2)) {
            struct sysib_322 sysib;

            memset(&sysib, 0, sizeof(sysib));
            /* XXX change when SMP comes */
            stw_p(&sysib.vm[0].total_cpus, 1);
            stw_p(&sysib.vm[0].conf_cpus, 1);
            stw_p(&sysib.vm[0].standby_cpus, 0);
            stw_p(&sysib.vm[0].reserved_cpus, 0);
            ebcdic_put(sysib.vm[0].name, "KVMguest", 8);
            stl_p(&sysib.vm[0].caf, 1000);
            ebcdic_put(sysib.vm[0].cpi, "KVM/Linux ", 16);
            cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1);

    case STSI_LEVEL_CURRENT:
        env->regs[0] = STSI_LEVEL_3;
void HELPER(lctlg)(uint32_t r1, uint64_t a2, uint32_t r3)
    for (i = r1;; i = (i + 1) % 16) {
        env->cregs[i] = ldq(src);
        HELPER_LOG("load ctl %d from 0x%" PRIx64 " == 0x%" PRIx64 "\n",
                   i, src, env->cregs[i]);
        src += sizeof(uint64_t);

void HELPER(lctl)(uint32_t r1, uint64_t a2, uint32_t r3)
    for (i = r1;; i = (i + 1) % 16) {
        env->cregs[i] = (env->cregs[i] & 0xFFFFFFFF00000000ULL) | ldl(src);
        src += sizeof(uint32_t);

void HELPER(stctg)(uint32_t r1, uint64_t a2, uint32_t r3)
    for (i = r1;; i = (i + 1) % 16) {
        stq(dest, env->cregs[i]);
        dest += sizeof(uint64_t);

void HELPER(stctl)(uint32_t r1, uint64_t a2, uint32_t r3)
    for (i = r1;; i = (i + 1) % 16) {
        stl(dest, env->cregs[i]);
        dest += sizeof(uint32_t);
uint32_t HELPER(tprot)(uint64_t a1, uint64_t a2)

/* insert storage key extended */
uint64_t HELPER(iske)(uint64_t r2)
    uint64_t addr = get_address(0, 0, r2);

    if (addr > ram_size) {

    return env->storage_keys[addr / TARGET_PAGE_SIZE];

/* set storage key extended */
void HELPER(sske)(uint32_t r1, uint64_t r2)
    uint64_t addr = get_address(0, 0, r2);

    if (addr > ram_size) {

    env->storage_keys[addr / TARGET_PAGE_SIZE] = r1;

/* reset reference bit extended */
uint32_t HELPER(rrbe)(uint32_t r1, uint64_t r2)
    if (r2 > ram_size) {

    key = env->storage_keys[r2 / TARGET_PAGE_SIZE];
    re = key & (SK_R | SK_C);
    env->storage_keys[r2 / TARGET_PAGE_SIZE] = (key & ~SK_R);

    /*
     * 0  Reference bit zero; change bit zero
     * 1  Reference bit zero; change bit one
     * 2  Reference bit one; change bit zero
     * 3  Reference bit one; change bit one
     */
/* compare and swap and purge */
uint32_t HELPER(csp)(uint32_t r1, uint32_t r2)
    uint32_t o1 = env->regs[r1];
    uint64_t a2 = get_address_31fix(r2) & ~3ULL;
    uint32_t o2 = ldl(a2);

        stl(a2, env->regs[(r1 + 1) & 15]);
        if (env->regs[r2] & 0x3) {
            /* flush TLB / ALB */
        env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | o2;
static uint32_t mvc_asc(int64_t l, uint64_t a1, uint64_t mode1, uint64_t a2,
                        uint64_t mode2)
    target_ulong src, dest;
    int flags, cc = 0, i;

    } else if (l > 256) {

    if (mmu_translate(env, a1 & TARGET_PAGE_MASK, 1, mode1, &dest, &flags)) {
    dest |= a1 & ~TARGET_PAGE_MASK;

    if (mmu_translate(env, a2 & TARGET_PAGE_MASK, 0, mode2, &src, &flags)) {
    src |= a2 & ~TARGET_PAGE_MASK;

    /* XXX replace w/ memcpy */
    for (i = 0; i < l; i++) {
        /* XXX be more clever */
        if ((((dest + i) & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) ||
            (((src + i) & TARGET_PAGE_MASK) != (src & TARGET_PAGE_MASK))) {
            mvc_asc(l - i, a1 + i, mode1, a2 + i, mode2);
        stb_phys(dest + i, ldub_phys(src + i));

uint32_t HELPER(mvcs)(uint64_t l, uint64_t a1, uint64_t a2)
    HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
               __FUNCTION__, l, a1, a2);
    return mvc_asc(l, a1, PSW_ASC_SECONDARY, a2, PSW_ASC_PRIMARY);

uint32_t HELPER(mvcp)(uint64_t l, uint64_t a1, uint64_t a2)
    HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
               __FUNCTION__, l, a1, a2);
    return mvc_asc(l, a1, PSW_ASC_PRIMARY, a2, PSW_ASC_SECONDARY);
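/* MVCP moves from the secondary into the primary address space and MVCS the
   other way round; mvc_asc() therefore translates destination and source
   under independent ASC modes before copying byte by byte through physical
   memory. */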
uint32_t HELPER(sigp)(uint64_t order_code, uint32_t r1, uint64_t cpu_addr)
    HELPER_LOG("%s: %016" PRIx64 " %08x %016" PRIx64 "\n",
               __FUNCTION__, order_code, r1, cpu_addr);

    /* Remember: Use "R1 or R1+1, whichever is the odd-numbered register"
       as parameter (input). Status (output) is always R1. */

    switch (order_code) {
        /* enumerate CPU status */
        /* XXX implement when SMP comes */
        env->regs[r1] &= 0xffffffff00000000ULL;
#if !defined (CONFIG_USER_ONLY)
        qemu_system_reset_request();
        qemu_system_shutdown_request();
        fprintf(stderr, "XXX unknown sigp: 0x%" PRIx64 "\n", order_code);
)(uint64_t a1
)
2932 HELPER_LOG("%s: %16" PRIx64
"\n", __FUNCTION__
, a1
);
2934 switch (a1
& 0xf00) {
2936 env
->psw
.mask
&= ~PSW_MASK_ASC
;
2937 env
->psw
.mask
|= PSW_ASC_PRIMARY
;
2940 env
->psw
.mask
&= ~PSW_MASK_ASC
;
2941 env
->psw
.mask
|= PSW_ASC_SECONDARY
;
2944 env
->psw
.mask
&= ~PSW_MASK_ASC
;
2945 env
->psw
.mask
|= PSW_ASC_HOME
;
2948 qemu_log("unknown sacf mode: %" PRIx64
"\n", a1
);
2949 program_interrupt(env
, PGM_SPECIFICATION
, 2);
/* invalidate pte */
void HELPER(ipte)(uint64_t pte_addr, uint64_t vaddr)
    uint64_t page = vaddr & TARGET_PAGE_MASK;

    /* XXX broadcast to other CPUs */

    /* XXX Linux is nice enough to give us the exact pte address.
       According to spec we'd have to find it out ourselves */
    /* XXX Linux is fine with overwriting the pte, the spec requires
       us to only set the invalid bit */
    stq_phys(pte_addr, pte | _PAGE_INVALID);

    /* XXX we exploit the fact that Linux passes the exact virtual
       address here - it's not obliged to! */
    tlb_flush_page(env, page);

    /* XXX 31-bit hack */
    if (page & 0x80000000) {
        tlb_flush_page(env, page & ~0x80000000);
        tlb_flush_page(env, page | 0x80000000);

/* flush local tlb */
void HELPER(ptlb)(void)
/* store using real address */
void HELPER(stura)(uint64_t addr, uint32_t v1)
    stw_phys(get_address(0, 0, addr), v1);

/* load real address */
uint32_t HELPER(lra)(uint64_t addr, uint32_t r1)
    int old_exc = env->exception_index;
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;

    /* XXX incomplete - has more corner cases */
    if (!(env->psw.mask & PSW_MASK_64) && (addr >> 32)) {
        program_interrupt(env, PGM_SPECIAL_OP, 2);

    env->exception_index = old_exc;
    if (mmu_translate(env, addr, 0, asc, &ret, &flags)) {
        if (env->exception_index == EXCP_PGM) {
            ret = env->int_pgm_code | 0x80000000;
            ret |= addr & ~TARGET_PAGE_MASK;
    env->exception_index = old_exc;

    if (!(env->psw.mask & PSW_MASK_64)) {
        env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | (ret & 0xffffffffULL);
        env->regs[r1] = ret;