/*
 *  S/390 helper routines
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2009 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
22 #include "dyngen-exec.h"
23 #include "host-utils.h"
27 #include "qemu-timer.h"
29 #include <linux/kvm.h>
32 /*****************************************************************************/
34 #if !defined (CONFIG_USER_ONLY)
35 #include "softmmu_exec.h"
37 #define MMUSUFFIX _mmu
40 #include "softmmu_template.h"
43 #include "softmmu_template.h"
46 #include "softmmu_template.h"
49 #include "softmmu_template.h"
51 /* try to fill the TLB and return an exception if error. If retaddr is
52 NULL, it means that the function was called in C code (i.e. not
53 from generated code or from helper.c) */
54 /* XXX: fix it to restore all registers */
55 void tlb_fill(CPUState
*env1
, target_ulong addr
, int is_write
, int mmu_idx
,
65 ret
= cpu_s390x_handle_mmu_fault(env
, addr
, is_write
, mmu_idx
);
66 if (unlikely(ret
!= 0)) {
67 if (likely(retaddr
)) {
68 /* now we have a real cpu fault */
69 pc
= (unsigned long)retaddr
;
72 /* the PC is inside the translated code. It means that we have
73 a virtual CPU fault */
74 cpu_restore_state(tb
, env
, pc
);
84 /* #define DEBUG_HELPER */
86 #define HELPER_LOG(x...) qemu_log(x)
88 #define HELPER_LOG(x...)
91 /* raise an exception */
92 void HELPER(exception
)(uint32_t excp
)
94 HELPER_LOG("%s: exception %d\n", __FUNCTION__
, excp
);
95 env
->exception_index
= excp
;
99 #ifndef CONFIG_USER_ONLY
100 static void mvc_fast_memset(CPUState
*env
, uint32_t l
, uint64_t dest
,
103 target_phys_addr_t dest_phys
;
104 target_phys_addr_t len
= l
;
106 uint64_t asc
= env
->psw
.mask
& PSW_MASK_ASC
;
109 if (mmu_translate(env
, dest
, 1, asc
, &dest_phys
, &flags
)) {
111 cpu_abort(env
, "should never reach here");
113 dest_phys
|= dest
& ~TARGET_PAGE_MASK
;
115 dest_p
= cpu_physical_memory_map(dest_phys
, &len
, 1);
117 memset(dest_p
, byte
, len
);
119 cpu_physical_memory_unmap(dest_p
, 1, len
, len
);
122 static void mvc_fast_memmove(CPUState
*env
, uint32_t l
, uint64_t dest
,
125 target_phys_addr_t dest_phys
;
126 target_phys_addr_t src_phys
;
127 target_phys_addr_t len
= l
;
130 uint64_t asc
= env
->psw
.mask
& PSW_MASK_ASC
;
133 if (mmu_translate(env
, dest
, 1, asc
, &dest_phys
, &flags
)) {
135 cpu_abort(env
, "should never reach here");
137 dest_phys
|= dest
& ~TARGET_PAGE_MASK
;
139 if (mmu_translate(env
, src
, 0, asc
, &src_phys
, &flags
)) {
141 cpu_abort(env
, "should never reach here");
143 src_phys
|= src
& ~TARGET_PAGE_MASK
;
145 dest_p
= cpu_physical_memory_map(dest_phys
, &len
, 1);
146 src_p
= cpu_physical_memory_map(src_phys
, &len
, 0);
148 memmove(dest_p
, src_p
, len
);
150 cpu_physical_memory_unmap(dest_p
, 1, len
, len
);
151 cpu_physical_memory_unmap(src_p
, 0, len
, len
);
156 uint32_t HELPER(nc
)(uint32_t l
, uint64_t dest
, uint64_t src
)
162 HELPER_LOG("%s l %d dest %" PRIx64
" src %" PRIx64
"\n",
163 __FUNCTION__
, l
, dest
, src
);
164 for (i
= 0; i
<= l
; i
++) {
165 x
= ldub(dest
+ i
) & ldub(src
+ i
);
175 uint32_t HELPER(xc
)(uint32_t l
, uint64_t dest
, uint64_t src
)
181 HELPER_LOG("%s l %d dest %" PRIx64
" src %" PRIx64
"\n",
182 __FUNCTION__
, l
, dest
, src
);
184 #ifndef CONFIG_USER_ONLY
185 /* xor with itself is the same as memset(0) */
186 if ((l
> 32) && (src
== dest
) &&
187 (src
& TARGET_PAGE_MASK
) == ((src
+ l
) & TARGET_PAGE_MASK
)) {
188 mvc_fast_memset(env
, l
+ 1, dest
, 0);
193 memset(g2h(dest
), 0, l
+ 1);
198 for (i
= 0; i
<= l
; i
++) {
199 x
= ldub(dest
+ i
) ^ ldub(src
+ i
);
209 uint32_t HELPER(oc
)(uint32_t l
, uint64_t dest
, uint64_t src
)
215 HELPER_LOG("%s l %d dest %" PRIx64
" src %" PRIx64
"\n",
216 __FUNCTION__
, l
, dest
, src
);
217 for (i
= 0; i
<= l
; i
++) {
218 x
= ldub(dest
+ i
) | ldub(src
+ i
);
228 void HELPER(mvc
)(uint32_t l
, uint64_t dest
, uint64_t src
)
232 uint32_t l_64
= (l
+ 1) / 8;
234 HELPER_LOG("%s l %d dest %" PRIx64
" src %" PRIx64
"\n",
235 __FUNCTION__
, l
, dest
, src
);
237 #ifndef CONFIG_USER_ONLY
239 (src
& TARGET_PAGE_MASK
) == ((src
+ l
) & TARGET_PAGE_MASK
) &&
240 (dest
& TARGET_PAGE_MASK
) == ((dest
+ l
) & TARGET_PAGE_MASK
)) {
241 if (dest
== (src
+ 1)) {
242 mvc_fast_memset(env
, l
+ 1, dest
, ldub(src
));
244 } else if ((src
& TARGET_PAGE_MASK
) != (dest
& TARGET_PAGE_MASK
)) {
245 mvc_fast_memmove(env
, l
+ 1, dest
, src
);
250 if (dest
== (src
+ 1)) {
251 memset(g2h(dest
), ldub(src
), l
+ 1);
254 memmove(g2h(dest
), g2h(src
), l
+ 1);
259 /* handle the parts that fit into 8-byte loads/stores */
260 if (dest
!= (src
+ 1)) {
261 for (i
= 0; i
< l_64
; i
++) {
262 stq(dest
+ x
, ldq(src
+ x
));
267 /* slow version crossing pages with byte accesses */
268 for (i
= x
; i
<= l
; i
++) {
269 stb(dest
+ i
, ldub(src
+ i
));
273 /* compare unsigned byte arrays */
274 uint32_t HELPER(clc
)(uint32_t l
, uint64_t s1
, uint64_t s2
)
279 HELPER_LOG("%s l %d s1 %" PRIx64
" s2 %" PRIx64
"\n",
280 __FUNCTION__
, l
, s1
, s2
);
281 for (i
= 0; i
<= l
; i
++) {
284 HELPER_LOG("%02x (%c)/%02x (%c) ", x
, x
, y
, y
);
299 /* compare logical under mask */
300 uint32_t HELPER(clm
)(uint32_t r1
, uint32_t mask
, uint64_t addr
)
304 HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%" PRIx64
"\n", __FUNCTION__
, r1
,
310 r
= (r1
& 0xff000000UL
) >> 24;
311 HELPER_LOG("mask 0x%x %02x/%02x (0x%" PRIx64
") ", mask
, r
, d
,
322 mask
= (mask
<< 1) & 0xf;
329 /* store character under mask */
330 void HELPER(stcm
)(uint32_t r1
, uint32_t mask
, uint64_t addr
)
333 HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%lx\n", __FUNCTION__
, r1
, mask
,
337 r
= (r1
& 0xff000000UL
) >> 24;
339 HELPER_LOG("mask 0x%x %02x (0x%lx) ", mask
, r
, addr
);
342 mask
= (mask
<< 1) & 0xf;
348 /* 64/64 -> 128 unsigned multiplication */
349 void HELPER(mlg
)(uint32_t r1
, uint64_t v2
)
351 #if HOST_LONG_BITS == 64 && defined(__GNUC__)
352 /* assuming 64-bit hosts have __uint128_t */
353 __uint128_t res
= (__uint128_t
)env
->regs
[r1
+ 1];
354 res
*= (__uint128_t
)v2
;
355 env
->regs
[r1
] = (uint64_t)(res
>> 64);
356 env
->regs
[r1
+ 1] = (uint64_t)res
;
358 mulu64(&env
->regs
[r1
+ 1], &env
->regs
[r1
], env
->regs
[r1
+ 1], v2
);
362 /* 128 -> 64/64 unsigned division */
363 void HELPER(dlg
)(uint32_t r1
, uint64_t v2
)
365 uint64_t divisor
= v2
;
367 if (!env
->regs
[r1
]) {
368 /* 64 -> 64/64 case */
369 env
->regs
[r1
] = env
->regs
[r1
+1] % divisor
;
370 env
->regs
[r1
+1] = env
->regs
[r1
+1] / divisor
;
374 #if HOST_LONG_BITS == 64 && defined(__GNUC__)
375 /* assuming 64-bit hosts have __uint128_t */
376 __uint128_t dividend
= (((__uint128_t
)env
->regs
[r1
]) << 64) |
378 __uint128_t quotient
= dividend
/ divisor
;
379 env
->regs
[r1
+1] = quotient
;
380 __uint128_t remainder
= dividend
% divisor
;
381 env
->regs
[r1
] = remainder
;
383 /* 32-bit hosts would need special wrapper functionality - just abort if
384 we encounter such a case; it's very unlikely anyways. */
385 cpu_abort(env
, "128 -> 64/64 division not implemented\n");
390 static inline uint64_t get_address(int x2
, int b2
, int d2
)
403 if (!(env
->psw
.mask
& PSW_MASK_64
)) {
410 static inline uint64_t get_address_31fix(int reg
)
412 uint64_t r
= env
->regs
[reg
];
415 if (!(env
->psw
.mask
& PSW_MASK_64
)) {
422 /* search string (c is byte to search, r2 is string, r1 end of string) */
423 uint32_t HELPER(srst
)(uint32_t c
, uint32_t r1
, uint32_t r2
)
427 uint64_t str
= get_address_31fix(r2
);
428 uint64_t end
= get_address_31fix(r1
);
430 HELPER_LOG("%s: c %d *r1 0x%" PRIx64
" *r2 0x%" PRIx64
"\n", __FUNCTION__
,
431 c
, env
->regs
[r1
], env
->regs
[r2
]);
433 for (i
= str
; i
!= end
; i
++) {
444 /* unsigned string compare (c is string terminator) */
445 uint32_t HELPER(clst
)(uint32_t c
, uint32_t r1
, uint32_t r2
)
447 uint64_t s1
= get_address_31fix(r1
);
448 uint64_t s2
= get_address_31fix(r2
);
452 #ifdef CONFIG_USER_ONLY
454 HELPER_LOG("%s: comparing '%s' and '%s'\n",
455 __FUNCTION__
, (char*)g2h(s1
), (char*)g2h(s2
));
461 if ((v1
== c
|| v2
== c
) || (v1
!= v2
)) {
471 cc
= (v1
< v2
) ? 1 : 2;
472 /* FIXME: 31-bit mode! */
480 void HELPER(mvpg
)(uint64_t r0
, uint64_t r1
, uint64_t r2
)
482 /* XXX missing r0 handling */
483 #ifdef CONFIG_USER_ONLY
486 for (i
= 0; i
< TARGET_PAGE_SIZE
; i
++) {
487 stb(r1
+ i
, ldub(r2
+ i
));
490 mvc_fast_memmove(env
, TARGET_PAGE_SIZE
, r1
, r2
);
494 /* string copy (c is string terminator) */
495 void HELPER(mvst
)(uint32_t c
, uint32_t r1
, uint32_t r2
)
497 uint64_t dest
= get_address_31fix(r1
);
498 uint64_t src
= get_address_31fix(r2
);
501 #ifdef CONFIG_USER_ONLY
503 HELPER_LOG("%s: copy '%s' to 0x%lx\n", __FUNCTION__
, (char*)g2h(src
),
516 env
->regs
[r1
] = dest
; /* FIXME: 31-bit mode! */
519 /* compare and swap 64-bit */
520 uint32_t HELPER(csg
)(uint32_t r1
, uint64_t a2
, uint32_t r3
)
522 /* FIXME: locking? */
524 uint64_t v2
= ldq(a2
);
525 if (env
->regs
[r1
] == v2
) {
527 stq(a2
, env
->regs
[r3
]);
535 /* compare double and swap 64-bit */
536 uint32_t HELPER(cdsg
)(uint32_t r1
, uint64_t a2
, uint32_t r3
)
538 /* FIXME: locking? */
540 uint64_t v2_hi
= ldq(a2
);
541 uint64_t v2_lo
= ldq(a2
+ 8);
542 uint64_t v1_hi
= env
->regs
[r1
];
543 uint64_t v1_lo
= env
->regs
[r1
+ 1];
545 if ((v1_hi
== v2_hi
) && (v1_lo
== v2_lo
)) {
547 stq(a2
, env
->regs
[r3
]);
548 stq(a2
+ 8, env
->regs
[r3
+ 1]);
551 env
->regs
[r1
] = v2_hi
;
552 env
->regs
[r1
+ 1] = v2_lo
;
558 /* compare and swap 32-bit */
559 uint32_t HELPER(cs
)(uint32_t r1
, uint64_t a2
, uint32_t r3
)
561 /* FIXME: locking? */
563 HELPER_LOG("%s: r1 %d a2 0x%lx r3 %d\n", __FUNCTION__
, r1
, a2
, r3
);
564 uint32_t v2
= ldl(a2
);
565 if (((uint32_t)env
->regs
[r1
]) == v2
) {
567 stl(a2
, (uint32_t)env
->regs
[r3
]);
570 env
->regs
[r1
] = (env
->regs
[r1
] & 0xffffffff00000000ULL
) | v2
;
575 static uint32_t helper_icm(uint32_t r1
, uint64_t address
, uint32_t mask
)
577 int pos
= 24; /* top of the lower half of r1 */
578 uint64_t rmask
= 0xff000000ULL
;
585 env
->regs
[r1
] &= ~rmask
;
587 if ((val
& 0x80) && !ccd
) {
591 if (val
&& cc
== 0) {
594 env
->regs
[r1
] |= (uint64_t)val
<< pos
;
597 mask
= (mask
<< 1) & 0xf;
605 /* execute instruction
606 this instruction executes an insn modified with the contents of r1
607 it does not change the executed instruction in memory
608 it does not change the program counter
609 in other words: tricky...
610 currently implemented by interpreting the cases it is most commonly used in
612 uint32_t HELPER(ex
)(uint32_t cc
, uint64_t v1
, uint64_t addr
, uint64_t ret
)
614 uint16_t insn
= lduw_code(addr
);
615 HELPER_LOG("%s: v1 0x%lx addr 0x%lx insn 0x%x\n", __FUNCTION__
, v1
, addr
,
617 if ((insn
& 0xf0ff) == 0xd000) {
618 uint32_t l
, insn2
, b1
, b2
, d1
, d2
;
620 insn2
= ldl_code(addr
+ 2);
621 b1
= (insn2
>> 28) & 0xf;
622 b2
= (insn2
>> 12) & 0xf;
623 d1
= (insn2
>> 16) & 0xfff;
625 switch (insn
& 0xf00) {
627 helper_mvc(l
, get_address(0, b1
, d1
), get_address(0, b2
, d2
));
630 cc
= helper_clc(l
, get_address(0, b1
, d1
), get_address(0, b2
, d2
));
633 cc
= helper_xc(l
, get_address(0, b1
, d1
), get_address(0, b2
, d2
));
639 } else if ((insn
& 0xff00) == 0x0a00) {
640 /* supervisor call */
641 HELPER_LOG("%s: svc %ld via execute\n", __FUNCTION__
, (insn
|v1
) & 0xff);
642 env
->psw
.addr
= ret
- 4;
643 env
->int_svc_code
= (insn
|v1
) & 0xff;
644 env
->int_svc_ilc
= 4;
645 helper_exception(EXCP_SVC
);
646 } else if ((insn
& 0xff00) == 0xbf00) {
647 uint32_t insn2
, r1
, r3
, b2
, d2
;
648 insn2
= ldl_code(addr
+ 2);
649 r1
= (insn2
>> 20) & 0xf;
650 r3
= (insn2
>> 16) & 0xf;
651 b2
= (insn2
>> 12) & 0xf;
653 cc
= helper_icm(r1
, get_address(0, b2
, d2
), r3
);
656 cpu_abort(env
, "EXECUTE on instruction prefix 0x%x not implemented\n",
662 /* absolute value 32-bit */
663 uint32_t HELPER(abs_i32
)(int32_t val
)
672 /* negative absolute value 32-bit */
673 int32_t HELPER(nabs_i32
)(int32_t val
)
682 /* absolute value 64-bit */
683 uint64_t HELPER(abs_i64
)(int64_t val
)
685 HELPER_LOG("%s: val 0x%" PRIx64
"\n", __FUNCTION__
, val
);
694 /* negative absolute value 64-bit */
695 int64_t HELPER(nabs_i64
)(int64_t val
)
704 /* add with carry 32-bit unsigned */
705 uint32_t HELPER(addc_u32
)(uint32_t cc
, uint32_t v1
, uint32_t v2
)
717 /* store character under mask high operates on the upper half of r1 */
718 void HELPER(stcmh
)(uint32_t r1
, uint64_t address
, uint32_t mask
)
720 int pos
= 56; /* top of the upper half of r1 */
724 stb(address
, (env
->regs
[r1
] >> pos
) & 0xff);
727 mask
= (mask
<< 1) & 0xf;
732 /* insert character under mask high; same as icm, but operates on the
734 uint32_t HELPER(icmh
)(uint32_t r1
, uint64_t address
, uint32_t mask
)
736 int pos
= 56; /* top of the upper half of r1 */
737 uint64_t rmask
= 0xff00000000000000ULL
;
744 env
->regs
[r1
] &= ~rmask
;
746 if ((val
& 0x80) && !ccd
) {
750 if (val
&& cc
== 0) {
753 env
->regs
[r1
] |= (uint64_t)val
<< pos
;
756 mask
= (mask
<< 1) & 0xf;
764 /* insert psw mask and condition code into r1 */
765 void HELPER(ipm
)(uint32_t cc
, uint32_t r1
)
767 uint64_t r
= env
->regs
[r1
];
769 r
&= 0xffffffff00ffffffULL
;
770 r
|= (cc
<< 28) | ( (env
->psw
.mask
>> 40) & 0xf );
772 HELPER_LOG("%s: cc %d psw.mask 0x%lx r1 0x%lx\n", __FUNCTION__
,
773 cc
, env
->psw
.mask
, r
);
776 /* load access registers r1 to r3 from memory at a2 */
777 void HELPER(lam
)(uint32_t r1
, uint64_t a2
, uint32_t r3
)
781 for (i
= r1
;; i
= (i
+ 1) % 16) {
782 env
->aregs
[i
] = ldl(a2
);
791 /* store access registers r1 to r3 in memory at a2 */
792 void HELPER(stam
)(uint32_t r1
, uint64_t a2
, uint32_t r3
)
796 for (i
= r1
;; i
= (i
+ 1) % 16) {
797 stl(a2
, env
->aregs
[i
]);
807 uint32_t HELPER(mvcl
)(uint32_t r1
, uint32_t r2
)
809 uint64_t destlen
= env
->regs
[r1
+ 1] & 0xffffff;
810 uint64_t dest
= get_address_31fix(r1
);
811 uint64_t srclen
= env
->regs
[r2
+ 1] & 0xffffff;
812 uint64_t src
= get_address_31fix(r2
);
813 uint8_t pad
= src
>> 24;
817 if (destlen
== srclen
) {
819 } else if (destlen
< srclen
) {
825 if (srclen
> destlen
) {
829 for (; destlen
&& srclen
; src
++, dest
++, destlen
--, srclen
--) {
834 for (; destlen
; dest
++, destlen
--) {
838 env
->regs
[r1
+ 1] = destlen
;
839 /* can't use srclen here, we trunc'ed it */
840 env
->regs
[r2
+ 1] -= src
- env
->regs
[r2
];
841 env
->regs
[r1
] = dest
;
847 /* move long extended another memcopy insn with more bells and whistles */
848 uint32_t HELPER(mvcle
)(uint32_t r1
, uint64_t a2
, uint32_t r3
)
850 uint64_t destlen
= env
->regs
[r1
+ 1];
851 uint64_t dest
= env
->regs
[r1
];
852 uint64_t srclen
= env
->regs
[r3
+ 1];
853 uint64_t src
= env
->regs
[r3
];
854 uint8_t pad
= a2
& 0xff;
858 if (!(env
->psw
.mask
& PSW_MASK_64
)) {
859 destlen
= (uint32_t)destlen
;
860 srclen
= (uint32_t)srclen
;
865 if (destlen
== srclen
) {
867 } else if (destlen
< srclen
) {
873 if (srclen
> destlen
) {
877 for (; destlen
&& srclen
; src
++, dest
++, destlen
--, srclen
--) {
882 for (; destlen
; dest
++, destlen
--) {
886 env
->regs
[r1
+ 1] = destlen
;
887 /* can't use srclen here, we trunc'ed it */
888 /* FIXME: 31-bit mode! */
889 env
->regs
[r3
+ 1] -= src
- env
->regs
[r3
];
890 env
->regs
[r1
] = dest
;
896 /* compare logical long extended memcompare insn with padding */
897 uint32_t HELPER(clcle
)(uint32_t r1
, uint64_t a2
, uint32_t r3
)
899 uint64_t destlen
= env
->regs
[r1
+ 1];
900 uint64_t dest
= get_address_31fix(r1
);
901 uint64_t srclen
= env
->regs
[r3
+ 1];
902 uint64_t src
= get_address_31fix(r3
);
903 uint8_t pad
= a2
& 0xff;
904 uint8_t v1
= 0,v2
= 0;
907 if (!(destlen
|| srclen
)) {
911 if (srclen
> destlen
) {
915 for (; destlen
|| srclen
; src
++, dest
++, destlen
--, srclen
--) {
916 v1
= srclen
? ldub(src
) : pad
;
917 v2
= destlen
? ldub(dest
) : pad
;
919 cc
= (v1
< v2
) ? 1 : 2;
924 env
->regs
[r1
+ 1] = destlen
;
925 /* can't use srclen here, we trunc'ed it */
926 env
->regs
[r3
+ 1] -= src
- env
->regs
[r3
];
927 env
->regs
[r1
] = dest
;
933 /* subtract unsigned v2 from v1 with borrow */
934 uint32_t HELPER(slb
)(uint32_t cc
, uint32_t r1
, uint32_t v2
)
936 uint32_t v1
= env
->regs
[r1
];
937 uint32_t res
= v1
+ (~v2
) + (cc
>> 1);
939 env
->regs
[r1
] = (env
->regs
[r1
] & 0xffffffff00000000ULL
) | res
;
948 /* subtract unsigned v2 from v1 with borrow */
949 uint32_t HELPER(slbg
)(uint32_t cc
, uint32_t r1
, uint64_t v1
, uint64_t v2
)
951 uint64_t res
= v1
+ (~v2
) + (cc
>> 1);
962 static inline int float_comp_to_cc(int float_compare
)
964 switch (float_compare
) {
965 case float_relation_equal
:
967 case float_relation_less
:
969 case float_relation_greater
:
971 case float_relation_unordered
:
974 cpu_abort(env
, "unknown return value for float compare\n");
978 /* condition codes for binary FP ops */
979 static uint32_t set_cc_f32(float32 v1
, float32 v2
)
981 return float_comp_to_cc(float32_compare_quiet(v1
, v2
, &env
->fpu_status
));
984 static uint32_t set_cc_f64(float64 v1
, float64 v2
)
986 return float_comp_to_cc(float64_compare_quiet(v1
, v2
, &env
->fpu_status
));
989 /* condition codes for unary FP ops */
990 static uint32_t set_cc_nz_f32(float32 v
)
992 if (float32_is_any_nan(v
)) {
994 } else if (float32_is_zero(v
)) {
996 } else if (float32_is_neg(v
)) {
1003 static uint32_t set_cc_nz_f64(float64 v
)
1005 if (float64_is_any_nan(v
)) {
1007 } else if (float64_is_zero(v
)) {
1009 } else if (float64_is_neg(v
)) {
1016 static uint32_t set_cc_nz_f128(float128 v
)
1018 if (float128_is_any_nan(v
)) {
1020 } else if (float128_is_zero(v
)) {
1022 } else if (float128_is_neg(v
)) {
1029 /* convert 32-bit int to 64-bit float */
1030 void HELPER(cdfbr
)(uint32_t f1
, int32_t v2
)
1032 HELPER_LOG("%s: converting %d to f%d\n", __FUNCTION__
, v2
, f1
);
1033 env
->fregs
[f1
].d
= int32_to_float64(v2
, &env
->fpu_status
);
1036 /* convert 32-bit int to 128-bit float */
1037 void HELPER(cxfbr
)(uint32_t f1
, int32_t v2
)
1040 v1
.q
= int32_to_float128(v2
, &env
->fpu_status
);
1041 env
->fregs
[f1
].ll
= v1
.ll
.upper
;
1042 env
->fregs
[f1
+ 2].ll
= v1
.ll
.lower
;
1045 /* convert 64-bit int to 32-bit float */
1046 void HELPER(cegbr
)(uint32_t f1
, int64_t v2
)
1048 HELPER_LOG("%s: converting %ld to f%d\n", __FUNCTION__
, v2
, f1
);
1049 env
->fregs
[f1
].l
.upper
= int64_to_float32(v2
, &env
->fpu_status
);
1052 /* convert 64-bit int to 64-bit float */
1053 void HELPER(cdgbr
)(uint32_t f1
, int64_t v2
)
1055 HELPER_LOG("%s: converting %ld to f%d\n", __FUNCTION__
, v2
, f1
);
1056 env
->fregs
[f1
].d
= int64_to_float64(v2
, &env
->fpu_status
);
1059 /* convert 64-bit int to 128-bit float */
1060 void HELPER(cxgbr
)(uint32_t f1
, int64_t v2
)
1063 x1
.q
= int64_to_float128(v2
, &env
->fpu_status
);
1064 HELPER_LOG("%s: converted %ld to 0x%lx and 0x%lx\n", __FUNCTION__
, v2
,
1065 x1
.ll
.upper
, x1
.ll
.lower
);
1066 env
->fregs
[f1
].ll
= x1
.ll
.upper
;
1067 env
->fregs
[f1
+ 2].ll
= x1
.ll
.lower
;
1070 /* convert 32-bit int to 32-bit float */
1071 void HELPER(cefbr
)(uint32_t f1
, int32_t v2
)
1073 env
->fregs
[f1
].l
.upper
= int32_to_float32(v2
, &env
->fpu_status
);
1074 HELPER_LOG("%s: converting %d to 0x%d in f%d\n", __FUNCTION__
, v2
,
1075 env
->fregs
[f1
].l
.upper
, f1
);
1078 /* 32-bit FP addition RR */
1079 uint32_t HELPER(aebr
)(uint32_t f1
, uint32_t f2
)
1081 env
->fregs
[f1
].l
.upper
= float32_add(env
->fregs
[f1
].l
.upper
,
1082 env
->fregs
[f2
].l
.upper
,
1084 HELPER_LOG("%s: adding 0x%d resulting in 0x%d in f%d\n", __FUNCTION__
,
1085 env
->fregs
[f2
].l
.upper
, env
->fregs
[f1
].l
.upper
, f1
);
1087 return set_cc_nz_f32(env
->fregs
[f1
].l
.upper
);
1090 /* 64-bit FP addition RR */
1091 uint32_t HELPER(adbr
)(uint32_t f1
, uint32_t f2
)
1093 env
->fregs
[f1
].d
= float64_add(env
->fregs
[f1
].d
, env
->fregs
[f2
].d
,
1095 HELPER_LOG("%s: adding 0x%ld resulting in 0x%ld in f%d\n", __FUNCTION__
,
1096 env
->fregs
[f2
].d
, env
->fregs
[f1
].d
, f1
);
1098 return set_cc_nz_f64(env
->fregs
[f1
].d
);
1101 /* 32-bit FP subtraction RR */
1102 uint32_t HELPER(sebr
)(uint32_t f1
, uint32_t f2
)
1104 env
->fregs
[f1
].l
.upper
= float32_sub(env
->fregs
[f1
].l
.upper
,
1105 env
->fregs
[f2
].l
.upper
,
1107 HELPER_LOG("%s: adding 0x%d resulting in 0x%d in f%d\n", __FUNCTION__
,
1108 env
->fregs
[f2
].l
.upper
, env
->fregs
[f1
].l
.upper
, f1
);
1110 return set_cc_nz_f32(env
->fregs
[f1
].l
.upper
);
1113 /* 64-bit FP subtraction RR */
1114 uint32_t HELPER(sdbr
)(uint32_t f1
, uint32_t f2
)
1116 env
->fregs
[f1
].d
= float64_sub(env
->fregs
[f1
].d
, env
->fregs
[f2
].d
,
1118 HELPER_LOG("%s: subtracting 0x%ld resulting in 0x%ld in f%d\n",
1119 __FUNCTION__
, env
->fregs
[f2
].d
, env
->fregs
[f1
].d
, f1
);
1121 return set_cc_nz_f64(env
->fregs
[f1
].d
);
1124 /* 32-bit FP division RR */
1125 void HELPER(debr
)(uint32_t f1
, uint32_t f2
)
1127 env
->fregs
[f1
].l
.upper
= float32_div(env
->fregs
[f1
].l
.upper
,
1128 env
->fregs
[f2
].l
.upper
,
1132 /* 128-bit FP division RR */
1133 void HELPER(dxbr
)(uint32_t f1
, uint32_t f2
)
1136 v1
.ll
.upper
= env
->fregs
[f1
].ll
;
1137 v1
.ll
.lower
= env
->fregs
[f1
+ 2].ll
;
1139 v2
.ll
.upper
= env
->fregs
[f2
].ll
;
1140 v2
.ll
.lower
= env
->fregs
[f2
+ 2].ll
;
1142 res
.q
= float128_div(v1
.q
, v2
.q
, &env
->fpu_status
);
1143 env
->fregs
[f1
].ll
= res
.ll
.upper
;
1144 env
->fregs
[f1
+ 2].ll
= res
.ll
.lower
;
1147 /* 64-bit FP multiplication RR */
1148 void HELPER(mdbr
)(uint32_t f1
, uint32_t f2
)
1150 env
->fregs
[f1
].d
= float64_mul(env
->fregs
[f1
].d
, env
->fregs
[f2
].d
,
1154 /* 128-bit FP multiplication RR */
1155 void HELPER(mxbr
)(uint32_t f1
, uint32_t f2
)
1158 v1
.ll
.upper
= env
->fregs
[f1
].ll
;
1159 v1
.ll
.lower
= env
->fregs
[f1
+ 2].ll
;
1161 v2
.ll
.upper
= env
->fregs
[f2
].ll
;
1162 v2
.ll
.lower
= env
->fregs
[f2
+ 2].ll
;
1164 res
.q
= float128_mul(v1
.q
, v2
.q
, &env
->fpu_status
);
1165 env
->fregs
[f1
].ll
= res
.ll
.upper
;
1166 env
->fregs
[f1
+ 2].ll
= res
.ll
.lower
;
1169 /* convert 32-bit float to 64-bit float */
1170 void HELPER(ldebr
)(uint32_t r1
, uint32_t r2
)
1172 env
->fregs
[r1
].d
= float32_to_float64(env
->fregs
[r2
].l
.upper
,
1176 /* convert 128-bit float to 64-bit float */
1177 void HELPER(ldxbr
)(uint32_t f1
, uint32_t f2
)
1180 x2
.ll
.upper
= env
->fregs
[f2
].ll
;
1181 x2
.ll
.lower
= env
->fregs
[f2
+ 2].ll
;
1182 env
->fregs
[f1
].d
= float128_to_float64(x2
.q
, &env
->fpu_status
);
1183 HELPER_LOG("%s: to 0x%ld\n", __FUNCTION__
, env
->fregs
[f1
].d
);
1186 /* convert 64-bit float to 128-bit float */
1187 void HELPER(lxdbr
)(uint32_t f1
, uint32_t f2
)
1190 res
.q
= float64_to_float128(env
->fregs
[f2
].d
, &env
->fpu_status
);
1191 env
->fregs
[f1
].ll
= res
.ll
.upper
;
1192 env
->fregs
[f1
+ 2].ll
= res
.ll
.lower
;
1195 /* convert 64-bit float to 32-bit float */
1196 void HELPER(ledbr
)(uint32_t f1
, uint32_t f2
)
1198 float64 d2
= env
->fregs
[f2
].d
;
1199 env
->fregs
[f1
].l
.upper
= float64_to_float32(d2
, &env
->fpu_status
);
1202 /* convert 128-bit float to 32-bit float */
1203 void HELPER(lexbr
)(uint32_t f1
, uint32_t f2
)
1206 x2
.ll
.upper
= env
->fregs
[f2
].ll
;
1207 x2
.ll
.lower
= env
->fregs
[f2
+ 2].ll
;
1208 env
->fregs
[f1
].l
.upper
= float128_to_float32(x2
.q
, &env
->fpu_status
);
1209 HELPER_LOG("%s: to 0x%d\n", __FUNCTION__
, env
->fregs
[f1
].l
.upper
);
1212 /* absolute value of 32-bit float */
1213 uint32_t HELPER(lpebr
)(uint32_t f1
, uint32_t f2
)
1216 float32 v2
= env
->fregs
[f2
].d
;
1217 v1
= float32_abs(v2
);
1218 env
->fregs
[f1
].d
= v1
;
1219 return set_cc_nz_f32(v1
);
1222 /* absolute value of 64-bit float */
1223 uint32_t HELPER(lpdbr
)(uint32_t f1
, uint32_t f2
)
1226 float64 v2
= env
->fregs
[f2
].d
;
1227 v1
= float64_abs(v2
);
1228 env
->fregs
[f1
].d
= v1
;
1229 return set_cc_nz_f64(v1
);
1232 /* absolute value of 128-bit float */
1233 uint32_t HELPER(lpxbr
)(uint32_t f1
, uint32_t f2
)
1237 v2
.ll
.upper
= env
->fregs
[f2
].ll
;
1238 v2
.ll
.lower
= env
->fregs
[f2
+ 2].ll
;
1239 v1
.q
= float128_abs(v2
.q
);
1240 env
->fregs
[f1
].ll
= v1
.ll
.upper
;
1241 env
->fregs
[f1
+ 2].ll
= v1
.ll
.lower
;
1242 return set_cc_nz_f128(v1
.q
);
1245 /* load and test 64-bit float */
1246 uint32_t HELPER(ltdbr
)(uint32_t f1
, uint32_t f2
)
1248 env
->fregs
[f1
].d
= env
->fregs
[f2
].d
;
1249 return set_cc_nz_f64(env
->fregs
[f1
].d
);
1252 /* load and test 32-bit float */
1253 uint32_t HELPER(ltebr
)(uint32_t f1
, uint32_t f2
)
1255 env
->fregs
[f1
].l
.upper
= env
->fregs
[f2
].l
.upper
;
1256 return set_cc_nz_f32(env
->fregs
[f1
].l
.upper
);
1259 /* load and test 128-bit float */
1260 uint32_t HELPER(ltxbr
)(uint32_t f1
, uint32_t f2
)
1263 x
.ll
.upper
= env
->fregs
[f2
].ll
;
1264 x
.ll
.lower
= env
->fregs
[f2
+ 2].ll
;
1265 env
->fregs
[f1
].ll
= x
.ll
.upper
;
1266 env
->fregs
[f1
+ 2].ll
= x
.ll
.lower
;
1267 return set_cc_nz_f128(x
.q
);
1270 /* load complement of 32-bit float */
1271 uint32_t HELPER(lcebr
)(uint32_t f1
, uint32_t f2
)
1273 env
->fregs
[f1
].l
.upper
= float32_chs(env
->fregs
[f2
].l
.upper
);
1275 return set_cc_nz_f32(env
->fregs
[f1
].l
.upper
);
1278 /* load complement of 64-bit float */
1279 uint32_t HELPER(lcdbr
)(uint32_t f1
, uint32_t f2
)
1281 env
->fregs
[f1
].d
= float64_chs(env
->fregs
[f2
].d
);
1283 return set_cc_nz_f64(env
->fregs
[f1
].d
);
1286 /* load complement of 128-bit float */
1287 uint32_t HELPER(lcxbr
)(uint32_t f1
, uint32_t f2
)
1290 x2
.ll
.upper
= env
->fregs
[f2
].ll
;
1291 x2
.ll
.lower
= env
->fregs
[f2
+ 2].ll
;
1292 x1
.q
= float128_chs(x2
.q
);
1293 env
->fregs
[f1
].ll
= x1
.ll
.upper
;
1294 env
->fregs
[f1
+ 2].ll
= x1
.ll
.lower
;
1295 return set_cc_nz_f128(x1
.q
);
1298 /* 32-bit FP addition RM */
1299 void HELPER(aeb
)(uint32_t f1
, uint32_t val
)
1301 float32 v1
= env
->fregs
[f1
].l
.upper
;
1304 HELPER_LOG("%s: adding 0x%d from f%d and 0x%d\n", __FUNCTION__
,
1306 env
->fregs
[f1
].l
.upper
= float32_add(v1
, v2
.f
, &env
->fpu_status
);
1309 /* 32-bit FP division RM */
1310 void HELPER(deb
)(uint32_t f1
, uint32_t val
)
1312 float32 v1
= env
->fregs
[f1
].l
.upper
;
1315 HELPER_LOG("%s: dividing 0x%d from f%d by 0x%d\n", __FUNCTION__
,
1317 env
->fregs
[f1
].l
.upper
= float32_div(v1
, v2
.f
, &env
->fpu_status
);
1320 /* 32-bit FP multiplication RM */
1321 void HELPER(meeb
)(uint32_t f1
, uint32_t val
)
1323 float32 v1
= env
->fregs
[f1
].l
.upper
;
1326 HELPER_LOG("%s: multiplying 0x%d from f%d and 0x%d\n", __FUNCTION__
,
1328 env
->fregs
[f1
].l
.upper
= float32_mul(v1
, v2
.f
, &env
->fpu_status
);
1331 /* 32-bit FP compare RR */
1332 uint32_t HELPER(cebr
)(uint32_t f1
, uint32_t f2
)
1334 float32 v1
= env
->fregs
[f1
].l
.upper
;
1335 float32 v2
= env
->fregs
[f2
].l
.upper
;;
1336 HELPER_LOG("%s: comparing 0x%d from f%d and 0x%d\n", __FUNCTION__
,
1338 return set_cc_f32(v1
, v2
);
1341 /* 64-bit FP compare RR */
1342 uint32_t HELPER(cdbr
)(uint32_t f1
, uint32_t f2
)
1344 float64 v1
= env
->fregs
[f1
].d
;
1345 float64 v2
= env
->fregs
[f2
].d
;;
1346 HELPER_LOG("%s: comparing 0x%ld from f%d and 0x%ld\n", __FUNCTION__
,
1348 return set_cc_f64(v1
, v2
);
1351 /* 128-bit FP compare RR */
1352 uint32_t HELPER(cxbr
)(uint32_t f1
, uint32_t f2
)
1355 v1
.ll
.upper
= env
->fregs
[f1
].ll
;
1356 v1
.ll
.lower
= env
->fregs
[f1
+ 2].ll
;
1358 v2
.ll
.upper
= env
->fregs
[f2
].ll
;
1359 v2
.ll
.lower
= env
->fregs
[f2
+ 2].ll
;
1361 return float_comp_to_cc(float128_compare_quiet(v1
.q
, v2
.q
,
1365 /* 64-bit FP compare RM */
1366 uint32_t HELPER(cdb
)(uint32_t f1
, uint64_t a2
)
1368 float64 v1
= env
->fregs
[f1
].d
;
1371 HELPER_LOG("%s: comparing 0x%ld from f%d and 0x%lx\n", __FUNCTION__
, v1
,
1373 return set_cc_f64(v1
, v2
.d
);
1376 /* 64-bit FP addition RM */
1377 uint32_t HELPER(adb
)(uint32_t f1
, uint64_t a2
)
1379 float64 v1
= env
->fregs
[f1
].d
;
1382 HELPER_LOG("%s: adding 0x%lx from f%d and 0x%lx\n", __FUNCTION__
,
1384 env
->fregs
[f1
].d
= v1
= float64_add(v1
, v2
.d
, &env
->fpu_status
);
1385 return set_cc_nz_f64(v1
);
1388 /* 32-bit FP subtraction RM */
1389 void HELPER(seb
)(uint32_t f1
, uint32_t val
)
1391 float32 v1
= env
->fregs
[f1
].l
.upper
;
1394 env
->fregs
[f1
].l
.upper
= float32_sub(v1
, v2
.f
, &env
->fpu_status
);
1397 /* 64-bit FP subtraction RM */
1398 uint32_t HELPER(sdb
)(uint32_t f1
, uint64_t a2
)
1400 float64 v1
= env
->fregs
[f1
].d
;
1403 env
->fregs
[f1
].d
= v1
= float64_sub(v1
, v2
.d
, &env
->fpu_status
);
1404 return set_cc_nz_f64(v1
);
1407 /* 64-bit FP multiplication RM */
1408 void HELPER(mdb
)(uint32_t f1
, uint64_t a2
)
1410 float64 v1
= env
->fregs
[f1
].d
;
1413 HELPER_LOG("%s: multiplying 0x%lx from f%d and 0x%ld\n", __FUNCTION__
,
1415 env
->fregs
[f1
].d
= float64_mul(v1
, v2
.d
, &env
->fpu_status
);
1418 /* 64-bit FP division RM */
1419 void HELPER(ddb
)(uint32_t f1
, uint64_t a2
)
1421 float64 v1
= env
->fregs
[f1
].d
;
1424 HELPER_LOG("%s: dividing 0x%lx from f%d by 0x%ld\n", __FUNCTION__
,
1426 env
->fregs
[f1
].d
= float64_div(v1
, v2
.d
, &env
->fpu_status
);
1429 static void set_round_mode(int m3
)
1436 /* biased round no nearest */
1438 /* round to nearest */
1439 set_float_rounding_mode(float_round_nearest_even
, &env
->fpu_status
);
1443 set_float_rounding_mode(float_round_to_zero
, &env
->fpu_status
);
1447 set_float_rounding_mode(float_round_up
, &env
->fpu_status
);
1451 set_float_rounding_mode(float_round_down
, &env
->fpu_status
);
1456 /* convert 32-bit float to 64-bit int */
1457 uint32_t HELPER(cgebr
)(uint32_t r1
, uint32_t f2
, uint32_t m3
)
1459 float32 v2
= env
->fregs
[f2
].l
.upper
;
1461 env
->regs
[r1
] = float32_to_int64(v2
, &env
->fpu_status
);
1462 return set_cc_nz_f32(v2
);
1465 /* convert 64-bit float to 64-bit int */
1466 uint32_t HELPER(cgdbr
)(uint32_t r1
, uint32_t f2
, uint32_t m3
)
1468 float64 v2
= env
->fregs
[f2
].d
;
1470 env
->regs
[r1
] = float64_to_int64(v2
, &env
->fpu_status
);
1471 return set_cc_nz_f64(v2
);
1474 /* convert 128-bit float to 64-bit int */
1475 uint32_t HELPER(cgxbr
)(uint32_t r1
, uint32_t f2
, uint32_t m3
)
1478 v2
.ll
.upper
= env
->fregs
[f2
].ll
;
1479 v2
.ll
.lower
= env
->fregs
[f2
+ 2].ll
;
1481 env
->regs
[r1
] = float128_to_int64(v2
.q
, &env
->fpu_status
);
1482 if (float128_is_any_nan(v2
.q
)) {
1484 } else if (float128_is_zero(v2
.q
)) {
1486 } else if (float128_is_neg(v2
.q
)) {
1493 /* convert 32-bit float to 32-bit int */
1494 uint32_t HELPER(cfebr
)(uint32_t r1
, uint32_t f2
, uint32_t m3
)
1496 float32 v2
= env
->fregs
[f2
].l
.upper
;
1498 env
->regs
[r1
] = (env
->regs
[r1
] & 0xffffffff00000000ULL
) |
1499 float32_to_int32(v2
, &env
->fpu_status
);
1500 return set_cc_nz_f32(v2
);
1503 /* convert 64-bit float to 32-bit int */
1504 uint32_t HELPER(cfdbr
)(uint32_t r1
, uint32_t f2
, uint32_t m3
)
1506 float64 v2
= env
->fregs
[f2
].d
;
1508 env
->regs
[r1
] = (env
->regs
[r1
] & 0xffffffff00000000ULL
) |
1509 float64_to_int32(v2
, &env
->fpu_status
);
1510 return set_cc_nz_f64(v2
);
1513 /* convert 128-bit float to 32-bit int */
1514 uint32_t HELPER(cfxbr
)(uint32_t r1
, uint32_t f2
, uint32_t m3
)
1517 v2
.ll
.upper
= env
->fregs
[f2
].ll
;
1518 v2
.ll
.lower
= env
->fregs
[f2
+ 2].ll
;
1519 env
->regs
[r1
] = (env
->regs
[r1
] & 0xffffffff00000000ULL
) |
1520 float128_to_int32(v2
.q
, &env
->fpu_status
);
1521 return set_cc_nz_f128(v2
.q
);
1524 /* load 32-bit FP zero */
1525 void HELPER(lzer
)(uint32_t f1
)
1527 env
->fregs
[f1
].l
.upper
= float32_zero
;
1530 /* load 64-bit FP zero */
1531 void HELPER(lzdr
)(uint32_t f1
)
1533 env
->fregs
[f1
].d
= float64_zero
;
1536 /* load 128-bit FP zero */
1537 void HELPER(lzxr
)(uint32_t f1
)
1540 x
.q
= float64_to_float128(float64_zero
, &env
->fpu_status
);
1541 env
->fregs
[f1
].ll
= x
.ll
.upper
;
1542 env
->fregs
[f1
+ 1].ll
= x
.ll
.lower
;
1545 /* 128-bit FP subtraction RR */
1546 uint32_t HELPER(sxbr
)(uint32_t f1
, uint32_t f2
)
1549 v1
.ll
.upper
= env
->fregs
[f1
].ll
;
1550 v1
.ll
.lower
= env
->fregs
[f1
+ 2].ll
;
1552 v2
.ll
.upper
= env
->fregs
[f2
].ll
;
1553 v2
.ll
.lower
= env
->fregs
[f2
+ 2].ll
;
1555 res
.q
= float128_sub(v1
.q
, v2
.q
, &env
->fpu_status
);
1556 env
->fregs
[f1
].ll
= res
.ll
.upper
;
1557 env
->fregs
[f1
+ 2].ll
= res
.ll
.lower
;
1558 return set_cc_nz_f128(res
.q
);
1561 /* 128-bit FP addition RR */
1562 uint32_t HELPER(axbr
)(uint32_t f1
, uint32_t f2
)
1565 v1
.ll
.upper
= env
->fregs
[f1
].ll
;
1566 v1
.ll
.lower
= env
->fregs
[f1
+ 2].ll
;
1568 v2
.ll
.upper
= env
->fregs
[f2
].ll
;
1569 v2
.ll
.lower
= env
->fregs
[f2
+ 2].ll
;
1571 res
.q
= float128_add(v1
.q
, v2
.q
, &env
->fpu_status
);
1572 env
->fregs
[f1
].ll
= res
.ll
.upper
;
1573 env
->fregs
[f1
+ 2].ll
= res
.ll
.lower
;
1574 return set_cc_nz_f128(res
.q
);
1577 /* 32-bit FP multiplication RR */
1578 void HELPER(meebr
)(uint32_t f1
, uint32_t f2
)
1580 env
->fregs
[f1
].l
.upper
= float32_mul(env
->fregs
[f1
].l
.upper
,
1581 env
->fregs
[f2
].l
.upper
,
1585 /* 64-bit FP division RR */
1586 void HELPER(ddbr
)(uint32_t f1
, uint32_t f2
)
1588 env
->fregs
[f1
].d
= float64_div(env
->fregs
[f1
].d
, env
->fregs
[f2
].d
,
1592 /* 64-bit FP multiply and add RM */
1593 void HELPER(madb
)(uint32_t f1
, uint64_t a2
, uint32_t f3
)
1595 HELPER_LOG("%s: f1 %d a2 0x%lx f3 %d\n", __FUNCTION__
, f1
, a2
, f3
);
1598 env
->fregs
[f1
].d
= float64_add(env
->fregs
[f1
].d
,
1599 float64_mul(v2
.d
, env
->fregs
[f3
].d
,
1604 /* 64-bit FP multiply and add RR */
1605 void HELPER(madbr
)(uint32_t f1
, uint32_t f3
, uint32_t f2
)
1607 HELPER_LOG("%s: f1 %d f2 %d f3 %d\n", __FUNCTION__
, f1
, f2
, f3
);
1608 env
->fregs
[f1
].d
= float64_add(float64_mul(env
->fregs
[f2
].d
,
1611 env
->fregs
[f1
].d
, &env
->fpu_status
);
1614 /* 64-bit FP multiply and subtract RR */
1615 void HELPER(msdbr
)(uint32_t f1
, uint32_t f3
, uint32_t f2
)
1617 HELPER_LOG("%s: f1 %d f2 %d f3 %d\n", __FUNCTION__
, f1
, f2
, f3
);
1618 env
->fregs
[f1
].d
= float64_sub(float64_mul(env
->fregs
[f2
].d
,
1621 env
->fregs
[f1
].d
, &env
->fpu_status
);
1624 /* 32-bit FP multiply and add RR */
1625 void HELPER(maebr
)(uint32_t f1
, uint32_t f3
, uint32_t f2
)
1627 env
->fregs
[f1
].l
.upper
= float32_add(env
->fregs
[f1
].l
.upper
,
1628 float32_mul(env
->fregs
[f2
].l
.upper
,
1629 env
->fregs
[f3
].l
.upper
,
1634 /* convert 64-bit float to 128-bit float */
1635 void HELPER(lxdb
)(uint32_t f1
, uint64_t a2
)
1640 v1
.q
= float64_to_float128(v2
.d
, &env
->fpu_status
);
1641 env
->fregs
[f1
].ll
= v1
.ll
.upper
;
1642 env
->fregs
[f1
+ 2].ll
= v1
.ll
.lower
;
1645 /* test data class 32-bit */
1646 uint32_t HELPER(tceb
)(uint32_t f1
, uint64_t m2
)
1648 float32 v1
= env
->fregs
[f1
].l
.upper
;
1649 int neg
= float32_is_neg(v1
);
1652 HELPER_LOG("%s: v1 0x%lx m2 0x%lx neg %d\n", __FUNCTION__
, (long)v1
, m2
, neg
);
1653 if ((float32_is_zero(v1
) && (m2
& (1 << (11-neg
)))) ||
1654 (float32_is_infinity(v1
) && (m2
& (1 << (5-neg
)))) ||
1655 (float32_is_any_nan(v1
) && (m2
& (1 << (3-neg
)))) ||
1656 (float32_is_signaling_nan(v1
) && (m2
& (1 << (1-neg
))))) {
1658 } else if (m2
& (1 << (9-neg
))) {
1659 /* assume normalized number */
1663 /* FIXME: denormalized? */
1667 /* test data class 64-bit */
1668 uint32_t HELPER(tcdb
)(uint32_t f1
, uint64_t m2
)
1670 float64 v1
= env
->fregs
[f1
].d
;
1671 int neg
= float64_is_neg(v1
);
1674 HELPER_LOG("%s: v1 0x%lx m2 0x%lx neg %d\n", __FUNCTION__
, v1
, m2
, neg
);
1675 if ((float64_is_zero(v1
) && (m2
& (1 << (11-neg
)))) ||
1676 (float64_is_infinity(v1
) && (m2
& (1 << (5-neg
)))) ||
1677 (float64_is_any_nan(v1
) && (m2
& (1 << (3-neg
)))) ||
1678 (float64_is_signaling_nan(v1
) && (m2
& (1 << (1-neg
))))) {
1680 } else if (m2
& (1 << (9-neg
))) {
1681 /* assume normalized number */
1684 /* FIXME: denormalized? */
1688 /* test data class 128-bit */
1689 uint32_t HELPER(tcxb
)(uint32_t f1
, uint64_t m2
)
1693 v1
.ll
.upper
= env
->fregs
[f1
].ll
;
1694 v1
.ll
.lower
= env
->fregs
[f1
+ 2].ll
;
1696 int neg
= float128_is_neg(v1
.q
);
1697 if ((float128_is_zero(v1
.q
) && (m2
& (1 << (11-neg
)))) ||
1698 (float128_is_infinity(v1
.q
) && (m2
& (1 << (5-neg
)))) ||
1699 (float128_is_any_nan(v1
.q
) && (m2
& (1 << (3-neg
)))) ||
1700 (float128_is_signaling_nan(v1
.q
) && (m2
& (1 << (1-neg
))))) {
1702 } else if (m2
& (1 << (9-neg
))) {
1703 /* assume normalized number */
1706 /* FIXME: denormalized? */
1710 /* find leftmost one */
1711 uint32_t HELPER(flogr
)(uint32_t r1
, uint64_t v2
)
1716 while (!(v2
& 0x8000000000000000ULL
) && v2
) {
1723 env
->regs
[r1
+ 1] = 0;
1726 env
->regs
[r1
] = res
;
1727 env
->regs
[r1
+ 1] = ov2
& ~(0x8000000000000000ULL
>> res
);
1732 /* square root 64-bit RR */
1733 void HELPER(sqdbr
)(uint32_t f1
, uint32_t f2
)
1735 env
->fregs
[f1
].d
= float64_sqrt(env
->fregs
[f2
].d
, &env
->fpu_status
);
1739 void HELPER(cksm
)(uint32_t r1
, uint32_t r2
)
1741 uint64_t src
= get_address_31fix(r2
);
1742 uint64_t src_len
= env
->regs
[(r2
+ 1) & 15];
1743 uint64_t cksm
= (uint32_t)env
->regs
[r1
];
1745 while (src_len
>= 4) {
1748 /* move to next word */
1757 cksm
+= ldub(src
) << 24;
1760 cksm
+= lduw(src
) << 16;
1763 cksm
+= lduw(src
) << 16;
1764 cksm
+= ldub(src
+ 2) << 8;
1768 /* indicate we've processed everything */
1769 env
->regs
[r2
] = src
+ src_len
;
1770 env
->regs
[(r2
+ 1) & 15] = 0;
1773 env
->regs
[r1
] = (env
->regs
[r1
] & 0xffffffff00000000ULL
) |
1774 ((uint32_t)cksm
+ (cksm
>> 32));
1777 static inline uint32_t cc_calc_ltgt_32(CPUState
*env
, int32_t src
,
1782 } else if (src
< dst
) {
1789 static inline uint32_t cc_calc_ltgt0_32(CPUState
*env
, int32_t dst
)
1791 return cc_calc_ltgt_32(env
, dst
, 0);
1794 static inline uint32_t cc_calc_ltgt_64(CPUState
*env
, int64_t src
,
1799 } else if (src
< dst
) {
1806 static inline uint32_t cc_calc_ltgt0_64(CPUState
*env
, int64_t dst
)
1808 return cc_calc_ltgt_64(env
, dst
, 0);
1811 static inline uint32_t cc_calc_ltugtu_32(CPUState
*env
, uint32_t src
,
1816 } else if (src
< dst
) {
1823 static inline uint32_t cc_calc_ltugtu_64(CPUState
*env
, uint64_t src
,
1828 } else if (src
< dst
) {
1835 static inline uint32_t cc_calc_tm_32(CPUState
*env
, uint32_t val
, uint32_t mask
)
1837 HELPER_LOG("%s: val 0x%x mask 0x%x\n", __FUNCTION__
, val
, mask
);
1838 uint16_t r
= val
& mask
;
1839 if (r
== 0 || mask
== 0) {
1841 } else if (r
== mask
) {
1848 /* set condition code for test under mask */
1849 static inline uint32_t cc_calc_tm_64(CPUState
*env
, uint64_t val
, uint32_t mask
)
1851 uint16_t r
= val
& mask
;
1852 HELPER_LOG("%s: val 0x%lx mask 0x%x r 0x%x\n", __FUNCTION__
, val
, mask
, r
);
1853 if (r
== 0 || mask
== 0) {
1855 } else if (r
== mask
) {
1858 while (!(mask
& 0x8000)) {
1870 static inline uint32_t cc_calc_nz(CPUState
*env
, uint64_t dst
)
1875 static inline uint32_t cc_calc_add_64(CPUState
*env
, int64_t a1
, int64_t a2
,
1878 if ((a1
> 0 && a2
> 0 && ar
< 0) || (a1
< 0 && a2
< 0 && ar
> 0)) {
1879 return 3; /* overflow */
1883 } else if (ar
> 0) {
1891 static inline uint32_t cc_calc_addu_64(CPUState
*env
, uint64_t a1
, uint64_t a2
,
1901 if (ar
< a1
|| ar
< a2
) {
1909 static inline uint32_t cc_calc_sub_64(CPUState
*env
, int64_t a1
, int64_t a2
,
1912 if ((a1
> 0 && a2
< 0 && ar
< 0) || (a1
< 0 && a2
> 0 && ar
> 0)) {
1913 return 3; /* overflow */
1917 } else if (ar
> 0) {
1925 static inline uint32_t cc_calc_subu_64(CPUState
*env
, uint64_t a1
, uint64_t a2
,
1939 static inline uint32_t cc_calc_abs_64(CPUState
*env
, int64_t dst
)
1941 if ((uint64_t)dst
== 0x8000000000000000ULL
) {
1950 static inline uint32_t cc_calc_nabs_64(CPUState
*env
, int64_t dst
)
1955 static inline uint32_t cc_calc_comp_64(CPUState
*env
, int64_t dst
)
1957 if ((uint64_t)dst
== 0x8000000000000000ULL
) {
1959 } else if (dst
< 0) {
1961 } else if (dst
> 0) {
1969 static inline uint32_t cc_calc_add_32(CPUState
*env
, int32_t a1
, int32_t a2
,
1972 if ((a1
> 0 && a2
> 0 && ar
< 0) || (a1
< 0 && a2
< 0 && ar
> 0)) {
1973 return 3; /* overflow */
1977 } else if (ar
> 0) {
1985 static inline uint32_t cc_calc_addu_32(CPUState
*env
, uint32_t a1
, uint32_t a2
,
1995 if (ar
< a1
|| ar
< a2
) {
2003 static inline uint32_t cc_calc_sub_32(CPUState
*env
, int32_t a1
, int32_t a2
,
2006 if ((a1
> 0 && a2
< 0 && ar
< 0) || (a1
< 0 && a2
> 0 && ar
> 0)) {
2007 return 3; /* overflow */
2011 } else if (ar
> 0) {
2019 static inline uint32_t cc_calc_subu_32(CPUState
*env
, uint32_t a1
, uint32_t a2
,
2033 static inline uint32_t cc_calc_abs_32(CPUState
*env
, int32_t dst
)
2035 if ((uint32_t)dst
== 0x80000000UL
) {
2044 static inline uint32_t cc_calc_nabs_32(CPUState
*env
, int32_t dst
)
2049 static inline uint32_t cc_calc_comp_32(CPUState
*env
, int32_t dst
)
2051 if ((uint32_t)dst
== 0x80000000UL
) {
2053 } else if (dst
< 0) {
2055 } else if (dst
> 0) {
2062 /* calculate condition code for insert character under mask insn */
2063 static inline uint32_t cc_calc_icm_32(CPUState
*env
, uint32_t mask
, uint32_t val
)
2065 HELPER_LOG("%s: mask 0x%x val %d\n", __FUNCTION__
, mask
, val
);
2071 } else if (val
& 0x80000000) {
2078 if (!val
|| !mask
) {
2094 static inline uint32_t cc_calc_slag(CPUState
*env
, uint64_t src
, uint64_t shift
)
2096 uint64_t mask
= ((1ULL << shift
) - 1ULL) << (64 - shift
);
2099 /* check if the sign bit stays the same */
2100 if (src
& (1ULL << 63)) {
2106 if ((src
& mask
) != match
) {
2111 r
= ((src
<< shift
) & ((1ULL << 63) - 1)) | (src
& (1ULL << 63));
2113 if ((int64_t)r
== 0) {
2115 } else if ((int64_t)r
< 0) {
2123 static inline uint32_t do_calc_cc(CPUState
*env
, uint32_t cc_op
, uint64_t src
,
2124 uint64_t dst
, uint64_t vr
)
2133 /* cc_op value _is_ cc */
2136 case CC_OP_LTGT0_32
:
2137 r
= cc_calc_ltgt0_32(env
, dst
);
2139 case CC_OP_LTGT0_64
:
2140 r
= cc_calc_ltgt0_64(env
, dst
);
2143 r
= cc_calc_ltgt_32(env
, src
, dst
);
2146 r
= cc_calc_ltgt_64(env
, src
, dst
);
2148 case CC_OP_LTUGTU_32
:
2149 r
= cc_calc_ltugtu_32(env
, src
, dst
);
2151 case CC_OP_LTUGTU_64
:
2152 r
= cc_calc_ltugtu_64(env
, src
, dst
);
2155 r
= cc_calc_tm_32(env
, src
, dst
);
2158 r
= cc_calc_tm_64(env
, src
, dst
);
2161 r
= cc_calc_nz(env
, dst
);
2164 r
= cc_calc_add_64(env
, src
, dst
, vr
);
2167 r
= cc_calc_addu_64(env
, src
, dst
, vr
);
2170 r
= cc_calc_sub_64(env
, src
, dst
, vr
);
2173 r
= cc_calc_subu_64(env
, src
, dst
, vr
);
2176 r
= cc_calc_abs_64(env
, dst
);
2179 r
= cc_calc_nabs_64(env
, dst
);
2182 r
= cc_calc_comp_64(env
, dst
);
2186 r
= cc_calc_add_32(env
, src
, dst
, vr
);
2189 r
= cc_calc_addu_32(env
, src
, dst
, vr
);
2192 r
= cc_calc_sub_32(env
, src
, dst
, vr
);
2195 r
= cc_calc_subu_32(env
, src
, dst
, vr
);
2198 r
= cc_calc_abs_64(env
, dst
);
2201 r
= cc_calc_nabs_64(env
, dst
);
2204 r
= cc_calc_comp_32(env
, dst
);
2208 r
= cc_calc_icm_32(env
, src
, dst
);
2211 r
= cc_calc_slag(env
, src
, dst
);
2214 case CC_OP_LTGT_F32
:
2215 r
= set_cc_f32(src
, dst
);
2217 case CC_OP_LTGT_F64
:
2218 r
= set_cc_f64(src
, dst
);
2221 r
= set_cc_nz_f32(dst
);
2224 r
= set_cc_nz_f64(dst
);
2228 cpu_abort(env
, "Unknown CC operation: %s\n", cc_name(cc_op
));
2231 HELPER_LOG("%s: %15s 0x%016lx 0x%016lx 0x%016lx = %d\n", __FUNCTION__
,
2232 cc_name(cc_op
), src
, dst
, vr
, r
);
2236 uint32_t calc_cc(CPUState
*env
, uint32_t cc_op
, uint64_t src
, uint64_t dst
,
2239 return do_calc_cc(env
, cc_op
, src
, dst
, vr
);
2242 uint32_t HELPER(calc_cc
)(uint32_t cc_op
, uint64_t src
, uint64_t dst
,
2245 return do_calc_cc(env
, cc_op
, src
, dst
, vr
);
2248 uint64_t HELPER(cvd
)(int32_t bin
)
2251 uint64_t dec
= 0x0c;
2259 for (shift
= 4; (shift
< 64) && bin
; shift
+= 4) {
2260 int current_number
= bin
% 10;
2262 dec
|= (current_number
) << shift
;
2269 void HELPER(unpk
)(uint32_t len
, uint64_t dest
, uint64_t src
)
2271 int len_dest
= len
>> 4;
2272 int len_src
= len
& 0xf;
2274 int second_nibble
= 0;
2279 /* last byte is special, it only flips the nibbles */
2281 stb(dest
, (b
<< 4) | (b
>> 4));
2285 /* now pad every nibble with 0xf0 */
2287 while (len_dest
> 0) {
2288 uint8_t cur_byte
= 0;
2291 cur_byte
= ldub(src
);
2297 /* only advance one nibble at a time */
2298 if (second_nibble
) {
2303 second_nibble
= !second_nibble
;
2306 cur_byte
= (cur_byte
& 0xf);
2310 stb(dest
, cur_byte
);
2314 void HELPER(tr
)(uint32_t len
, uint64_t array
, uint64_t trans
)
2318 for (i
= 0; i
<= len
; i
++) {
2319 uint8_t byte
= ldub(array
+ i
);
2320 uint8_t new_byte
= ldub(trans
+ byte
);
2321 stb(array
+ i
, new_byte
);
2325 #ifndef CONFIG_USER_ONLY
2327 void HELPER(load_psw
)(uint64_t mask
, uint64_t addr
)
2329 load_psw(env
, mask
, addr
);
2333 static void program_interrupt(CPUState
*env
, uint32_t code
, int ilc
)
2335 qemu_log("program interrupt at %#" PRIx64
"\n", env
->psw
.addr
);
2337 if (kvm_enabled()) {
2339 kvm_s390_interrupt(env
, KVM_S390_PROGRAM_INT
, code
);
2342 env
->int_pgm_code
= code
;
2343 env
->int_pgm_ilc
= ilc
;
2344 env
->exception_index
= EXCP_PGM
;
2349 static void ext_interrupt(CPUState
*env
, int type
, uint32_t param
,
2352 cpu_inject_ext(env
, type
, param
, param64
);
2355 int sclp_service_call(CPUState
*env
, uint32_t sccb
, uint64_t code
)
2361 printf("sclp(0x%x, 0x%" PRIx64
")\n", sccb
, code
);
2364 if (sccb
& ~0x7ffffff8ul
) {
2365 fprintf(stderr
, "KVM: invalid sccb address 0x%x\n", sccb
);
2371 case SCLP_CMDW_READ_SCP_INFO
:
2372 case SCLP_CMDW_READ_SCP_INFO_FORCED
:
2373 while ((ram_size
>> (20 + shift
)) > 65535) {
2376 stw_phys(sccb
+ SCP_MEM_CODE
, ram_size
>> (20 + shift
));
2377 stb_phys(sccb
+ SCP_INCREMENT
, 1 << shift
);
2378 stw_phys(sccb
+ SCP_RESPONSE_CODE
, 0x10);
2380 if (kvm_enabled()) {
2382 kvm_s390_interrupt_internal(env
, KVM_S390_INT_SERVICE
,
2387 ext_interrupt(env
, EXT_SERVICE
, sccb
& ~3, 0);
2392 printf("KVM: invalid sclp call 0x%x / 0x%" PRIx64
"x\n", sccb
, code
);
2402 /* SCLP service call */
2403 uint32_t HELPER(servc
)(uint32_t r1
, uint64_t r2
)
2405 if (sclp_service_call(env
, r1
, r2
)) {
2413 uint64_t HELPER(diag
)(uint32_t num
, uint64_t mem
, uint64_t code
)
2420 r
= s390_virtio_hypercall(env
, mem
, code
);
2436 program_interrupt(env
, PGM_OPERATION
, ILC_LATER_INC
);
2443 void HELPER(stidp
)(uint64_t a1
)
2445 stq(a1
, env
->cpu_num
);
2449 void HELPER(spx
)(uint64_t a1
)
2454 env
->psa
= prefix
& 0xfffff000;
2455 qemu_log("prefix: %#x\n", prefix
);
2456 tlb_flush_page(env
, 0);
2457 tlb_flush_page(env
, TARGET_PAGE_SIZE
);
2461 uint32_t HELPER(sck
)(uint64_t a1
)
2463 /* XXX not implemented - is it necessary? */
2468 static inline uint64_t clock_value(CPUState
*env
)
2472 time
= env
->tod_offset
+
2473 time2tod(qemu_get_clock_ns(vm_clock
) - env
->tod_basetime
);
2479 uint32_t HELPER(stck
)(uint64_t a1
)
2481 stq(a1
, clock_value(env
));
2486 /* Store Clock Extended */
2487 uint32_t HELPER(stcke
)(uint64_t a1
)
2490 /* basically the same value as stck */
2491 stq(a1
+ 1, clock_value(env
) | env
->cpu_num
);
2492 /* more fine grained than stck */
2494 /* XXX programmable fields */
2501 /* Set Clock Comparator */
2502 void HELPER(sckc
)(uint64_t a1
)
2504 uint64_t time
= ldq(a1
);
2506 if (time
== -1ULL) {
2510 /* difference between now and then */
2511 time
-= clock_value(env
);
2513 time
= (time
* 125) >> 9;
2515 qemu_mod_timer(env
->tod_timer
, qemu_get_clock_ns(vm_clock
) + time
);
2518 /* Store Clock Comparator */
2519 void HELPER(stckc
)(uint64_t a1
)
2526 void HELPER(spt
)(uint64_t a1
)
2528 uint64_t time
= ldq(a1
);
2530 if (time
== -1ULL) {
2535 time
= (time
* 125) >> 9;
2537 qemu_mod_timer(env
->cpu_timer
, qemu_get_clock_ns(vm_clock
) + time
);
2540 /* Store CPU Timer */
2541 void HELPER(stpt
)(uint64_t a1
)
2547 /* Store System Information */
2548 uint32_t HELPER(stsi
)(uint64_t a0
, uint32_t r0
, uint32_t r1
)
2553 if ((r0
& STSI_LEVEL_MASK
) <= STSI_LEVEL_3
&&
2554 ((r0
& STSI_R0_RESERVED_MASK
) || (r1
& STSI_R1_RESERVED_MASK
))) {
2555 /* valid function code, invalid reserved bits */
2556 program_interrupt(env
, PGM_SPECIFICATION
, 2);
2559 sel1
= r0
& STSI_R0_SEL1_MASK
;
2560 sel2
= r1
& STSI_R1_SEL2_MASK
;
2562 /* XXX: spec exception if sysib is not 4k-aligned */
2564 switch (r0
& STSI_LEVEL_MASK
) {
2566 if ((sel1
== 1) && (sel2
== 1)) {
2567 /* Basic Machine Configuration */
2568 struct sysib_111 sysib
;
2570 memset(&sysib
, 0, sizeof(sysib
));
2571 ebcdic_put(sysib
.manuf
, "QEMU ", 16);
2572 /* same as machine type number in STORE CPU ID */
2573 ebcdic_put(sysib
.type
, "QEMU", 4);
2574 /* same as model number in STORE CPU ID */
2575 ebcdic_put(sysib
.model
, "QEMU ", 16);
2576 ebcdic_put(sysib
.sequence
, "QEMU ", 16);
2577 ebcdic_put(sysib
.plant
, "QEMU", 4);
2578 cpu_physical_memory_rw(a0
, (uint8_t*)&sysib
, sizeof(sysib
), 1);
2579 } else if ((sel1
== 2) && (sel2
== 1)) {
2580 /* Basic Machine CPU */
2581 struct sysib_121 sysib
;
2583 memset(&sysib
, 0, sizeof(sysib
));
2584 /* XXX make different for different CPUs? */
2585 ebcdic_put(sysib
.sequence
, "QEMUQEMUQEMUQEMU", 16);
2586 ebcdic_put(sysib
.plant
, "QEMU", 4);
2587 stw_p(&sysib
.cpu_addr
, env
->cpu_num
);
2588 cpu_physical_memory_rw(a0
, (uint8_t*)&sysib
, sizeof(sysib
), 1);
2589 } else if ((sel1
== 2) && (sel2
== 2)) {
2590 /* Basic Machine CPUs */
2591 struct sysib_122 sysib
;
2593 memset(&sysib
, 0, sizeof(sysib
));
2594 stl_p(&sysib
.capability
, 0x443afc29);
2595 /* XXX change when SMP comes */
2596 stw_p(&sysib
.total_cpus
, 1);
2597 stw_p(&sysib
.active_cpus
, 1);
2598 stw_p(&sysib
.standby_cpus
, 0);
2599 stw_p(&sysib
.reserved_cpus
, 0);
2600 cpu_physical_memory_rw(a0
, (uint8_t*)&sysib
, sizeof(sysib
), 1);
2607 if ((sel1
== 2) && (sel2
== 1)) {
2609 struct sysib_221 sysib
;
2611 memset(&sysib
, 0, sizeof(sysib
));
2612 /* XXX make different for different CPUs? */
2613 ebcdic_put(sysib
.sequence
, "QEMUQEMUQEMUQEMU", 16);
2614 ebcdic_put(sysib
.plant
, "QEMU", 4);
2615 stw_p(&sysib
.cpu_addr
, env
->cpu_num
);
2616 stw_p(&sysib
.cpu_id
, 0);
2617 cpu_physical_memory_rw(a0
, (uint8_t*)&sysib
, sizeof(sysib
), 1);
2618 } else if ((sel1
== 2) && (sel2
== 2)) {
2620 struct sysib_222 sysib
;
2622 memset(&sysib
, 0, sizeof(sysib
));
2623 stw_p(&sysib
.lpar_num
, 0);
2625 /* XXX change when SMP comes */
2626 stw_p(&sysib
.total_cpus
, 1);
2627 stw_p(&sysib
.conf_cpus
, 1);
2628 stw_p(&sysib
.standby_cpus
, 0);
2629 stw_p(&sysib
.reserved_cpus
, 0);
2630 ebcdic_put(sysib
.name
, "QEMU ", 8);
2631 stl_p(&sysib
.caf
, 1000);
2632 stw_p(&sysib
.dedicated_cpus
, 0);
2633 stw_p(&sysib
.shared_cpus
, 0);
2634 cpu_physical_memory_rw(a0
, (uint8_t*)&sysib
, sizeof(sysib
), 1);
2642 if ((sel1
== 2) && (sel2
== 2)) {
2644 struct sysib_322 sysib
;
2646 memset(&sysib
, 0, sizeof(sysib
));
2648 /* XXX change when SMP comes */
2649 stw_p(&sysib
.vm
[0].total_cpus
, 1);
2650 stw_p(&sysib
.vm
[0].conf_cpus
, 1);
2651 stw_p(&sysib
.vm
[0].standby_cpus
, 0);
2652 stw_p(&sysib
.vm
[0].reserved_cpus
, 0);
2653 ebcdic_put(sysib
.vm
[0].name
, "KVMguest", 8);
2654 stl_p(&sysib
.vm
[0].caf
, 1000);
2655 ebcdic_put(sysib
.vm
[0].cpi
, "KVM/Linux ", 16);
2656 cpu_physical_memory_rw(a0
, (uint8_t*)&sysib
, sizeof(sysib
), 1);
2662 case STSI_LEVEL_CURRENT
:
2663 env
->regs
[0] = STSI_LEVEL_3
;
2673 void HELPER(lctlg
)(uint32_t r1
, uint64_t a2
, uint32_t r3
)
2678 for (i
= r1
;; i
= (i
+ 1) % 16) {
2679 env
->cregs
[i
] = ldq(src
);
2680 HELPER_LOG("load ctl %d from 0x%" PRIx64
" == 0x%" PRIx64
"\n",
2681 i
, src
, env
->cregs
[i
]);
2682 src
+= sizeof(uint64_t);
2692 void HELPER(lctl
)(uint32_t r1
, uint64_t a2
, uint32_t r3
)
2697 for (i
= r1
;; i
= (i
+ 1) % 16) {
2698 env
->cregs
[i
] = (env
->cregs
[i
] & 0xFFFFFFFF00000000ULL
) | ldl(src
);
2699 src
+= sizeof(uint32_t);
2709 void HELPER(stctg
)(uint32_t r1
, uint64_t a2
, uint32_t r3
)
2714 for (i
= r1
;; i
= (i
+ 1) % 16) {
2715 stq(dest
, env
->cregs
[i
]);
2716 dest
+= sizeof(uint64_t);
2724 void HELPER(stctl
)(uint32_t r1
, uint64_t a2
, uint32_t r3
)
2729 for (i
= r1
;; i
= (i
+ 1) % 16) {
2730 stl(dest
, env
->cregs
[i
]);
2731 dest
+= sizeof(uint32_t);
2739 uint32_t HELPER(tprot
)(uint64_t a1
, uint64_t a2
)
2746 /* insert storage key extended */
2747 uint64_t HELPER(iske
)(uint64_t r2
)
2749 uint64_t addr
= get_address(0, 0, r2
);
2751 if (addr
> ram_size
) {
2755 /* XXX maybe use qemu's internal keys? */
2756 return env
->storage_keys
[addr
/ TARGET_PAGE_SIZE
];
2759 /* set storage key extended */
2760 void HELPER(sske
)(uint32_t r1
, uint64_t r2
)
2762 uint64_t addr
= get_address(0, 0, r2
);
2764 if (addr
> ram_size
) {
2768 env
->storage_keys
[addr
/ TARGET_PAGE_SIZE
] = r1
;
2771 /* reset reference bit extended */
2772 uint32_t HELPER(rrbe
)(uint32_t r1
, uint64_t r2
)
2774 if (r2
> ram_size
) {
2780 env
->storage_keys
[r2
/ TARGET_PAGE_SIZE
] &= ~SK_REFERENCED
;
2786 * 0 Reference bit zero; change bit zero
2787 * 1 Reference bit zero; change bit one
2788 * 2 Reference bit one; change bit zero
2789 * 3 Reference bit one; change bit one
2794 /* compare and swap and purge */
2795 uint32_t HELPER(csp
)(uint32_t r1
, uint32_t r2
)
2798 uint32_t o1
= env
->regs
[r1
];
2799 uint64_t a2
= get_address_31fix(r2
) & ~3ULL;
2800 uint32_t o2
= ldl(a2
);
2803 stl(a2
, env
->regs
[(r1
+ 1) & 15]);
2804 if (env
->regs
[r2
] & 0x3) {
2805 /* flush TLB / ALB */
2810 env
->regs
[r1
] = (env
->regs
[r1
] & 0xffffffff00000000ULL
) | o2
;
2817 static uint32_t mvc_asc(int64_t l
, uint64_t a1
, uint64_t mode1
, uint64_t a2
,
2820 target_ulong src
, dest
;
2821 int flags
, cc
= 0, i
;
2825 } else if (l
> 256) {
2831 if (mmu_translate(env
, a1
& TARGET_PAGE_MASK
, 1, mode1
, &dest
, &flags
)) {
2834 dest
|= a1
& ~TARGET_PAGE_MASK
;
2836 if (mmu_translate(env
, a2
& TARGET_PAGE_MASK
, 0, mode2
, &src
, &flags
)) {
2839 src
|= a2
& ~TARGET_PAGE_MASK
;
2841 /* XXX replace w/ memcpy */
2842 for (i
= 0; i
< l
; i
++) {
2843 /* XXX be more clever */
2844 if ((((dest
+ i
) & TARGET_PAGE_MASK
) != (dest
& TARGET_PAGE_MASK
)) ||
2845 (((src
+ i
) & TARGET_PAGE_MASK
) != (src
& TARGET_PAGE_MASK
))) {
2846 mvc_asc(l
- i
, a1
+ i
, mode1
, a2
+ i
, mode2
);
2849 stb_phys(dest
+ i
, ldub_phys(src
+ i
));
2855 uint32_t HELPER(mvcs
)(uint64_t l
, uint64_t a1
, uint64_t a2
)
2857 HELPER_LOG("%s: %16" PRIx64
" %16" PRIx64
" %16" PRIx64
"\n",
2858 __FUNCTION__
, l
, a1
, a2
);
2860 return mvc_asc(l
, a1
, PSW_ASC_SECONDARY
, a2
, PSW_ASC_PRIMARY
);
2863 uint32_t HELPER(mvcp
)(uint64_t l
, uint64_t a1
, uint64_t a2
)
2865 HELPER_LOG("%s: %16" PRIx64
" %16" PRIx64
" %16" PRIx64
"\n",
2866 __FUNCTION__
, l
, a1
, a2
);
2868 return mvc_asc(l
, a1
, PSW_ASC_PRIMARY
, a2
, PSW_ASC_SECONDARY
);
2871 uint32_t HELPER(sigp
)(uint64_t order_code
, uint32_t r1
, uint64_t cpu_addr
)
2875 HELPER_LOG("%s: %016" PRIx64
" %08x %016" PRIx64
"\n",
2876 __FUNCTION__
, order_code
, r1
, cpu_addr
);
2878 /* Remember: Use "R1 or R1+1, whichever is the odd-numbered register"
2879 as parameter (input). Status (output) is always R1. */
2881 switch (order_code
) {
2886 /* enumerate CPU status */
2888 /* XXX implement when SMP comes */
2891 env
->regs
[r1
] &= 0xffffffff00000000ULL
;
2896 fprintf(stderr
, "XXX unknown sigp: 0x%" PRIx64
"\n", order_code
);
2903 void HELPER(sacf
)(uint64_t a1
)
2905 HELPER_LOG("%s: %16" PRIx64
"\n", __FUNCTION__
, a1
);
2907 switch (a1
& 0xf00) {
2909 env
->psw
.mask
&= ~PSW_MASK_ASC
;
2910 env
->psw
.mask
|= PSW_ASC_PRIMARY
;
2913 env
->psw
.mask
&= ~PSW_MASK_ASC
;
2914 env
->psw
.mask
|= PSW_ASC_SECONDARY
;
2917 env
->psw
.mask
&= ~PSW_MASK_ASC
;
2918 env
->psw
.mask
|= PSW_ASC_HOME
;
2921 qemu_log("unknown sacf mode: %" PRIx64
"\n", a1
);
2922 program_interrupt(env
, PGM_SPECIFICATION
, 2);
2927 /* invalidate pte */
2928 void HELPER(ipte
)(uint64_t pte_addr
, uint64_t vaddr
)
2930 uint64_t page
= vaddr
& TARGET_PAGE_MASK
;
2933 /* XXX broadcast to other CPUs */
2935 /* XXX Linux is nice enough to give us the exact pte address.
2936 According to spec we'd have to find it out ourselves */
2937 /* XXX Linux is fine with overwriting the pte, the spec requires
2938 us to only set the invalid bit */
2939 stq_phys(pte_addr
, pte
| _PAGE_INVALID
);
2941 /* XXX we exploit the fact that Linux passes the exact virtual
2942 address here - it's not obliged to! */
2943 tlb_flush_page(env
, page
);
2946 /* flush local tlb */
2947 void HELPER(ptlb
)(void)
2952 /* store using real address */
2953 void HELPER(stura
)(uint64_t addr
, uint32_t v1
)
2955 stw_phys(get_address(0, 0, addr
), v1
);
2958 /* load real address */
2959 uint32_t HELPER(lra
)(uint64_t addr
, uint32_t r1
)
2962 int old_exc
= env
->exception_index
;
2963 uint64_t asc
= env
->psw
.mask
& PSW_MASK_ASC
;
2967 /* XXX incomplete - has more corner cases */
2968 if (!(env
->psw
.mask
& PSW_MASK_64
) && (addr
>> 32)) {
2969 program_interrupt(env
, PGM_SPECIAL_OP
, 2);
2972 env
->exception_index
= old_exc
;
2973 if (mmu_translate(env
, addr
, 0, asc
, &ret
, &flags
)) {
2976 if (env
->exception_index
== EXCP_PGM
) {
2977 ret
= env
->int_pgm_code
| 0x80000000;
2979 ret
|= addr
& ~TARGET_PAGE_MASK
;
2981 env
->exception_index
= old_exc
;
2983 if (!(env
->psw
.mask
& PSW_MASK_64
)) {
2984 env
->regs
[r1
] = (env
->regs
[r1
] & 0xffffffff00000000ULL
) | (ret
& 0xffffffffULL
);
2986 env
->regs
[r1
] = ret
;