2 * S/390 helper routines
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2009 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
22 #include "host-utils.h"
26 #include "qemu-timer.h"
28 /*****************************************************************************/
30 #if !defined (CONFIG_USER_ONLY)
32 #define MMUSUFFIX _mmu
35 #include "softmmu_template.h"
38 #include "softmmu_template.h"
41 #include "softmmu_template.h"
44 #include "softmmu_template.h"
46 /* try to fill the TLB and return an exception if error. If retaddr is
47 NULL, it means that the function was called in C code (i.e. not
48 from generated code or from helper.c) */
49 /* XXX: fix it to restore all registers */
50 void tlb_fill (target_ulong addr
, int is_write
, int mmu_idx
, void *retaddr
)
57 /* XXX: hack to restore env in all cases, even if not called from
61 ret
= cpu_s390x_handle_mmu_fault(env
, addr
, is_write
, mmu_idx
, 1);
62 if (unlikely(ret
!= 0)) {
63 if (likely(retaddr
)) {
64 /* now we have a real cpu fault */
65 pc
= (unsigned long)retaddr
;
68 /* the PC is inside the translated code. It means that we have
69 a virtual CPU fault */
70 cpu_restore_state(tb
, env
, pc
);
80 /* #define DEBUG_HELPER */
82 #define HELPER_LOG(x...) qemu_log(x)
84 #define HELPER_LOG(x...)
87 /* raise an exception */
88 void HELPER(exception
)(uint32_t excp
)
90 HELPER_LOG("%s: exception %d\n", __FUNCTION__
, excp
);
91 env
->exception_index
= excp
;
95 #ifndef CONFIG_USER_ONLY
96 static void mvc_fast_memset(CPUState
*env
, uint32_t l
, uint64_t dest
,
99 target_phys_addr_t dest_phys
;
100 target_phys_addr_t len
= l
;
102 uint64_t asc
= env
->psw
.mask
& PSW_MASK_ASC
;
105 if (mmu_translate(env
, dest
, 1, asc
, &dest_phys
, &flags
)) {
107 cpu_abort(env
, "should never reach here");
109 dest_phys
|= dest
& ~TARGET_PAGE_MASK
;
111 dest_p
= cpu_physical_memory_map(dest_phys
, &len
, 1);
113 memset(dest_p
, byte
, len
);
115 cpu_physical_memory_unmap(dest_p
, 1, len
, len
);
118 static void mvc_fast_memmove(CPUState
*env
, uint32_t l
, uint64_t dest
,
121 target_phys_addr_t dest_phys
;
122 target_phys_addr_t src_phys
;
123 target_phys_addr_t len
= l
;
126 uint64_t asc
= env
->psw
.mask
& PSW_MASK_ASC
;
129 if (mmu_translate(env
, dest
, 1, asc
, &dest_phys
, &flags
)) {
131 cpu_abort(env
, "should never reach here");
133 dest_phys
|= dest
& ~TARGET_PAGE_MASK
;
135 if (mmu_translate(env
, src
, 0, asc
, &src_phys
, &flags
)) {
137 cpu_abort(env
, "should never reach here");
139 src_phys
|= src
& ~TARGET_PAGE_MASK
;
141 dest_p
= cpu_physical_memory_map(dest_phys
, &len
, 1);
142 src_p
= cpu_physical_memory_map(src_phys
, &len
, 0);
144 memmove(dest_p
, src_p
, len
);
146 cpu_physical_memory_unmap(dest_p
, 1, len
, len
);
147 cpu_physical_memory_unmap(src_p
, 0, len
, len
);
152 uint32_t HELPER(nc
)(uint32_t l
, uint64_t dest
, uint64_t src
)
158 HELPER_LOG("%s l %d dest %" PRIx64
" src %" PRIx64
"\n",
159 __FUNCTION__
, l
, dest
, src
);
160 for (i
= 0; i
<= l
; i
++) {
161 x
= ldub(dest
+ i
) & ldub(src
+ i
);
171 uint32_t HELPER(xc
)(uint32_t l
, uint64_t dest
, uint64_t src
)
177 HELPER_LOG("%s l %d dest %" PRIx64
" src %" PRIx64
"\n",
178 __FUNCTION__
, l
, dest
, src
);
180 #ifndef CONFIG_USER_ONLY
181 /* xor with itself is the same as memset(0) */
182 if ((l
> 32) && (src
== dest
) &&
183 (src
& TARGET_PAGE_MASK
) == ((src
+ l
) & TARGET_PAGE_MASK
)) {
184 mvc_fast_memset(env
, l
+ 1, dest
, 0);
189 memset(g2h(dest
), 0, l
+ 1);
194 for (i
= 0; i
<= l
; i
++) {
195 x
= ldub(dest
+ i
) ^ ldub(src
+ i
);
205 uint32_t HELPER(oc
)(uint32_t l
, uint64_t dest
, uint64_t src
)
211 HELPER_LOG("%s l %d dest %" PRIx64
" src %" PRIx64
"\n",
212 __FUNCTION__
, l
, dest
, src
);
213 for (i
= 0; i
<= l
; i
++) {
214 x
= ldub(dest
+ i
) | ldub(src
+ i
);
224 void HELPER(mvc
)(uint32_t l
, uint64_t dest
, uint64_t src
)
228 uint32_t l_64
= (l
+ 1) / 8;
230 HELPER_LOG("%s l %d dest %" PRIx64
" src %" PRIx64
"\n",
231 __FUNCTION__
, l
, dest
, src
);
233 #ifndef CONFIG_USER_ONLY
235 (src
& TARGET_PAGE_MASK
) == ((src
+ l
) & TARGET_PAGE_MASK
) &&
236 (dest
& TARGET_PAGE_MASK
) == ((dest
+ l
) & TARGET_PAGE_MASK
)) {
237 if (dest
== (src
+ 1)) {
238 mvc_fast_memset(env
, l
+ 1, dest
, ldub(src
));
240 } else if ((src
& TARGET_PAGE_MASK
) != (dest
& TARGET_PAGE_MASK
)) {
241 mvc_fast_memmove(env
, l
+ 1, dest
, src
);
246 if (dest
== (src
+ 1)) {
247 memset(g2h(dest
), ldub(src
), l
+ 1);
250 memmove(g2h(dest
), g2h(src
), l
+ 1);
255 /* handle the parts that fit into 8-byte loads/stores */
256 if (dest
!= (src
+ 1)) {
257 for (i
= 0; i
< l_64
; i
++) {
258 stq(dest
+ x
, ldq(src
+ x
));
263 /* slow version crossing pages with byte accesses */
264 for (i
= x
; i
<= l
; i
++) {
265 stb(dest
+ i
, ldub(src
+ i
));
269 /* compare unsigned byte arrays */
270 uint32_t HELPER(clc
)(uint32_t l
, uint64_t s1
, uint64_t s2
)
275 HELPER_LOG("%s l %d s1 %" PRIx64
" s2 %" PRIx64
"\n",
276 __FUNCTION__
, l
, s1
, s2
);
277 for (i
= 0; i
<= l
; i
++) {
280 HELPER_LOG("%02x (%c)/%02x (%c) ", x
, x
, y
, y
);
295 /* compare logical under mask */
296 uint32_t HELPER(clm
)(uint32_t r1
, uint32_t mask
, uint64_t addr
)
300 HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%" PRIx64
"\n", __FUNCTION__
, r1
,
306 r
= (r1
& 0xff000000UL
) >> 24;
307 HELPER_LOG("mask 0x%x %02x/%02x (0x%" PRIx64
") ", mask
, r
, d
,
318 mask
= (mask
<< 1) & 0xf;
325 /* store character under mask */
326 void HELPER(stcm
)(uint32_t r1
, uint32_t mask
, uint64_t addr
)
329 HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%lx\n", __FUNCTION__
, r1
, mask
,
333 r
= (r1
& 0xff000000UL
) >> 24;
335 HELPER_LOG("mask 0x%x %02x (0x%lx) ", mask
, r
, addr
);
338 mask
= (mask
<< 1) & 0xf;
344 /* 64/64 -> 128 unsigned multiplication */
345 void HELPER(mlg
)(uint32_t r1
, uint64_t v2
)
347 #if HOST_LONG_BITS == 64 && defined(__GNUC__)
348 /* assuming 64-bit hosts have __uint128_t */
349 __uint128_t res
= (__uint128_t
)env
->regs
[r1
+ 1];
350 res
*= (__uint128_t
)v2
;
351 env
->regs
[r1
] = (uint64_t)(res
>> 64);
352 env
->regs
[r1
+ 1] = (uint64_t)res
;
354 mulu64(&env
->regs
[r1
+ 1], &env
->regs
[r1
], env
->regs
[r1
+ 1], v2
);
358 /* 128 -> 64/64 unsigned division */
359 void HELPER(dlg
)(uint32_t r1
, uint64_t v2
)
361 uint64_t divisor
= v2
;
363 if (!env
->regs
[r1
]) {
364 /* 64 -> 64/64 case */
365 env
->regs
[r1
] = env
->regs
[r1
+1] % divisor
;
366 env
->regs
[r1
+1] = env
->regs
[r1
+1] / divisor
;
370 #if HOST_LONG_BITS == 64 && defined(__GNUC__)
371 /* assuming 64-bit hosts have __uint128_t */
372 __uint128_t dividend
= (((__uint128_t
)env
->regs
[r1
]) << 64) |
374 __uint128_t quotient
= dividend
/ divisor
;
375 env
->regs
[r1
+1] = quotient
;
376 __uint128_t remainder
= dividend
% divisor
;
377 env
->regs
[r1
] = remainder
;
379 /* 32-bit hosts would need special wrapper functionality - just abort if
380 we encounter such a case; it's very unlikely anyways. */
381 cpu_abort(env
, "128 -> 64/64 division not implemented\n");
386 static inline uint64_t get_address(int x2
, int b2
, int d2
)
399 if (!(env
->psw
.mask
& PSW_MASK_64
)) {
406 static inline uint64_t get_address_31fix(int reg
)
408 uint64_t r
= env
->regs
[reg
];
411 if (!(env
->psw
.mask
& PSW_MASK_64
)) {
418 /* search string (c is byte to search, r2 is string, r1 end of string) */
419 uint32_t HELPER(srst
)(uint32_t c
, uint32_t r1
, uint32_t r2
)
423 uint64_t str
= get_address_31fix(r2
);
424 uint64_t end
= get_address_31fix(r1
);
426 HELPER_LOG("%s: c %d *r1 0x%" PRIx64
" *r2 0x%" PRIx64
"\n", __FUNCTION__
,
427 c
, env
->regs
[r1
], env
->regs
[r2
]);
429 for (i
= str
; i
!= end
; i
++) {
440 /* unsigned string compare (c is string terminator) */
441 uint32_t HELPER(clst
)(uint32_t c
, uint32_t r1
, uint32_t r2
)
443 uint64_t s1
= get_address_31fix(r1
);
444 uint64_t s2
= get_address_31fix(r2
);
448 #ifdef CONFIG_USER_ONLY
450 HELPER_LOG("%s: comparing '%s' and '%s'\n",
451 __FUNCTION__
, (char*)g2h(s1
), (char*)g2h(s2
));
457 if ((v1
== c
|| v2
== c
) || (v1
!= v2
)) {
467 cc
= (v1
< v2
) ? 1 : 2;
468 /* FIXME: 31-bit mode! */
476 void HELPER(mvpg
)(uint64_t r0
, uint64_t r1
, uint64_t r2
)
478 /* XXX missing r0 handling */
479 #ifdef CONFIG_USER_ONLY
482 for (i
= 0; i
< TARGET_PAGE_SIZE
; i
++) {
483 stb(r1
+ i
, ldub(r2
+ i
));
486 mvc_fast_memmove(env
, TARGET_PAGE_SIZE
, r1
, r2
);
490 /* string copy (c is string terminator) */
491 void HELPER(mvst
)(uint32_t c
, uint32_t r1
, uint32_t r2
)
493 uint64_t dest
= get_address_31fix(r1
);
494 uint64_t src
= get_address_31fix(r2
);
497 #ifdef CONFIG_USER_ONLY
499 HELPER_LOG("%s: copy '%s' to 0x%lx\n", __FUNCTION__
, (char*)g2h(src
),
512 env
->regs
[r1
] = dest
; /* FIXME: 31-bit mode! */
515 /* compare and swap 64-bit */
516 uint32_t HELPER(csg
)(uint32_t r1
, uint64_t a2
, uint32_t r3
)
518 /* FIXME: locking? */
520 uint64_t v2
= ldq(a2
);
521 if (env
->regs
[r1
] == v2
) {
523 stq(a2
, env
->regs
[r3
]);
531 /* compare double and swap 64-bit */
532 uint32_t HELPER(cdsg
)(uint32_t r1
, uint64_t a2
, uint32_t r3
)
534 /* FIXME: locking? */
536 uint64_t v2_hi
= ldq(a2
);
537 uint64_t v2_lo
= ldq(a2
+ 8);
538 uint64_t v1_hi
= env
->regs
[r1
];
539 uint64_t v1_lo
= env
->regs
[r1
+ 1];
541 if ((v1_hi
== v2_hi
) && (v1_lo
== v2_lo
)) {
543 stq(a2
, env
->regs
[r3
]);
544 stq(a2
+ 8, env
->regs
[r3
+ 1]);
547 env
->regs
[r1
] = v2_hi
;
548 env
->regs
[r1
+ 1] = v2_lo
;
554 /* compare and swap 32-bit */
555 uint32_t HELPER(cs
)(uint32_t r1
, uint64_t a2
, uint32_t r3
)
557 /* FIXME: locking? */
559 HELPER_LOG("%s: r1 %d a2 0x%lx r3 %d\n", __FUNCTION__
, r1
, a2
, r3
);
560 uint32_t v2
= ldl(a2
);
561 if (((uint32_t)env
->regs
[r1
]) == v2
) {
563 stl(a2
, (uint32_t)env
->regs
[r3
]);
566 env
->regs
[r1
] = (env
->regs
[r1
] & 0xffffffff00000000ULL
) | v2
;
571 static uint32_t helper_icm(uint32_t r1
, uint64_t address
, uint32_t mask
)
573 int pos
= 24; /* top of the lower half of r1 */
574 uint64_t rmask
= 0xff000000ULL
;
581 env
->regs
[r1
] &= ~rmask
;
583 if ((val
& 0x80) && !ccd
) {
587 if (val
&& cc
== 0) {
590 env
->regs
[r1
] |= (uint64_t)val
<< pos
;
593 mask
= (mask
<< 1) & 0xf;
601 /* execute instruction
602 this instruction executes an insn modified with the contents of r1
603 it does not change the executed instruction in memory
604 it does not change the program counter
605 in other words: tricky...
606 currently implemented by interpreting the cases it is most commonly used in
608 uint32_t HELPER(ex
)(uint32_t cc
, uint64_t v1
, uint64_t addr
, uint64_t ret
)
610 uint16_t insn
= lduw_code(addr
);
611 HELPER_LOG("%s: v1 0x%lx addr 0x%lx insn 0x%x\n", __FUNCTION__
, v1
, addr
,
613 if ((insn
& 0xf0ff) == 0xd000) {
614 uint32_t l
, insn2
, b1
, b2
, d1
, d2
;
616 insn2
= ldl_code(addr
+ 2);
617 b1
= (insn2
>> 28) & 0xf;
618 b2
= (insn2
>> 12) & 0xf;
619 d1
= (insn2
>> 16) & 0xfff;
621 switch (insn
& 0xf00) {
623 helper_mvc(l
, get_address(0, b1
, d1
), get_address(0, b2
, d2
));
626 cc
= helper_clc(l
, get_address(0, b1
, d1
), get_address(0, b2
, d2
));
629 cc
= helper_xc(l
, get_address(0, b1
, d1
), get_address(0, b2
, d2
));
635 } else if ((insn
& 0xff00) == 0x0a00) {
636 /* supervisor call */
637 HELPER_LOG("%s: svc %ld via execute\n", __FUNCTION__
, (insn
|v1
) & 0xff);
638 env
->psw
.addr
= ret
- 4;
639 env
->int_svc_code
= (insn
|v1
) & 0xff;
640 env
->int_svc_ilc
= 4;
641 helper_exception(EXCP_SVC
);
642 } else if ((insn
& 0xff00) == 0xbf00) {
643 uint32_t insn2
, r1
, r3
, b2
, d2
;
644 insn2
= ldl_code(addr
+ 2);
645 r1
= (insn2
>> 20) & 0xf;
646 r3
= (insn2
>> 16) & 0xf;
647 b2
= (insn2
>> 12) & 0xf;
649 cc
= helper_icm(r1
, get_address(0, b2
, d2
), r3
);
652 cpu_abort(env
, "EXECUTE on instruction prefix 0x%x not implemented\n",
658 /* absolute value 32-bit */
659 uint32_t HELPER(abs_i32
)(int32_t val
)
668 /* negative absolute value 32-bit */
669 int32_t HELPER(nabs_i32
)(int32_t val
)
678 /* absolute value 64-bit */
679 uint64_t HELPER(abs_i64
)(int64_t val
)
681 HELPER_LOG("%s: val 0x%" PRIx64
"\n", __FUNCTION__
, val
);
690 /* negative absolute value 64-bit */
691 int64_t HELPER(nabs_i64
)(int64_t val
)
700 /* add with carry 32-bit unsigned */
701 uint32_t HELPER(addc_u32
)(uint32_t cc
, uint32_t v1
, uint32_t v2
)
713 /* store character under mask high operates on the upper half of r1 */
714 void HELPER(stcmh
)(uint32_t r1
, uint64_t address
, uint32_t mask
)
716 int pos
= 56; /* top of the upper half of r1 */
720 stb(address
, (env
->regs
[r1
] >> pos
) & 0xff);
723 mask
= (mask
<< 1) & 0xf;
728 /* insert character under mask high; same as icm, but operates on the
730 uint32_t HELPER(icmh
)(uint32_t r1
, uint64_t address
, uint32_t mask
)
732 int pos
= 56; /* top of the upper half of r1 */
733 uint64_t rmask
= 0xff00000000000000ULL
;
740 env
->regs
[r1
] &= ~rmask
;
742 if ((val
& 0x80) && !ccd
) {
746 if (val
&& cc
== 0) {
749 env
->regs
[r1
] |= (uint64_t)val
<< pos
;
752 mask
= (mask
<< 1) & 0xf;
760 /* insert psw mask and condition code into r1 */
761 void HELPER(ipm
)(uint32_t cc
, uint32_t r1
)
763 uint64_t r
= env
->regs
[r1
];
765 r
&= 0xffffffff00ffffffULL
;
766 r
|= (cc
<< 28) | ( (env
->psw
.mask
>> 40) & 0xf );
768 HELPER_LOG("%s: cc %d psw.mask 0x%lx r1 0x%lx\n", __FUNCTION__
,
769 cc
, env
->psw
.mask
, r
);
772 /* load access registers r1 to r3 from memory at a2 */
773 void HELPER(lam
)(uint32_t r1
, uint64_t a2
, uint32_t r3
)
777 for (i
= r1
;; i
= (i
+ 1) % 16) {
778 env
->aregs
[i
] = ldl(a2
);
787 /* store access registers r1 to r3 in memory at a2 */
788 void HELPER(stam
)(uint32_t r1
, uint64_t a2
, uint32_t r3
)
792 for (i
= r1
;; i
= (i
+ 1) % 16) {
793 stl(a2
, env
->aregs
[i
]);
803 uint32_t HELPER(mvcl
)(uint32_t r1
, uint32_t r2
)
805 uint64_t destlen
= env
->regs
[r1
+ 1] & 0xffffff;
806 uint64_t dest
= get_address_31fix(r1
);
807 uint64_t srclen
= env
->regs
[r2
+ 1] & 0xffffff;
808 uint64_t src
= get_address_31fix(r2
);
809 uint8_t pad
= src
>> 24;
813 if (destlen
== srclen
) {
815 } else if (destlen
< srclen
) {
821 if (srclen
> destlen
) {
825 for (; destlen
&& srclen
; src
++, dest
++, destlen
--, srclen
--) {
830 for (; destlen
; dest
++, destlen
--) {
834 env
->regs
[r1
+ 1] = destlen
;
835 /* can't use srclen here, we trunc'ed it */
836 env
->regs
[r2
+ 1] -= src
- env
->regs
[r2
];
837 env
->regs
[r1
] = dest
;
843 /* move long extended another memcopy insn with more bells and whistles */
844 uint32_t HELPER(mvcle
)(uint32_t r1
, uint64_t a2
, uint32_t r3
)
846 uint64_t destlen
= env
->regs
[r1
+ 1];
847 uint64_t dest
= env
->regs
[r1
];
848 uint64_t srclen
= env
->regs
[r3
+ 1];
849 uint64_t src
= env
->regs
[r3
];
850 uint8_t pad
= a2
& 0xff;
854 if (!(env
->psw
.mask
& PSW_MASK_64
)) {
855 destlen
= (uint32_t)destlen
;
856 srclen
= (uint32_t)srclen
;
861 if (destlen
== srclen
) {
863 } else if (destlen
< srclen
) {
869 if (srclen
> destlen
) {
873 for (; destlen
&& srclen
; src
++, dest
++, destlen
--, srclen
--) {
878 for (; destlen
; dest
++, destlen
--) {
882 env
->regs
[r1
+ 1] = destlen
;
883 /* can't use srclen here, we trunc'ed it */
884 /* FIXME: 31-bit mode! */
885 env
->regs
[r3
+ 1] -= src
- env
->regs
[r3
];
886 env
->regs
[r1
] = dest
;
892 /* compare logical long extended memcompare insn with padding */
893 uint32_t HELPER(clcle
)(uint32_t r1
, uint64_t a2
, uint32_t r3
)
895 uint64_t destlen
= env
->regs
[r1
+ 1];
896 uint64_t dest
= get_address_31fix(r1
);
897 uint64_t srclen
= env
->regs
[r3
+ 1];
898 uint64_t src
= get_address_31fix(r3
);
899 uint8_t pad
= a2
& 0xff;
900 uint8_t v1
= 0,v2
= 0;
903 if (!(destlen
|| srclen
)) {
907 if (srclen
> destlen
) {
911 for (; destlen
|| srclen
; src
++, dest
++, destlen
--, srclen
--) {
912 v1
= srclen
? ldub(src
) : pad
;
913 v2
= destlen
? ldub(dest
) : pad
;
915 cc
= (v1
< v2
) ? 1 : 2;
920 env
->regs
[r1
+ 1] = destlen
;
921 /* can't use srclen here, we trunc'ed it */
922 env
->regs
[r3
+ 1] -= src
- env
->regs
[r3
];
923 env
->regs
[r1
] = dest
;
929 /* subtract unsigned v2 from v1 with borrow */
930 uint32_t HELPER(slb
)(uint32_t cc
, uint32_t r1
, uint32_t v2
)
932 uint32_t v1
= env
->regs
[r1
];
933 uint32_t res
= v1
+ (~v2
) + (cc
>> 1);
935 env
->regs
[r1
] = (env
->regs
[r1
] & 0xffffffff00000000ULL
) | res
;
944 /* subtract unsigned v2 from v1 with borrow */
945 uint32_t HELPER(slbg
)(uint32_t cc
, uint32_t r1
, uint64_t v1
, uint64_t v2
)
947 uint64_t res
= v1
+ (~v2
) + (cc
>> 1);
958 static inline int float_comp_to_cc(int float_compare
)
960 switch (float_compare
) {
961 case float_relation_equal
:
963 case float_relation_less
:
965 case float_relation_greater
:
967 case float_relation_unordered
:
970 cpu_abort(env
, "unknown return value for float compare\n");
974 /* condition codes for binary FP ops */
975 static uint32_t set_cc_f32(float32 v1
, float32 v2
)
977 return float_comp_to_cc(float32_compare_quiet(v1
, v2
, &env
->fpu_status
));
980 static uint32_t set_cc_f64(float64 v1
, float64 v2
)
982 return float_comp_to_cc(float64_compare_quiet(v1
, v2
, &env
->fpu_status
));
985 /* condition codes for unary FP ops */
986 static uint32_t set_cc_nz_f32(float32 v
)
988 if (float32_is_any_nan(v
)) {
990 } else if (float32_is_zero(v
)) {
992 } else if (float32_is_neg(v
)) {
999 static uint32_t set_cc_nz_f64(float64 v
)
1001 if (float64_is_any_nan(v
)) {
1003 } else if (float64_is_zero(v
)) {
1005 } else if (float64_is_neg(v
)) {
1012 static uint32_t set_cc_nz_f128(float128 v
)
1014 if (float128_is_any_nan(v
)) {
1016 } else if (float128_is_zero(v
)) {
1018 } else if (float128_is_neg(v
)) {
1025 /* convert 32-bit int to 64-bit float */
1026 void HELPER(cdfbr
)(uint32_t f1
, int32_t v2
)
1028 HELPER_LOG("%s: converting %d to f%d\n", __FUNCTION__
, v2
, f1
);
1029 env
->fregs
[f1
].d
= int32_to_float64(v2
, &env
->fpu_status
);
1032 /* convert 32-bit int to 128-bit float */
1033 void HELPER(cxfbr
)(uint32_t f1
, int32_t v2
)
1036 v1
.q
= int32_to_float128(v2
, &env
->fpu_status
);
1037 env
->fregs
[f1
].ll
= v1
.ll
.upper
;
1038 env
->fregs
[f1
+ 2].ll
= v1
.ll
.lower
;
1041 /* convert 64-bit int to 32-bit float */
1042 void HELPER(cegbr
)(uint32_t f1
, int64_t v2
)
1044 HELPER_LOG("%s: converting %ld to f%d\n", __FUNCTION__
, v2
, f1
);
1045 env
->fregs
[f1
].l
.upper
= int64_to_float32(v2
, &env
->fpu_status
);
1048 /* convert 64-bit int to 64-bit float */
1049 void HELPER(cdgbr
)(uint32_t f1
, int64_t v2
)
1051 HELPER_LOG("%s: converting %ld to f%d\n", __FUNCTION__
, v2
, f1
);
1052 env
->fregs
[f1
].d
= int64_to_float64(v2
, &env
->fpu_status
);
1055 /* convert 64-bit int to 128-bit float */
1056 void HELPER(cxgbr
)(uint32_t f1
, int64_t v2
)
1059 x1
.q
= int64_to_float128(v2
, &env
->fpu_status
);
1060 HELPER_LOG("%s: converted %ld to 0x%lx and 0x%lx\n", __FUNCTION__
, v2
,
1061 x1
.ll
.upper
, x1
.ll
.lower
);
1062 env
->fregs
[f1
].ll
= x1
.ll
.upper
;
1063 env
->fregs
[f1
+ 2].ll
= x1
.ll
.lower
;
1066 /* convert 32-bit int to 32-bit float */
1067 void HELPER(cefbr
)(uint32_t f1
, int32_t v2
)
1069 env
->fregs
[f1
].l
.upper
= int32_to_float32(v2
, &env
->fpu_status
);
1070 HELPER_LOG("%s: converting %d to 0x%d in f%d\n", __FUNCTION__
, v2
,
1071 env
->fregs
[f1
].l
.upper
, f1
);
1074 /* 32-bit FP addition RR */
1075 uint32_t HELPER(aebr
)(uint32_t f1
, uint32_t f2
)
1077 env
->fregs
[f1
].l
.upper
= float32_add(env
->fregs
[f1
].l
.upper
,
1078 env
->fregs
[f2
].l
.upper
,
1080 HELPER_LOG("%s: adding 0x%d resulting in 0x%d in f%d\n", __FUNCTION__
,
1081 env
->fregs
[f2
].l
.upper
, env
->fregs
[f1
].l
.upper
, f1
);
1083 return set_cc_nz_f32(env
->fregs
[f1
].l
.upper
);
1086 /* 64-bit FP addition RR */
1087 uint32_t HELPER(adbr
)(uint32_t f1
, uint32_t f2
)
1089 env
->fregs
[f1
].d
= float64_add(env
->fregs
[f1
].d
, env
->fregs
[f2
].d
,
1091 HELPER_LOG("%s: adding 0x%ld resulting in 0x%ld in f%d\n", __FUNCTION__
,
1092 env
->fregs
[f2
].d
, env
->fregs
[f1
].d
, f1
);
1094 return set_cc_nz_f64(env
->fregs
[f1
].d
);
1097 /* 32-bit FP subtraction RR */
1098 uint32_t HELPER(sebr
)(uint32_t f1
, uint32_t f2
)
1100 env
->fregs
[f1
].l
.upper
= float32_sub(env
->fregs
[f1
].l
.upper
,
1101 env
->fregs
[f2
].l
.upper
,
1103 HELPER_LOG("%s: adding 0x%d resulting in 0x%d in f%d\n", __FUNCTION__
,
1104 env
->fregs
[f2
].l
.upper
, env
->fregs
[f1
].l
.upper
, f1
);
1106 return set_cc_nz_f32(env
->fregs
[f1
].l
.upper
);
1109 /* 64-bit FP subtraction RR */
1110 uint32_t HELPER(sdbr
)(uint32_t f1
, uint32_t f2
)
1112 env
->fregs
[f1
].d
= float64_sub(env
->fregs
[f1
].d
, env
->fregs
[f2
].d
,
1114 HELPER_LOG("%s: subtracting 0x%ld resulting in 0x%ld in f%d\n",
1115 __FUNCTION__
, env
->fregs
[f2
].d
, env
->fregs
[f1
].d
, f1
);
1117 return set_cc_nz_f64(env
->fregs
[f1
].d
);
1120 /* 32-bit FP division RR */
1121 void HELPER(debr
)(uint32_t f1
, uint32_t f2
)
1123 env
->fregs
[f1
].l
.upper
= float32_div(env
->fregs
[f1
].l
.upper
,
1124 env
->fregs
[f2
].l
.upper
,
1128 /* 128-bit FP division RR */
1129 void HELPER(dxbr
)(uint32_t f1
, uint32_t f2
)
1132 v1
.ll
.upper
= env
->fregs
[f1
].ll
;
1133 v1
.ll
.lower
= env
->fregs
[f1
+ 2].ll
;
1135 v2
.ll
.upper
= env
->fregs
[f2
].ll
;
1136 v2
.ll
.lower
= env
->fregs
[f2
+ 2].ll
;
1138 res
.q
= float128_div(v1
.q
, v2
.q
, &env
->fpu_status
);
1139 env
->fregs
[f1
].ll
= res
.ll
.upper
;
1140 env
->fregs
[f1
+ 2].ll
= res
.ll
.lower
;
1143 /* 64-bit FP multiplication RR */
1144 void HELPER(mdbr
)(uint32_t f1
, uint32_t f2
)
1146 env
->fregs
[f1
].d
= float64_mul(env
->fregs
[f1
].d
, env
->fregs
[f2
].d
,
1150 /* 128-bit FP multiplication RR */
1151 void HELPER(mxbr
)(uint32_t f1
, uint32_t f2
)
1154 v1
.ll
.upper
= env
->fregs
[f1
].ll
;
1155 v1
.ll
.lower
= env
->fregs
[f1
+ 2].ll
;
1157 v2
.ll
.upper
= env
->fregs
[f2
].ll
;
1158 v2
.ll
.lower
= env
->fregs
[f2
+ 2].ll
;
1160 res
.q
= float128_mul(v1
.q
, v2
.q
, &env
->fpu_status
);
1161 env
->fregs
[f1
].ll
= res
.ll
.upper
;
1162 env
->fregs
[f1
+ 2].ll
= res
.ll
.lower
;
1165 /* convert 32-bit float to 64-bit float */
1166 void HELPER(ldebr
)(uint32_t r1
, uint32_t r2
)
1168 env
->fregs
[r1
].d
= float32_to_float64(env
->fregs
[r2
].l
.upper
,
1172 /* convert 128-bit float to 64-bit float */
1173 void HELPER(ldxbr
)(uint32_t f1
, uint32_t f2
)
1176 x2
.ll
.upper
= env
->fregs
[f2
].ll
;
1177 x2
.ll
.lower
= env
->fregs
[f2
+ 2].ll
;
1178 env
->fregs
[f1
].d
= float128_to_float64(x2
.q
, &env
->fpu_status
);
1179 HELPER_LOG("%s: to 0x%ld\n", __FUNCTION__
, env
->fregs
[f1
].d
);
1182 /* convert 64-bit float to 128-bit float */
1183 void HELPER(lxdbr
)(uint32_t f1
, uint32_t f2
)
1186 res
.q
= float64_to_float128(env
->fregs
[f2
].d
, &env
->fpu_status
);
1187 env
->fregs
[f1
].ll
= res
.ll
.upper
;
1188 env
->fregs
[f1
+ 2].ll
= res
.ll
.lower
;
1191 /* convert 64-bit float to 32-bit float */
1192 void HELPER(ledbr
)(uint32_t f1
, uint32_t f2
)
1194 float64 d2
= env
->fregs
[f2
].d
;
1195 env
->fregs
[f1
].l
.upper
= float64_to_float32(d2
, &env
->fpu_status
);
1198 /* convert 128-bit float to 32-bit float */
1199 void HELPER(lexbr
)(uint32_t f1
, uint32_t f2
)
1202 x2
.ll
.upper
= env
->fregs
[f2
].ll
;
1203 x2
.ll
.lower
= env
->fregs
[f2
+ 2].ll
;
1204 env
->fregs
[f1
].l
.upper
= float128_to_float32(x2
.q
, &env
->fpu_status
);
1205 HELPER_LOG("%s: to 0x%d\n", __FUNCTION__
, env
->fregs
[f1
].l
.upper
);
1208 /* absolute value of 32-bit float */
1209 uint32_t HELPER(lpebr
)(uint32_t f1
, uint32_t f2
)
1212 float32 v2
= env
->fregs
[f2
].d
;
1213 v1
= float32_abs(v2
);
1214 env
->fregs
[f1
].d
= v1
;
1215 return set_cc_nz_f32(v1
);
1218 /* absolute value of 64-bit float */
1219 uint32_t HELPER(lpdbr
)(uint32_t f1
, uint32_t f2
)
1222 float64 v2
= env
->fregs
[f2
].d
;
1223 v1
= float64_abs(v2
);
1224 env
->fregs
[f1
].d
= v1
;
1225 return set_cc_nz_f64(v1
);
1228 /* absolute value of 128-bit float */
1229 uint32_t HELPER(lpxbr
)(uint32_t f1
, uint32_t f2
)
1233 v2
.ll
.upper
= env
->fregs
[f2
].ll
;
1234 v2
.ll
.lower
= env
->fregs
[f2
+ 2].ll
;
1235 v1
.q
= float128_abs(v2
.q
);
1236 env
->fregs
[f1
].ll
= v1
.ll
.upper
;
1237 env
->fregs
[f1
+ 2].ll
= v1
.ll
.lower
;
1238 return set_cc_nz_f128(v1
.q
);
1241 /* load and test 64-bit float */
1242 uint32_t HELPER(ltdbr
)(uint32_t f1
, uint32_t f2
)
1244 env
->fregs
[f1
].d
= env
->fregs
[f2
].d
;
1245 return set_cc_nz_f64(env
->fregs
[f1
].d
);
1248 /* load and test 32-bit float */
1249 uint32_t HELPER(ltebr
)(uint32_t f1
, uint32_t f2
)
1251 env
->fregs
[f1
].l
.upper
= env
->fregs
[f2
].l
.upper
;
1252 return set_cc_nz_f32(env
->fregs
[f1
].l
.upper
);
1255 /* load and test 128-bit float */
1256 uint32_t HELPER(ltxbr
)(uint32_t f1
, uint32_t f2
)
1259 x
.ll
.upper
= env
->fregs
[f2
].ll
;
1260 x
.ll
.lower
= env
->fregs
[f2
+ 2].ll
;
1261 env
->fregs
[f1
].ll
= x
.ll
.upper
;
1262 env
->fregs
[f1
+ 2].ll
= x
.ll
.lower
;
1263 return set_cc_nz_f128(x
.q
);
1266 /* load complement of 32-bit float */
1267 uint32_t HELPER(lcebr
)(uint32_t f1
, uint32_t f2
)
1269 env
->fregs
[f1
].l
.upper
= float32_chs(env
->fregs
[f2
].l
.upper
);
1271 return set_cc_nz_f32(env
->fregs
[f1
].l
.upper
);
1274 /* load complement of 64-bit float */
1275 uint32_t HELPER(lcdbr
)(uint32_t f1
, uint32_t f2
)
1277 env
->fregs
[f1
].d
= float64_chs(env
->fregs
[f2
].d
);
1279 return set_cc_nz_f64(env
->fregs
[f1
].d
);
1282 /* load complement of 128-bit float */
1283 uint32_t HELPER(lcxbr
)(uint32_t f1
, uint32_t f2
)
1286 x2
.ll
.upper
= env
->fregs
[f2
].ll
;
1287 x2
.ll
.lower
= env
->fregs
[f2
+ 2].ll
;
1288 x1
.q
= float128_chs(x2
.q
);
1289 env
->fregs
[f1
].ll
= x1
.ll
.upper
;
1290 env
->fregs
[f1
+ 2].ll
= x1
.ll
.lower
;
1291 return set_cc_nz_f128(x1
.q
);
1294 /* 32-bit FP addition RM */
1295 void HELPER(aeb
)(uint32_t f1
, uint32_t val
)
1297 float32 v1
= env
->fregs
[f1
].l
.upper
;
1300 HELPER_LOG("%s: adding 0x%d from f%d and 0x%d\n", __FUNCTION__
,
1302 env
->fregs
[f1
].l
.upper
= float32_add(v1
, v2
.f
, &env
->fpu_status
);
1305 /* 32-bit FP division RM */
1306 void HELPER(deb
)(uint32_t f1
, uint32_t val
)
1308 float32 v1
= env
->fregs
[f1
].l
.upper
;
1311 HELPER_LOG("%s: dividing 0x%d from f%d by 0x%d\n", __FUNCTION__
,
1313 env
->fregs
[f1
].l
.upper
= float32_div(v1
, v2
.f
, &env
->fpu_status
);
1316 /* 32-bit FP multiplication RM */
1317 void HELPER(meeb
)(uint32_t f1
, uint32_t val
)
1319 float32 v1
= env
->fregs
[f1
].l
.upper
;
1322 HELPER_LOG("%s: multiplying 0x%d from f%d and 0x%d\n", __FUNCTION__
,
1324 env
->fregs
[f1
].l
.upper
= float32_mul(v1
, v2
.f
, &env
->fpu_status
);
1327 /* 32-bit FP compare RR */
1328 uint32_t HELPER(cebr
)(uint32_t f1
, uint32_t f2
)
1330 float32 v1
= env
->fregs
[f1
].l
.upper
;
1331 float32 v2
= env
->fregs
[f2
].l
.upper
;;
1332 HELPER_LOG("%s: comparing 0x%d from f%d and 0x%d\n", __FUNCTION__
,
1334 return set_cc_f32(v1
, v2
);
1337 /* 64-bit FP compare RR */
1338 uint32_t HELPER(cdbr
)(uint32_t f1
, uint32_t f2
)
1340 float64 v1
= env
->fregs
[f1
].d
;
1341 float64 v2
= env
->fregs
[f2
].d
;;
1342 HELPER_LOG("%s: comparing 0x%ld from f%d and 0x%ld\n", __FUNCTION__
,
1344 return set_cc_f64(v1
, v2
);
1347 /* 128-bit FP compare RR */
1348 uint32_t HELPER(cxbr
)(uint32_t f1
, uint32_t f2
)
1351 v1
.ll
.upper
= env
->fregs
[f1
].ll
;
1352 v1
.ll
.lower
= env
->fregs
[f1
+ 2].ll
;
1354 v2
.ll
.upper
= env
->fregs
[f2
].ll
;
1355 v2
.ll
.lower
= env
->fregs
[f2
+ 2].ll
;
1357 return float_comp_to_cc(float128_compare_quiet(v1
.q
, v2
.q
,
1361 /* 64-bit FP compare RM */
1362 uint32_t HELPER(cdb
)(uint32_t f1
, uint64_t a2
)
1364 float64 v1
= env
->fregs
[f1
].d
;
1367 HELPER_LOG("%s: comparing 0x%ld from f%d and 0x%lx\n", __FUNCTION__
, v1
,
1369 return set_cc_f64(v1
, v2
.d
);
1372 /* 64-bit FP addition RM */
1373 uint32_t HELPER(adb
)(uint32_t f1
, uint64_t a2
)
1375 float64 v1
= env
->fregs
[f1
].d
;
1378 HELPER_LOG("%s: adding 0x%lx from f%d and 0x%lx\n", __FUNCTION__
,
1380 env
->fregs
[f1
].d
= v1
= float64_add(v1
, v2
.d
, &env
->fpu_status
);
1381 return set_cc_nz_f64(v1
);
1384 /* 32-bit FP subtraction RM */
1385 void HELPER(seb
)(uint32_t f1
, uint32_t val
)
1387 float32 v1
= env
->fregs
[f1
].l
.upper
;
1390 env
->fregs
[f1
].l
.upper
= float32_sub(v1
, v2
.f
, &env
->fpu_status
);
1393 /* 64-bit FP subtraction RM */
1394 uint32_t HELPER(sdb
)(uint32_t f1
, uint64_t a2
)
1396 float64 v1
= env
->fregs
[f1
].d
;
1399 env
->fregs
[f1
].d
= v1
= float64_sub(v1
, v2
.d
, &env
->fpu_status
);
1400 return set_cc_nz_f64(v1
);
1403 /* 64-bit FP multiplication RM */
1404 void HELPER(mdb
)(uint32_t f1
, uint64_t a2
)
1406 float64 v1
= env
->fregs
[f1
].d
;
1409 HELPER_LOG("%s: multiplying 0x%lx from f%d and 0x%ld\n", __FUNCTION__
,
1411 env
->fregs
[f1
].d
= float64_mul(v1
, v2
.d
, &env
->fpu_status
);
1414 /* 64-bit FP division RM */
1415 void HELPER(ddb
)(uint32_t f1
, uint64_t a2
)
1417 float64 v1
= env
->fregs
[f1
].d
;
1420 HELPER_LOG("%s: dividing 0x%lx from f%d by 0x%ld\n", __FUNCTION__
,
1422 env
->fregs
[f1
].d
= float64_div(v1
, v2
.d
, &env
->fpu_status
);
1425 static void set_round_mode(int m3
)
1432 /* biased round no nearest */
1434 /* round to nearest */
1435 set_float_rounding_mode(float_round_nearest_even
, &env
->fpu_status
);
1439 set_float_rounding_mode(float_round_to_zero
, &env
->fpu_status
);
1443 set_float_rounding_mode(float_round_up
, &env
->fpu_status
);
1447 set_float_rounding_mode(float_round_down
, &env
->fpu_status
);
1452 /* convert 32-bit float to 64-bit int */
1453 uint32_t HELPER(cgebr
)(uint32_t r1
, uint32_t f2
, uint32_t m3
)
1455 float32 v2
= env
->fregs
[f2
].l
.upper
;
1457 env
->regs
[r1
] = float32_to_int64(v2
, &env
->fpu_status
);
1458 return set_cc_nz_f32(v2
);
1461 /* convert 64-bit float to 64-bit int */
1462 uint32_t HELPER(cgdbr
)(uint32_t r1
, uint32_t f2
, uint32_t m3
)
1464 float64 v2
= env
->fregs
[f2
].d
;
1466 env
->regs
[r1
] = float64_to_int64(v2
, &env
->fpu_status
);
1467 return set_cc_nz_f64(v2
);
1470 /* convert 128-bit float to 64-bit int */
1471 uint32_t HELPER(cgxbr
)(uint32_t r1
, uint32_t f2
, uint32_t m3
)
1474 v2
.ll
.upper
= env
->fregs
[f2
].ll
;
1475 v2
.ll
.lower
= env
->fregs
[f2
+ 2].ll
;
1477 env
->regs
[r1
] = float128_to_int64(v2
.q
, &env
->fpu_status
);
1478 if (float128_is_any_nan(v2
.q
)) {
1480 } else if (float128_is_zero(v2
.q
)) {
1482 } else if (float128_is_neg(v2
.q
)) {
1489 /* convert 32-bit float to 32-bit int */
1490 uint32_t HELPER(cfebr
)(uint32_t r1
, uint32_t f2
, uint32_t m3
)
1492 float32 v2
= env
->fregs
[f2
].l
.upper
;
1494 env
->regs
[r1
] = (env
->regs
[r1
] & 0xffffffff00000000ULL
) |
1495 float32_to_int32(v2
, &env
->fpu_status
);
1496 return set_cc_nz_f32(v2
);
1499 /* convert 64-bit float to 32-bit int */
1500 uint32_t HELPER(cfdbr
)(uint32_t r1
, uint32_t f2
, uint32_t m3
)
1502 float64 v2
= env
->fregs
[f2
].d
;
1504 env
->regs
[r1
] = (env
->regs
[r1
] & 0xffffffff00000000ULL
) |
1505 float64_to_int32(v2
, &env
->fpu_status
);
1506 return set_cc_nz_f64(v2
);
1509 /* convert 128-bit float to 32-bit int */
1510 uint32_t HELPER(cfxbr
)(uint32_t r1
, uint32_t f2
, uint32_t m3
)
1513 v2
.ll
.upper
= env
->fregs
[f2
].ll
;
1514 v2
.ll
.lower
= env
->fregs
[f2
+ 2].ll
;
1515 env
->regs
[r1
] = (env
->regs
[r1
] & 0xffffffff00000000ULL
) |
1516 float128_to_int32(v2
.q
, &env
->fpu_status
);
1517 return set_cc_nz_f128(v2
.q
);
1520 /* load 32-bit FP zero */
1521 void HELPER(lzer
)(uint32_t f1
)
1523 env
->fregs
[f1
].l
.upper
= float32_zero
;
1526 /* load 64-bit FP zero */
1527 void HELPER(lzdr
)(uint32_t f1
)
1529 env
->fregs
[f1
].d
= float64_zero
;
1532 /* load 128-bit FP zero */
1533 void HELPER(lzxr
)(uint32_t f1
)
1536 x
.q
= float64_to_float128(float64_zero
, &env
->fpu_status
);
1537 env
->fregs
[f1
].ll
= x
.ll
.upper
;
1538 env
->fregs
[f1
+ 1].ll
= x
.ll
.lower
;
1541 /* 128-bit FP subtraction RR */
1542 uint32_t HELPER(sxbr
)(uint32_t f1
, uint32_t f2
)
1545 v1
.ll
.upper
= env
->fregs
[f1
].ll
;
1546 v1
.ll
.lower
= env
->fregs
[f1
+ 2].ll
;
1548 v2
.ll
.upper
= env
->fregs
[f2
].ll
;
1549 v2
.ll
.lower
= env
->fregs
[f2
+ 2].ll
;
1551 res
.q
= float128_sub(v1
.q
, v2
.q
, &env
->fpu_status
);
1552 env
->fregs
[f1
].ll
= res
.ll
.upper
;
1553 env
->fregs
[f1
+ 2].ll
= res
.ll
.lower
;
1554 return set_cc_nz_f128(res
.q
);
1557 /* 128-bit FP addition RR */
1558 uint32_t HELPER(axbr
)(uint32_t f1
, uint32_t f2
)
1561 v1
.ll
.upper
= env
->fregs
[f1
].ll
;
1562 v1
.ll
.lower
= env
->fregs
[f1
+ 2].ll
;
1564 v2
.ll
.upper
= env
->fregs
[f2
].ll
;
1565 v2
.ll
.lower
= env
->fregs
[f2
+ 2].ll
;
1567 res
.q
= float128_add(v1
.q
, v2
.q
, &env
->fpu_status
);
1568 env
->fregs
[f1
].ll
= res
.ll
.upper
;
1569 env
->fregs
[f1
+ 2].ll
= res
.ll
.lower
;
1570 return set_cc_nz_f128(res
.q
);
1573 /* 32-bit FP multiplication RR */
1574 void HELPER(meebr
)(uint32_t f1
, uint32_t f2
)
1576 env
->fregs
[f1
].l
.upper
= float32_mul(env
->fregs
[f1
].l
.upper
,
1577 env
->fregs
[f2
].l
.upper
,
1581 /* 64-bit FP division RR */
1582 void HELPER(ddbr
)(uint32_t f1
, uint32_t f2
)
1584 env
->fregs
[f1
].d
= float64_div(env
->fregs
[f1
].d
, env
->fregs
[f2
].d
,
1588 /* 64-bit FP multiply and add RM */
1589 void HELPER(madb
)(uint32_t f1
, uint64_t a2
, uint32_t f3
)
1591 HELPER_LOG("%s: f1 %d a2 0x%lx f3 %d\n", __FUNCTION__
, f1
, a2
, f3
);
1594 env
->fregs
[f1
].d
= float64_add(env
->fregs
[f1
].d
,
1595 float64_mul(v2
.d
, env
->fregs
[f3
].d
,
1600 /* 64-bit FP multiply and add RR */
1601 void HELPER(madbr
)(uint32_t f1
, uint32_t f3
, uint32_t f2
)
1603 HELPER_LOG("%s: f1 %d f2 %d f3 %d\n", __FUNCTION__
, f1
, f2
, f3
);
1604 env
->fregs
[f1
].d
= float64_add(float64_mul(env
->fregs
[f2
].d
,
1607 env
->fregs
[f1
].d
, &env
->fpu_status
);
1610 /* 64-bit FP multiply and subtract RR */
1611 void HELPER(msdbr
)(uint32_t f1
, uint32_t f3
, uint32_t f2
)
1613 HELPER_LOG("%s: f1 %d f2 %d f3 %d\n", __FUNCTION__
, f1
, f2
, f3
);
1614 env
->fregs
[f1
].d
= float64_sub(float64_mul(env
->fregs
[f2
].d
,
1617 env
->fregs
[f1
].d
, &env
->fpu_status
);
1620 /* 32-bit FP multiply and add RR */
1621 void HELPER(maebr
)(uint32_t f1
, uint32_t f3
, uint32_t f2
)
1623 env
->fregs
[f1
].l
.upper
= float32_add(env
->fregs
[f1
].l
.upper
,
1624 float32_mul(env
->fregs
[f2
].l
.upper
,
1625 env
->fregs
[f3
].l
.upper
,
1630 /* convert 64-bit float to 128-bit float */
1631 void HELPER(lxdb
)(uint32_t f1
, uint64_t a2
)
1636 v1
.q
= float64_to_float128(v2
.d
, &env
->fpu_status
);
1637 env
->fregs
[f1
].ll
= v1
.ll
.upper
;
1638 env
->fregs
[f1
+ 2].ll
= v1
.ll
.lower
;
1641 /* test data class 32-bit */
1642 uint32_t HELPER(tceb
)(uint32_t f1
, uint64_t m2
)
1644 float32 v1
= env
->fregs
[f1
].l
.upper
;
1645 int neg
= float32_is_neg(v1
);
1648 HELPER_LOG("%s: v1 0x%lx m2 0x%lx neg %d\n", __FUNCTION__
, (long)v1
, m2
, neg
);
1649 if ((float32_is_zero(v1
) && (m2
& (1 << (11-neg
)))) ||
1650 (float32_is_infinity(v1
) && (m2
& (1 << (5-neg
)))) ||
1651 (float32_is_any_nan(v1
) && (m2
& (1 << (3-neg
)))) ||
1652 (float32_is_signaling_nan(v1
) && (m2
& (1 << (1-neg
))))) {
1654 } else if (m2
& (1 << (9-neg
))) {
1655 /* assume normalized number */
1659 /* FIXME: denormalized? */
1663 /* test data class 64-bit */
1664 uint32_t HELPER(tcdb
)(uint32_t f1
, uint64_t m2
)
1666 float64 v1
= env
->fregs
[f1
].d
;
1667 int neg
= float64_is_neg(v1
);
1670 HELPER_LOG("%s: v1 0x%lx m2 0x%lx neg %d\n", __FUNCTION__
, v1
, m2
, neg
);
1671 if ((float64_is_zero(v1
) && (m2
& (1 << (11-neg
)))) ||
1672 (float64_is_infinity(v1
) && (m2
& (1 << (5-neg
)))) ||
1673 (float64_is_any_nan(v1
) && (m2
& (1 << (3-neg
)))) ||
1674 (float64_is_signaling_nan(v1
) && (m2
& (1 << (1-neg
))))) {
1676 } else if (m2
& (1 << (9-neg
))) {
1677 /* assume normalized number */
1680 /* FIXME: denormalized? */
1684 /* test data class 128-bit */
1685 uint32_t HELPER(tcxb
)(uint32_t f1
, uint64_t m2
)
1689 v1
.ll
.upper
= env
->fregs
[f1
].ll
;
1690 v1
.ll
.lower
= env
->fregs
[f1
+ 2].ll
;
1692 int neg
= float128_is_neg(v1
.q
);
1693 if ((float128_is_zero(v1
.q
) && (m2
& (1 << (11-neg
)))) ||
1694 (float128_is_infinity(v1
.q
) && (m2
& (1 << (5-neg
)))) ||
1695 (float128_is_any_nan(v1
.q
) && (m2
& (1 << (3-neg
)))) ||
1696 (float128_is_signaling_nan(v1
.q
) && (m2
& (1 << (1-neg
))))) {
1698 } else if (m2
& (1 << (9-neg
))) {
1699 /* assume normalized number */
1702 /* FIXME: denormalized? */
1706 /* find leftmost one */
1707 uint32_t HELPER(flogr
)(uint32_t r1
, uint64_t v2
)
1712 while (!(v2
& 0x8000000000000000ULL
) && v2
) {
1719 env
->regs
[r1
+ 1] = 0;
1722 env
->regs
[r1
] = res
;
1723 env
->regs
[r1
+ 1] = ov2
& ~(0x8000000000000000ULL
>> res
);
1728 /* square root 64-bit RR */
1729 void HELPER(sqdbr
)(uint32_t f1
, uint32_t f2
)
1731 env
->fregs
[f1
].d
= float64_sqrt(env
->fregs
[f2
].d
, &env
->fpu_status
);
/* fold a checksum accumulator back into 32 bits with end-around carry */
static inline uint64_t cksm_overflow(uint64_t cksm)
{
    if (cksm > 0xffffffffULL) {
        cksm &= 0xffffffffULL;
        cksm++;   /* NOTE(review): end-around carry reconstructed */
    }
    return cksm;
}
1744 void HELPER(cksm
)(uint32_t r1
, uint32_t r2
)
1746 uint64_t src
= get_address_31fix(r2
);
1747 uint64_t src_len
= env
->regs
[(r2
+ 1) & 15];
1750 while (src_len
>= 4) {
1752 cksm
= cksm_overflow(cksm
);
1754 /* move to next word */
1764 cksm
= cksm_overflow(cksm
);
1768 cksm
= cksm_overflow(cksm
);
1771 /* XXX check if this really is correct */
1772 cksm
+= lduw(src
) << 8;
1773 cksm
+= ldub(src
+ 2);
1774 cksm
= cksm_overflow(cksm
);
1778 /* indicate we've processed everything */
1779 env
->regs
[(r2
+ 1) & 15] = 0;
1782 env
->regs
[r1
] = (env
->regs
[r1
] & 0xffffffff00000000ULL
) | (uint32_t)cksm
;
1785 static inline uint32_t cc_calc_ltgt_32(CPUState
*env
, int32_t src
,
1790 } else if (src
< dst
) {
1797 static inline uint32_t cc_calc_ltgt0_32(CPUState
*env
, int32_t dst
)
1799 return cc_calc_ltgt_32(env
, dst
, 0);
1802 static inline uint32_t cc_calc_ltgt_64(CPUState
*env
, int64_t src
,
1807 } else if (src
< dst
) {
1814 static inline uint32_t cc_calc_ltgt0_64(CPUState
*env
, int64_t dst
)
1816 return cc_calc_ltgt_64(env
, dst
, 0);
1819 static inline uint32_t cc_calc_ltugtu_32(CPUState
*env
, uint32_t src
,
1824 } else if (src
< dst
) {
1831 static inline uint32_t cc_calc_ltugtu_64(CPUState
*env
, uint64_t src
,
1836 } else if (src
< dst
) {
1843 static inline uint32_t cc_calc_tm_32(CPUState
*env
, uint32_t val
, uint32_t mask
)
1845 HELPER_LOG("%s: val 0x%x mask 0x%x\n", __FUNCTION__
, val
, mask
);
1846 uint16_t r
= val
& mask
;
1847 if (r
== 0 || mask
== 0) {
1849 } else if (r
== mask
) {
1856 /* set condition code for test under mask */
1857 static inline uint32_t cc_calc_tm_64(CPUState
*env
, uint64_t val
, uint32_t mask
)
1859 uint16_t r
= val
& mask
;
1860 HELPER_LOG("%s: val 0x%lx mask 0x%x r 0x%x\n", __FUNCTION__
, val
, mask
, r
);
1861 if (r
== 0 || mask
== 0) {
1863 } else if (r
== mask
) {
1866 while (!(mask
& 0x8000)) {
1878 static inline uint32_t cc_calc_nz(CPUState
*env
, uint64_t dst
)
1883 static inline uint32_t cc_calc_add_64(CPUState
*env
, int64_t a1
, int64_t a2
,
1886 if ((a1
> 0 && a2
> 0 && ar
< 0) || (a1
< 0 && a2
< 0 && ar
> 0)) {
1887 return 3; /* overflow */
1891 } else if (ar
> 0) {
1899 static inline uint32_t cc_calc_addu_64(CPUState
*env
, uint64_t a1
, uint64_t a2
,
1909 if (ar
< a1
|| ar
< a2
) {
1917 static inline uint32_t cc_calc_sub_64(CPUState
*env
, int64_t a1
, int64_t a2
,
1920 if ((a1
> 0 && a2
< 0 && ar
< 0) || (a1
< 0 && a2
> 0 && ar
> 0)) {
1921 return 3; /* overflow */
1925 } else if (ar
> 0) {
1933 static inline uint32_t cc_calc_subu_64(CPUState
*env
, uint64_t a1
, uint64_t a2
,
1947 static inline uint32_t cc_calc_abs_64(CPUState
*env
, int64_t dst
)
1949 if ((uint64_t)dst
== 0x8000000000000000ULL
) {
1958 static inline uint32_t cc_calc_nabs_64(CPUState
*env
, int64_t dst
)
1963 static inline uint32_t cc_calc_comp_64(CPUState
*env
, int64_t dst
)
1965 if ((uint64_t)dst
== 0x8000000000000000ULL
) {
1967 } else if (dst
< 0) {
1969 } else if (dst
> 0) {
1977 static inline uint32_t cc_calc_add_32(CPUState
*env
, int32_t a1
, int32_t a2
,
1980 if ((a1
> 0 && a2
> 0 && ar
< 0) || (a1
< 0 && a2
< 0 && ar
> 0)) {
1981 return 3; /* overflow */
1985 } else if (ar
> 0) {
1993 static inline uint32_t cc_calc_addu_32(CPUState
*env
, uint32_t a1
, uint32_t a2
,
2003 if (ar
< a1
|| ar
< a2
) {
2011 static inline uint32_t cc_calc_sub_32(CPUState
*env
, int32_t a1
, int32_t a2
,
2014 if ((a1
> 0 && a2
< 0 && ar
< 0) || (a1
< 0 && a2
> 0 && ar
> 0)) {
2015 return 3; /* overflow */
2019 } else if (ar
> 0) {
2027 static inline uint32_t cc_calc_subu_32(CPUState
*env
, uint32_t a1
, uint32_t a2
,
2041 static inline uint32_t cc_calc_abs_32(CPUState
*env
, int32_t dst
)
2043 if ((uint32_t)dst
== 0x80000000UL
) {
2052 static inline uint32_t cc_calc_nabs_32(CPUState
*env
, int32_t dst
)
2057 static inline uint32_t cc_calc_comp_32(CPUState
*env
, int32_t dst
)
2059 if ((uint32_t)dst
== 0x80000000UL
) {
2061 } else if (dst
< 0) {
2063 } else if (dst
> 0) {
2070 /* calculate condition code for insert character under mask insn */
2071 static inline uint32_t cc_calc_icm_32(CPUState
*env
, uint32_t mask
, uint32_t val
)
2073 HELPER_LOG("%s: mask 0x%x val %d\n", __FUNCTION__
, mask
, val
);
2079 } else if (val
& 0x80000000) {
2086 if (!val
|| !mask
) {
2102 static inline uint32_t cc_calc_slag(CPUState
*env
, uint64_t src
, uint64_t shift
)
2104 uint64_t mask
= ((1ULL << shift
) - 1ULL) << (64 - shift
);
2107 /* check if the sign bit stays the same */
2108 if (src
& (1ULL << 63)) {
2114 if ((src
& mask
) != match
) {
2119 r
= ((src
<< shift
) & ((1ULL << 63) - 1)) | (src
& (1ULL << 63));
2121 if ((int64_t)r
== 0) {
2123 } else if ((int64_t)r
< 0) {
2131 static inline uint32_t do_calc_cc(CPUState
*env
, uint32_t cc_op
, uint64_t src
,
2132 uint64_t dst
, uint64_t vr
)
2141 /* cc_op value _is_ cc */
2144 case CC_OP_LTGT0_32
:
2145 r
= cc_calc_ltgt0_32(env
, dst
);
2147 case CC_OP_LTGT0_64
:
2148 r
= cc_calc_ltgt0_64(env
, dst
);
2151 r
= cc_calc_ltgt_32(env
, src
, dst
);
2154 r
= cc_calc_ltgt_64(env
, src
, dst
);
2156 case CC_OP_LTUGTU_32
:
2157 r
= cc_calc_ltugtu_32(env
, src
, dst
);
2159 case CC_OP_LTUGTU_64
:
2160 r
= cc_calc_ltugtu_64(env
, src
, dst
);
2163 r
= cc_calc_tm_32(env
, src
, dst
);
2166 r
= cc_calc_tm_64(env
, src
, dst
);
2169 r
= cc_calc_nz(env
, dst
);
2172 r
= cc_calc_add_64(env
, src
, dst
, vr
);
2175 r
= cc_calc_addu_64(env
, src
, dst
, vr
);
2178 r
= cc_calc_sub_64(env
, src
, dst
, vr
);
2181 r
= cc_calc_subu_64(env
, src
, dst
, vr
);
2184 r
= cc_calc_abs_64(env
, dst
);
2187 r
= cc_calc_nabs_64(env
, dst
);
2190 r
= cc_calc_comp_64(env
, dst
);
2194 r
= cc_calc_add_32(env
, src
, dst
, vr
);
2197 r
= cc_calc_addu_32(env
, src
, dst
, vr
);
2200 r
= cc_calc_sub_32(env
, src
, dst
, vr
);
2203 r
= cc_calc_subu_32(env
, src
, dst
, vr
);
2206 r
= cc_calc_abs_64(env
, dst
);
2209 r
= cc_calc_nabs_64(env
, dst
);
2212 r
= cc_calc_comp_32(env
, dst
);
2216 r
= cc_calc_icm_32(env
, src
, dst
);
2219 r
= cc_calc_slag(env
, src
, dst
);
2222 case CC_OP_LTGT_F32
:
2223 r
= set_cc_f32(src
, dst
);
2225 case CC_OP_LTGT_F64
:
2226 r
= set_cc_f64(src
, dst
);
2229 r
= set_cc_nz_f32(dst
);
2232 r
= set_cc_nz_f64(dst
);
2236 cpu_abort(env
, "Unknown CC operation: %s\n", cc_name(cc_op
));
2239 HELPER_LOG("%s: %15s 0x%016lx 0x%016lx 0x%016lx = %d\n", __FUNCTION__
,
2240 cc_name(cc_op
), src
, dst
, vr
, r
);
2244 uint32_t calc_cc(CPUState
*env
, uint32_t cc_op
, uint64_t src
, uint64_t dst
,
2247 return do_calc_cc(env
, cc_op
, src
, dst
, vr
);
2250 uint32_t HELPER(calc_cc
)(uint32_t cc_op
, uint64_t src
, uint64_t dst
,
2253 return do_calc_cc(env
, cc_op
, src
, dst
, vr
);
2256 uint64_t HELPER(cvd
)(int32_t bin
)
2259 uint64_t dec
= 0x0c;
2267 for (shift
= 4; (shift
< 64) && bin
; shift
+= 4) {
2268 int current_number
= bin
% 10;
2270 dec
|= (current_number
) << shift
;
2277 void HELPER(unpk
)(uint32_t len
, uint64_t dest
, uint64_t src
)
2279 int len_dest
= len
>> 4;
2280 int len_src
= len
& 0xf;
2282 int second_nibble
= 0;
2287 /* last byte is special, it only flips the nibbles */
2289 stb(dest
, (b
<< 4) | (b
>> 4));
2293 /* now pad every nibble with 0xf0 */
2295 while (len_dest
> 0) {
2296 uint8_t cur_byte
= 0;
2299 cur_byte
= ldub(src
);
2305 /* only advance one nibble at a time */
2306 if (second_nibble
) {
2311 second_nibble
= !second_nibble
;
2314 cur_byte
= (cur_byte
& 0xf);
2318 stb(dest
, cur_byte
);
2322 void HELPER(tr
)(uint32_t len
, uint64_t array
, uint64_t trans
)
2326 for (i
= 0; i
<= len
; i
++) {
2327 uint8_t byte
= ldub(array
+ i
);
2328 uint8_t new_byte
= ldub(trans
+ byte
);
2329 stb(array
+ i
, new_byte
);
2333 #ifndef CONFIG_USER_ONLY
2335 void HELPER(load_psw
)(uint64_t mask
, uint64_t addr
)
2337 load_psw(env
, mask
, addr
);
2341 static void program_interrupt(CPUState
*env
, uint32_t code
, int ilc
)
2343 qemu_log("program interrupt at %#" PRIx64
"\n", env
->psw
.addr
);
2345 if (kvm_enabled()) {
2346 kvm_s390_interrupt(env
, KVM_S390_PROGRAM_INT
, code
);
2348 env
->int_pgm_code
= code
;
2349 env
->int_pgm_ilc
= ilc
;
2350 env
->exception_index
= EXCP_PGM
;
2355 static void ext_interrupt(CPUState
*env
, int type
, uint32_t param
,
2358 cpu_inject_ext(env
, type
, param
, param64
);
2361 int sclp_service_call(CPUState
*env
, uint32_t sccb
, uint64_t code
)
2367 printf("sclp(0x%x, 0x%" PRIx64
")\n", sccb
, code
);
2370 if (sccb
& ~0x7ffffff8ul
) {
2371 fprintf(stderr
, "KVM: invalid sccb address 0x%x\n", sccb
);
2377 case SCLP_CMDW_READ_SCP_INFO
:
2378 case SCLP_CMDW_READ_SCP_INFO_FORCED
:
2379 while ((ram_size
>> (20 + shift
)) > 65535) {
2382 stw_phys(sccb
+ SCP_MEM_CODE
, ram_size
>> (20 + shift
));
2383 stb_phys(sccb
+ SCP_INCREMENT
, 1 << shift
);
2384 stw_phys(sccb
+ SCP_RESPONSE_CODE
, 0x10);
2386 if (kvm_enabled()) {
2388 kvm_s390_interrupt_internal(env
, KVM_S390_INT_SERVICE
,
2393 ext_interrupt(env
, EXT_SERVICE
, sccb
& ~3, 0);
2398 printf("KVM: invalid sclp call 0x%x / 0x%" PRIx64
"x\n", sccb
, code
);
2408 /* SCLP service call */
2409 uint32_t HELPER(servc
)(uint32_t r1
, uint64_t r2
)
2411 if (sclp_service_call(env
, r1
, r2
)) {
2419 uint64_t HELPER(diag
)(uint32_t num
, uint64_t mem
, uint64_t code
)
2426 r
= s390_virtio_hypercall(env
, mem
, code
);
2442 program_interrupt(env
, PGM_OPERATION
, ILC_LATER_INC
);
2449 void HELPER(stidp
)(uint64_t a1
)
2451 stq(a1
, env
->cpu_num
);
2455 void HELPER(spx
)(uint64_t a1
)
2460 env
->psa
= prefix
& 0xfffff000;
2461 qemu_log("prefix: %#x\n", prefix
);
2462 tlb_flush_page(env
, 0);
2463 tlb_flush_page(env
, TARGET_PAGE_SIZE
);
2467 uint32_t HELPER(sck
)(uint64_t a1
)
2469 /* XXX not implemented - is it necessary? */
2474 static inline uint64_t clock_value(CPUState
*env
)
2478 time
= env
->tod_offset
+
2479 time2tod(qemu_get_clock_ns(vm_clock
) - env
->tod_basetime
);
2485 uint32_t HELPER(stck
)(uint64_t a1
)
2487 stq(a1
, clock_value(env
));
2492 /* Store Clock Extended */
2493 uint32_t HELPER(stcke
)(uint64_t a1
)
2496 /* basically the same value as stck */
2497 stq(a1
+ 1, clock_value(env
) | env
->cpu_num
);
2498 /* more fine grained than stck */
2500 /* XXX programmable fields */
2507 /* Set Clock Comparator */
2508 void HELPER(sckc
)(uint64_t a1
)
2510 uint64_t time
= ldq(a1
);
2512 if (time
== -1ULL) {
2516 /* difference between now and then */
2517 time
-= clock_value(env
);
2519 time
= (time
* 125) >> 9;
2521 qemu_mod_timer(env
->tod_timer
, qemu_get_clock_ns(vm_clock
) + time
);
2524 /* Store Clock Comparator */
2525 void HELPER(stckc
)(uint64_t a1
)
2532 void HELPER(spt
)(uint64_t a1
)
2534 uint64_t time
= ldq(a1
);
2536 if (time
== -1ULL) {
2541 time
= (time
* 125) >> 9;
2543 qemu_mod_timer(env
->cpu_timer
, qemu_get_clock_ns(vm_clock
) + time
);
2546 /* Store CPU Timer */
2547 void HELPER(stpt
)(uint64_t a1
)
2553 /* Store System Information */
2554 uint32_t HELPER(stsi
)(uint64_t a0
, uint32_t r0
, uint32_t r1
)
2559 if ((r0
& STSI_LEVEL_MASK
) <= STSI_LEVEL_3
&&
2560 ((r0
& STSI_R0_RESERVED_MASK
) || (r1
& STSI_R1_RESERVED_MASK
))) {
2561 /* valid function code, invalid reserved bits */
2562 program_interrupt(env
, PGM_SPECIFICATION
, 2);
2565 sel1
= r0
& STSI_R0_SEL1_MASK
;
2566 sel2
= r1
& STSI_R1_SEL2_MASK
;
2568 /* XXX: spec exception if sysib is not 4k-aligned */
2570 switch (r0
& STSI_LEVEL_MASK
) {
2572 if ((sel1
== 1) && (sel2
== 1)) {
2573 /* Basic Machine Configuration */
2574 struct sysib_111 sysib
;
2576 memset(&sysib
, 0, sizeof(sysib
));
2577 ebcdic_put(sysib
.manuf
, "QEMU ", 16);
2578 /* same as machine type number in STORE CPU ID */
2579 ebcdic_put(sysib
.type
, "QEMU", 4);
2580 /* same as model number in STORE CPU ID */
2581 ebcdic_put(sysib
.model
, "QEMU ", 16);
2582 ebcdic_put(sysib
.sequence
, "QEMU ", 16);
2583 ebcdic_put(sysib
.plant
, "QEMU", 4);
2584 cpu_physical_memory_rw(a0
, (uint8_t*)&sysib
, sizeof(sysib
), 1);
2585 } else if ((sel1
== 2) && (sel2
== 1)) {
2586 /* Basic Machine CPU */
2587 struct sysib_121 sysib
;
2589 memset(&sysib
, 0, sizeof(sysib
));
2590 /* XXX make different for different CPUs? */
2591 ebcdic_put(sysib
.sequence
, "QEMUQEMUQEMUQEMU", 16);
2592 ebcdic_put(sysib
.plant
, "QEMU", 4);
2593 stw_p(&sysib
.cpu_addr
, env
->cpu_num
);
2594 cpu_physical_memory_rw(a0
, (uint8_t*)&sysib
, sizeof(sysib
), 1);
2595 } else if ((sel1
== 2) && (sel2
== 2)) {
2596 /* Basic Machine CPUs */
2597 struct sysib_122 sysib
;
2599 memset(&sysib
, 0, sizeof(sysib
));
2600 stl_p(&sysib
.capability
, 0x443afc29);
2601 /* XXX change when SMP comes */
2602 stw_p(&sysib
.total_cpus
, 1);
2603 stw_p(&sysib
.active_cpus
, 1);
2604 stw_p(&sysib
.standby_cpus
, 0);
2605 stw_p(&sysib
.reserved_cpus
, 0);
2606 cpu_physical_memory_rw(a0
, (uint8_t*)&sysib
, sizeof(sysib
), 1);
2613 if ((sel1
== 2) && (sel2
== 1)) {
2615 struct sysib_221 sysib
;
2617 memset(&sysib
, 0, sizeof(sysib
));
2618 /* XXX make different for different CPUs? */
2619 ebcdic_put(sysib
.sequence
, "QEMUQEMUQEMUQEMU", 16);
2620 ebcdic_put(sysib
.plant
, "QEMU", 4);
2621 stw_p(&sysib
.cpu_addr
, env
->cpu_num
);
2622 stw_p(&sysib
.cpu_id
, 0);
2623 cpu_physical_memory_rw(a0
, (uint8_t*)&sysib
, sizeof(sysib
), 1);
2624 } else if ((sel1
== 2) && (sel2
== 2)) {
2626 struct sysib_222 sysib
;
2628 memset(&sysib
, 0, sizeof(sysib
));
2629 stw_p(&sysib
.lpar_num
, 0);
2631 /* XXX change when SMP comes */
2632 stw_p(&sysib
.total_cpus
, 1);
2633 stw_p(&sysib
.conf_cpus
, 1);
2634 stw_p(&sysib
.standby_cpus
, 0);
2635 stw_p(&sysib
.reserved_cpus
, 0);
2636 ebcdic_put(sysib
.name
, "QEMU ", 8);
2637 stl_p(&sysib
.caf
, 1000);
2638 stw_p(&sysib
.dedicated_cpus
, 0);
2639 stw_p(&sysib
.shared_cpus
, 0);
2640 cpu_physical_memory_rw(a0
, (uint8_t*)&sysib
, sizeof(sysib
), 1);
2648 if ((sel1
== 2) && (sel2
== 2)) {
2650 struct sysib_322 sysib
;
2652 memset(&sysib
, 0, sizeof(sysib
));
2654 /* XXX change when SMP comes */
2655 stw_p(&sysib
.vm
[0].total_cpus
, 1);
2656 stw_p(&sysib
.vm
[0].conf_cpus
, 1);
2657 stw_p(&sysib
.vm
[0].standby_cpus
, 0);
2658 stw_p(&sysib
.vm
[0].reserved_cpus
, 0);
2659 ebcdic_put(sysib
.vm
[0].name
, "KVMguest", 8);
2660 stl_p(&sysib
.vm
[0].caf
, 1000);
2661 ebcdic_put(sysib
.vm
[0].cpi
, "KVM/Linux ", 16);
2662 cpu_physical_memory_rw(a0
, (uint8_t*)&sysib
, sizeof(sysib
), 1);
2668 case STSI_LEVEL_CURRENT
:
2669 env
->regs
[0] = STSI_LEVEL_3
;
2679 void HELPER(lctlg
)(uint32_t r1
, uint64_t a2
, uint32_t r3
)
2684 for (i
= r1
;; i
= (i
+ 1) % 16) {
2685 env
->cregs
[i
] = ldq(src
);
2686 HELPER_LOG("load ctl %d from 0x%" PRIx64
" == 0x%" PRIx64
"\n",
2687 i
, src
, env
->cregs
[i
]);
2688 src
+= sizeof(uint64_t);
2698 void HELPER(lctl
)(uint32_t r1
, uint64_t a2
, uint32_t r3
)
2703 for (i
= r1
;; i
= (i
+ 1) % 16) {
2704 env
->cregs
[i
] = (env
->cregs
[i
] & 0xFFFFFFFF00000000ULL
) | ldl(src
);
2705 src
+= sizeof(uint32_t);
2715 void HELPER(stctg
)(uint32_t r1
, uint64_t a2
, uint32_t r3
)
2720 for (i
= r1
;; i
= (i
+ 1) % 16) {
2721 stq(dest
, env
->cregs
[i
]);
2722 dest
+= sizeof(uint64_t);
2730 void HELPER(stctl
)(uint32_t r1
, uint64_t a2
, uint32_t r3
)
2735 for (i
= r1
;; i
= (i
+ 1) % 16) {
2736 stl(dest
, env
->cregs
[i
]);
2737 dest
+= sizeof(uint32_t);
2745 uint32_t HELPER(tprot
)(uint64_t a1
, uint64_t a2
)
2752 /* insert storage key extended */
2753 uint64_t HELPER(iske
)(uint64_t r2
)
2755 uint64_t addr
= get_address(0, 0, r2
);
2757 if (addr
> ram_size
) {
2761 /* XXX maybe use qemu's internal keys? */
2762 return env
->storage_keys
[addr
/ TARGET_PAGE_SIZE
];
2765 /* set storage key extended */
2766 void HELPER(sske
)(uint32_t r1
, uint64_t r2
)
2768 uint64_t addr
= get_address(0, 0, r2
);
2770 if (addr
> ram_size
) {
2774 env
->storage_keys
[addr
/ TARGET_PAGE_SIZE
] = r1
;
2777 /* reset reference bit extended */
2778 uint32_t HELPER(rrbe
)(uint32_t r1
, uint64_t r2
)
2780 if (r2
> ram_size
) {
2786 env
->storage_keys
[r2
/ TARGET_PAGE_SIZE
] &= ~SK_REFERENCED
;
2792 * 0 Reference bit zero; change bit zero
2793 * 1 Reference bit zero; change bit one
2794 * 2 Reference bit one; change bit zero
2795 * 3 Reference bit one; change bit one
2800 /* compare and swap and purge */
2801 uint32_t HELPER(csp
)(uint32_t r1
, uint32_t r2
)
2804 uint32_t o1
= env
->regs
[r1
];
2805 uint64_t a2
= get_address_31fix(r2
) & ~3ULL;
2806 uint32_t o2
= ldl(a2
);
2809 stl(a2
, env
->regs
[(r1
+ 1) & 15]);
2810 if (env
->regs
[r2
] & 0x3) {
2811 /* flush TLB / ALB */
2816 env
->regs
[r1
] = (env
->regs
[r1
] & 0xffffffff00000000ULL
) | o2
;
2823 static uint32_t mvc_asc(int64_t l
, uint64_t a1
, uint64_t mode1
, uint64_t a2
,
2826 target_ulong src
, dest
;
2827 int flags
, cc
= 0, i
;
2831 } else if (l
> 256) {
2837 if (mmu_translate(env
, a1
& TARGET_PAGE_MASK
, 1, mode1
, &dest
, &flags
)) {
2840 dest
|= a1
& ~TARGET_PAGE_MASK
;
2842 if (mmu_translate(env
, a2
& TARGET_PAGE_MASK
, 0, mode2
, &src
, &flags
)) {
2845 src
|= a2
& ~TARGET_PAGE_MASK
;
2847 /* XXX replace w/ memcpy */
2848 for (i
= 0; i
< l
; i
++) {
2849 /* XXX be more clever */
2850 if ((((dest
+ i
) & TARGET_PAGE_MASK
) != (dest
& TARGET_PAGE_MASK
)) ||
2851 (((src
+ i
) & TARGET_PAGE_MASK
) != (src
& TARGET_PAGE_MASK
))) {
2852 mvc_asc(l
- i
, a1
+ i
, mode1
, a2
+ i
, mode2
);
2855 stb_phys(dest
+ i
, ldub_phys(src
+ i
));
2861 uint32_t HELPER(mvcs
)(uint64_t l
, uint64_t a1
, uint64_t a2
)
2863 HELPER_LOG("%s: %16" PRIx64
" %16" PRIx64
" %16" PRIx64
"\n",
2864 __FUNCTION__
, l
, a1
, a2
);
2866 return mvc_asc(l
, a1
, PSW_ASC_SECONDARY
, a2
, PSW_ASC_PRIMARY
);
2869 uint32_t HELPER(mvcp
)(uint64_t l
, uint64_t a1
, uint64_t a2
)
2871 HELPER_LOG("%s: %16" PRIx64
" %16" PRIx64
" %16" PRIx64
"\n",
2872 __FUNCTION__
, l
, a1
, a2
);
2874 return mvc_asc(l
, a1
, PSW_ASC_PRIMARY
, a2
, PSW_ASC_SECONDARY
);
2877 uint32_t HELPER(sigp
)(uint64_t order_code
, uint32_t r1
, uint64_t cpu_addr
)
2881 HELPER_LOG("%s: %016" PRIx64
" %08x %016" PRIx64
"\n",
2882 __FUNCTION__
, order_code
, r1
, cpu_addr
);
2884 /* Remember: Use "R1 or R1+1, whichever is the odd-numbered register"
2885 as parameter (input). Status (output) is always R1. */
2887 switch (order_code
) {
2892 /* enumerate CPU status */
2894 /* XXX implement when SMP comes */
2897 env
->regs
[r1
] &= 0xffffffff00000000ULL
;
2902 fprintf(stderr
, "XXX unknown sigp: 0x%" PRIx64
"\n", order_code
);
2909 void HELPER(sacf
)(uint64_t a1
)
2911 HELPER_LOG("%s: %16" PRIx64
"\n", __FUNCTION__
, a1
);
2913 switch (a1
& 0xf00) {
2915 env
->psw
.mask
&= ~PSW_MASK_ASC
;
2916 env
->psw
.mask
|= PSW_ASC_PRIMARY
;
2919 env
->psw
.mask
&= ~PSW_MASK_ASC
;
2920 env
->psw
.mask
|= PSW_ASC_SECONDARY
;
2923 env
->psw
.mask
&= ~PSW_MASK_ASC
;
2924 env
->psw
.mask
|= PSW_ASC_HOME
;
2927 qemu_log("unknown sacf mode: %" PRIx64
"\n", a1
);
2928 program_interrupt(env
, PGM_SPECIFICATION
, 2);
2933 /* invalidate pte */
2934 void HELPER(ipte
)(uint64_t pte_addr
, uint64_t vaddr
)
2936 uint64_t page
= vaddr
& TARGET_PAGE_MASK
;
2939 /* XXX broadcast to other CPUs */
2941 /* XXX Linux is nice enough to give us the exact pte address.
2942 According to spec we'd have to find it out ourselves */
2943 /* XXX Linux is fine with overwriting the pte, the spec requires
2944 us to only set the invalid bit */
2945 stq_phys(pte_addr
, pte
| _PAGE_INVALID
);
2947 /* XXX we exploit the fact that Linux passes the exact virtual
2948 address here - it's not obliged to! */
2949 tlb_flush_page(env
, page
);
2952 /* flush local tlb */
2953 void HELPER(ptlb
)(void)
2958 /* store using real address */
2959 void HELPER(stura
)(uint64_t addr
, uint32_t v1
)
2961 stw_phys(get_address(0, 0, addr
), v1
);
2964 /* load real address */
2965 uint32_t HELPER(lra
)(uint64_t addr
, uint32_t r1
)
2968 int old_exc
= env
->exception_index
;
2969 uint64_t asc
= env
->psw
.mask
& PSW_MASK_ASC
;
2973 /* XXX incomplete - has more corner cases */
2974 if (!(env
->psw
.mask
& PSW_MASK_64
) && (addr
>> 32)) {
2975 program_interrupt(env
, PGM_SPECIAL_OP
, 2);
2978 env
->exception_index
= old_exc
;
2979 if (mmu_translate(env
, addr
, 0, asc
, &ret
, &flags
)) {
2982 if (env
->exception_index
== EXCP_PGM
) {
2983 ret
= env
->int_pgm_code
| 0x80000000;
2985 ret
|= addr
& ~TARGET_PAGE_MASK
;
2987 env
->exception_index
= old_exc
;
2989 if (!(env
->psw
.mask
& PSW_MASK_64
)) {
2990 env
->regs
[r1
] = (env
->regs
[r1
] & 0xffffffff00000000ULL
) | (ret
& 0xffffffffULL
);
2992 env
->regs
[r1
] = ret
;