/*
 *  S/390 helper routines
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2009 Alexander Graf
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
22 #include "dyngen-exec.h"
23 #include "host-utils.h"
27 #include "qemu-timer.h"
29 #include <linux/kvm.h>
32 /*****************************************************************************/
34 #if !defined (CONFIG_USER_ONLY)
35 #include "softmmu_exec.h"
37 #define MMUSUFFIX _mmu
40 #include "softmmu_template.h"
43 #include "softmmu_template.h"
46 #include "softmmu_template.h"
49 #include "softmmu_template.h"
51 /* try to fill the TLB and return an exception if error. If retaddr is
52 NULL, it means that the function was called in C code (i.e. not
53 from generated code or from helper.c) */
54 /* XXX: fix it to restore all registers */
55 void tlb_fill (target_ulong addr
, int is_write
, int mmu_idx
, void *retaddr
)
62 /* XXX: hack to restore env in all cases, even if not called from
66 ret
= cpu_s390x_handle_mmu_fault(env
, addr
, is_write
, mmu_idx
);
67 if (unlikely(ret
!= 0)) {
68 if (likely(retaddr
)) {
69 /* now we have a real cpu fault */
70 pc
= (unsigned long)retaddr
;
73 /* the PC is inside the translated code. It means that we have
74 a virtual CPU fault */
75 cpu_restore_state(tb
, env
, pc
);
85 /* #define DEBUG_HELPER */
87 #define HELPER_LOG(x...) qemu_log(x)
89 #define HELPER_LOG(x...)
92 /* raise an exception */
93 void HELPER(exception
)(uint32_t excp
)
95 HELPER_LOG("%s: exception %d\n", __FUNCTION__
, excp
);
96 env
->exception_index
= excp
;
100 #ifndef CONFIG_USER_ONLY
101 static void mvc_fast_memset(CPUState
*env
, uint32_t l
, uint64_t dest
,
104 target_phys_addr_t dest_phys
;
105 target_phys_addr_t len
= l
;
107 uint64_t asc
= env
->psw
.mask
& PSW_MASK_ASC
;
110 if (mmu_translate(env
, dest
, 1, asc
, &dest_phys
, &flags
)) {
112 cpu_abort(env
, "should never reach here");
114 dest_phys
|= dest
& ~TARGET_PAGE_MASK
;
116 dest_p
= cpu_physical_memory_map(dest_phys
, &len
, 1);
118 memset(dest_p
, byte
, len
);
120 cpu_physical_memory_unmap(dest_p
, 1, len
, len
);
123 static void mvc_fast_memmove(CPUState
*env
, uint32_t l
, uint64_t dest
,
126 target_phys_addr_t dest_phys
;
127 target_phys_addr_t src_phys
;
128 target_phys_addr_t len
= l
;
131 uint64_t asc
= env
->psw
.mask
& PSW_MASK_ASC
;
134 if (mmu_translate(env
, dest
, 1, asc
, &dest_phys
, &flags
)) {
136 cpu_abort(env
, "should never reach here");
138 dest_phys
|= dest
& ~TARGET_PAGE_MASK
;
140 if (mmu_translate(env
, src
, 0, asc
, &src_phys
, &flags
)) {
142 cpu_abort(env
, "should never reach here");
144 src_phys
|= src
& ~TARGET_PAGE_MASK
;
146 dest_p
= cpu_physical_memory_map(dest_phys
, &len
, 1);
147 src_p
= cpu_physical_memory_map(src_phys
, &len
, 0);
149 memmove(dest_p
, src_p
, len
);
151 cpu_physical_memory_unmap(dest_p
, 1, len
, len
);
152 cpu_physical_memory_unmap(src_p
, 0, len
, len
);
157 uint32_t HELPER(nc
)(uint32_t l
, uint64_t dest
, uint64_t src
)
163 HELPER_LOG("%s l %d dest %" PRIx64
" src %" PRIx64
"\n",
164 __FUNCTION__
, l
, dest
, src
);
165 for (i
= 0; i
<= l
; i
++) {
166 x
= ldub(dest
+ i
) & ldub(src
+ i
);
176 uint32_t HELPER(xc
)(uint32_t l
, uint64_t dest
, uint64_t src
)
182 HELPER_LOG("%s l %d dest %" PRIx64
" src %" PRIx64
"\n",
183 __FUNCTION__
, l
, dest
, src
);
185 #ifndef CONFIG_USER_ONLY
186 /* xor with itself is the same as memset(0) */
187 if ((l
> 32) && (src
== dest
) &&
188 (src
& TARGET_PAGE_MASK
) == ((src
+ l
) & TARGET_PAGE_MASK
)) {
189 mvc_fast_memset(env
, l
+ 1, dest
, 0);
194 memset(g2h(dest
), 0, l
+ 1);
199 for (i
= 0; i
<= l
; i
++) {
200 x
= ldub(dest
+ i
) ^ ldub(src
+ i
);
210 uint32_t HELPER(oc
)(uint32_t l
, uint64_t dest
, uint64_t src
)
216 HELPER_LOG("%s l %d dest %" PRIx64
" src %" PRIx64
"\n",
217 __FUNCTION__
, l
, dest
, src
);
218 for (i
= 0; i
<= l
; i
++) {
219 x
= ldub(dest
+ i
) | ldub(src
+ i
);
229 void HELPER(mvc
)(uint32_t l
, uint64_t dest
, uint64_t src
)
233 uint32_t l_64
= (l
+ 1) / 8;
235 HELPER_LOG("%s l %d dest %" PRIx64
" src %" PRIx64
"\n",
236 __FUNCTION__
, l
, dest
, src
);
238 #ifndef CONFIG_USER_ONLY
240 (src
& TARGET_PAGE_MASK
) == ((src
+ l
) & TARGET_PAGE_MASK
) &&
241 (dest
& TARGET_PAGE_MASK
) == ((dest
+ l
) & TARGET_PAGE_MASK
)) {
242 if (dest
== (src
+ 1)) {
243 mvc_fast_memset(env
, l
+ 1, dest
, ldub(src
));
245 } else if ((src
& TARGET_PAGE_MASK
) != (dest
& TARGET_PAGE_MASK
)) {
246 mvc_fast_memmove(env
, l
+ 1, dest
, src
);
251 if (dest
== (src
+ 1)) {
252 memset(g2h(dest
), ldub(src
), l
+ 1);
255 memmove(g2h(dest
), g2h(src
), l
+ 1);
260 /* handle the parts that fit into 8-byte loads/stores */
261 if (dest
!= (src
+ 1)) {
262 for (i
= 0; i
< l_64
; i
++) {
263 stq(dest
+ x
, ldq(src
+ x
));
268 /* slow version crossing pages with byte accesses */
269 for (i
= x
; i
<= l
; i
++) {
270 stb(dest
+ i
, ldub(src
+ i
));
274 /* compare unsigned byte arrays */
275 uint32_t HELPER(clc
)(uint32_t l
, uint64_t s1
, uint64_t s2
)
280 HELPER_LOG("%s l %d s1 %" PRIx64
" s2 %" PRIx64
"\n",
281 __FUNCTION__
, l
, s1
, s2
);
282 for (i
= 0; i
<= l
; i
++) {
285 HELPER_LOG("%02x (%c)/%02x (%c) ", x
, x
, y
, y
);
300 /* compare logical under mask */
301 uint32_t HELPER(clm
)(uint32_t r1
, uint32_t mask
, uint64_t addr
)
305 HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%" PRIx64
"\n", __FUNCTION__
, r1
,
311 r
= (r1
& 0xff000000UL
) >> 24;
312 HELPER_LOG("mask 0x%x %02x/%02x (0x%" PRIx64
") ", mask
, r
, d
,
323 mask
= (mask
<< 1) & 0xf;
330 /* store character under mask */
331 void HELPER(stcm
)(uint32_t r1
, uint32_t mask
, uint64_t addr
)
334 HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%lx\n", __FUNCTION__
, r1
, mask
,
338 r
= (r1
& 0xff000000UL
) >> 24;
340 HELPER_LOG("mask 0x%x %02x (0x%lx) ", mask
, r
, addr
);
343 mask
= (mask
<< 1) & 0xf;
349 /* 64/64 -> 128 unsigned multiplication */
350 void HELPER(mlg
)(uint32_t r1
, uint64_t v2
)
352 #if HOST_LONG_BITS == 64 && defined(__GNUC__)
353 /* assuming 64-bit hosts have __uint128_t */
354 __uint128_t res
= (__uint128_t
)env
->regs
[r1
+ 1];
355 res
*= (__uint128_t
)v2
;
356 env
->regs
[r1
] = (uint64_t)(res
>> 64);
357 env
->regs
[r1
+ 1] = (uint64_t)res
;
359 mulu64(&env
->regs
[r1
+ 1], &env
->regs
[r1
], env
->regs
[r1
+ 1], v2
);
363 /* 128 -> 64/64 unsigned division */
364 void HELPER(dlg
)(uint32_t r1
, uint64_t v2
)
366 uint64_t divisor
= v2
;
368 if (!env
->regs
[r1
]) {
369 /* 64 -> 64/64 case */
370 env
->regs
[r1
] = env
->regs
[r1
+1] % divisor
;
371 env
->regs
[r1
+1] = env
->regs
[r1
+1] / divisor
;
375 #if HOST_LONG_BITS == 64 && defined(__GNUC__)
376 /* assuming 64-bit hosts have __uint128_t */
377 __uint128_t dividend
= (((__uint128_t
)env
->regs
[r1
]) << 64) |
379 __uint128_t quotient
= dividend
/ divisor
;
380 env
->regs
[r1
+1] = quotient
;
381 __uint128_t remainder
= dividend
% divisor
;
382 env
->regs
[r1
] = remainder
;
384 /* 32-bit hosts would need special wrapper functionality - just abort if
385 we encounter such a case; it's very unlikely anyways. */
386 cpu_abort(env
, "128 -> 64/64 division not implemented\n");
391 static inline uint64_t get_address(int x2
, int b2
, int d2
)
404 if (!(env
->psw
.mask
& PSW_MASK_64
)) {
411 static inline uint64_t get_address_31fix(int reg
)
413 uint64_t r
= env
->regs
[reg
];
416 if (!(env
->psw
.mask
& PSW_MASK_64
)) {
423 /* search string (c is byte to search, r2 is string, r1 end of string) */
424 uint32_t HELPER(srst
)(uint32_t c
, uint32_t r1
, uint32_t r2
)
428 uint64_t str
= get_address_31fix(r2
);
429 uint64_t end
= get_address_31fix(r1
);
431 HELPER_LOG("%s: c %d *r1 0x%" PRIx64
" *r2 0x%" PRIx64
"\n", __FUNCTION__
,
432 c
, env
->regs
[r1
], env
->regs
[r2
]);
434 for (i
= str
; i
!= end
; i
++) {
445 /* unsigned string compare (c is string terminator) */
446 uint32_t HELPER(clst
)(uint32_t c
, uint32_t r1
, uint32_t r2
)
448 uint64_t s1
= get_address_31fix(r1
);
449 uint64_t s2
= get_address_31fix(r2
);
453 #ifdef CONFIG_USER_ONLY
455 HELPER_LOG("%s: comparing '%s' and '%s'\n",
456 __FUNCTION__
, (char*)g2h(s1
), (char*)g2h(s2
));
462 if ((v1
== c
|| v2
== c
) || (v1
!= v2
)) {
472 cc
= (v1
< v2
) ? 1 : 2;
473 /* FIXME: 31-bit mode! */
481 void HELPER(mvpg
)(uint64_t r0
, uint64_t r1
, uint64_t r2
)
483 /* XXX missing r0 handling */
484 #ifdef CONFIG_USER_ONLY
487 for (i
= 0; i
< TARGET_PAGE_SIZE
; i
++) {
488 stb(r1
+ i
, ldub(r2
+ i
));
491 mvc_fast_memmove(env
, TARGET_PAGE_SIZE
, r1
, r2
);
495 /* string copy (c is string terminator) */
496 void HELPER(mvst
)(uint32_t c
, uint32_t r1
, uint32_t r2
)
498 uint64_t dest
= get_address_31fix(r1
);
499 uint64_t src
= get_address_31fix(r2
);
502 #ifdef CONFIG_USER_ONLY
504 HELPER_LOG("%s: copy '%s' to 0x%lx\n", __FUNCTION__
, (char*)g2h(src
),
517 env
->regs
[r1
] = dest
; /* FIXME: 31-bit mode! */
520 /* compare and swap 64-bit */
521 uint32_t HELPER(csg
)(uint32_t r1
, uint64_t a2
, uint32_t r3
)
523 /* FIXME: locking? */
525 uint64_t v2
= ldq(a2
);
526 if (env
->regs
[r1
] == v2
) {
528 stq(a2
, env
->regs
[r3
]);
536 /* compare double and swap 64-bit */
537 uint32_t HELPER(cdsg
)(uint32_t r1
, uint64_t a2
, uint32_t r3
)
539 /* FIXME: locking? */
541 uint64_t v2_hi
= ldq(a2
);
542 uint64_t v2_lo
= ldq(a2
+ 8);
543 uint64_t v1_hi
= env
->regs
[r1
];
544 uint64_t v1_lo
= env
->regs
[r1
+ 1];
546 if ((v1_hi
== v2_hi
) && (v1_lo
== v2_lo
)) {
548 stq(a2
, env
->regs
[r3
]);
549 stq(a2
+ 8, env
->regs
[r3
+ 1]);
552 env
->regs
[r1
] = v2_hi
;
553 env
->regs
[r1
+ 1] = v2_lo
;
559 /* compare and swap 32-bit */
560 uint32_t HELPER(cs
)(uint32_t r1
, uint64_t a2
, uint32_t r3
)
562 /* FIXME: locking? */
564 HELPER_LOG("%s: r1 %d a2 0x%lx r3 %d\n", __FUNCTION__
, r1
, a2
, r3
);
565 uint32_t v2
= ldl(a2
);
566 if (((uint32_t)env
->regs
[r1
]) == v2
) {
568 stl(a2
, (uint32_t)env
->regs
[r3
]);
571 env
->regs
[r1
] = (env
->regs
[r1
] & 0xffffffff00000000ULL
) | v2
;
576 static uint32_t helper_icm(uint32_t r1
, uint64_t address
, uint32_t mask
)
578 int pos
= 24; /* top of the lower half of r1 */
579 uint64_t rmask
= 0xff000000ULL
;
586 env
->regs
[r1
] &= ~rmask
;
588 if ((val
& 0x80) && !ccd
) {
592 if (val
&& cc
== 0) {
595 env
->regs
[r1
] |= (uint64_t)val
<< pos
;
598 mask
= (mask
<< 1) & 0xf;
606 /* execute instruction
607 this instruction executes an insn modified with the contents of r1
608 it does not change the executed instruction in memory
609 it does not change the program counter
610 in other words: tricky...
611 currently implemented by interpreting the cases it is most commonly used in
613 uint32_t HELPER(ex
)(uint32_t cc
, uint64_t v1
, uint64_t addr
, uint64_t ret
)
615 uint16_t insn
= lduw_code(addr
);
616 HELPER_LOG("%s: v1 0x%lx addr 0x%lx insn 0x%x\n", __FUNCTION__
, v1
, addr
,
618 if ((insn
& 0xf0ff) == 0xd000) {
619 uint32_t l
, insn2
, b1
, b2
, d1
, d2
;
621 insn2
= ldl_code(addr
+ 2);
622 b1
= (insn2
>> 28) & 0xf;
623 b2
= (insn2
>> 12) & 0xf;
624 d1
= (insn2
>> 16) & 0xfff;
626 switch (insn
& 0xf00) {
628 helper_mvc(l
, get_address(0, b1
, d1
), get_address(0, b2
, d2
));
631 cc
= helper_clc(l
, get_address(0, b1
, d1
), get_address(0, b2
, d2
));
634 cc
= helper_xc(l
, get_address(0, b1
, d1
), get_address(0, b2
, d2
));
640 } else if ((insn
& 0xff00) == 0x0a00) {
641 /* supervisor call */
642 HELPER_LOG("%s: svc %ld via execute\n", __FUNCTION__
, (insn
|v1
) & 0xff);
643 env
->psw
.addr
= ret
- 4;
644 env
->int_svc_code
= (insn
|v1
) & 0xff;
645 env
->int_svc_ilc
= 4;
646 helper_exception(EXCP_SVC
);
647 } else if ((insn
& 0xff00) == 0xbf00) {
648 uint32_t insn2
, r1
, r3
, b2
, d2
;
649 insn2
= ldl_code(addr
+ 2);
650 r1
= (insn2
>> 20) & 0xf;
651 r3
= (insn2
>> 16) & 0xf;
652 b2
= (insn2
>> 12) & 0xf;
654 cc
= helper_icm(r1
, get_address(0, b2
, d2
), r3
);
657 cpu_abort(env
, "EXECUTE on instruction prefix 0x%x not implemented\n",
663 /* absolute value 32-bit */
664 uint32_t HELPER(abs_i32
)(int32_t val
)
673 /* negative absolute value 32-bit */
674 int32_t HELPER(nabs_i32
)(int32_t val
)
683 /* absolute value 64-bit */
684 uint64_t HELPER(abs_i64
)(int64_t val
)
686 HELPER_LOG("%s: val 0x%" PRIx64
"\n", __FUNCTION__
, val
);
695 /* negative absolute value 64-bit */
696 int64_t HELPER(nabs_i64
)(int64_t val
)
705 /* add with carry 32-bit unsigned */
706 uint32_t HELPER(addc_u32
)(uint32_t cc
, uint32_t v1
, uint32_t v2
)
718 /* store character under mask high operates on the upper half of r1 */
719 void HELPER(stcmh
)(uint32_t r1
, uint64_t address
, uint32_t mask
)
721 int pos
= 56; /* top of the upper half of r1 */
725 stb(address
, (env
->regs
[r1
] >> pos
) & 0xff);
728 mask
= (mask
<< 1) & 0xf;
733 /* insert character under mask high; same as icm, but operates on the
735 uint32_t HELPER(icmh
)(uint32_t r1
, uint64_t address
, uint32_t mask
)
737 int pos
= 56; /* top of the upper half of r1 */
738 uint64_t rmask
= 0xff00000000000000ULL
;
745 env
->regs
[r1
] &= ~rmask
;
747 if ((val
& 0x80) && !ccd
) {
751 if (val
&& cc
== 0) {
754 env
->regs
[r1
] |= (uint64_t)val
<< pos
;
757 mask
= (mask
<< 1) & 0xf;
765 /* insert psw mask and condition code into r1 */
766 void HELPER(ipm
)(uint32_t cc
, uint32_t r1
)
768 uint64_t r
= env
->regs
[r1
];
770 r
&= 0xffffffff00ffffffULL
;
771 r
|= (cc
<< 28) | ( (env
->psw
.mask
>> 40) & 0xf );
773 HELPER_LOG("%s: cc %d psw.mask 0x%lx r1 0x%lx\n", __FUNCTION__
,
774 cc
, env
->psw
.mask
, r
);
777 /* load access registers r1 to r3 from memory at a2 */
778 void HELPER(lam
)(uint32_t r1
, uint64_t a2
, uint32_t r3
)
782 for (i
= r1
;; i
= (i
+ 1) % 16) {
783 env
->aregs
[i
] = ldl(a2
);
792 /* store access registers r1 to r3 in memory at a2 */
793 void HELPER(stam
)(uint32_t r1
, uint64_t a2
, uint32_t r3
)
797 for (i
= r1
;; i
= (i
+ 1) % 16) {
798 stl(a2
, env
->aregs
[i
]);
808 uint32_t HELPER(mvcl
)(uint32_t r1
, uint32_t r2
)
810 uint64_t destlen
= env
->regs
[r1
+ 1] & 0xffffff;
811 uint64_t dest
= get_address_31fix(r1
);
812 uint64_t srclen
= env
->regs
[r2
+ 1] & 0xffffff;
813 uint64_t src
= get_address_31fix(r2
);
814 uint8_t pad
= src
>> 24;
818 if (destlen
== srclen
) {
820 } else if (destlen
< srclen
) {
826 if (srclen
> destlen
) {
830 for (; destlen
&& srclen
; src
++, dest
++, destlen
--, srclen
--) {
835 for (; destlen
; dest
++, destlen
--) {
839 env
->regs
[r1
+ 1] = destlen
;
840 /* can't use srclen here, we trunc'ed it */
841 env
->regs
[r2
+ 1] -= src
- env
->regs
[r2
];
842 env
->regs
[r1
] = dest
;
848 /* move long extended another memcopy insn with more bells and whistles */
849 uint32_t HELPER(mvcle
)(uint32_t r1
, uint64_t a2
, uint32_t r3
)
851 uint64_t destlen
= env
->regs
[r1
+ 1];
852 uint64_t dest
= env
->regs
[r1
];
853 uint64_t srclen
= env
->regs
[r3
+ 1];
854 uint64_t src
= env
->regs
[r3
];
855 uint8_t pad
= a2
& 0xff;
859 if (!(env
->psw
.mask
& PSW_MASK_64
)) {
860 destlen
= (uint32_t)destlen
;
861 srclen
= (uint32_t)srclen
;
866 if (destlen
== srclen
) {
868 } else if (destlen
< srclen
) {
874 if (srclen
> destlen
) {
878 for (; destlen
&& srclen
; src
++, dest
++, destlen
--, srclen
--) {
883 for (; destlen
; dest
++, destlen
--) {
887 env
->regs
[r1
+ 1] = destlen
;
888 /* can't use srclen here, we trunc'ed it */
889 /* FIXME: 31-bit mode! */
890 env
->regs
[r3
+ 1] -= src
- env
->regs
[r3
];
891 env
->regs
[r1
] = dest
;
897 /* compare logical long extended memcompare insn with padding */
898 uint32_t HELPER(clcle
)(uint32_t r1
, uint64_t a2
, uint32_t r3
)
900 uint64_t destlen
= env
->regs
[r1
+ 1];
901 uint64_t dest
= get_address_31fix(r1
);
902 uint64_t srclen
= env
->regs
[r3
+ 1];
903 uint64_t src
= get_address_31fix(r3
);
904 uint8_t pad
= a2
& 0xff;
905 uint8_t v1
= 0,v2
= 0;
908 if (!(destlen
|| srclen
)) {
912 if (srclen
> destlen
) {
916 for (; destlen
|| srclen
; src
++, dest
++, destlen
--, srclen
--) {
917 v1
= srclen
? ldub(src
) : pad
;
918 v2
= destlen
? ldub(dest
) : pad
;
920 cc
= (v1
< v2
) ? 1 : 2;
925 env
->regs
[r1
+ 1] = destlen
;
926 /* can't use srclen here, we trunc'ed it */
927 env
->regs
[r3
+ 1] -= src
- env
->regs
[r3
];
928 env
->regs
[r1
] = dest
;
934 /* subtract unsigned v2 from v1 with borrow */
935 uint32_t HELPER(slb
)(uint32_t cc
, uint32_t r1
, uint32_t v2
)
937 uint32_t v1
= env
->regs
[r1
];
938 uint32_t res
= v1
+ (~v2
) + (cc
>> 1);
940 env
->regs
[r1
] = (env
->regs
[r1
] & 0xffffffff00000000ULL
) | res
;
949 /* subtract unsigned v2 from v1 with borrow */
950 uint32_t HELPER(slbg
)(uint32_t cc
, uint32_t r1
, uint64_t v1
, uint64_t v2
)
952 uint64_t res
= v1
+ (~v2
) + (cc
>> 1);
963 static inline int float_comp_to_cc(int float_compare
)
965 switch (float_compare
) {
966 case float_relation_equal
:
968 case float_relation_less
:
970 case float_relation_greater
:
972 case float_relation_unordered
:
975 cpu_abort(env
, "unknown return value for float compare\n");
979 /* condition codes for binary FP ops */
980 static uint32_t set_cc_f32(float32 v1
, float32 v2
)
982 return float_comp_to_cc(float32_compare_quiet(v1
, v2
, &env
->fpu_status
));
985 static uint32_t set_cc_f64(float64 v1
, float64 v2
)
987 return float_comp_to_cc(float64_compare_quiet(v1
, v2
, &env
->fpu_status
));
990 /* condition codes for unary FP ops */
991 static uint32_t set_cc_nz_f32(float32 v
)
993 if (float32_is_any_nan(v
)) {
995 } else if (float32_is_zero(v
)) {
997 } else if (float32_is_neg(v
)) {
1004 static uint32_t set_cc_nz_f64(float64 v
)
1006 if (float64_is_any_nan(v
)) {
1008 } else if (float64_is_zero(v
)) {
1010 } else if (float64_is_neg(v
)) {
1017 static uint32_t set_cc_nz_f128(float128 v
)
1019 if (float128_is_any_nan(v
)) {
1021 } else if (float128_is_zero(v
)) {
1023 } else if (float128_is_neg(v
)) {
1030 /* convert 32-bit int to 64-bit float */
1031 void HELPER(cdfbr
)(uint32_t f1
, int32_t v2
)
1033 HELPER_LOG("%s: converting %d to f%d\n", __FUNCTION__
, v2
, f1
);
1034 env
->fregs
[f1
].d
= int32_to_float64(v2
, &env
->fpu_status
);
1037 /* convert 32-bit int to 128-bit float */
1038 void HELPER(cxfbr
)(uint32_t f1
, int32_t v2
)
1041 v1
.q
= int32_to_float128(v2
, &env
->fpu_status
);
1042 env
->fregs
[f1
].ll
= v1
.ll
.upper
;
1043 env
->fregs
[f1
+ 2].ll
= v1
.ll
.lower
;
1046 /* convert 64-bit int to 32-bit float */
1047 void HELPER(cegbr
)(uint32_t f1
, int64_t v2
)
1049 HELPER_LOG("%s: converting %ld to f%d\n", __FUNCTION__
, v2
, f1
);
1050 env
->fregs
[f1
].l
.upper
= int64_to_float32(v2
, &env
->fpu_status
);
1053 /* convert 64-bit int to 64-bit float */
1054 void HELPER(cdgbr
)(uint32_t f1
, int64_t v2
)
1056 HELPER_LOG("%s: converting %ld to f%d\n", __FUNCTION__
, v2
, f1
);
1057 env
->fregs
[f1
].d
= int64_to_float64(v2
, &env
->fpu_status
);
1060 /* convert 64-bit int to 128-bit float */
1061 void HELPER(cxgbr
)(uint32_t f1
, int64_t v2
)
1064 x1
.q
= int64_to_float128(v2
, &env
->fpu_status
);
1065 HELPER_LOG("%s: converted %ld to 0x%lx and 0x%lx\n", __FUNCTION__
, v2
,
1066 x1
.ll
.upper
, x1
.ll
.lower
);
1067 env
->fregs
[f1
].ll
= x1
.ll
.upper
;
1068 env
->fregs
[f1
+ 2].ll
= x1
.ll
.lower
;
1071 /* convert 32-bit int to 32-bit float */
1072 void HELPER(cefbr
)(uint32_t f1
, int32_t v2
)
1074 env
->fregs
[f1
].l
.upper
= int32_to_float32(v2
, &env
->fpu_status
);
1075 HELPER_LOG("%s: converting %d to 0x%d in f%d\n", __FUNCTION__
, v2
,
1076 env
->fregs
[f1
].l
.upper
, f1
);
1079 /* 32-bit FP addition RR */
1080 uint32_t HELPER(aebr
)(uint32_t f1
, uint32_t f2
)
1082 env
->fregs
[f1
].l
.upper
= float32_add(env
->fregs
[f1
].l
.upper
,
1083 env
->fregs
[f2
].l
.upper
,
1085 HELPER_LOG("%s: adding 0x%d resulting in 0x%d in f%d\n", __FUNCTION__
,
1086 env
->fregs
[f2
].l
.upper
, env
->fregs
[f1
].l
.upper
, f1
);
1088 return set_cc_nz_f32(env
->fregs
[f1
].l
.upper
);
1091 /* 64-bit FP addition RR */
1092 uint32_t HELPER(adbr
)(uint32_t f1
, uint32_t f2
)
1094 env
->fregs
[f1
].d
= float64_add(env
->fregs
[f1
].d
, env
->fregs
[f2
].d
,
1096 HELPER_LOG("%s: adding 0x%ld resulting in 0x%ld in f%d\n", __FUNCTION__
,
1097 env
->fregs
[f2
].d
, env
->fregs
[f1
].d
, f1
);
1099 return set_cc_nz_f64(env
->fregs
[f1
].d
);
1102 /* 32-bit FP subtraction RR */
1103 uint32_t HELPER(sebr
)(uint32_t f1
, uint32_t f2
)
1105 env
->fregs
[f1
].l
.upper
= float32_sub(env
->fregs
[f1
].l
.upper
,
1106 env
->fregs
[f2
].l
.upper
,
1108 HELPER_LOG("%s: adding 0x%d resulting in 0x%d in f%d\n", __FUNCTION__
,
1109 env
->fregs
[f2
].l
.upper
, env
->fregs
[f1
].l
.upper
, f1
);
1111 return set_cc_nz_f32(env
->fregs
[f1
].l
.upper
);
1114 /* 64-bit FP subtraction RR */
1115 uint32_t HELPER(sdbr
)(uint32_t f1
, uint32_t f2
)
1117 env
->fregs
[f1
].d
= float64_sub(env
->fregs
[f1
].d
, env
->fregs
[f2
].d
,
1119 HELPER_LOG("%s: subtracting 0x%ld resulting in 0x%ld in f%d\n",
1120 __FUNCTION__
, env
->fregs
[f2
].d
, env
->fregs
[f1
].d
, f1
);
1122 return set_cc_nz_f64(env
->fregs
[f1
].d
);
1125 /* 32-bit FP division RR */
1126 void HELPER(debr
)(uint32_t f1
, uint32_t f2
)
1128 env
->fregs
[f1
].l
.upper
= float32_div(env
->fregs
[f1
].l
.upper
,
1129 env
->fregs
[f2
].l
.upper
,
1133 /* 128-bit FP division RR */
1134 void HELPER(dxbr
)(uint32_t f1
, uint32_t f2
)
1137 v1
.ll
.upper
= env
->fregs
[f1
].ll
;
1138 v1
.ll
.lower
= env
->fregs
[f1
+ 2].ll
;
1140 v2
.ll
.upper
= env
->fregs
[f2
].ll
;
1141 v2
.ll
.lower
= env
->fregs
[f2
+ 2].ll
;
1143 res
.q
= float128_div(v1
.q
, v2
.q
, &env
->fpu_status
);
1144 env
->fregs
[f1
].ll
= res
.ll
.upper
;
1145 env
->fregs
[f1
+ 2].ll
= res
.ll
.lower
;
1148 /* 64-bit FP multiplication RR */
1149 void HELPER(mdbr
)(uint32_t f1
, uint32_t f2
)
1151 env
->fregs
[f1
].d
= float64_mul(env
->fregs
[f1
].d
, env
->fregs
[f2
].d
,
1155 /* 128-bit FP multiplication RR */
1156 void HELPER(mxbr
)(uint32_t f1
, uint32_t f2
)
1159 v1
.ll
.upper
= env
->fregs
[f1
].ll
;
1160 v1
.ll
.lower
= env
->fregs
[f1
+ 2].ll
;
1162 v2
.ll
.upper
= env
->fregs
[f2
].ll
;
1163 v2
.ll
.lower
= env
->fregs
[f2
+ 2].ll
;
1165 res
.q
= float128_mul(v1
.q
, v2
.q
, &env
->fpu_status
);
1166 env
->fregs
[f1
].ll
= res
.ll
.upper
;
1167 env
->fregs
[f1
+ 2].ll
= res
.ll
.lower
;
1170 /* convert 32-bit float to 64-bit float */
1171 void HELPER(ldebr
)(uint32_t r1
, uint32_t r2
)
1173 env
->fregs
[r1
].d
= float32_to_float64(env
->fregs
[r2
].l
.upper
,
1177 /* convert 128-bit float to 64-bit float */
1178 void HELPER(ldxbr
)(uint32_t f1
, uint32_t f2
)
1181 x2
.ll
.upper
= env
->fregs
[f2
].ll
;
1182 x2
.ll
.lower
= env
->fregs
[f2
+ 2].ll
;
1183 env
->fregs
[f1
].d
= float128_to_float64(x2
.q
, &env
->fpu_status
);
1184 HELPER_LOG("%s: to 0x%ld\n", __FUNCTION__
, env
->fregs
[f1
].d
);
1187 /* convert 64-bit float to 128-bit float */
1188 void HELPER(lxdbr
)(uint32_t f1
, uint32_t f2
)
1191 res
.q
= float64_to_float128(env
->fregs
[f2
].d
, &env
->fpu_status
);
1192 env
->fregs
[f1
].ll
= res
.ll
.upper
;
1193 env
->fregs
[f1
+ 2].ll
= res
.ll
.lower
;
1196 /* convert 64-bit float to 32-bit float */
1197 void HELPER(ledbr
)(uint32_t f1
, uint32_t f2
)
1199 float64 d2
= env
->fregs
[f2
].d
;
1200 env
->fregs
[f1
].l
.upper
= float64_to_float32(d2
, &env
->fpu_status
);
1203 /* convert 128-bit float to 32-bit float */
1204 void HELPER(lexbr
)(uint32_t f1
, uint32_t f2
)
1207 x2
.ll
.upper
= env
->fregs
[f2
].ll
;
1208 x2
.ll
.lower
= env
->fregs
[f2
+ 2].ll
;
1209 env
->fregs
[f1
].l
.upper
= float128_to_float32(x2
.q
, &env
->fpu_status
);
1210 HELPER_LOG("%s: to 0x%d\n", __FUNCTION__
, env
->fregs
[f1
].l
.upper
);
1213 /* absolute value of 32-bit float */
1214 uint32_t HELPER(lpebr
)(uint32_t f1
, uint32_t f2
)
1217 float32 v2
= env
->fregs
[f2
].d
;
1218 v1
= float32_abs(v2
);
1219 env
->fregs
[f1
].d
= v1
;
1220 return set_cc_nz_f32(v1
);
1223 /* absolute value of 64-bit float */
1224 uint32_t HELPER(lpdbr
)(uint32_t f1
, uint32_t f2
)
1227 float64 v2
= env
->fregs
[f2
].d
;
1228 v1
= float64_abs(v2
);
1229 env
->fregs
[f1
].d
= v1
;
1230 return set_cc_nz_f64(v1
);
1233 /* absolute value of 128-bit float */
1234 uint32_t HELPER(lpxbr
)(uint32_t f1
, uint32_t f2
)
1238 v2
.ll
.upper
= env
->fregs
[f2
].ll
;
1239 v2
.ll
.lower
= env
->fregs
[f2
+ 2].ll
;
1240 v1
.q
= float128_abs(v2
.q
);
1241 env
->fregs
[f1
].ll
= v1
.ll
.upper
;
1242 env
->fregs
[f1
+ 2].ll
= v1
.ll
.lower
;
1243 return set_cc_nz_f128(v1
.q
);
1246 /* load and test 64-bit float */
1247 uint32_t HELPER(ltdbr
)(uint32_t f1
, uint32_t f2
)
1249 env
->fregs
[f1
].d
= env
->fregs
[f2
].d
;
1250 return set_cc_nz_f64(env
->fregs
[f1
].d
);
1253 /* load and test 32-bit float */
1254 uint32_t HELPER(ltebr
)(uint32_t f1
, uint32_t f2
)
1256 env
->fregs
[f1
].l
.upper
= env
->fregs
[f2
].l
.upper
;
1257 return set_cc_nz_f32(env
->fregs
[f1
].l
.upper
);
1260 /* load and test 128-bit float */
1261 uint32_t HELPER(ltxbr
)(uint32_t f1
, uint32_t f2
)
1264 x
.ll
.upper
= env
->fregs
[f2
].ll
;
1265 x
.ll
.lower
= env
->fregs
[f2
+ 2].ll
;
1266 env
->fregs
[f1
].ll
= x
.ll
.upper
;
1267 env
->fregs
[f1
+ 2].ll
= x
.ll
.lower
;
1268 return set_cc_nz_f128(x
.q
);
1271 /* load complement of 32-bit float */
1272 uint32_t HELPER(lcebr
)(uint32_t f1
, uint32_t f2
)
1274 env
->fregs
[f1
].l
.upper
= float32_chs(env
->fregs
[f2
].l
.upper
);
1276 return set_cc_nz_f32(env
->fregs
[f1
].l
.upper
);
1279 /* load complement of 64-bit float */
1280 uint32_t HELPER(lcdbr
)(uint32_t f1
, uint32_t f2
)
1282 env
->fregs
[f1
].d
= float64_chs(env
->fregs
[f2
].d
);
1284 return set_cc_nz_f64(env
->fregs
[f1
].d
);
1287 /* load complement of 128-bit float */
1288 uint32_t HELPER(lcxbr
)(uint32_t f1
, uint32_t f2
)
1291 x2
.ll
.upper
= env
->fregs
[f2
].ll
;
1292 x2
.ll
.lower
= env
->fregs
[f2
+ 2].ll
;
1293 x1
.q
= float128_chs(x2
.q
);
1294 env
->fregs
[f1
].ll
= x1
.ll
.upper
;
1295 env
->fregs
[f1
+ 2].ll
= x1
.ll
.lower
;
1296 return set_cc_nz_f128(x1
.q
);
1299 /* 32-bit FP addition RM */
1300 void HELPER(aeb
)(uint32_t f1
, uint32_t val
)
1302 float32 v1
= env
->fregs
[f1
].l
.upper
;
1305 HELPER_LOG("%s: adding 0x%d from f%d and 0x%d\n", __FUNCTION__
,
1307 env
->fregs
[f1
].l
.upper
= float32_add(v1
, v2
.f
, &env
->fpu_status
);
1310 /* 32-bit FP division RM */
1311 void HELPER(deb
)(uint32_t f1
, uint32_t val
)
1313 float32 v1
= env
->fregs
[f1
].l
.upper
;
1316 HELPER_LOG("%s: dividing 0x%d from f%d by 0x%d\n", __FUNCTION__
,
1318 env
->fregs
[f1
].l
.upper
= float32_div(v1
, v2
.f
, &env
->fpu_status
);
1321 /* 32-bit FP multiplication RM */
1322 void HELPER(meeb
)(uint32_t f1
, uint32_t val
)
1324 float32 v1
= env
->fregs
[f1
].l
.upper
;
1327 HELPER_LOG("%s: multiplying 0x%d from f%d and 0x%d\n", __FUNCTION__
,
1329 env
->fregs
[f1
].l
.upper
= float32_mul(v1
, v2
.f
, &env
->fpu_status
);
1332 /* 32-bit FP compare RR */
1333 uint32_t HELPER(cebr
)(uint32_t f1
, uint32_t f2
)
1335 float32 v1
= env
->fregs
[f1
].l
.upper
;
1336 float32 v2
= env
->fregs
[f2
].l
.upper
;;
1337 HELPER_LOG("%s: comparing 0x%d from f%d and 0x%d\n", __FUNCTION__
,
1339 return set_cc_f32(v1
, v2
);
1342 /* 64-bit FP compare RR */
1343 uint32_t HELPER(cdbr
)(uint32_t f1
, uint32_t f2
)
1345 float64 v1
= env
->fregs
[f1
].d
;
1346 float64 v2
= env
->fregs
[f2
].d
;;
1347 HELPER_LOG("%s: comparing 0x%ld from f%d and 0x%ld\n", __FUNCTION__
,
1349 return set_cc_f64(v1
, v2
);
1352 /* 128-bit FP compare RR */
1353 uint32_t HELPER(cxbr
)(uint32_t f1
, uint32_t f2
)
1356 v1
.ll
.upper
= env
->fregs
[f1
].ll
;
1357 v1
.ll
.lower
= env
->fregs
[f1
+ 2].ll
;
1359 v2
.ll
.upper
= env
->fregs
[f2
].ll
;
1360 v2
.ll
.lower
= env
->fregs
[f2
+ 2].ll
;
1362 return float_comp_to_cc(float128_compare_quiet(v1
.q
, v2
.q
,
1366 /* 64-bit FP compare RM */
1367 uint32_t HELPER(cdb
)(uint32_t f1
, uint64_t a2
)
1369 float64 v1
= env
->fregs
[f1
].d
;
1372 HELPER_LOG("%s: comparing 0x%ld from f%d and 0x%lx\n", __FUNCTION__
, v1
,
1374 return set_cc_f64(v1
, v2
.d
);
1377 /* 64-bit FP addition RM */
1378 uint32_t HELPER(adb
)(uint32_t f1
, uint64_t a2
)
1380 float64 v1
= env
->fregs
[f1
].d
;
1383 HELPER_LOG("%s: adding 0x%lx from f%d and 0x%lx\n", __FUNCTION__
,
1385 env
->fregs
[f1
].d
= v1
= float64_add(v1
, v2
.d
, &env
->fpu_status
);
1386 return set_cc_nz_f64(v1
);
1389 /* 32-bit FP subtraction RM */
1390 void HELPER(seb
)(uint32_t f1
, uint32_t val
)
1392 float32 v1
= env
->fregs
[f1
].l
.upper
;
1395 env
->fregs
[f1
].l
.upper
= float32_sub(v1
, v2
.f
, &env
->fpu_status
);
1398 /* 64-bit FP subtraction RM */
1399 uint32_t HELPER(sdb
)(uint32_t f1
, uint64_t a2
)
1401 float64 v1
= env
->fregs
[f1
].d
;
1404 env
->fregs
[f1
].d
= v1
= float64_sub(v1
, v2
.d
, &env
->fpu_status
);
1405 return set_cc_nz_f64(v1
);
1408 /* 64-bit FP multiplication RM */
1409 void HELPER(mdb
)(uint32_t f1
, uint64_t a2
)
1411 float64 v1
= env
->fregs
[f1
].d
;
1414 HELPER_LOG("%s: multiplying 0x%lx from f%d and 0x%ld\n", __FUNCTION__
,
1416 env
->fregs
[f1
].d
= float64_mul(v1
, v2
.d
, &env
->fpu_status
);
1419 /* 64-bit FP division RM */
1420 void HELPER(ddb
)(uint32_t f1
, uint64_t a2
)
1422 float64 v1
= env
->fregs
[f1
].d
;
1425 HELPER_LOG("%s: dividing 0x%lx from f%d by 0x%ld\n", __FUNCTION__
,
1427 env
->fregs
[f1
].d
= float64_div(v1
, v2
.d
, &env
->fpu_status
);
1430 static void set_round_mode(int m3
)
1437 /* biased round no nearest */
1439 /* round to nearest */
1440 set_float_rounding_mode(float_round_nearest_even
, &env
->fpu_status
);
1444 set_float_rounding_mode(float_round_to_zero
, &env
->fpu_status
);
1448 set_float_rounding_mode(float_round_up
, &env
->fpu_status
);
1452 set_float_rounding_mode(float_round_down
, &env
->fpu_status
);
1457 /* convert 32-bit float to 64-bit int */
1458 uint32_t HELPER(cgebr
)(uint32_t r1
, uint32_t f2
, uint32_t m3
)
1460 float32 v2
= env
->fregs
[f2
].l
.upper
;
1462 env
->regs
[r1
] = float32_to_int64(v2
, &env
->fpu_status
);
1463 return set_cc_nz_f32(v2
);
1466 /* convert 64-bit float to 64-bit int */
1467 uint32_t HELPER(cgdbr
)(uint32_t r1
, uint32_t f2
, uint32_t m3
)
1469 float64 v2
= env
->fregs
[f2
].d
;
1471 env
->regs
[r1
] = float64_to_int64(v2
, &env
->fpu_status
);
1472 return set_cc_nz_f64(v2
);
1475 /* convert 128-bit float to 64-bit int */
1476 uint32_t HELPER(cgxbr
)(uint32_t r1
, uint32_t f2
, uint32_t m3
)
1479 v2
.ll
.upper
= env
->fregs
[f2
].ll
;
1480 v2
.ll
.lower
= env
->fregs
[f2
+ 2].ll
;
1482 env
->regs
[r1
] = float128_to_int64(v2
.q
, &env
->fpu_status
);
1483 if (float128_is_any_nan(v2
.q
)) {
1485 } else if (float128_is_zero(v2
.q
)) {
1487 } else if (float128_is_neg(v2
.q
)) {
1494 /* convert 32-bit float to 32-bit int */
1495 uint32_t HELPER(cfebr
)(uint32_t r1
, uint32_t f2
, uint32_t m3
)
1497 float32 v2
= env
->fregs
[f2
].l
.upper
;
1499 env
->regs
[r1
] = (env
->regs
[r1
] & 0xffffffff00000000ULL
) |
1500 float32_to_int32(v2
, &env
->fpu_status
);
1501 return set_cc_nz_f32(v2
);
1504 /* convert 64-bit float to 32-bit int */
1505 uint32_t HELPER(cfdbr
)(uint32_t r1
, uint32_t f2
, uint32_t m3
)
1507 float64 v2
= env
->fregs
[f2
].d
;
1509 env
->regs
[r1
] = (env
->regs
[r1
] & 0xffffffff00000000ULL
) |
1510 float64_to_int32(v2
, &env
->fpu_status
);
1511 return set_cc_nz_f64(v2
);
1514 /* convert 128-bit float to 32-bit int */
1515 uint32_t HELPER(cfxbr
)(uint32_t r1
, uint32_t f2
, uint32_t m3
)
1518 v2
.ll
.upper
= env
->fregs
[f2
].ll
;
1519 v2
.ll
.lower
= env
->fregs
[f2
+ 2].ll
;
1520 env
->regs
[r1
] = (env
->regs
[r1
] & 0xffffffff00000000ULL
) |
1521 float128_to_int32(v2
.q
, &env
->fpu_status
);
1522 return set_cc_nz_f128(v2
.q
);
1525 /* load 32-bit FP zero */
1526 void HELPER(lzer
)(uint32_t f1
)
1528 env
->fregs
[f1
].l
.upper
= float32_zero
;
1531 /* load 64-bit FP zero */
1532 void HELPER(lzdr
)(uint32_t f1
)
1534 env
->fregs
[f1
].d
= float64_zero
;
1537 /* load 128-bit FP zero */
1538 void HELPER(lzxr
)(uint32_t f1
)
1541 x
.q
= float64_to_float128(float64_zero
, &env
->fpu_status
);
1542 env
->fregs
[f1
].ll
= x
.ll
.upper
;
1543 env
->fregs
[f1
+ 1].ll
= x
.ll
.lower
;
1546 /* 128-bit FP subtraction RR */
1547 uint32_t HELPER(sxbr
)(uint32_t f1
, uint32_t f2
)
1550 v1
.ll
.upper
= env
->fregs
[f1
].ll
;
1551 v1
.ll
.lower
= env
->fregs
[f1
+ 2].ll
;
1553 v2
.ll
.upper
= env
->fregs
[f2
].ll
;
1554 v2
.ll
.lower
= env
->fregs
[f2
+ 2].ll
;
1556 res
.q
= float128_sub(v1
.q
, v2
.q
, &env
->fpu_status
);
1557 env
->fregs
[f1
].ll
= res
.ll
.upper
;
1558 env
->fregs
[f1
+ 2].ll
= res
.ll
.lower
;
1559 return set_cc_nz_f128(res
.q
);
1562 /* 128-bit FP addition RR */
1563 uint32_t HELPER(axbr
)(uint32_t f1
, uint32_t f2
)
1566 v1
.ll
.upper
= env
->fregs
[f1
].ll
;
1567 v1
.ll
.lower
= env
->fregs
[f1
+ 2].ll
;
1569 v2
.ll
.upper
= env
->fregs
[f2
].ll
;
1570 v2
.ll
.lower
= env
->fregs
[f2
+ 2].ll
;
1572 res
.q
= float128_add(v1
.q
, v2
.q
, &env
->fpu_status
);
1573 env
->fregs
[f1
].ll
= res
.ll
.upper
;
1574 env
->fregs
[f1
+ 2].ll
= res
.ll
.lower
;
1575 return set_cc_nz_f128(res
.q
);
1578 /* 32-bit FP multiplication RR */
1579 void HELPER(meebr
)(uint32_t f1
, uint32_t f2
)
1581 env
->fregs
[f1
].l
.upper
= float32_mul(env
->fregs
[f1
].l
.upper
,
1582 env
->fregs
[f2
].l
.upper
,
1586 /* 64-bit FP division RR */
1587 void HELPER(ddbr
)(uint32_t f1
, uint32_t f2
)
1589 env
->fregs
[f1
].d
= float64_div(env
->fregs
[f1
].d
, env
->fregs
[f2
].d
,
1593 /* 64-bit FP multiply and add RM */
1594 void HELPER(madb
)(uint32_t f1
, uint64_t a2
, uint32_t f3
)
1596 HELPER_LOG("%s: f1 %d a2 0x%lx f3 %d\n", __FUNCTION__
, f1
, a2
, f3
);
1599 env
->fregs
[f1
].d
= float64_add(env
->fregs
[f1
].d
,
1600 float64_mul(v2
.d
, env
->fregs
[f3
].d
,
1605 /* 64-bit FP multiply and add RR */
1606 void HELPER(madbr
)(uint32_t f1
, uint32_t f3
, uint32_t f2
)
1608 HELPER_LOG("%s: f1 %d f2 %d f3 %d\n", __FUNCTION__
, f1
, f2
, f3
);
1609 env
->fregs
[f1
].d
= float64_add(float64_mul(env
->fregs
[f2
].d
,
1612 env
->fregs
[f1
].d
, &env
->fpu_status
);
1615 /* 64-bit FP multiply and subtract RR */
1616 void HELPER(msdbr
)(uint32_t f1
, uint32_t f3
, uint32_t f2
)
1618 HELPER_LOG("%s: f1 %d f2 %d f3 %d\n", __FUNCTION__
, f1
, f2
, f3
);
1619 env
->fregs
[f1
].d
= float64_sub(float64_mul(env
->fregs
[f2
].d
,
1622 env
->fregs
[f1
].d
, &env
->fpu_status
);
1625 /* 32-bit FP multiply and add RR */
1626 void HELPER(maebr
)(uint32_t f1
, uint32_t f3
, uint32_t f2
)
1628 env
->fregs
[f1
].l
.upper
= float32_add(env
->fregs
[f1
].l
.upper
,
1629 float32_mul(env
->fregs
[f2
].l
.upper
,
1630 env
->fregs
[f3
].l
.upper
,
1635 /* convert 64-bit float to 128-bit float */
1636 void HELPER(lxdb
)(uint32_t f1
, uint64_t a2
)
1641 v1
.q
= float64_to_float128(v2
.d
, &env
->fpu_status
);
1642 env
->fregs
[f1
].ll
= v1
.ll
.upper
;
1643 env
->fregs
[f1
+ 2].ll
= v1
.ll
.lower
;
1646 /* test data class 32-bit */
1647 uint32_t HELPER(tceb
)(uint32_t f1
, uint64_t m2
)
1649 float32 v1
= env
->fregs
[f1
].l
.upper
;
1650 int neg
= float32_is_neg(v1
);
1653 HELPER_LOG("%s: v1 0x%lx m2 0x%lx neg %d\n", __FUNCTION__
, (long)v1
, m2
, neg
);
1654 if ((float32_is_zero(v1
) && (m2
& (1 << (11-neg
)))) ||
1655 (float32_is_infinity(v1
) && (m2
& (1 << (5-neg
)))) ||
1656 (float32_is_any_nan(v1
) && (m2
& (1 << (3-neg
)))) ||
1657 (float32_is_signaling_nan(v1
) && (m2
& (1 << (1-neg
))))) {
1659 } else if (m2
& (1 << (9-neg
))) {
1660 /* assume normalized number */
1664 /* FIXME: denormalized? */
1668 /* test data class 64-bit */
1669 uint32_t HELPER(tcdb
)(uint32_t f1
, uint64_t m2
)
1671 float64 v1
= env
->fregs
[f1
].d
;
1672 int neg
= float64_is_neg(v1
);
1675 HELPER_LOG("%s: v1 0x%lx m2 0x%lx neg %d\n", __FUNCTION__
, v1
, m2
, neg
);
1676 if ((float64_is_zero(v1
) && (m2
& (1 << (11-neg
)))) ||
1677 (float64_is_infinity(v1
) && (m2
& (1 << (5-neg
)))) ||
1678 (float64_is_any_nan(v1
) && (m2
& (1 << (3-neg
)))) ||
1679 (float64_is_signaling_nan(v1
) && (m2
& (1 << (1-neg
))))) {
1681 } else if (m2
& (1 << (9-neg
))) {
1682 /* assume normalized number */
1685 /* FIXME: denormalized? */
1689 /* test data class 128-bit */
1690 uint32_t HELPER(tcxb
)(uint32_t f1
, uint64_t m2
)
1694 v1
.ll
.upper
= env
->fregs
[f1
].ll
;
1695 v1
.ll
.lower
= env
->fregs
[f1
+ 2].ll
;
1697 int neg
= float128_is_neg(v1
.q
);
1698 if ((float128_is_zero(v1
.q
) && (m2
& (1 << (11-neg
)))) ||
1699 (float128_is_infinity(v1
.q
) && (m2
& (1 << (5-neg
)))) ||
1700 (float128_is_any_nan(v1
.q
) && (m2
& (1 << (3-neg
)))) ||
1701 (float128_is_signaling_nan(v1
.q
) && (m2
& (1 << (1-neg
))))) {
1703 } else if (m2
& (1 << (9-neg
))) {
1704 /* assume normalized number */
1707 /* FIXME: denormalized? */
1711 /* find leftmost one */
1712 uint32_t HELPER(flogr
)(uint32_t r1
, uint64_t v2
)
1717 while (!(v2
& 0x8000000000000000ULL
) && v2
) {
1724 env
->regs
[r1
+ 1] = 0;
1727 env
->regs
[r1
] = res
;
1728 env
->regs
[r1
+ 1] = ov2
& ~(0x8000000000000000ULL
>> res
);
1733 /* square root 64-bit RR */
1734 void HELPER(sqdbr
)(uint32_t f1
, uint32_t f2
)
1736 env
->fregs
[f1
].d
= float64_sqrt(env
->fregs
[f2
].d
, &env
->fpu_status
);
1740 void HELPER(cksm
)(uint32_t r1
, uint32_t r2
)
1742 uint64_t src
= get_address_31fix(r2
);
1743 uint64_t src_len
= env
->regs
[(r2
+ 1) & 15];
1744 uint64_t cksm
= (uint32_t)env
->regs
[r1
];
1746 while (src_len
>= 4) {
1749 /* move to next word */
1758 cksm
+= ldub(src
) << 24;
1761 cksm
+= lduw(src
) << 16;
1764 cksm
+= lduw(src
) << 16;
1765 cksm
+= ldub(src
+ 2) << 8;
1769 /* indicate we've processed everything */
1770 env
->regs
[r2
] = src
+ src_len
;
1771 env
->regs
[(r2
+ 1) & 15] = 0;
1774 env
->regs
[r1
] = (env
->regs
[r1
] & 0xffffffff00000000ULL
) |
1775 ((uint32_t)cksm
+ (cksm
>> 32));
1778 static inline uint32_t cc_calc_ltgt_32(CPUState
*env
, int32_t src
,
1783 } else if (src
< dst
) {
1790 static inline uint32_t cc_calc_ltgt0_32(CPUState
*env
, int32_t dst
)
1792 return cc_calc_ltgt_32(env
, dst
, 0);
1795 static inline uint32_t cc_calc_ltgt_64(CPUState
*env
, int64_t src
,
1800 } else if (src
< dst
) {
1807 static inline uint32_t cc_calc_ltgt0_64(CPUState
*env
, int64_t dst
)
1809 return cc_calc_ltgt_64(env
, dst
, 0);
1812 static inline uint32_t cc_calc_ltugtu_32(CPUState
*env
, uint32_t src
,
1817 } else if (src
< dst
) {
1824 static inline uint32_t cc_calc_ltugtu_64(CPUState
*env
, uint64_t src
,
1829 } else if (src
< dst
) {
1836 static inline uint32_t cc_calc_tm_32(CPUState
*env
, uint32_t val
, uint32_t mask
)
1838 HELPER_LOG("%s: val 0x%x mask 0x%x\n", __FUNCTION__
, val
, mask
);
1839 uint16_t r
= val
& mask
;
1840 if (r
== 0 || mask
== 0) {
1842 } else if (r
== mask
) {
1849 /* set condition code for test under mask */
1850 static inline uint32_t cc_calc_tm_64(CPUState
*env
, uint64_t val
, uint32_t mask
)
1852 uint16_t r
= val
& mask
;
1853 HELPER_LOG("%s: val 0x%lx mask 0x%x r 0x%x\n", __FUNCTION__
, val
, mask
, r
);
1854 if (r
== 0 || mask
== 0) {
1856 } else if (r
== mask
) {
1859 while (!(mask
& 0x8000)) {
1871 static inline uint32_t cc_calc_nz(CPUState
*env
, uint64_t dst
)
1876 static inline uint32_t cc_calc_add_64(CPUState
*env
, int64_t a1
, int64_t a2
,
1879 if ((a1
> 0 && a2
> 0 && ar
< 0) || (a1
< 0 && a2
< 0 && ar
> 0)) {
1880 return 3; /* overflow */
1884 } else if (ar
> 0) {
1892 static inline uint32_t cc_calc_addu_64(CPUState
*env
, uint64_t a1
, uint64_t a2
,
1902 if (ar
< a1
|| ar
< a2
) {
1910 static inline uint32_t cc_calc_sub_64(CPUState
*env
, int64_t a1
, int64_t a2
,
1913 if ((a1
> 0 && a2
< 0 && ar
< 0) || (a1
< 0 && a2
> 0 && ar
> 0)) {
1914 return 3; /* overflow */
1918 } else if (ar
> 0) {
1926 static inline uint32_t cc_calc_subu_64(CPUState
*env
, uint64_t a1
, uint64_t a2
,
1940 static inline uint32_t cc_calc_abs_64(CPUState
*env
, int64_t dst
)
1942 if ((uint64_t)dst
== 0x8000000000000000ULL
) {
1951 static inline uint32_t cc_calc_nabs_64(CPUState
*env
, int64_t dst
)
1956 static inline uint32_t cc_calc_comp_64(CPUState
*env
, int64_t dst
)
1958 if ((uint64_t)dst
== 0x8000000000000000ULL
) {
1960 } else if (dst
< 0) {
1962 } else if (dst
> 0) {
1970 static inline uint32_t cc_calc_add_32(CPUState
*env
, int32_t a1
, int32_t a2
,
1973 if ((a1
> 0 && a2
> 0 && ar
< 0) || (a1
< 0 && a2
< 0 && ar
> 0)) {
1974 return 3; /* overflow */
1978 } else if (ar
> 0) {
1986 static inline uint32_t cc_calc_addu_32(CPUState
*env
, uint32_t a1
, uint32_t a2
,
1996 if (ar
< a1
|| ar
< a2
) {
2004 static inline uint32_t cc_calc_sub_32(CPUState
*env
, int32_t a1
, int32_t a2
,
2007 if ((a1
> 0 && a2
< 0 && ar
< 0) || (a1
< 0 && a2
> 0 && ar
> 0)) {
2008 return 3; /* overflow */
2012 } else if (ar
> 0) {
2020 static inline uint32_t cc_calc_subu_32(CPUState
*env
, uint32_t a1
, uint32_t a2
,
2034 static inline uint32_t cc_calc_abs_32(CPUState
*env
, int32_t dst
)
2036 if ((uint32_t)dst
== 0x80000000UL
) {
2045 static inline uint32_t cc_calc_nabs_32(CPUState
*env
, int32_t dst
)
2050 static inline uint32_t cc_calc_comp_32(CPUState
*env
, int32_t dst
)
2052 if ((uint32_t)dst
== 0x80000000UL
) {
2054 } else if (dst
< 0) {
2056 } else if (dst
> 0) {
2063 /* calculate condition code for insert character under mask insn */
2064 static inline uint32_t cc_calc_icm_32(CPUState
*env
, uint32_t mask
, uint32_t val
)
2066 HELPER_LOG("%s: mask 0x%x val %d\n", __FUNCTION__
, mask
, val
);
2072 } else if (val
& 0x80000000) {
2079 if (!val
|| !mask
) {
2095 static inline uint32_t cc_calc_slag(CPUState
*env
, uint64_t src
, uint64_t shift
)
2097 uint64_t mask
= ((1ULL << shift
) - 1ULL) << (64 - shift
);
2100 /* check if the sign bit stays the same */
2101 if (src
& (1ULL << 63)) {
2107 if ((src
& mask
) != match
) {
2112 r
= ((src
<< shift
) & ((1ULL << 63) - 1)) | (src
& (1ULL << 63));
2114 if ((int64_t)r
== 0) {
2116 } else if ((int64_t)r
< 0) {
2124 static inline uint32_t do_calc_cc(CPUState
*env
, uint32_t cc_op
, uint64_t src
,
2125 uint64_t dst
, uint64_t vr
)
2134 /* cc_op value _is_ cc */
2137 case CC_OP_LTGT0_32
:
2138 r
= cc_calc_ltgt0_32(env
, dst
);
2140 case CC_OP_LTGT0_64
:
2141 r
= cc_calc_ltgt0_64(env
, dst
);
2144 r
= cc_calc_ltgt_32(env
, src
, dst
);
2147 r
= cc_calc_ltgt_64(env
, src
, dst
);
2149 case CC_OP_LTUGTU_32
:
2150 r
= cc_calc_ltugtu_32(env
, src
, dst
);
2152 case CC_OP_LTUGTU_64
:
2153 r
= cc_calc_ltugtu_64(env
, src
, dst
);
2156 r
= cc_calc_tm_32(env
, src
, dst
);
2159 r
= cc_calc_tm_64(env
, src
, dst
);
2162 r
= cc_calc_nz(env
, dst
);
2165 r
= cc_calc_add_64(env
, src
, dst
, vr
);
2168 r
= cc_calc_addu_64(env
, src
, dst
, vr
);
2171 r
= cc_calc_sub_64(env
, src
, dst
, vr
);
2174 r
= cc_calc_subu_64(env
, src
, dst
, vr
);
2177 r
= cc_calc_abs_64(env
, dst
);
2180 r
= cc_calc_nabs_64(env
, dst
);
2183 r
= cc_calc_comp_64(env
, dst
);
2187 r
= cc_calc_add_32(env
, src
, dst
, vr
);
2190 r
= cc_calc_addu_32(env
, src
, dst
, vr
);
2193 r
= cc_calc_sub_32(env
, src
, dst
, vr
);
2196 r
= cc_calc_subu_32(env
, src
, dst
, vr
);
2199 r
= cc_calc_abs_64(env
, dst
);
2202 r
= cc_calc_nabs_64(env
, dst
);
2205 r
= cc_calc_comp_32(env
, dst
);
2209 r
= cc_calc_icm_32(env
, src
, dst
);
2212 r
= cc_calc_slag(env
, src
, dst
);
2215 case CC_OP_LTGT_F32
:
2216 r
= set_cc_f32(src
, dst
);
2218 case CC_OP_LTGT_F64
:
2219 r
= set_cc_f64(src
, dst
);
2222 r
= set_cc_nz_f32(dst
);
2225 r
= set_cc_nz_f64(dst
);
2229 cpu_abort(env
, "Unknown CC operation: %s\n", cc_name(cc_op
));
2232 HELPER_LOG("%s: %15s 0x%016lx 0x%016lx 0x%016lx = %d\n", __FUNCTION__
,
2233 cc_name(cc_op
), src
, dst
, vr
, r
);
2237 uint32_t calc_cc(CPUState
*env
, uint32_t cc_op
, uint64_t src
, uint64_t dst
,
2240 return do_calc_cc(env
, cc_op
, src
, dst
, vr
);
2243 uint32_t HELPER(calc_cc
)(uint32_t cc_op
, uint64_t src
, uint64_t dst
,
2246 return do_calc_cc(env
, cc_op
, src
, dst
, vr
);
2249 uint64_t HELPER(cvd
)(int32_t bin
)
2252 uint64_t dec
= 0x0c;
2260 for (shift
= 4; (shift
< 64) && bin
; shift
+= 4) {
2261 int current_number
= bin
% 10;
2263 dec
|= (current_number
) << shift
;
2270 void HELPER(unpk
)(uint32_t len
, uint64_t dest
, uint64_t src
)
2272 int len_dest
= len
>> 4;
2273 int len_src
= len
& 0xf;
2275 int second_nibble
= 0;
2280 /* last byte is special, it only flips the nibbles */
2282 stb(dest
, (b
<< 4) | (b
>> 4));
2286 /* now pad every nibble with 0xf0 */
2288 while (len_dest
> 0) {
2289 uint8_t cur_byte
= 0;
2292 cur_byte
= ldub(src
);
2298 /* only advance one nibble at a time */
2299 if (second_nibble
) {
2304 second_nibble
= !second_nibble
;
2307 cur_byte
= (cur_byte
& 0xf);
2311 stb(dest
, cur_byte
);
2315 void HELPER(tr
)(uint32_t len
, uint64_t array
, uint64_t trans
)
2319 for (i
= 0; i
<= len
; i
++) {
2320 uint8_t byte
= ldub(array
+ i
);
2321 uint8_t new_byte
= ldub(trans
+ byte
);
2322 stb(array
+ i
, new_byte
);
2326 #ifndef CONFIG_USER_ONLY
2328 void HELPER(load_psw
)(uint64_t mask
, uint64_t addr
)
2330 load_psw(env
, mask
, addr
);
2334 static void program_interrupt(CPUState
*env
, uint32_t code
, int ilc
)
2336 qemu_log("program interrupt at %#" PRIx64
"\n", env
->psw
.addr
);
2338 if (kvm_enabled()) {
2340 kvm_s390_interrupt(env
, KVM_S390_PROGRAM_INT
, code
);
2343 env
->int_pgm_code
= code
;
2344 env
->int_pgm_ilc
= ilc
;
2345 env
->exception_index
= EXCP_PGM
;
2350 static void ext_interrupt(CPUState
*env
, int type
, uint32_t param
,
2353 cpu_inject_ext(env
, type
, param
, param64
);
2356 int sclp_service_call(CPUState
*env
, uint32_t sccb
, uint64_t code
)
2362 printf("sclp(0x%x, 0x%" PRIx64
")\n", sccb
, code
);
2365 if (sccb
& ~0x7ffffff8ul
) {
2366 fprintf(stderr
, "KVM: invalid sccb address 0x%x\n", sccb
);
2372 case SCLP_CMDW_READ_SCP_INFO
:
2373 case SCLP_CMDW_READ_SCP_INFO_FORCED
:
2374 while ((ram_size
>> (20 + shift
)) > 65535) {
2377 stw_phys(sccb
+ SCP_MEM_CODE
, ram_size
>> (20 + shift
));
2378 stb_phys(sccb
+ SCP_INCREMENT
, 1 << shift
);
2379 stw_phys(sccb
+ SCP_RESPONSE_CODE
, 0x10);
2381 if (kvm_enabled()) {
2383 kvm_s390_interrupt_internal(env
, KVM_S390_INT_SERVICE
,
2388 ext_interrupt(env
, EXT_SERVICE
, sccb
& ~3, 0);
2393 printf("KVM: invalid sclp call 0x%x / 0x%" PRIx64
"x\n", sccb
, code
);
2403 /* SCLP service call */
2404 uint32_t HELPER(servc
)(uint32_t r1
, uint64_t r2
)
2406 if (sclp_service_call(env
, r1
, r2
)) {
2414 uint64_t HELPER(diag
)(uint32_t num
, uint64_t mem
, uint64_t code
)
2421 r
= s390_virtio_hypercall(env
, mem
, code
);
2437 program_interrupt(env
, PGM_OPERATION
, ILC_LATER_INC
);
2444 void HELPER(stidp
)(uint64_t a1
)
2446 stq(a1
, env
->cpu_num
);
2450 void HELPER(spx
)(uint64_t a1
)
2455 env
->psa
= prefix
& 0xfffff000;
2456 qemu_log("prefix: %#x\n", prefix
);
2457 tlb_flush_page(env
, 0);
2458 tlb_flush_page(env
, TARGET_PAGE_SIZE
);
2462 uint32_t HELPER(sck
)(uint64_t a1
)
2464 /* XXX not implemented - is it necessary? */
2469 static inline uint64_t clock_value(CPUState
*env
)
2473 time
= env
->tod_offset
+
2474 time2tod(qemu_get_clock_ns(vm_clock
) - env
->tod_basetime
);
2480 uint32_t HELPER(stck
)(uint64_t a1
)
2482 stq(a1
, clock_value(env
));
2487 /* Store Clock Extended */
2488 uint32_t HELPER(stcke
)(uint64_t a1
)
2491 /* basically the same value as stck */
2492 stq(a1
+ 1, clock_value(env
) | env
->cpu_num
);
2493 /* more fine grained than stck */
2495 /* XXX programmable fields */
2502 /* Set Clock Comparator */
2503 void HELPER(sckc
)(uint64_t a1
)
2505 uint64_t time
= ldq(a1
);
2507 if (time
== -1ULL) {
2511 /* difference between now and then */
2512 time
-= clock_value(env
);
2514 time
= (time
* 125) >> 9;
2516 qemu_mod_timer(env
->tod_timer
, qemu_get_clock_ns(vm_clock
) + time
);
2519 /* Store Clock Comparator */
2520 void HELPER(stckc
)(uint64_t a1
)
2527 void HELPER(spt
)(uint64_t a1
)
2529 uint64_t time
= ldq(a1
);
2531 if (time
== -1ULL) {
2536 time
= (time
* 125) >> 9;
2538 qemu_mod_timer(env
->cpu_timer
, qemu_get_clock_ns(vm_clock
) + time
);
2541 /* Store CPU Timer */
2542 void HELPER(stpt
)(uint64_t a1
)
2548 /* Store System Information */
2549 uint32_t HELPER(stsi
)(uint64_t a0
, uint32_t r0
, uint32_t r1
)
2554 if ((r0
& STSI_LEVEL_MASK
) <= STSI_LEVEL_3
&&
2555 ((r0
& STSI_R0_RESERVED_MASK
) || (r1
& STSI_R1_RESERVED_MASK
))) {
2556 /* valid function code, invalid reserved bits */
2557 program_interrupt(env
, PGM_SPECIFICATION
, 2);
2560 sel1
= r0
& STSI_R0_SEL1_MASK
;
2561 sel2
= r1
& STSI_R1_SEL2_MASK
;
2563 /* XXX: spec exception if sysib is not 4k-aligned */
2565 switch (r0
& STSI_LEVEL_MASK
) {
2567 if ((sel1
== 1) && (sel2
== 1)) {
2568 /* Basic Machine Configuration */
2569 struct sysib_111 sysib
;
2571 memset(&sysib
, 0, sizeof(sysib
));
2572 ebcdic_put(sysib
.manuf
, "QEMU ", 16);
2573 /* same as machine type number in STORE CPU ID */
2574 ebcdic_put(sysib
.type
, "QEMU", 4);
2575 /* same as model number in STORE CPU ID */
2576 ebcdic_put(sysib
.model
, "QEMU ", 16);
2577 ebcdic_put(sysib
.sequence
, "QEMU ", 16);
2578 ebcdic_put(sysib
.plant
, "QEMU", 4);
2579 cpu_physical_memory_rw(a0
, (uint8_t*)&sysib
, sizeof(sysib
), 1);
2580 } else if ((sel1
== 2) && (sel2
== 1)) {
2581 /* Basic Machine CPU */
2582 struct sysib_121 sysib
;
2584 memset(&sysib
, 0, sizeof(sysib
));
2585 /* XXX make different for different CPUs? */
2586 ebcdic_put(sysib
.sequence
, "QEMUQEMUQEMUQEMU", 16);
2587 ebcdic_put(sysib
.plant
, "QEMU", 4);
2588 stw_p(&sysib
.cpu_addr
, env
->cpu_num
);
2589 cpu_physical_memory_rw(a0
, (uint8_t*)&sysib
, sizeof(sysib
), 1);
2590 } else if ((sel1
== 2) && (sel2
== 2)) {
2591 /* Basic Machine CPUs */
2592 struct sysib_122 sysib
;
2594 memset(&sysib
, 0, sizeof(sysib
));
2595 stl_p(&sysib
.capability
, 0x443afc29);
2596 /* XXX change when SMP comes */
2597 stw_p(&sysib
.total_cpus
, 1);
2598 stw_p(&sysib
.active_cpus
, 1);
2599 stw_p(&sysib
.standby_cpus
, 0);
2600 stw_p(&sysib
.reserved_cpus
, 0);
2601 cpu_physical_memory_rw(a0
, (uint8_t*)&sysib
, sizeof(sysib
), 1);
2608 if ((sel1
== 2) && (sel2
== 1)) {
2610 struct sysib_221 sysib
;
2612 memset(&sysib
, 0, sizeof(sysib
));
2613 /* XXX make different for different CPUs? */
2614 ebcdic_put(sysib
.sequence
, "QEMUQEMUQEMUQEMU", 16);
2615 ebcdic_put(sysib
.plant
, "QEMU", 4);
2616 stw_p(&sysib
.cpu_addr
, env
->cpu_num
);
2617 stw_p(&sysib
.cpu_id
, 0);
2618 cpu_physical_memory_rw(a0
, (uint8_t*)&sysib
, sizeof(sysib
), 1);
2619 } else if ((sel1
== 2) && (sel2
== 2)) {
2621 struct sysib_222 sysib
;
2623 memset(&sysib
, 0, sizeof(sysib
));
2624 stw_p(&sysib
.lpar_num
, 0);
2626 /* XXX change when SMP comes */
2627 stw_p(&sysib
.total_cpus
, 1);
2628 stw_p(&sysib
.conf_cpus
, 1);
2629 stw_p(&sysib
.standby_cpus
, 0);
2630 stw_p(&sysib
.reserved_cpus
, 0);
2631 ebcdic_put(sysib
.name
, "QEMU ", 8);
2632 stl_p(&sysib
.caf
, 1000);
2633 stw_p(&sysib
.dedicated_cpus
, 0);
2634 stw_p(&sysib
.shared_cpus
, 0);
2635 cpu_physical_memory_rw(a0
, (uint8_t*)&sysib
, sizeof(sysib
), 1);
2643 if ((sel1
== 2) && (sel2
== 2)) {
2645 struct sysib_322 sysib
;
2647 memset(&sysib
, 0, sizeof(sysib
));
2649 /* XXX change when SMP comes */
2650 stw_p(&sysib
.vm
[0].total_cpus
, 1);
2651 stw_p(&sysib
.vm
[0].conf_cpus
, 1);
2652 stw_p(&sysib
.vm
[0].standby_cpus
, 0);
2653 stw_p(&sysib
.vm
[0].reserved_cpus
, 0);
2654 ebcdic_put(sysib
.vm
[0].name
, "KVMguest", 8);
2655 stl_p(&sysib
.vm
[0].caf
, 1000);
2656 ebcdic_put(sysib
.vm
[0].cpi
, "KVM/Linux ", 16);
2657 cpu_physical_memory_rw(a0
, (uint8_t*)&sysib
, sizeof(sysib
), 1);
2663 case STSI_LEVEL_CURRENT
:
2664 env
->regs
[0] = STSI_LEVEL_3
;
2674 void HELPER(lctlg
)(uint32_t r1
, uint64_t a2
, uint32_t r3
)
2679 for (i
= r1
;; i
= (i
+ 1) % 16) {
2680 env
->cregs
[i
] = ldq(src
);
2681 HELPER_LOG("load ctl %d from 0x%" PRIx64
" == 0x%" PRIx64
"\n",
2682 i
, src
, env
->cregs
[i
]);
2683 src
+= sizeof(uint64_t);
2693 void HELPER(lctl
)(uint32_t r1
, uint64_t a2
, uint32_t r3
)
2698 for (i
= r1
;; i
= (i
+ 1) % 16) {
2699 env
->cregs
[i
] = (env
->cregs
[i
] & 0xFFFFFFFF00000000ULL
) | ldl(src
);
2700 src
+= sizeof(uint32_t);
2710 void HELPER(stctg
)(uint32_t r1
, uint64_t a2
, uint32_t r3
)
2715 for (i
= r1
;; i
= (i
+ 1) % 16) {
2716 stq(dest
, env
->cregs
[i
]);
2717 dest
+= sizeof(uint64_t);
2725 void HELPER(stctl
)(uint32_t r1
, uint64_t a2
, uint32_t r3
)
2730 for (i
= r1
;; i
= (i
+ 1) % 16) {
2731 stl(dest
, env
->cregs
[i
]);
2732 dest
+= sizeof(uint32_t);
2740 uint32_t HELPER(tprot
)(uint64_t a1
, uint64_t a2
)
2747 /* insert storage key extended */
2748 uint64_t HELPER(iske
)(uint64_t r2
)
2750 uint64_t addr
= get_address(0, 0, r2
);
2752 if (addr
> ram_size
) {
2756 /* XXX maybe use qemu's internal keys? */
2757 return env
->storage_keys
[addr
/ TARGET_PAGE_SIZE
];
2760 /* set storage key extended */
2761 void HELPER(sske
)(uint32_t r1
, uint64_t r2
)
2763 uint64_t addr
= get_address(0, 0, r2
);
2765 if (addr
> ram_size
) {
2769 env
->storage_keys
[addr
/ TARGET_PAGE_SIZE
] = r1
;
2772 /* reset reference bit extended */
2773 uint32_t HELPER(rrbe
)(uint32_t r1
, uint64_t r2
)
2775 if (r2
> ram_size
) {
2781 env
->storage_keys
[r2
/ TARGET_PAGE_SIZE
] &= ~SK_REFERENCED
;
2787 * 0 Reference bit zero; change bit zero
2788 * 1 Reference bit zero; change bit one
2789 * 2 Reference bit one; change bit zero
2790 * 3 Reference bit one; change bit one
2795 /* compare and swap and purge */
2796 uint32_t HELPER(csp
)(uint32_t r1
, uint32_t r2
)
2799 uint32_t o1
= env
->regs
[r1
];
2800 uint64_t a2
= get_address_31fix(r2
) & ~3ULL;
2801 uint32_t o2
= ldl(a2
);
2804 stl(a2
, env
->regs
[(r1
+ 1) & 15]);
2805 if (env
->regs
[r2
] & 0x3) {
2806 /* flush TLB / ALB */
2811 env
->regs
[r1
] = (env
->regs
[r1
] & 0xffffffff00000000ULL
) | o2
;
2818 static uint32_t mvc_asc(int64_t l
, uint64_t a1
, uint64_t mode1
, uint64_t a2
,
2821 target_ulong src
, dest
;
2822 int flags
, cc
= 0, i
;
2826 } else if (l
> 256) {
2832 if (mmu_translate(env
, a1
& TARGET_PAGE_MASK
, 1, mode1
, &dest
, &flags
)) {
2835 dest
|= a1
& ~TARGET_PAGE_MASK
;
2837 if (mmu_translate(env
, a2
& TARGET_PAGE_MASK
, 0, mode2
, &src
, &flags
)) {
2840 src
|= a2
& ~TARGET_PAGE_MASK
;
2842 /* XXX replace w/ memcpy */
2843 for (i
= 0; i
< l
; i
++) {
2844 /* XXX be more clever */
2845 if ((((dest
+ i
) & TARGET_PAGE_MASK
) != (dest
& TARGET_PAGE_MASK
)) ||
2846 (((src
+ i
) & TARGET_PAGE_MASK
) != (src
& TARGET_PAGE_MASK
))) {
2847 mvc_asc(l
- i
, a1
+ i
, mode1
, a2
+ i
, mode2
);
2850 stb_phys(dest
+ i
, ldub_phys(src
+ i
));
2856 uint32_t HELPER(mvcs
)(uint64_t l
, uint64_t a1
, uint64_t a2
)
2858 HELPER_LOG("%s: %16" PRIx64
" %16" PRIx64
" %16" PRIx64
"\n",
2859 __FUNCTION__
, l
, a1
, a2
);
2861 return mvc_asc(l
, a1
, PSW_ASC_SECONDARY
, a2
, PSW_ASC_PRIMARY
);
2864 uint32_t HELPER(mvcp
)(uint64_t l
, uint64_t a1
, uint64_t a2
)
2866 HELPER_LOG("%s: %16" PRIx64
" %16" PRIx64
" %16" PRIx64
"\n",
2867 __FUNCTION__
, l
, a1
, a2
);
2869 return mvc_asc(l
, a1
, PSW_ASC_PRIMARY
, a2
, PSW_ASC_SECONDARY
);
2872 uint32_t HELPER(sigp
)(uint64_t order_code
, uint32_t r1
, uint64_t cpu_addr
)
2876 HELPER_LOG("%s: %016" PRIx64
" %08x %016" PRIx64
"\n",
2877 __FUNCTION__
, order_code
, r1
, cpu_addr
);
2879 /* Remember: Use "R1 or R1+1, whichever is the odd-numbered register"
2880 as parameter (input). Status (output) is always R1. */
2882 switch (order_code
) {
2887 /* enumerate CPU status */
2889 /* XXX implement when SMP comes */
2892 env
->regs
[r1
] &= 0xffffffff00000000ULL
;
2897 fprintf(stderr
, "XXX unknown sigp: 0x%" PRIx64
"\n", order_code
);
2904 void HELPER(sacf
)(uint64_t a1
)
2906 HELPER_LOG("%s: %16" PRIx64
"\n", __FUNCTION__
, a1
);
2908 switch (a1
& 0xf00) {
2910 env
->psw
.mask
&= ~PSW_MASK_ASC
;
2911 env
->psw
.mask
|= PSW_ASC_PRIMARY
;
2914 env
->psw
.mask
&= ~PSW_MASK_ASC
;
2915 env
->psw
.mask
|= PSW_ASC_SECONDARY
;
2918 env
->psw
.mask
&= ~PSW_MASK_ASC
;
2919 env
->psw
.mask
|= PSW_ASC_HOME
;
2922 qemu_log("unknown sacf mode: %" PRIx64
"\n", a1
);
2923 program_interrupt(env
, PGM_SPECIFICATION
, 2);
2928 /* invalidate pte */
2929 void HELPER(ipte
)(uint64_t pte_addr
, uint64_t vaddr
)
2931 uint64_t page
= vaddr
& TARGET_PAGE_MASK
;
2934 /* XXX broadcast to other CPUs */
2936 /* XXX Linux is nice enough to give us the exact pte address.
2937 According to spec we'd have to find it out ourselves */
2938 /* XXX Linux is fine with overwriting the pte, the spec requires
2939 us to only set the invalid bit */
2940 stq_phys(pte_addr
, pte
| _PAGE_INVALID
);
2942 /* XXX we exploit the fact that Linux passes the exact virtual
2943 address here - it's not obliged to! */
2944 tlb_flush_page(env
, page
);
2947 /* flush local tlb */
2948 void HELPER(ptlb
)(void)
2953 /* store using real address */
2954 void HELPER(stura
)(uint64_t addr
, uint32_t v1
)
2956 stw_phys(get_address(0, 0, addr
), v1
);
2959 /* load real address */
2960 uint32_t HELPER(lra
)(uint64_t addr
, uint32_t r1
)
2963 int old_exc
= env
->exception_index
;
2964 uint64_t asc
= env
->psw
.mask
& PSW_MASK_ASC
;
2968 /* XXX incomplete - has more corner cases */
2969 if (!(env
->psw
.mask
& PSW_MASK_64
) && (addr
>> 32)) {
2970 program_interrupt(env
, PGM_SPECIAL_OP
, 2);
2973 env
->exception_index
= old_exc
;
2974 if (mmu_translate(env
, addr
, 0, asc
, &ret
, &flags
)) {
2977 if (env
->exception_index
== EXCP_PGM
) {
2978 ret
= env
->int_pgm_code
| 0x80000000;
2980 ret
|= addr
& ~TARGET_PAGE_MASK
;
2982 env
->exception_index
= old_exc
;
2984 if (!(env
->psw
.mask
& PSW_MASK_64
)) {
2985 env
->regs
[r1
] = (env
->regs
[r1
] & 0xffffffff00000000ULL
) | (ret
& 0xffffffffULL
);
2987 env
->regs
[r1
] = ret
;