2 /*--------------------------------------------------------------------*/
3 /*--- begin guest_mips_toIR.c ---*/
4 /*--------------------------------------------------------------------*/
7 This file is part of Valgrind, a dynamic binary instrumentation
10 Copyright (C) 2010-2017 RT-RK
11 mips-valgrind@rt-rk.com
13 This program is free software; you can redistribute it and/or
14 modify it under the terms of the GNU General Public License as
15 published by the Free Software Foundation; either version 2 of the
16 License, or (at your option) any later version.
18 This program is distributed in the hope that it will be useful, but
19 WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 General Public License for more details.
23 You should have received a copy of the GNU General Public License
24 along with this program; if not, see <http://www.gnu.org/licenses/>.
26 The GNU General Public License is contained in the file COPYING.
29 /* Translates MIPS code to IR. */
31 #include "libvex_basictypes.h"
32 #include "libvex_ir.h"
34 #include "libvex_guest_mips32.h"
35 #include "libvex_guest_mips64.h"
37 #include "main_util.h"
38 #include "main_globals.h"
39 #include "guest_generic_bb_to_IR.h"
40 #include "guest_mips_defs.h"
41 #include "mips_defs.h"
43 /*------------------------------------------------------------*/
45 /*------------------------------------------------------------*/
47 /* These are set at the start of the translation of a instruction, so
48 that we don't have to pass them around endlessly. CONST means does
49 not change during translation of the instruction. */
51 /* CONST: what is the host's endianness? This has to do with float vs
52 double register accesses on VFP, but it's complex and not properly
54 static VexEndness host_endness
;
56 /* Pointer to the guest code area. */
57 const UChar
*guest_code
;
59 /* CONST: The guest address for the instruction currently being
61 #if defined(VGP_mips32_linux)
62 static Addr32 guest_PC_curr_instr
;
64 static Addr64 guest_PC_curr_instr
;
67 /* MOD: The IRSB* into which we're generating code. */
70 /* Is our guest binary 32 or 64bit? Set at each call to
71 disInstr_MIPS below. */
74 /* CPU has FPU and 32 dbl. prec. FP registers. */
75 static Bool fp_mode64
= False
;
77 /* FPU works in FRE mode */
78 static Bool fp_mode64_fre
= False
;
80 /* CPU has MSA unit */
81 static Bool has_msa
= False
;
83 /* Define 1.0 in single and double precision. */
84 #define ONE_SINGLE 0x3F800000
85 #define ONE_DOUBLE 0x3FF0000000000000ULL
87 /*------------------------------------------------------------*/
88 /*--- Helper bits and pieces for deconstructing the ---*/
89 /*--- mips insn stream. ---*/
90 /*------------------------------------------------------------*/
92 /* ---------------- Integer registers ---------------- */
94 static UInt
integerGuestRegOffset(UInt iregNo
)
96 /* Do we care about endianness here? We do if sub-parts of integer
97 registers are accessed, but I don't think that ever happens on
104 ret
= offsetof(VexGuestMIPS32State
, guest_r0
);
108 ret
= offsetof(VexGuestMIPS32State
, guest_r1
);
112 ret
= offsetof(VexGuestMIPS32State
, guest_r2
);
116 ret
= offsetof(VexGuestMIPS32State
, guest_r3
);
120 ret
= offsetof(VexGuestMIPS32State
, guest_r4
);
124 ret
= offsetof(VexGuestMIPS32State
, guest_r5
);
128 ret
= offsetof(VexGuestMIPS32State
, guest_r6
);
132 ret
= offsetof(VexGuestMIPS32State
, guest_r7
);
136 ret
= offsetof(VexGuestMIPS32State
, guest_r8
);
140 ret
= offsetof(VexGuestMIPS32State
, guest_r9
);
144 ret
= offsetof(VexGuestMIPS32State
, guest_r10
);
148 ret
= offsetof(VexGuestMIPS32State
, guest_r11
);
152 ret
= offsetof(VexGuestMIPS32State
, guest_r12
);
156 ret
= offsetof(VexGuestMIPS32State
, guest_r13
);
160 ret
= offsetof(VexGuestMIPS32State
, guest_r14
);
164 ret
= offsetof(VexGuestMIPS32State
, guest_r15
);
168 ret
= offsetof(VexGuestMIPS32State
, guest_r16
);
172 ret
= offsetof(VexGuestMIPS32State
, guest_r17
);
176 ret
= offsetof(VexGuestMIPS32State
, guest_r18
);
180 ret
= offsetof(VexGuestMIPS32State
, guest_r19
);
184 ret
= offsetof(VexGuestMIPS32State
, guest_r20
);
188 ret
= offsetof(VexGuestMIPS32State
, guest_r21
);
192 ret
= offsetof(VexGuestMIPS32State
, guest_r22
);
196 ret
= offsetof(VexGuestMIPS32State
, guest_r23
);
200 ret
= offsetof(VexGuestMIPS32State
, guest_r24
);
204 ret
= offsetof(VexGuestMIPS32State
, guest_r25
);
208 ret
= offsetof(VexGuestMIPS32State
, guest_r26
);
212 ret
= offsetof(VexGuestMIPS32State
, guest_r27
);
216 ret
= offsetof(VexGuestMIPS32State
, guest_r28
);
220 ret
= offsetof(VexGuestMIPS32State
, guest_r29
);
224 ret
= offsetof(VexGuestMIPS32State
, guest_r30
);
228 ret
= offsetof(VexGuestMIPS32State
, guest_r31
);
238 ret
= offsetof(VexGuestMIPS64State
, guest_r0
);
242 ret
= offsetof(VexGuestMIPS64State
, guest_r1
);
246 ret
= offsetof(VexGuestMIPS64State
, guest_r2
);
250 ret
= offsetof(VexGuestMIPS64State
, guest_r3
);
254 ret
= offsetof(VexGuestMIPS64State
, guest_r4
);
258 ret
= offsetof(VexGuestMIPS64State
, guest_r5
);
262 ret
= offsetof(VexGuestMIPS64State
, guest_r6
);
266 ret
= offsetof(VexGuestMIPS64State
, guest_r7
);
270 ret
= offsetof(VexGuestMIPS64State
, guest_r8
);
274 ret
= offsetof(VexGuestMIPS64State
, guest_r9
);
278 ret
= offsetof(VexGuestMIPS64State
, guest_r10
);
282 ret
= offsetof(VexGuestMIPS64State
, guest_r11
);
286 ret
= offsetof(VexGuestMIPS64State
, guest_r12
);
290 ret
= offsetof(VexGuestMIPS64State
, guest_r13
);
294 ret
= offsetof(VexGuestMIPS64State
, guest_r14
);
298 ret
= offsetof(VexGuestMIPS64State
, guest_r15
);
302 ret
= offsetof(VexGuestMIPS64State
, guest_r16
);
306 ret
= offsetof(VexGuestMIPS64State
, guest_r17
);
310 ret
= offsetof(VexGuestMIPS64State
, guest_r18
);
314 ret
= offsetof(VexGuestMIPS64State
, guest_r19
);
318 ret
= offsetof(VexGuestMIPS64State
, guest_r20
);
322 ret
= offsetof(VexGuestMIPS64State
, guest_r21
);
326 ret
= offsetof(VexGuestMIPS64State
, guest_r22
);
330 ret
= offsetof(VexGuestMIPS64State
, guest_r23
);
334 ret
= offsetof(VexGuestMIPS64State
, guest_r24
);
338 ret
= offsetof(VexGuestMIPS64State
, guest_r25
);
342 ret
= offsetof(VexGuestMIPS64State
, guest_r26
);
346 ret
= offsetof(VexGuestMIPS64State
, guest_r27
);
350 ret
= offsetof(VexGuestMIPS64State
, guest_r28
);
354 ret
= offsetof(VexGuestMIPS64State
, guest_r29
);
358 ret
= offsetof(VexGuestMIPS64State
, guest_r30
);
362 ret
= offsetof(VexGuestMIPS64State
, guest_r31
);
/* Offset of the program counter in the guest state for the current
   compile-time target (32-bit vs 64-bit MIPS). */
#if defined(VGP_mips32_linux)
#define OFFB_PC offsetof(VexGuestMIPS32State, guest_PC)
#else
#define OFFB_PC offsetof(VexGuestMIPS64State, guest_PC)
#endif
379 /* ---------------- Floating point registers ---------------- */
381 static UInt
floatGuestRegOffset(UInt fregNo
)
383 vassert(fregNo
< 32);
389 ret
= offsetof(VexGuestMIPS32State
, guest_f0
);
393 ret
= offsetof(VexGuestMIPS32State
, guest_f1
);
397 ret
= offsetof(VexGuestMIPS32State
, guest_f2
);
401 ret
= offsetof(VexGuestMIPS32State
, guest_f3
);
405 ret
= offsetof(VexGuestMIPS32State
, guest_f4
);
409 ret
= offsetof(VexGuestMIPS32State
, guest_f5
);
413 ret
= offsetof(VexGuestMIPS32State
, guest_f6
);
417 ret
= offsetof(VexGuestMIPS32State
, guest_f7
);
421 ret
= offsetof(VexGuestMIPS32State
, guest_f8
);
425 ret
= offsetof(VexGuestMIPS32State
, guest_f9
);
429 ret
= offsetof(VexGuestMIPS32State
, guest_f10
);
433 ret
= offsetof(VexGuestMIPS32State
, guest_f11
);
437 ret
= offsetof(VexGuestMIPS32State
, guest_f12
);
441 ret
= offsetof(VexGuestMIPS32State
, guest_f13
);
445 ret
= offsetof(VexGuestMIPS32State
, guest_f14
);
449 ret
= offsetof(VexGuestMIPS32State
, guest_f15
);
453 ret
= offsetof(VexGuestMIPS32State
, guest_f16
);
457 ret
= offsetof(VexGuestMIPS32State
, guest_f17
);
461 ret
= offsetof(VexGuestMIPS32State
, guest_f18
);
465 ret
= offsetof(VexGuestMIPS32State
, guest_f19
);
469 ret
= offsetof(VexGuestMIPS32State
, guest_f20
);
473 ret
= offsetof(VexGuestMIPS32State
, guest_f21
);
477 ret
= offsetof(VexGuestMIPS32State
, guest_f22
);
481 ret
= offsetof(VexGuestMIPS32State
, guest_f23
);
485 ret
= offsetof(VexGuestMIPS32State
, guest_f24
);
489 ret
= offsetof(VexGuestMIPS32State
, guest_f25
);
493 ret
= offsetof(VexGuestMIPS32State
, guest_f26
);
497 ret
= offsetof(VexGuestMIPS32State
, guest_f27
);
501 ret
= offsetof(VexGuestMIPS32State
, guest_f28
);
505 ret
= offsetof(VexGuestMIPS32State
, guest_f29
);
509 ret
= offsetof(VexGuestMIPS32State
, guest_f30
);
513 ret
= offsetof(VexGuestMIPS32State
, guest_f31
);
523 ret
= offsetof(VexGuestMIPS64State
, guest_f0
);
527 ret
= offsetof(VexGuestMIPS64State
, guest_f1
);
531 ret
= offsetof(VexGuestMIPS64State
, guest_f2
);
535 ret
= offsetof(VexGuestMIPS64State
, guest_f3
);
539 ret
= offsetof(VexGuestMIPS64State
, guest_f4
);
543 ret
= offsetof(VexGuestMIPS64State
, guest_f5
);
547 ret
= offsetof(VexGuestMIPS64State
, guest_f6
);
551 ret
= offsetof(VexGuestMIPS64State
, guest_f7
);
555 ret
= offsetof(VexGuestMIPS64State
, guest_f8
);
559 ret
= offsetof(VexGuestMIPS64State
, guest_f9
);
563 ret
= offsetof(VexGuestMIPS64State
, guest_f10
);
567 ret
= offsetof(VexGuestMIPS64State
, guest_f11
);
571 ret
= offsetof(VexGuestMIPS64State
, guest_f12
);
575 ret
= offsetof(VexGuestMIPS64State
, guest_f13
);
579 ret
= offsetof(VexGuestMIPS64State
, guest_f14
);
583 ret
= offsetof(VexGuestMIPS64State
, guest_f15
);
587 ret
= offsetof(VexGuestMIPS64State
, guest_f16
);
591 ret
= offsetof(VexGuestMIPS64State
, guest_f17
);
595 ret
= offsetof(VexGuestMIPS64State
, guest_f18
);
599 ret
= offsetof(VexGuestMIPS64State
, guest_f19
);
603 ret
= offsetof(VexGuestMIPS64State
, guest_f20
);
607 ret
= offsetof(VexGuestMIPS64State
, guest_f21
);
611 ret
= offsetof(VexGuestMIPS64State
, guest_f22
);
615 ret
= offsetof(VexGuestMIPS64State
, guest_f23
);
619 ret
= offsetof(VexGuestMIPS64State
, guest_f24
);
623 ret
= offsetof(VexGuestMIPS64State
, guest_f25
);
627 ret
= offsetof(VexGuestMIPS64State
, guest_f26
);
631 ret
= offsetof(VexGuestMIPS64State
, guest_f27
);
635 ret
= offsetof(VexGuestMIPS64State
, guest_f28
);
639 ret
= offsetof(VexGuestMIPS64State
, guest_f29
);
643 ret
= offsetof(VexGuestMIPS64State
, guest_f30
);
647 ret
= offsetof(VexGuestMIPS64State
, guest_f31
);
658 /* ---------------- MIPS32 DSP ASE(r2) accumulators ---------------- */
660 UInt
accumulatorGuestRegOffset(UInt acNo
)
668 ret
= offsetof(VexGuestMIPS32State
, guest_ac0
);
672 ret
= offsetof(VexGuestMIPS32State
, guest_ac1
);
676 ret
= offsetof(VexGuestMIPS32State
, guest_ac2
);
680 ret
= offsetof(VexGuestMIPS32State
, guest_ac3
);
691 /* ---------------- MIPS32 MSA registers ---------------- */
693 static UInt
msaGuestRegOffset(UInt msaRegNo
)
695 vassert(msaRegNo
<= 31);
701 ret
= offsetof(VexGuestMIPS64State
, guest_w0
);
705 ret
= offsetof(VexGuestMIPS64State
, guest_w1
);
709 ret
= offsetof(VexGuestMIPS64State
, guest_w2
);
713 ret
= offsetof(VexGuestMIPS64State
, guest_w3
);
717 ret
= offsetof(VexGuestMIPS64State
, guest_w4
);
721 ret
= offsetof(VexGuestMIPS64State
, guest_w5
);
725 ret
= offsetof(VexGuestMIPS64State
, guest_w6
);
729 ret
= offsetof(VexGuestMIPS64State
, guest_w7
);
733 ret
= offsetof(VexGuestMIPS64State
, guest_w8
);
737 ret
= offsetof(VexGuestMIPS64State
, guest_w9
);
741 ret
= offsetof(VexGuestMIPS64State
, guest_w10
);
745 ret
= offsetof(VexGuestMIPS64State
, guest_w11
);
749 ret
= offsetof(VexGuestMIPS64State
, guest_w12
);
753 ret
= offsetof(VexGuestMIPS64State
, guest_w13
);
757 ret
= offsetof(VexGuestMIPS64State
, guest_w14
);
761 ret
= offsetof(VexGuestMIPS64State
, guest_w15
);
765 ret
= offsetof(VexGuestMIPS64State
, guest_w16
);
769 ret
= offsetof(VexGuestMIPS64State
, guest_w17
);
773 ret
= offsetof(VexGuestMIPS64State
, guest_w18
);
777 ret
= offsetof(VexGuestMIPS64State
, guest_w19
);
781 ret
= offsetof(VexGuestMIPS64State
, guest_w20
);
785 ret
= offsetof(VexGuestMIPS64State
, guest_w21
);
789 ret
= offsetof(VexGuestMIPS64State
, guest_w22
);
793 ret
= offsetof(VexGuestMIPS64State
, guest_w23
);
797 ret
= offsetof(VexGuestMIPS64State
, guest_w24
);
801 ret
= offsetof(VexGuestMIPS64State
, guest_w25
);
805 ret
= offsetof(VexGuestMIPS64State
, guest_w26
);
809 ret
= offsetof(VexGuestMIPS64State
, guest_w27
);
813 ret
= offsetof(VexGuestMIPS64State
, guest_w28
);
817 ret
= offsetof(VexGuestMIPS64State
, guest_w29
);
821 ret
= offsetof(VexGuestMIPS64State
, guest_w30
);
825 ret
= offsetof(VexGuestMIPS64State
, guest_w31
);
835 ret
= offsetof(VexGuestMIPS32State
, guest_w0
);
839 ret
= offsetof(VexGuestMIPS32State
, guest_w1
);
843 ret
= offsetof(VexGuestMIPS32State
, guest_w2
);
847 ret
= offsetof(VexGuestMIPS32State
, guest_w3
);
851 ret
= offsetof(VexGuestMIPS32State
, guest_w4
);
855 ret
= offsetof(VexGuestMIPS32State
, guest_w5
);
859 ret
= offsetof(VexGuestMIPS32State
, guest_w6
);
863 ret
= offsetof(VexGuestMIPS32State
, guest_w7
);
867 ret
= offsetof(VexGuestMIPS32State
, guest_w8
);
871 ret
= offsetof(VexGuestMIPS32State
, guest_w9
);
875 ret
= offsetof(VexGuestMIPS32State
, guest_w10
);
879 ret
= offsetof(VexGuestMIPS32State
, guest_w11
);
883 ret
= offsetof(VexGuestMIPS32State
, guest_w12
);
887 ret
= offsetof(VexGuestMIPS32State
, guest_w13
);
891 ret
= offsetof(VexGuestMIPS32State
, guest_w14
);
895 ret
= offsetof(VexGuestMIPS32State
, guest_w15
);
899 ret
= offsetof(VexGuestMIPS32State
, guest_w16
);
903 ret
= offsetof(VexGuestMIPS32State
, guest_w17
);
907 ret
= offsetof(VexGuestMIPS32State
, guest_w18
);
911 ret
= offsetof(VexGuestMIPS32State
, guest_w19
);
915 ret
= offsetof(VexGuestMIPS32State
, guest_w20
);
919 ret
= offsetof(VexGuestMIPS32State
, guest_w21
);
923 ret
= offsetof(VexGuestMIPS32State
, guest_w22
);
927 ret
= offsetof(VexGuestMIPS32State
, guest_w23
);
931 ret
= offsetof(VexGuestMIPS32State
, guest_w24
);
935 ret
= offsetof(VexGuestMIPS32State
, guest_w25
);
939 ret
= offsetof(VexGuestMIPS32State
, guest_w26
);
943 ret
= offsetof(VexGuestMIPS32State
, guest_w27
);
947 ret
= offsetof(VexGuestMIPS32State
, guest_w28
);
951 ret
= offsetof(VexGuestMIPS32State
, guest_w29
);
955 ret
= offsetof(VexGuestMIPS32State
, guest_w30
);
959 ret
= offsetof(VexGuestMIPS32State
, guest_w31
);
972 /* Do a endian load of a 32-bit word, regardless of the endianness of the
974 static inline UInt
getUInt(const UChar
* p
)
977 #if defined (_MIPSEL)
982 #elif defined (_MIPSEB)
/* Assemble small bit-field constants (opcode / function encodings)
   from individual bit values, most-significant bit first. */
#define BITS2(_b1,_b0) \
   (((_b1) << 1) | (_b0))

#define BITS3(_b2,_b1,_b0) \
   (((_b2) << 2) | ((_b1) << 1) | (_b0))

#define BITS4(_b3,_b2,_b1,_b0) \
   (((_b3) << 3) | ((_b2) << 2) | ((_b1) << 1) | (_b0))

#define BITS5(_b4,_b3,_b2,_b1,_b0) \
   (((_b4) << 4) | BITS4((_b3),(_b2),(_b1),(_b0)))

#define BITS6(_b5,_b4,_b3,_b2,_b1,_b0) \
   ((BITS2((_b5),(_b4)) << 4) \
    | BITS4((_b3),(_b2),(_b1),(_b0)))

#define BITS8(_b7,_b6,_b5,_b4,_b3,_b2,_b1,_b0) \
   ((BITS4((_b7),(_b6),(_b5),(_b4)) << 4) \
    | BITS4((_b3),(_b2),(_b1),(_b0)))
/* Common IR-emission patterns shared by many instruction cases.
   These expand in contexts where t1..t4, rs/rt/rd, ws, regRs/regRt,
   imm, sa, cc, ty, dres etc. are in scope. */

/* Effective address = GPR[rs] + sign-extended 16-bit immediate. */
#define LOAD_STORE_PATTERN \
   t1 = newTemp(mode64 ? Ity_I64 : Ity_I32); \
   if (!mode64) \
      assign(t1, binop(Iop_Add32, getIReg(rs), \
                       mkU32(extend_s_16to32(imm)))); \
   else \
      assign(t1, binop(Iop_Add64, getIReg(rs), \
                       mkU64(extend_s_16to64(imm))));

/* Effective address = GPR[ws] + sign-extended 10-bit immediate (MSA). */
#define LOAD_STORE_PATTERN_MSA(imm) \
   t1 = newTemp(mode64 ? Ity_I64 : Ity_I32); \
   if (!mode64) \
      assign(t1, binop(Iop_Add32, getIReg(ws), \
                       mkU32(extend_s_10to32(imm)))); \
   else \
      assign(t1, binop(Iop_Add64, getIReg(ws), \
                       mkU64(extend_s_10to64(imm))));

/* Effective address = GPR[regRs] + GPR[regRt] (indexed load/store). */
#define LOADX_STORE_PATTERN \
   t1 = newTemp(mode64 ? Ity_I64 : Ity_I32); \
   if (!mode64) \
      assign(t1, binop(Iop_Add32, getIReg(regRs), getIReg(regRt))); \
   else \
      assign(t1, binop(Iop_Add64, getIReg(regRs), getIReg(regRt)));

/* t2 = word-aligned address, t4 = byte offset within the word (LWL/LWR
   style unaligned access, 64-bit address, 32-bit datum). */
#define LWX_SWX_PATTERN64 \
   t2 = newTemp(Ity_I64); \
   assign(t2, binop(Iop_And64, mkexpr(t1), mkU64(0xFFFFFFFFFFFFFFFCULL))); \
   t4 = newTemp(Ity_I32); \
   assign(t4, mkNarrowTo32( ty, binop(Iop_And64, \
                                      mkexpr(t1), mkU64(0x3))));

/* As above, but doubleword-aligned (LDL/LDR style). */
#define LWX_SWX_PATTERN64_1 \
   t2 = newTemp(Ity_I64); \
   assign(t2, binop(Iop_And64, mkexpr(t1), mkU64(0xFFFFFFFFFFFFFFF8ULL))); \
   t4 = newTemp(Ity_I64); \
   assign(t4, binop(Iop_And64, mkexpr(t1), mkU64(0x7)));

/* 32-bit-address variant of LWX_SWX_PATTERN64. */
#define LWX_SWX_PATTERN \
   t2 = newTemp(Ity_I32); \
   assign(t2, binop(Iop_And32, mkexpr(t1), mkU32(0xFFFFFFFC))); \
   t4 = newTemp(Ity_I32); \
   assign(t4, binop(Iop_And32, mkexpr(t1), mkU32(0x00000003)))

/* Variable shift: rd = rt <op> (rs & 0x1F). */
#define SXXV_PATTERN(op) \
   putIReg(rd, binop(op, \
                     getIReg(rt), \
                     unop(Iop_32to8, \
                          binop(Iop_And32, \
                                getIReg(rs), \
                                mkU32(0x0000001F)))))

/* Variable shift on 64-bit guest: operate on the low 32 bits and
   sign-extend the result back to the register width. */
#define SXXV_PATTERN64(op) \
   putIReg(rd, mkWidenFrom32(ty, binop(op, \
                     mkNarrowTo32(ty, getIReg(rt)), \
                     unop(Iop_32to8, \
                          binop(Iop_And32, \
                                mkNarrowTo32(ty, getIReg(rs)), \
                                mkU32(0x0000001F)))), True))

/* Immediate shift: rd = rt <op> sa. */
#define SXX_PATTERN(op) \
   putIReg(rd, binop(op, getIReg(rt), mkU8(sa)));

/* Three-register ALU op: rd = rs <op> rt. */
#define ALU_PATTERN(op) \
   putIReg(rd, binop(op, getIReg(rs), getIReg(rt)));

/* Immediate ALU op: rt = rs <op> imm. */
#define ALUI_PATTERN(op) \
   putIReg(rt, binop(op, getIReg(rs), mkU32(imm)));

#define ALUI_PATTERN64(op) \
   putIReg(rt, binop(op, getIReg(rs), mkU64(imm)));

/* 32-bit ALU op on a 64-bit guest, result sign-extended to 64 bits. */
#define ALU_PATTERN64(op) \
   putIReg(rd, mkWidenFrom32(ty, binop(op, \
                                       mkNarrowTo32(ty, getIReg(rs)), \
                                       mkNarrowTo32(ty, getIReg(rt))), True));

/* t3 = FP condition-code bit cc of FCSR (bit 23 for cc 0, 24+cc else). */
#define FP_CONDITIONAL_CODE \
   t3 = newTemp(Ity_I32); \
   assign(t3, binop(Iop_And32, \
                    IRExpr_ITE( binop(Iop_CmpEQ32, mkU32(cc), mkU32(0)), \
                                binop(Iop_Shr32, getFCSR(), mkU8(23)), \
                                binop(Iop_Shr32, getFCSR(), mkU8(24+cc))), \
                    mkU32(0x1)));

/* Raise SIGILL for an undecodable instruction.  (Name keeps the
   historical spelling used throughout this file.) */
#define ILLEGAL_INSTRUCTON \
   putPC(mkU32(guest_PC_curr_instr + 4)); \
   dres->jk_StopHere = Ijk_SigILL; \
   dres->whatNext = Dis_StopHere;

/* Sentinel for an invalid LL/SC reservation address. */
#define LLADDR_INVALID \
   (mode64 ? mkU64(0xFFFFFFFFFFFFFFFFULL) : mkU32(0xFFFFFFFF))
1114 /*------------------------------------------------------------*/
1115 /*--- Field helpers ---*/
1116 /*------------------------------------------------------------*/
1118 static Bool
branch_or_jump(const UChar
* addr
)
1121 UInt cins
= getUInt(addr
);
1123 UInt opcode
= get_opcode(cins
);
1124 UInt rt
= get_rt(cins
);
1125 UInt function
= get_function(cins
);
1127 /* bgtz, blez, bne, beq, jal */
1128 if (opcode
== 0x07 || opcode
== 0x06 || opcode
== 0x05 || opcode
== 0x04
1129 || opcode
== 0x03 || opcode
== 0x02) {
1134 if (opcode
== 0x01 && rt
== 0x01) {
1139 if (opcode
== 0x01 && rt
== 0x11) {
1144 if (opcode
== 0x01 && rt
== 0x10) {
1149 if (opcode
== 0x01 && rt
== 0x00) {
1154 if (opcode
== 0x00 && function
== 0x09) {
1159 if (opcode
== 0x00 && function
== 0x08) {
1163 if (opcode
== 0x11) {
1165 fmt
= get_fmt(cins
);
1200 if (opcode
== 0x01 && rt
== 0x1c) {
1204 /* Cavium Specific instructions. */
1205 if (opcode
== 0x32 || opcode
== 0x3A || opcode
== 0x36 || opcode
== 0x3E) {
1206 /* BBIT0, BBIT1, BBIT032, BBIT132 */
1213 static Bool
is_Branch_or_Jump_and_Link(const UChar
* addr
)
1215 UInt cins
= getUInt(addr
);
1217 UInt opcode
= get_opcode(cins
);
1218 UInt rt
= get_rt(cins
);
1219 UInt function
= get_function(cins
);
1222 if (opcode
== 0x02) {
1226 /* bgezal or bal(r6) */
1227 if (opcode
== 0x01 && rt
== 0x11) {
1232 if (opcode
== 0x01 && rt
== 0x10) {
1237 if (opcode
== 0x00 && function
== 0x09) {
1244 static Bool
branch_or_link_likely(const UChar
* addr
)
1246 UInt cins
= getUInt(addr
);
1247 UInt opcode
= get_opcode(cins
);
1248 UInt rt
= get_rt(cins
);
1250 /* bgtzl, blezl, bnel, beql */
1251 if (opcode
== 0x17 || opcode
== 0x16 || opcode
== 0x15 || opcode
== 0x14)
1255 if (opcode
== 0x01 && rt
== 0x03)
1259 if (opcode
== 0x01 && rt
== 0x13)
1263 if (opcode
== 0x01 && rt
== 0x12)
1267 if (opcode
== 0x01 && rt
== 0x02)
1273 /*------------------------------------------------------------*/
1274 /*--- Helper bits and pieces for creating IR fragments. ---*/
1275 /*------------------------------------------------------------*/
1277 /* Generate an expression for SRC rotated right by ROT. */
1278 static IRExpr
*genROR32(IRExpr
* src
, Int rot
)
1280 vassert(rot
>= 0 && rot
< 32);
1285 return binop(Iop_Or32
, binop(Iop_Shl32
, src
, mkU8(32 - rot
)),
1286 binop(Iop_Shr32
, src
, mkU8(rot
)));
1289 static IRExpr
*genRORV32(IRExpr
* src
, IRExpr
* rs
)
1291 IRTemp t0
= newTemp(Ity_I8
);
1292 IRTemp t1
= newTemp(Ity_I8
);
1294 assign(t0
, unop(Iop_32to8
, binop(Iop_And32
, rs
, mkU32(0x0000001F))));
1295 assign(t1
, binop(Iop_Sub8
, mkU8(32), mkexpr(t0
)));
1296 return binop(Iop_Or32
, binop(Iop_Shl32
, src
, mkexpr(t1
)),
1297 binop(Iop_Shr32
, src
, mkexpr(t0
)));
1300 static void jmp_lit32 ( /*MOD*/ DisResult
* dres
, IRJumpKind kind
, Addr32 d32
)
1302 vassert(dres
->whatNext
== Dis_Continue
);
1303 vassert(dres
->len
== 0);
1304 vassert(dres
->jk_StopHere
== Ijk_INVALID
);
1305 dres
->whatNext
= Dis_StopHere
;
1306 dres
->jk_StopHere
= kind
;
1307 stmt( IRStmt_Put( OFFB_PC
, mkU32(d32
) ) );
1310 static void jmp_lit64 ( /*MOD*/ DisResult
* dres
, IRJumpKind kind
, Addr64 d64
)
1312 vassert(dres
->whatNext
== Dis_Continue
);
1313 vassert(dres
->len
== 0);
1314 vassert(dres
->jk_StopHere
== Ijk_INVALID
);
1315 dres
->whatNext
= Dis_StopHere
;
1316 dres
->jk_StopHere
= kind
;
1317 stmt(IRStmt_Put(OFFB_PC
, mkU64(d64
)));
1320 /* Get value from accumulator (helper function for MIPS32 DSP ASE instructions).
1321 This function should be called before any other operation if widening
1322 multiplications are used. */
1323 IRExpr
*getAcc(UInt acNo
)
1327 return IRExpr_Get(accumulatorGuestRegOffset(acNo
), Ity_I64
);
1330 /* Get value from DSPControl register (helper function for MIPS32 DSP ASE
1332 IRExpr
*getDSPControl(void)
1335 return IRExpr_Get(offsetof(VexGuestMIPS32State
, guest_DSPControl
), Ity_I32
);
1338 /* Fetch a byte from the guest insn stream. */
1339 static UChar
getIByte(Int delta
)
1341 return guest_code
[delta
];
1344 IRExpr
*getIReg(UInt iregNo
)
1347 return mode64
? mkU64(0x0) : mkU32(0x0);
1349 IRType ty
= mode64
? Ity_I64
: Ity_I32
;
1350 vassert(iregNo
< 32);
1351 return IRExpr_Get(integerGuestRegOffset(iregNo
), ty
);
1355 static IRExpr
*getWReg(UInt wregNo
)
1357 vassert(wregNo
<= 31);
1358 return IRExpr_Get(msaGuestRegOffset(wregNo
), Ity_V128
);
1361 static IRExpr
*getHI(void)
1364 return IRExpr_Get(offsetof(VexGuestMIPS64State
, guest_HI
), Ity_I64
);
1366 return IRExpr_Get(offsetof(VexGuestMIPS32State
, guest_HI
), Ity_I32
);
1369 static IRExpr
*getLO(void)
1372 return IRExpr_Get(offsetof(VexGuestMIPS64State
, guest_LO
), Ity_I64
);
1374 return IRExpr_Get(offsetof(VexGuestMIPS32State
, guest_LO
), Ity_I32
);
1377 static IRExpr
*getFCSR(void)
1380 return IRExpr_Get(offsetof(VexGuestMIPS64State
, guest_FCSR
), Ity_I32
);
1382 return IRExpr_Get(offsetof(VexGuestMIPS32State
, guest_FCSR
), Ity_I32
);
1385 static IRExpr
*getLLaddr(void)
1388 return IRExpr_Get(offsetof(VexGuestMIPS64State
, guest_LLaddr
), Ity_I64
);
1390 return IRExpr_Get(offsetof(VexGuestMIPS32State
, guest_LLaddr
), Ity_I32
);
1393 static IRExpr
*getLLdata(void)
1396 return IRExpr_Get(offsetof(VexGuestMIPS64State
, guest_LLdata
), Ity_I64
);
1398 return IRExpr_Get(offsetof(VexGuestMIPS32State
, guest_LLdata
), Ity_I32
);
1401 static IRExpr
*getMSACSR(void)
1404 return IRExpr_Get(offsetof(VexGuestMIPS64State
, guest_MSACSR
), Ity_I32
);
1406 return IRExpr_Get(offsetof(VexGuestMIPS32State
, guest_MSACSR
), Ity_I32
);
1409 /* Get byte from register reg, byte pos from 0 to 3 (or 7 for MIPS64) . */
1410 static IRExpr
*getByteFromReg(UInt reg
, UInt byte_pos
)
1412 UInt pos
= byte_pos
* 8;
1415 return unop(Iop_64to8
, binop(Iop_And64
,
1416 binop(Iop_Shr64
, getIReg(reg
), mkU8(pos
)),
1419 return unop(Iop_32to8
, binop(Iop_And32
,
1420 binop(Iop_Shr32
, getIReg(reg
), mkU8(pos
)),
1424 static void putFCSR(IRExpr
* e
)
1427 stmt(IRStmt_Put(offsetof(VexGuestMIPS64State
, guest_FCSR
), e
));
1429 stmt(IRStmt_Put(offsetof(VexGuestMIPS32State
, guest_FCSR
), e
));
1432 static void putLLaddr(IRExpr
* e
)
1435 stmt(IRStmt_Put(offsetof(VexGuestMIPS64State
, guest_LLaddr
), e
));
1437 stmt(IRStmt_Put(offsetof(VexGuestMIPS32State
, guest_LLaddr
), e
));
1440 static void putLLdata(IRExpr
* e
)
1443 stmt(IRStmt_Put(offsetof(VexGuestMIPS64State
, guest_LLdata
), e
));
1445 stmt(IRStmt_Put(offsetof(VexGuestMIPS32State
, guest_LLdata
), e
));
1448 static void putMSACSR(IRExpr
* e
)
1451 stmt(IRStmt_Put(offsetof(VexGuestMIPS64State
, guest_MSACSR
), e
));
1453 stmt(IRStmt_Put(offsetof(VexGuestMIPS32State
, guest_MSACSR
), e
));
1456 /* fs - fpu source register number.
1457 inst - fpu instruction that needs to be executed.
1458 sz32 - size of source register.
1459 opN - number of operads:
1460 1 - unary operation.
1461 2 - binary operation. */
1462 static void calculateFCSR(UInt fs
, UInt ft
, UInt inst
, Bool sz32
, UInt opN
)
1465 IRTemp fcsr
= newTemp(Ity_I32
);
1467 /* IRExpr_GSPTR() => Need to pass pointer to guest state to helper. */
1469 d
= unsafeIRDirty_1_N(fcsr
, 0,
1470 "mips_dirtyhelper_calculate_FCSR_fp64",
1471 &mips_dirtyhelper_calculate_FCSR_fp64
,
1472 mkIRExprVec_4(IRExpr_GSPTR(),
1477 d
= unsafeIRDirty_1_N(fcsr
, 0,
1478 "mips_dirtyhelper_calculate_FCSR_fp32",
1479 &mips_dirtyhelper_calculate_FCSR_fp32
,
1480 mkIRExprVec_4(IRExpr_GSPTR(),
1485 if (opN
== 1) { /* Unary operation. */
1486 /* Declare we're reading guest state. */
1487 if (sz32
|| fp_mode64
)
1492 vex_bzero(&d
->fxState
, sizeof(d
->fxState
));
1494 d
->fxState
[0].fx
= Ifx_Read
; /* read */
1497 d
->fxState
[0].offset
= offsetof(VexGuestMIPS64State
, guest_FCSR
);
1499 d
->fxState
[0].offset
= offsetof(VexGuestMIPS32State
, guest_FCSR
);
1501 d
->fxState
[0].size
= sizeof(UInt
);
1502 d
->fxState
[1].fx
= Ifx_Read
; /* read */
1503 d
->fxState
[1].offset
= floatGuestRegOffset(fs
);
1504 d
->fxState
[1].size
= sizeof(ULong
);
1506 if (!(sz32
|| fp_mode64
)) {
1507 d
->fxState
[2].fx
= Ifx_Read
; /* read */
1508 d
->fxState
[2].offset
= floatGuestRegOffset(fs
+ 1);
1509 d
->fxState
[2].size
= sizeof(ULong
);
1511 } else if (opN
== 2) { /* Binary operation. */
1512 /* Declare we're reading guest state. */
1513 if (sz32
|| fp_mode64
)
1518 vex_bzero(&d
->fxState
, sizeof(d
->fxState
));
1520 d
->fxState
[0].fx
= Ifx_Read
; /* read */
1523 d
->fxState
[0].offset
= offsetof(VexGuestMIPS64State
, guest_FCSR
);
1525 d
->fxState
[0].offset
= offsetof(VexGuestMIPS32State
, guest_FCSR
);
1527 d
->fxState
[0].size
= sizeof(UInt
);
1528 d
->fxState
[1].fx
= Ifx_Read
; /* read */
1529 d
->fxState
[1].offset
= floatGuestRegOffset(fs
);
1530 d
->fxState
[1].size
= sizeof(ULong
);
1531 d
->fxState
[2].fx
= Ifx_Read
; /* read */
1532 d
->fxState
[2].offset
= floatGuestRegOffset(ft
);
1533 d
->fxState
[2].size
= sizeof(ULong
);
1535 if (!(sz32
|| fp_mode64
)) {
1536 d
->fxState
[3].fx
= Ifx_Read
; /* read */
1537 d
->fxState
[3].offset
= floatGuestRegOffset(fs
+ 1);
1538 d
->fxState
[3].size
= sizeof(ULong
);
1539 d
->fxState
[4].fx
= Ifx_Read
; /* read */
1540 d
->fxState
[4].offset
= floatGuestRegOffset(ft
+ 1);
1541 d
->fxState
[4].size
= sizeof(ULong
);
1545 stmt(IRStmt_Dirty(d
));
1547 putFCSR(mkexpr(fcsr
));
1550 /* ws, wt - source MSA register numbers.
1551 inst - MSA fp instruction that needs to be executed.
1552 opN - number of operads:
1553 1 - unary operation.
1554 2 - binary operation. */
1555 static void calculateMSACSR(UInt ws
, UInt wt
, UInt inst
, UInt opN
)
1558 IRTemp msacsr
= newTemp(Ity_I32
);
1559 /* IRExpr_BBPTR() => Need to pass pointer to guest state to helper. */
1560 d
= unsafeIRDirty_1_N(msacsr
, 0,
1561 "mips_dirtyhelper_calculate_MSACSR",
1562 &mips_dirtyhelper_calculate_MSACSR
,
1563 mkIRExprVec_4(IRExpr_GSPTR(),
1568 if (opN
== 1) { /* Unary operation. */
1569 /* Declare we're reading guest state. */
1571 vex_bzero(&d
->fxState
, sizeof(d
->fxState
));
1572 d
->fxState
[0].fx
= Ifx_Read
; /* read */
1575 d
->fxState
[0].offset
= offsetof(VexGuestMIPS64State
, guest_MSACSR
);
1577 d
->fxState
[0].offset
= offsetof(VexGuestMIPS32State
, guest_MSACSR
);
1579 d
->fxState
[0].size
= sizeof(UInt
);
1580 d
->fxState
[1].fx
= Ifx_Read
; /* read */
1581 d
->fxState
[1].offset
= msaGuestRegOffset(ws
);
1582 d
->fxState
[1].size
= sizeof(ULong
);
1583 } else if (opN
== 2) { /* Binary operation. */
1584 /* Declare we're reading guest state. */
1586 vex_bzero(&d
->fxState
, sizeof(d
->fxState
));
1587 d
->fxState
[0].fx
= Ifx_Read
; /* read */
1590 d
->fxState
[0].offset
= offsetof(VexGuestMIPS64State
, guest_MSACSR
);
1592 d
->fxState
[0].offset
= offsetof(VexGuestMIPS32State
, guest_MSACSR
);
1594 d
->fxState
[0].size
= sizeof(UInt
);
1595 d
->fxState
[1].fx
= Ifx_Read
; /* read */
1596 d
->fxState
[1].offset
= msaGuestRegOffset(ws
);
1597 d
->fxState
[1].size
= sizeof(ULong
);
1598 d
->fxState
[2].fx
= Ifx_Read
; /* read */
1599 d
->fxState
[2].offset
= msaGuestRegOffset(wt
);
1600 d
->fxState
[2].size
= sizeof(ULong
);
1603 stmt(IRStmt_Dirty(d
));
1604 putMSACSR(mkexpr(msacsr
));
1607 static IRExpr
*getULR(void)
1610 return IRExpr_Get(offsetof(VexGuestMIPS64State
, guest_ULR
), Ity_I64
);
1612 return IRExpr_Get(offsetof(VexGuestMIPS32State
, guest_ULR
), Ity_I32
);
1615 void putIReg(UInt archreg
, IRExpr
* e
)
1617 IRType ty
= mode64
? Ity_I64
: Ity_I32
;
1618 vassert(archreg
< 32);
1619 vassert(typeOfIRExpr(irsb
->tyenv
, e
) == ty
);
1622 stmt(IRStmt_Put(integerGuestRegOffset(archreg
), e
));
1625 static void putWReg(UInt wregNo
, IRExpr
* e
)
1627 vassert(wregNo
<= 31);
1628 vassert(typeOfIRExpr(irsb
->tyenv
, e
) == Ity_V128
);
1629 stmt(IRStmt_Put(msaGuestRegOffset(wregNo
), e
));
1630 stmt(IRStmt_Put(floatGuestRegOffset(wregNo
),
1631 unop(Iop_ReinterpI64asF64
, unop(Iop_V128to64
, e
))));
1634 IRExpr
*mkNarrowTo32(IRType ty
, IRExpr
* src
)
1636 vassert(ty
== Ity_I32
|| ty
== Ity_I64
);
1637 return ty
== Ity_I64
? unop(Iop_64to32
, src
) : src
;
1640 void putLO(IRExpr
* e
)
1643 stmt(IRStmt_Put(offsetof(VexGuestMIPS64State
, guest_LO
), e
));
1645 stmt(IRStmt_Put(offsetof(VexGuestMIPS32State
, guest_LO
), e
));
1646 /* Add value to lower 32 bits of ac0 to maintain compatibility between
1647 regular MIPS32 instruction set and MIPS DSP ASE. Keep higher 32bits
1649 IRTemp t_lo
= newTemp(Ity_I32
);
1650 IRTemp t_hi
= newTemp(Ity_I32
);
1652 assign(t_hi
, unop(Iop_64HIto32
, getAcc(0)));
1653 stmt(IRStmt_Put(accumulatorGuestRegOffset(0),
1654 binop(Iop_32HLto64
, mkexpr(t_hi
), mkexpr(t_lo
))));
1658 void putHI(IRExpr
* e
)
1661 stmt(IRStmt_Put(offsetof(VexGuestMIPS64State
, guest_HI
), e
));
1663 stmt(IRStmt_Put(offsetof(VexGuestMIPS32State
, guest_HI
), e
));
1664 /* Add value to higher 32 bits of ac0 to maintain compatibility between
1665 regular MIPS32 instruction set and MIPS DSP ASE. Keep lower 32bits
1667 IRTemp t_lo
= newTemp(Ity_I32
);
1668 IRTemp t_hi
= newTemp(Ity_I32
);
1670 assign(t_lo
, unop(Iop_64to32
, getAcc(0)));
1671 stmt(IRStmt_Put(accumulatorGuestRegOffset(0),
1672 binop(Iop_32HLto64
, mkexpr(t_hi
), mkexpr(t_lo
))));
1676 static IRExpr
*mkNarrowTo8 ( IRType ty
, IRExpr
* src
)
1678 vassert(ty
== Ity_I32
|| ty
== Ity_I64
);
1679 return ty
== Ity_I64
? unop(Iop_64to8
, src
) : unop(Iop_32to8
, src
);
1682 static IRExpr
*mkNarrowTo16 ( IRType ty
, IRExpr
* src
)
1684 vassert(ty
== Ity_I32
|| ty
== Ity_I64
);
1685 return ty
== Ity_I64
? unop(Iop_64to16
, src
) : unop(Iop_32to16
, src
);
1688 static void putPC(IRExpr
* e
)
1690 stmt(IRStmt_Put(OFFB_PC
, e
));
1693 static IRExpr
*mkWidenFrom32(IRType ty
, IRExpr
* src
, Bool sined
)
1695 vassert(ty
== Ity_I32
|| ty
== Ity_I64
);
1700 return (sined
) ? unop(Iop_32Sto64
, src
) : unop(Iop_32Uto64
, src
);
1703 /* Narrow 8/16/32 bit int expr to 8/16/32. Clearly only some
1704 of these combinations make sense. */
1705 static IRExpr
*narrowTo(IRType dst_ty
, IRExpr
* e
)
1707 IRType src_ty
= typeOfIRExpr(irsb
->tyenv
, e
);
1709 if (src_ty
== dst_ty
)
1712 if (src_ty
== Ity_I32
&& dst_ty
== Ity_I16
)
1713 return unop(Iop_32to16
, e
);
1715 if (src_ty
== Ity_I32
&& dst_ty
== Ity_I8
)
1716 return unop(Iop_32to8
, e
);
1718 if (src_ty
== Ity_I64
&& dst_ty
== Ity_I8
) {
1720 return unop(Iop_64to8
, e
);
1723 if (src_ty
== Ity_I64
&& dst_ty
== Ity_I16
) {
1725 return unop(Iop_64to16
, e
);
1728 vpanic("narrowTo(mips)");
/* Extract the low 32-bit half of an F64 expression as an F32, by
   reinterpreting through the integer domain (F64 -> I64 -> low I32 -> F32).
   NOTE(review): the non-F64 branch and closing braces are missing from this
   extraction, as are the declarations of t0/t1 (presumably IRTemp locals). */
1732 static IRExpr
*getLoFromF64(IRType ty
, IRExpr
* src
)
1734 vassert(ty
== Ity_F32
|| ty
== Ity_F64
);
1736 if (ty
== Ity_F64
) {
1738 t0
= newTemp(Ity_I64
);
1739 t1
= newTemp(Ity_I32
);
/* Reinterpret the double's bits as a 64-bit integer... */
1740 assign(t0
, unop(Iop_ReinterpF64asI64
, src
));
/* ...take the low 32 bits... */
1741 assign(t1
, unop(Iop_64to32
, mkexpr(t0
)));
/* ...and hand them back as a single-precision float. */
1742 return unop(Iop_ReinterpI32asF32
, mkexpr(t1
));
/* Extract the high 32-bit half of an F64 expression as an F32:
   F64 -> I64 -> high I32 -> F32, all bit-level reinterpretations. */
1747 static inline IRExpr
*getHiFromF64(IRExpr
* src
)
1749 vassert(typeOfIRExpr(irsb
->tyenv
, src
) == Ity_F64
);
1750 return unop(Iop_ReinterpI32asF32
, unop(Iop_64HIto32
,
1751 unop(Iop_ReinterpF64asI64
, src
)));
/* Widen an F32 expression to width 'ty'. For F64 the 32 float bits are
   placed in the low half of a 64-bit word whose high half is zero, then
   reinterpreted as a double (a bit-level widening, not a conversion).
   NOTE(review): the F32 pass-through branch and closing braces are missing
   from this garbled extraction. */
1754 static IRExpr
*mkWidenFromF32(IRType ty
, IRExpr
* src
)
1756 vassert(ty
== Ity_F32
|| ty
== Ity_F64
);
1758 if (ty
== Ity_F64
) {
1759 IRTemp t0
= newTemp(Ity_I32
);
1760 IRTemp t1
= newTemp(Ity_I64
);
/* Float bits -> I32. */
1761 assign(t0
, unop(Iop_ReinterpF32asI32
, src
));
/* Zero high half, float bits in low half. */
1762 assign(t1
, binop(Iop_32HLto64
, mkU32(0x0), mkexpr(t0
)));
1763 return unop(Iop_ReinterpI64asF64
, mkexpr(t1
));
1768 /* Convenience function to move to next instruction on condition. */
/* Emits a conditional side-exit to PC+4 (the following instruction);
   'condition' must be an Ity_I1 expression. The constant width follows
   mode64. NOTE(review): the trailing OFFB_PC argument and closing
   parentheses are missing from this garbled extraction. */
1769 static void mips_next_insn_if(IRExpr
*condition
)
1771 vassert(typeOfIRExpr(irsb
->tyenv
, condition
) == Ity_I1
);
1773 stmt(IRStmt_Exit(condition
, Ijk_Boring
,
1774 mode64
? IRConst_U64(guest_PC_curr_instr
+ 4) :
1775 IRConst_U32(guest_PC_curr_instr
+ 4),
1779 static IRExpr
*dis_branch_likely(IRExpr
* guard
, UInt imm
)
1781 ULong branch_offset
;
1784 /* PC = PC + (SignExtend(signed_immed_24) << 2)
1785 An 18-bit signed offset (the 16-bit offset field shifted left 2 bits)
1786 is added to the address of the instruction following
1787 the branch (not the branch itself), in the branch delay slot, to form
1788 a PC-relative effective target address. */
1790 branch_offset
= extend_s_18to64(imm
<< 2);
1792 branch_offset
= extend_s_18to32(imm
<< 2);
1794 t0
= newTemp(Ity_I1
);
1798 stmt(IRStmt_Exit(mkexpr(t0
), Ijk_Boring
,
1799 IRConst_U64(guest_PC_curr_instr
+ 8), OFFB_PC
));
1801 stmt(IRStmt_Exit(mkexpr(t0
), Ijk_Boring
,
1802 IRConst_U32(guest_PC_curr_instr
+ 8), OFFB_PC
));
1804 irsb
->jumpkind
= Ijk_Boring
;
1807 return mkU64(guest_PC_curr_instr
+ 4 + branch_offset
);
1809 return mkU32(guest_PC_curr_instr
+ 4 + branch_offset
);
1812 static void dis_branch(Bool link
, IRExpr
* guard
, UInt imm
, IRStmt
** set
)
1814 ULong branch_offset
;
1817 if (link
) { /* LR (GPR31) = addr of the 2nd instr after branch instr */
1819 putIReg(31, mkU64(guest_PC_curr_instr
+ 8));
1821 putIReg(31, mkU32(guest_PC_curr_instr
+ 8));
1824 /* PC = PC + (SignExtend(signed_immed_24) << 2)
1825 An 18-bit signed offset (the 16-bit offset field shifted left 2 bits)
1826 is added to the address of the instruction following
1827 the branch (not the branch itself), in the branch delay slot, to form
1828 a PC-relative effective target address. */
1831 branch_offset
= extend_s_18to64(imm
<< 2);
1833 branch_offset
= extend_s_18to32(imm
<< 2);
1835 t0
= newTemp(Ity_I1
);
1839 *set
= IRStmt_Exit(mkexpr(t0
), link
? Ijk_Call
: Ijk_Boring
,
1840 IRConst_U64(guest_PC_curr_instr
+ 4 + branch_offset
),
1843 *set
= IRStmt_Exit(mkexpr(t0
), link
? Ijk_Call
: Ijk_Boring
,
1844 IRConst_U32(guest_PC_curr_instr
+ 4 +
1845 (UInt
) branch_offset
), OFFB_PC
);
1848 static void dis_branch_compact(Bool link
, IRExpr
* guard
, UInt imm
,
1851 ULong branch_offset
;
1854 if (link
) { /* LR (GPR31) = addr of the instr after branch instr */
1856 putIReg(31, mkU64(guest_PC_curr_instr
+ 4));
1858 putIReg(31, mkU32(guest_PC_curr_instr
+ 4));
1860 dres
->jk_StopHere
= Ijk_Call
;
1862 dres
->jk_StopHere
= Ijk_Boring
;
1865 dres
->whatNext
= Dis_StopHere
;
1867 /* PC = PC + (SignExtend(signed_immed_24) << 2)
1868 An 18-bit signed offset (the 16-bit offset field shifted left 2 bits)
1869 is added to the address of the instruction following
1870 the branch (not the branch itself), in the branch delay slot, to form
1871 a PC-relative effective target address. */
1874 branch_offset
= extend_s_18to64(imm
<< 2);
1876 branch_offset
= extend_s_18to32(imm
<< 2);
1878 t0
= newTemp(Ity_I1
);
1882 stmt(IRStmt_Exit(mkexpr(t0
), link
? Ijk_Call
: Ijk_Boring
,
1883 IRConst_U64(guest_PC_curr_instr
+ 4 + branch_offset
),
1885 putPC(mkU64(guest_PC_curr_instr
+ 4));
1887 stmt(IRStmt_Exit(mkexpr(t0
), link
? Ijk_Call
: Ijk_Boring
,
1888 IRConst_U32(guest_PC_curr_instr
+ 4 +
1889 (UInt
) branch_offset
), OFFB_PC
));
1890 putPC(mkU32(guest_PC_curr_instr
+ 4));
/* Read single FP register 'fregNo' from the guest state. The register is
   read as F64 when the FPU is in 64-bit mode (fp_mode64), else as F32. */
1894 static IRExpr
*getFReg(UInt fregNo
)
1896 vassert(fregNo
< 32);
1897 IRType ty
= fp_mode64
? Ity_F64
: Ity_F32
;
1898 return IRExpr_Get(floatGuestRegOffset(fregNo
), ty
);
/* Read double FP register 'dregNo'. One path reads a full F64 slot
   directly; the other synthesizes an F64 from an even/odd F32 register
   pair (even = low bits, odd = high bits).
   NOTE(review): the branch structure selecting between the two paths
   (presumably an fp_mode64 test) is missing from this garbled extraction,
   as is the final return of t2. */
1901 static IRExpr
*getDReg(UInt dregNo
)
1903 vassert(dregNo
< 32);
1906 return IRExpr_Get(floatGuestRegOffset(dregNo
), Ity_F64
);
1908 /* Read a floating point register pair and combine their contents into a
1910 IRTemp t0
= newTemp(Ity_F32
);
1911 IRTemp t1
= newTemp(Ity_F32
);
1912 IRTemp t2
= newTemp(Ity_F64
);
1913 IRTemp t3
= newTemp(Ity_I32
);
1914 IRTemp t4
= newTemp(Ity_I32
);
1915 IRTemp t5
= newTemp(Ity_I64
);
/* t0 = even register of the pair, t1 = odd register. */
1917 assign(t0
, getFReg(dregNo
& (~1)));
1918 assign(t1
, getFReg(dregNo
| 1));
/* Reinterpret both halves as I32 ... */
1920 assign(t3
, unop(Iop_ReinterpF32asI32
, mkexpr(t0
)));
1921 assign(t4
, unop(Iop_ReinterpF32asI32
, mkexpr(t1
)));
/* ... glue them together (odd register supplies the high 32 bits) ... */
1922 assign(t5
, binop(Iop_32HLto64
, mkexpr(t4
), mkexpr(t3
)));
/* ... and reinterpret the 64-bit pattern as a double. */
1923 assign(t2
, unop(Iop_ReinterpI64asF64
, mkexpr(t5
)));
1929 static void putFReg(UInt dregNo
, IRExpr
* e
)
1931 vassert(dregNo
< 32);
1932 IRType ty
= fp_mode64
? Ity_F64
: Ity_F32
;
1933 vassert(typeOfIRExpr(irsb
->tyenv
, e
) == ty
);
1935 if (fp_mode64_fre
) {
1936 IRTemp t0
= newTemp(Ity_F32
);
1937 assign(t0
, getLoFromF64(ty
, e
));
1938 #if defined (_MIPSEL)
1939 stmt(IRStmt_Put(floatGuestRegOffset(dregNo
), mkexpr(t0
)));
1942 stmt(IRStmt_Put(floatGuestRegOffset(dregNo
) - 4, mkexpr(t0
)));
1945 stmt(IRStmt_Put(floatGuestRegOffset(dregNo
) + 4, mkexpr(t0
)));
1948 stmt(IRStmt_Put(floatGuestRegOffset(dregNo
& (~1)), mkexpr(t0
)));
1952 stmt(IRStmt_Put(floatGuestRegOffset(dregNo
), e
));
1955 if (has_msa
&& fp_mode64
) {
1956 stmt(IRStmt_Put(msaGuestRegOffset(dregNo
),
1957 binop(Iop_64HLtoV128
,
1958 unop(Iop_ReinterpF64asI64
, e
),
1959 unop(Iop_ReinterpF64asI64
, e
))));
1963 static void putDReg(UInt dregNo
, IRExpr
* e
)
1966 vassert(dregNo
< 32);
1967 IRType ty
= Ity_F64
;
1968 vassert(typeOfIRExpr(irsb
->tyenv
, e
) == ty
);
1969 stmt(IRStmt_Put(floatGuestRegOffset(dregNo
), e
));
1971 if (fp_mode64_fre
) {
1972 IRTemp t0
= newTemp(Ity_F32
);
1975 assign(t0
, getLoFromF64(ty
, e
));
1976 #if defined (_MIPSEL)
1977 stmt(IRStmt_Put(floatGuestRegOffset(dregNo
) - 4, mkexpr(t0
)));
1979 stmt(IRStmt_Put(floatGuestRegOffset(dregNo
& (~1)), mkexpr(t0
)));
1982 assign(t0
, getHiFromF64(e
));
1983 #if defined (_MIPSEL)
1984 stmt(IRStmt_Put(floatGuestRegOffset(dregNo
| 1), mkexpr(t0
)));
1986 stmt(IRStmt_Put(floatGuestRegOffset(dregNo
| 1) + 4, mkexpr(t0
)));
1992 stmt(IRStmt_Put(msaGuestRegOffset(dregNo
),
1993 binop(Iop_64HLtoV128
,
1994 unop(Iop_ReinterpF64asI64
, e
),
1995 unop(Iop_ReinterpF64asI64
, e
))));
1997 vassert(dregNo
< 32);
1998 vassert(typeOfIRExpr(irsb
->tyenv
, e
) == Ity_F64
);
1999 IRTemp t1
= newTemp(Ity_F64
);
2000 IRTemp t4
= newTemp(Ity_I32
);
2001 IRTemp t5
= newTemp(Ity_I32
);
2002 IRTemp t6
= newTemp(Ity_I64
);
2004 assign(t6
, unop(Iop_ReinterpF64asI64
, mkexpr(t1
)));
2005 assign(t4
, unop(Iop_64HIto32
, mkexpr(t6
))); /* hi */
2006 assign(t5
, unop(Iop_64to32
, mkexpr(t6
))); /* lo */
2007 putFReg(dregNo
& (~1), unop(Iop_ReinterpI32asF32
, mkexpr(t5
)));
2008 putFReg(dregNo
| 1, unop(Iop_ReinterpI32asF32
, mkexpr(t4
)));
/* Set FPU condition-code bit 'cc' in the FCSR to the (0/1) value of 'e'.
   cc 0 lives at FCSR bit 23; cc 1..7 live at bits 25..31. Each path
   first clears the target bit, then ORs in the new value.
   NOTE(review): the 'if (cc == 0) ... else ...' frame selecting between
   the two paths is missing from this garbled extraction. */
2012 static void setFPUCondCode(IRExpr
* e
, UInt cc
)
/* cc == 0: clear bit 23 (mask 0xFF7FFFFF), then set it from e. */
2015 putFCSR(binop(Iop_And32
, getFCSR(), mkU32(0xFF7FFFFF)));
2016 putFCSR(binop(Iop_Or32
, getFCSR(), binop(Iop_Shl32
, e
, mkU8(23))));
/* cc != 0: clear bit (24 + cc), then set it from e. */
2018 putFCSR(binop(Iop_And32
, getFCSR(), unop(Iop_Not32
,
2019 binop(Iop_Shl32
, mkU32(0x01000000), mkU8(cc
)))));
2020 putFCSR(binop(Iop_Or32
, getFCSR(), binop(Iop_Shl32
, e
, mkU8(24 + cc
))));
/* Build an IR expression for the current FP rounding mode, translated
   from the MIPS FCSR encoding to the VEX IRRoundingMode encoding (the
   two encodings differ only in the low two bits, remapped by the XOR
   trick below). NOTE(review): the #if mode64/#else/#endif lines that
   select between the two assigns are missing from this extraction. */
2024 static IRExpr
* get_IR_roundingmode ( void )
2027 rounding mode | MIPS | IR
2028 ------------------------
2029 to nearest | 00 | 00
2031 to +infinity | 10 | 10
2032 to -infinity | 11 | 01
2034 IRTemp rm_MIPS
= newTemp(Ity_I32
);
2035 /* Last two bits in FCSR are rounding mode. */
2038 assign(rm_MIPS
, binop(Iop_And32
, IRExpr_Get(offsetof(VexGuestMIPS64State
,
2039 guest_FCSR
), Ity_I32
), mkU32(3)));
2041 assign(rm_MIPS
, binop(Iop_And32
, IRExpr_Get(offsetof(VexGuestMIPS32State
,
2042 guest_FCSR
), Ity_I32
), mkU32(3)));
2044 /* rm_IR = XOR( rm_MIPS32, (rm_MIPS32 << 1) & 2) */
2046 return binop(Iop_Xor32
, mkexpr(rm_MIPS
), binop(Iop_And32
,
2047 binop(Iop_Shl32
, mkexpr(rm_MIPS
), mkU8(1)), mkU32(2)));
/* MSA twin of get_IR_roundingmode above: same MIPS -> IR rounding-mode
   remap, but reading the mode from MSACSR instead of FCSR.
   NOTE(review): the #if mode64/#else/#endif lines selecting between the
   two assigns are missing from this garbled extraction. */
2050 static IRExpr
* get_IR_roundingmode_MSA ( void )
2053 rounding mode | MIPS | IR
2054 ------------------------
2055 to nearest | 00 | 00
2057 to +infinity | 10 | 10
2058 to -infinity | 11 | 01
2060 IRTemp rm_MIPS
= newTemp(Ity_I32
);
2061 /* Last two bits in MSACSR are rounding mode. */
2064 assign(rm_MIPS
, binop(Iop_And32
, IRExpr_Get(offsetof(VexGuestMIPS64State
,
2065 guest_MSACSR
), Ity_I32
), mkU32(3)));
2067 assign(rm_MIPS
, binop(Iop_And32
, IRExpr_Get(offsetof(VexGuestMIPS32State
,
2068 guest_MSACSR
), Ity_I32
), mkU32(3)));
2070 /* rm_IR = XOR( rm_MIPS32, (rm_MIPS32 << 1) & 2) */
2071 return binop(Iop_Xor32
, mkexpr(rm_MIPS
), binop(Iop_And32
,
2072 binop(Iop_Shl32
, mkexpr(rm_MIPS
), mkU8(1)), mkU32(2)));
2075 /* sz, ULong -> IRExpr */
/* Build an immediate IRExpr of width 'ty' from 'imm64'; the value is
   truncated to 32 bits when ty is Ity_I32. */
2076 static IRExpr
*mkSzImm ( IRType ty
, ULong imm64
)
2078 vassert(ty
== Ity_I32
|| ty
== Ity_I64
);
2079 return ty
== Ity_I64
? mkU64(imm64
) : mkU32((UInt
) imm64
);
/* Like mkSzImm above, but produces an IRConst (for use in IRStmt_Exit
   targets etc.) rather than an IRExpr. */
2082 static IRConst
*mkSzConst ( IRType ty
, ULong imm64
)
2084 vassert(ty
== Ity_I32
|| ty
== Ity_I64
);
2085 return (ty
== Ity_I64
? IRConst_U64(imm64
) : IRConst_U32((UInt
) imm64
));
2088 /* Make sure we get valid 32 and 64bit addresses */
/* Canonicalize a guest address for width 'ty': pass through for 64-bit,
   sign-extend the low 32 bits for 32-bit mode. */
2089 static Addr64
mkSzAddr ( IRType ty
, Addr64 addr
)
2091 vassert(ty
== Ity_I32
|| ty
== Ity_I64
);
2092 return (ty
== Ity_I64
? (Addr64
) addr
:
2093 (Addr64
) extend_s_32to64(toUInt(addr
)));
2096 /* Shift and Rotate instructions for MIPS64 */
2097 static Bool
dis_instr_shrt ( UInt theInstr
)
2099 UInt opc2
= get_function(theInstr
);
2100 UChar regRs
= get_rs(theInstr
);
2101 UChar regRt
= get_rt(theInstr
);
2102 UChar regRd
= get_rd(theInstr
);
2103 UChar uImmsa
= get_sa(theInstr
);
2104 Long sImmsa
= extend_s_16to64(uImmsa
);
2105 IRType ty
= mode64
? Ity_I64
: Ity_I32
;
2106 IRTemp tmp
= newTemp(ty
);
2107 IRTemp tmpOr
= newTemp(ty
);
2108 IRTemp tmpRt
= newTemp(ty
);
2109 IRTemp tmpRs
= newTemp(ty
);
2110 IRTemp tmpRd
= newTemp(ty
);
2112 assign(tmpRs
, getIReg(regRs
));
2113 assign(tmpRt
, getIReg(regRt
));
2117 if ((regRs
& 0x01) == 0) {
2118 /* Doubleword Shift Right Logical - DSRL; MIPS64 */
2119 DIP("dsrl r%u, r%u, %lld", regRd
, regRt
, sImmsa
);
2120 assign(tmpRd
, binop(Iop_Shr64
, mkexpr(tmpRt
), mkU8(uImmsa
)));
2121 putIReg(regRd
, mkexpr(tmpRd
));
2122 } else if ((regRs
& 0x01) == 1) {
2123 /* Doubleword Rotate Right - DROTR; MIPS64r2 */
2125 DIP("drotr r%u, r%u, %lld", regRd
, regRt
, sImmsa
);
2126 IRTemp tmpL
= newTemp(ty
);
2127 IRTemp tmpR
= newTemp(ty
);
2128 assign(tmpR
, binop(Iop_Shr64
, mkexpr(tmpRt
), mkU8(uImmsa
)));
2129 assign(tmp
, binop(Iop_Shl64
, mkexpr(tmpRt
), mkU8(63 - uImmsa
)));
2130 assign(tmpL
, binop(Iop_Shl64
, mkexpr(tmp
), mkU8(1)));
2131 assign(tmpRd
, binop(Iop_Or64
, mkexpr(tmpL
), mkexpr(tmpR
)));
2132 putIReg(regRd
, mkexpr(tmpRd
));
2139 if ((regRs
& 0x01) == 0) {
2140 /* Doubleword Shift Right Logical Plus 32 - DSRL32; MIPS64 */
2141 DIP("dsrl32 r%u, r%u, %lld", regRd
, regRt
, sImmsa
+ 32);
2142 assign(tmpRd
, binop(Iop_Shr64
, mkexpr(tmpRt
), mkU8(uImmsa
+ 32)));
2143 putIReg(regRd
, mkexpr(tmpRd
));
2144 } else if ((regRs
& 0x01) == 1) {
2145 /* Doubleword Rotate Right Plus 32 - DROTR32; MIPS64r2 */
2146 DIP("drotr32 r%u, r%u, %lld", regRd
, regRt
, sImmsa
);
2148 IRTemp tmpL
= newTemp(ty
);
2149 IRTemp tmpR
= newTemp(ty
);
2150 /* (tmpRt >> sa) | (tmpRt << (64 - sa)) */
2151 assign(tmpR
, binop(Iop_Shr64
, mkexpr(tmpRt
), mkU8(uImmsa
+ 32)));
2152 assign(tmp
, binop(Iop_Shl64
, mkexpr(tmpRt
),
2153 mkU8(63 - (uImmsa
+ 32))));
2154 assign(tmpL
, binop(Iop_Shl64
, mkexpr(tmp
), mkU8(1)));
2155 assign(tmpRd
, binop(Iop_Or64
, mkexpr(tmpL
), mkexpr(tmpR
)));
2156 putIReg(regRd
, mkexpr(tmpRd
));
2163 if ((uImmsa
& 0x01) == 0) {
2164 /* Doubleword Shift Right Logical Variable - DSRLV; MIPS64 */
2165 DIP("dsrlv r%u, r%u, r%u", regRd
, regRt
, regRs
);
2166 IRTemp tmpRs8
= newTemp(Ity_I8
);
2167 /* s = tmpRs[5..0] */
2168 assign(tmp
, binop(Iop_And64
, mkexpr(tmpRs
), mkU64(63)));
2169 assign(tmpRs8
, mkNarrowTo8(ty
, mkexpr(tmp
)));
2170 assign(tmpRd
, binop(Iop_Shr64
, mkexpr(tmpRt
), mkexpr(tmpRs8
)));
2171 putIReg(regRd
, mkexpr(tmpRd
));
2172 } else if ((uImmsa
& 0x01) == 1) {
2173 /* Doubleword Rotate Right Variable - DROTRV; MIPS64r2 */
2174 DIP("drotrv r%u, r%u, r%u", regRd
, regRt
, regRs
);
2175 IRTemp tmpL
= newTemp(ty
);
2176 IRTemp tmpR
= newTemp(ty
);
2177 IRTemp tmpRs8
= newTemp(Ity_I8
);
2178 IRTemp tmpLs8
= newTemp(Ity_I8
);
2179 IRTemp tmp64
= newTemp(ty
);
2182 (tmpRt << s) | (tmpRt >> m) */
2184 assign(tmp64
, binop(Iop_And64
, mkexpr(tmpRs
), mkSzImm(ty
, 63)));
2185 assign(tmp
, binop(Iop_Sub64
, mkU64(63), mkexpr(tmp64
)));
2187 assign(tmpLs8
, mkNarrowTo8(ty
, mkexpr(tmp
)));
2188 assign(tmpRs8
, mkNarrowTo8(ty
, mkexpr(tmp64
)));
2190 assign(tmpR
, binop(Iop_Shr64
, mkexpr(tmpRt
), mkexpr(tmpRs8
)));
2191 assign(tmpL
, binop(Iop_Shl64
, mkexpr(tmpRt
), mkexpr(tmpLs8
)));
2192 assign(tmpRd
, binop(Iop_Shl64
, mkexpr(tmpL
), mkU8(1)));
2193 assign(tmpOr
, binop(Iop_Or64
, mkexpr(tmpRd
), mkexpr(tmpR
)));
2195 putIReg(regRd
, mkexpr(tmpOr
));
2201 case 0x38: /* Doubleword Shift Left Logical - DSLL; MIPS64 */
2202 DIP("dsll r%u, r%u, %lld", regRd
, regRt
, sImmsa
);
2204 assign(tmpRd
, binop(Iop_Shl64
, mkexpr(tmpRt
), mkU8(uImmsa
)));
2205 putIReg(regRd
, mkexpr(tmpRd
));
2208 case 0x3C: /* Doubleword Shift Left Logical Plus 32 - DSLL32; MIPS64 */
2209 DIP("dsll32 r%u, r%u, %lld", regRd
, regRt
, sImmsa
);
2210 assign(tmpRd
, binop(Iop_Shl64
, mkexpr(tmpRt
), mkU8(uImmsa
+ 32)));
2211 putIReg(regRd
, mkexpr(tmpRd
));
2214 case 0x14: { /* Doubleword Shift Left Logical Variable - DSLLV; MIPS64 */
2215 DIP("dsllv r%u, r%u, r%u", regRd
, regRt
, regRs
);
2216 IRTemp tmpRs8
= newTemp(Ity_I8
);
2218 assign(tmp
, binop(Iop_And64
, mkexpr(tmpRs
), mkSzImm(ty
, 63)));
2219 assign(tmpRs8
, mkNarrowTo8(ty
, mkexpr(tmp
)));
2220 assign(tmpRd
, binop(Iop_Shl64
, mkexpr(tmpRt
), mkexpr(tmpRs8
)));
2221 putIReg(regRd
, mkexpr(tmpRd
));
2225 case 0x3B: /* Doubleword Shift Right Arithmetic - DSRA; MIPS64 */
2226 DIP("dsra r%u, r%u, %lld", regRd
, regRt
, sImmsa
);
2227 assign(tmpRd
, binop(Iop_Sar64
, mkexpr(tmpRt
), mkU8(uImmsa
)));
2228 putIReg(regRd
, mkexpr(tmpRd
));
2231 case 0x3F: /* Doubleword Shift Right Arithmetic Plus 32 - DSRA32;
2233 DIP("dsra32 r%u, r%u, %lld", regRd
, regRt
, sImmsa
);
2234 assign(tmpRd
, binop(Iop_Sar64
, mkexpr(tmpRt
), mkU8(uImmsa
+ 32)));
2235 putIReg(regRd
, mkexpr(tmpRd
));
2239 /* Doubleword Shift Right Arithmetic Variable - DSRAV;
2241 DIP("dsrav r%u, r%u, r%u", regRd
, regRt
, regRs
);
2242 IRTemp tmpRs8
= newTemp(Ity_I8
);
2243 assign(tmp
, binop(Iop_And64
, mkexpr(tmpRs
), mkSzImm(ty
, 63)));
2244 assign(tmpRs8
, mkNarrowTo8(ty
, mkexpr(tmp
)));
2245 assign(tmpRd
, binop(Iop_Sar64
, mkexpr(tmpRt
), mkexpr(tmpRs8
)));
2246 putIReg(regRd
, mkexpr(tmpRd
));
2259 static IROp
mkSzOp ( IRType ty
, IROp op8
)
2262 vassert(ty
== Ity_I8
|| ty
== Ity_I16
|| ty
== Ity_I32
|| ty
== Ity_I64
);
2263 vassert(op8
== Iop_Add8
|| op8
== Iop_Sub8
|| op8
== Iop_Mul8
2264 || op8
== Iop_Or8
|| op8
== Iop_And8
|| op8
== Iop_Xor8
2265 || op8
== Iop_Shl8
|| op8
== Iop_Shr8
|| op8
== Iop_Sar8
2266 || op8
== Iop_CmpEQ8
|| op8
== Iop_CmpNE8
|| op8
== Iop_Not8
);
2267 adj
= ty
== Ity_I8
? 0 : (ty
== Ity_I16
? 1 : (ty
== Ity_I32
? 2 : 3));
2271 /*********************************************************/
2272 /*--- Floating Point Compare ---*/
2273 /*********************************************************/
2274 /* Function that returns a string that represent mips cond
2275 mnemonic for the input code. */
2276 static const HChar
* showCondCode(UInt code
)
2346 vpanic("showCondCode");
2353 static Bool
dis_instr_CCondFmt ( UInt cins
)
2355 IRTemp t0
, t1
, t2
, t3
, tmp5
, tmp6
;
2356 IRTemp ccIR
= newTemp(Ity_I32
);
2357 IRTemp ccMIPS
= newTemp(Ity_I32
);
2358 UInt FC
= get_FC(cins
);
2359 UInt fmt
= get_fmt(cins
);
2360 UInt fs
= get_fs(cins
);
2361 UInt ft
= get_ft(cins
);
2362 UInt cond
= get_cond(cins
);
2364 if (FC
== 0x3) { /* C.cond.fmt */
2365 UInt fpc_cc
= get_fpc_cc(cins
);
2368 case 0x10: { /* C.cond.S */
2369 DIP("c.%s.s %u, f%u, f%u", showCondCode(cond
), fpc_cc
, fs
, ft
);
2372 t0
= newTemp(Ity_I32
);
2373 t1
= newTemp(Ity_I32
);
2374 t2
= newTemp(Ity_I32
);
2375 t3
= newTemp(Ity_I32
);
2377 tmp5
= newTemp(Ity_F64
);
2378 tmp6
= newTemp(Ity_F64
);
2380 assign(tmp5
, unop(Iop_F32toF64
, getLoFromF64(Ity_F64
,
2382 assign(tmp6
, unop(Iop_F32toF64
, getLoFromF64(Ity_F64
,
2385 assign(ccIR
, binop(Iop_CmpF64
, mkexpr(tmp5
), mkexpr(tmp6
)));
2386 putHI(mkWidenFrom32(mode64
? Ity_I64
: Ity_I32
,
2387 mkexpr(ccIR
), True
));
2388 /* Map compare result from IR to MIPS
2389 FP cmp result | MIPS | IR
2390 --------------------------
2397 /* ccMIPS = Shl(1, (~(ccIR>>5) & 2) | ((ccIR ^ (ccIR>>6)) & 1) */
2398 assign(ccMIPS
, binop(Iop_Shl32
, mkU32(1), unop(Iop_32to8
,
2399 binop(Iop_Or32
, binop(Iop_And32
, unop(Iop_Not32
,
2400 binop(Iop_Shr32
, mkexpr(ccIR
), mkU8(5))), mkU32(2)),
2401 binop(Iop_And32
, binop(Iop_Xor32
, mkexpr(ccIR
),
2402 binop(Iop_Shr32
, mkexpr(ccIR
), mkU8(6))),
2404 putLO(mkWidenFrom32(mode64
? Ity_I64
: Ity_I32
,
2405 mkexpr(ccMIPS
), True
));
2408 assign(t0
, binop(Iop_And32
, mkexpr(ccMIPS
), mkU32(0x1)));
2410 assign(t1
, binop(Iop_And32
, binop(Iop_Shr32
, mkexpr(ccMIPS
),
2411 mkU8(0x1)), mkU32(0x1)));
2413 assign(t2
, binop(Iop_And32
, unop(Iop_Not32
, binop(Iop_Shr32
,
2414 mkexpr(ccMIPS
), mkU8(0x2))), mkU32(0x1)));
2416 assign(t3
, binop(Iop_And32
, binop(Iop_Shr32
, mkexpr(ccMIPS
),
2417 mkU8(0x3)), mkU32(0x1)));
2421 setFPUCondCode(mkU32(0), fpc_cc
);
2425 setFPUCondCode(mkexpr(t0
), fpc_cc
);
2429 setFPUCondCode(mkexpr(t1
), fpc_cc
);
2433 setFPUCondCode(binop(Iop_Or32
, mkexpr(t0
), mkexpr(t1
)),
2438 setFPUCondCode(mkexpr(t3
), fpc_cc
);
2442 setFPUCondCode(binop(Iop_Or32
, mkexpr(t0
), mkexpr(t3
)),
2447 setFPUCondCode(binop(Iop_Or32
, mkexpr(t3
), mkexpr(t1
)),
2452 setFPUCondCode(mkexpr(t2
), fpc_cc
);
2456 setFPUCondCode(mkU32(0), fpc_cc
);
2460 setFPUCondCode(mkexpr(t0
), fpc_cc
);
2464 setFPUCondCode(mkexpr(t1
), fpc_cc
);
2468 setFPUCondCode(binop(Iop_Or32
, mkexpr(t0
), mkexpr(t1
)),
2473 setFPUCondCode(mkexpr(t3
), fpc_cc
);
2477 setFPUCondCode(binop(Iop_Or32
, mkexpr(t0
), mkexpr(t3
)),
2482 setFPUCondCode(binop(Iop_Or32
, mkexpr(t3
), mkexpr(t1
)),
2487 setFPUCondCode(mkexpr(t2
), fpc_cc
);
2495 t0
= newTemp(Ity_I32
);
2496 t1
= newTemp(Ity_I32
);
2497 t2
= newTemp(Ity_I32
);
2498 t3
= newTemp(Ity_I32
);
2500 assign(ccIR
, binop(Iop_CmpF64
, unop(Iop_F32toF64
, getFReg(fs
)),
2501 unop(Iop_F32toF64
, getFReg(ft
))));
2502 /* Map compare result from IR to MIPS
2503 FP cmp result | MIPS | IR
2504 --------------------------
2511 /* ccMIPS = Shl(1, (~(ccIR>>5) & 2) | ((ccIR ^ (ccIR>>6)) & 1) */
2512 assign(ccMIPS
, binop(Iop_Shl32
, mkU32(1), unop(Iop_32to8
,
2513 binop(Iop_Or32
, binop(Iop_And32
, unop(Iop_Not32
,
2514 binop(Iop_Shr32
, mkexpr(ccIR
), mkU8(5))),
2515 mkU32(2)), binop(Iop_And32
,
2516 binop(Iop_Xor32
, mkexpr(ccIR
),
2517 binop(Iop_Shr32
, mkexpr(ccIR
), mkU8(6))),
2520 assign(t0
, binop(Iop_And32
, mkexpr(ccMIPS
), mkU32(0x1)));
2522 assign(t1
, binop(Iop_And32
, binop(Iop_Shr32
, mkexpr(ccMIPS
),
2523 mkU8(0x1)), mkU32(0x1)));
2525 assign(t2
, binop(Iop_And32
, unop(Iop_Not32
, binop(Iop_Shr32
,
2526 mkexpr(ccMIPS
), mkU8(0x2))), mkU32(0x1)));
2528 assign(t3
, binop(Iop_And32
, binop(Iop_Shr32
, mkexpr(ccMIPS
),
2529 mkU8(0x3)), mkU32(0x1)));
2533 setFPUCondCode(mkU32(0), fpc_cc
);
2537 setFPUCondCode(mkexpr(t0
), fpc_cc
);
2541 setFPUCondCode(mkexpr(t1
), fpc_cc
);
2545 setFPUCondCode(binop(Iop_Or32
, mkexpr(t0
), mkexpr(t1
)),
2550 setFPUCondCode(mkexpr(t3
), fpc_cc
);
2554 setFPUCondCode(binop(Iop_Or32
, mkexpr(t0
), mkexpr(t3
)),
2559 setFPUCondCode(binop(Iop_Or32
, mkexpr(t3
), mkexpr(t1
)),
2564 setFPUCondCode(mkexpr(t2
), fpc_cc
);
2568 setFPUCondCode(mkU32(0), fpc_cc
);
2572 setFPUCondCode(mkexpr(t0
), fpc_cc
);
2576 setFPUCondCode(mkexpr(t1
), fpc_cc
);
2580 setFPUCondCode(binop(Iop_Or32
, mkexpr(t0
), mkexpr(t1
)),
2585 setFPUCondCode(mkexpr(t3
), fpc_cc
);
2589 setFPUCondCode(binop(Iop_Or32
, mkexpr(t0
), mkexpr(t3
)),
2594 setFPUCondCode(binop(Iop_Or32
, mkexpr(t3
), mkexpr(t1
)),
2599 setFPUCondCode(mkexpr(t2
), fpc_cc
);
2609 case 0x11: { /* C.cond.D */
2610 DIP("c.%s.d %u, f%u, f%u", showCondCode(cond
), fpc_cc
, fs
, ft
);
2611 t0
= newTemp(Ity_I32
);
2612 t1
= newTemp(Ity_I32
);
2613 t2
= newTemp(Ity_I32
);
2614 t3
= newTemp(Ity_I32
);
2615 assign(ccIR
, binop(Iop_CmpF64
, getDReg(fs
), getDReg(ft
)));
2616 /* Map compare result from IR to MIPS
2617 FP cmp result | MIPS | IR
2618 --------------------------
2625 /* ccMIPS = Shl(1, (~(ccIR>>5) & 2) | ((ccIR ^ (ccIR>>6)) & 1) */
2626 assign(ccMIPS
, binop(Iop_Shl32
, mkU32(1), unop(Iop_32to8
,
2627 binop(Iop_Or32
, binop(Iop_And32
, unop(Iop_Not32
,
2628 binop(Iop_Shr32
, mkexpr(ccIR
), mkU8(5))), mkU32(2)),
2629 binop(Iop_And32
, binop(Iop_Xor32
, mkexpr(ccIR
),
2630 binop(Iop_Shr32
, mkexpr(ccIR
), mkU8(6))),
2634 assign(t0
, binop(Iop_And32
, mkexpr(ccMIPS
), mkU32(0x1)));
2636 assign(t1
, binop(Iop_And32
, binop(Iop_Shr32
, mkexpr(ccMIPS
),
2637 mkU8(0x1)), mkU32(0x1)));
2639 assign(t2
, binop(Iop_And32
, unop(Iop_Not32
, binop(Iop_Shr32
,
2640 mkexpr(ccMIPS
), mkU8(0x2))), mkU32(0x1)));
2642 assign(t3
, binop(Iop_And32
, binop(Iop_Shr32
, mkexpr(ccMIPS
),
2643 mkU8(0x3)), mkU32(0x1)));
2647 setFPUCondCode(mkU32(0), fpc_cc
);
2651 setFPUCondCode(mkexpr(t0
), fpc_cc
);
2655 setFPUCondCode(mkexpr(t1
), fpc_cc
);
2659 setFPUCondCode(binop(Iop_Or32
, mkexpr(t0
), mkexpr(t1
)),
2664 setFPUCondCode(mkexpr(t3
), fpc_cc
);
2668 setFPUCondCode(binop(Iop_Or32
, mkexpr(t0
), mkexpr(t3
)),
2673 setFPUCondCode(binop(Iop_Or32
, mkexpr(t3
), mkexpr(t1
)),
2678 setFPUCondCode(mkexpr(t2
), fpc_cc
);
2682 setFPUCondCode(mkU32(0), fpc_cc
);
2686 setFPUCondCode(mkexpr(t0
), fpc_cc
);
2690 setFPUCondCode(mkexpr(t1
), fpc_cc
);
2694 setFPUCondCode(binop(Iop_Or32
, mkexpr(t0
), mkexpr(t1
)),
2699 setFPUCondCode(mkexpr(t3
), fpc_cc
);
2703 setFPUCondCode(binop(Iop_Or32
, mkexpr(t0
), mkexpr(t3
)),
2708 setFPUCondCode(binop(Iop_Or32
, mkexpr(t3
), mkexpr(t1
)),
2713 setFPUCondCode(mkexpr(t2
), fpc_cc
);
2732 /*********************************************************/
2733 /*--- Branch Instructions for mips64 ---*/
2734 /*********************************************************/
2735 static Bool
dis_instr_branch ( UInt theInstr
, DisResult
* dres
, IRStmt
** set
)
2738 UChar opc1
= get_opcode(theInstr
);
2739 UChar regRs
= get_rs(theInstr
);
2740 UChar regRt
= get_rt(theInstr
);
2741 UInt offset
= get_imm(theInstr
);
2742 Long sOffset
= extend_s_16to64(offset
);
2743 IRType ty
= mode64
? Ity_I64
: Ity_I32
;
2744 IROp opSlt
= mode64
? Iop_CmpLT64S
: Iop_CmpLT32S
;
2746 IRTemp tmp
= newTemp(ty
);
2747 IRTemp tmpRs
= newTemp(ty
);
2748 IRTemp tmpRt
= newTemp(ty
);
2749 IRTemp tmpLt
= newTemp(ty
);
2750 IRTemp tmpReg0
= newTemp(ty
);
2752 UChar regLnk
= 31; /* reg 31 is link reg in MIPS */
2754 Addr64 cia
= guest_PC_curr_instr
;
2756 IRExpr
*eConst0
= mkSzImm(ty
, (UInt
) 0);
2757 IRExpr
*eNia
= mkSzImm(ty
, cia
+ 8);
2758 IRExpr
*eCond
= NULL
;
2760 assign(tmpRs
, getIReg(regRs
));
2761 assign(tmpRt
, getIReg(regRt
));
2762 assign(tmpReg0
, getIReg(0));
2764 eCond
= binop(mkSzOp(ty
, Iop_CmpNE8
), mkexpr(tmpReg0
), mkexpr(tmpReg0
));
2769 case 0x00: { /* BLTZ rs, offset */
2770 addrTgt
= mkSzAddr(ty
, cia
+ 4 + (sOffset
<< 2));
2771 IRTemp tmpLtRes
= newTemp(Ity_I1
);
2773 assign(tmp
, eConst0
);
2774 assign(tmpLtRes
, binop(opSlt
, mkexpr(tmpRs
), mkexpr(tmp
)));
2775 assign(tmpLt
, mode64
? unop(Iop_1Uto64
, mkexpr(tmpLtRes
)) :
2776 unop(Iop_1Uto32
, mkexpr(tmpLtRes
)));
2778 eCond
= binop(mkSzOp(ty
, Iop_CmpNE8
), mkexpr(tmpLt
),
2781 jmpKind
= Ijk_Boring
;
2785 case 0x01: { /* BGEZ rs, offset */
2786 IRTemp tmpLtRes
= newTemp(Ity_I1
);
2787 addrTgt
= mkSzAddr(ty
, cia
+ 4 + (sOffset
<< 2));
2789 assign(tmp
, eConst0
);
2790 assign(tmpLtRes
, binop(opSlt
, mkexpr(tmpRs
), mkexpr(tmp
)));
2791 assign(tmpLt
, mode64
? unop(Iop_1Uto64
, mkexpr(tmpLtRes
)) :
2792 unop(Iop_1Uto32
, mkexpr(tmpLtRes
)));
2793 eCond
= binop(mkSzOp(ty
, Iop_CmpEQ8
), mkexpr(tmpLt
),
2796 jmpKind
= Ijk_Boring
;
2800 case 0x11: { /* BGEZAL rs, offset */
2801 addrTgt
= mkSzAddr(ty
, cia
+ 4 + (sOffset
<< 2));
2802 putIReg(regLnk
, eNia
);
2803 IRTemp tmpLtRes
= newTemp(Ity_I1
);
2805 assign(tmpLtRes
, binop(opSlt
, mkexpr(tmpRs
), eConst0
));
2806 assign(tmpLt
, mode64
? unop(Iop_1Uto64
, mkexpr(tmpLtRes
)) :
2807 unop(Iop_1Uto32
, mkexpr(tmpLtRes
)));
2809 eCond
= binop(mkSzOp(ty
, Iop_CmpEQ8
), mkexpr(tmpLt
),
2816 case 0x10: { /* BLTZAL rs, offset */
2817 IRTemp tmpLtRes
= newTemp(Ity_I1
);
2818 IRTemp tmpRes
= newTemp(ty
);
2820 addrTgt
= mkSzAddr(ty
, cia
+ 4 + (sOffset
<< 2));
2821 putIReg(regLnk
, eNia
);
2823 assign(tmp
, eConst0
);
2824 assign(tmpLtRes
, binop(opSlt
, mkexpr(tmpRs
), mkexpr(tmp
)));
2825 assign(tmpRes
, mode64
? unop(Iop_1Uto64
,
2826 mkexpr(tmpLtRes
)) : unop(Iop_1Uto32
, mkexpr(tmpLtRes
)));
2827 eCond
= binop(mkSzOp(ty
, Iop_CmpNE8
), mkexpr(tmpRes
),
2842 *set
= IRStmt_Exit(eCond
, jmpKind
, mkSzConst(ty
, addrTgt
), OFFB_PC
);
2846 /*********************************************************/
2847 /*--- Cavium Specific Instructions ---*/
2848 /*********************************************************/
2850 /* Convenience function to yield to thread scheduler */
/* Emits a conditional side-exit back to the current instruction, used to
   retry a failed CAS (see mips_load_store32/64 below).
   NOTE(review): the jump-kind argument and the 32-bit-mode variant
   (original lines 2854, 2856-2858) are missing from this extraction. */
2851 static void jump_back(IRExpr
*condition
)
2853 stmt( IRStmt_Exit(condition
,
2855 IRConst_U64( guest_PC_curr_instr
),
2859 /* Based on s390_irgen_load_and_add32. */
/* Atomically replace the 32-bit word at *op1addr with new_val, provided
   it still holds 'expd', via an IR CAS; on mismatch, jump_back retries
   the whole instruction. When putIntoRd, the old value is widened and
   written to GPR rd. NOTE(review): #else/#endif of the endianness
   selection and the putIntoRd guard are missing from this extraction. */
2860 static void mips_load_store32(IRTemp op1addr
, IRTemp new_val
,
2861 IRTemp expd
, UChar rd
, Bool putIntoRd
)
2864 IRTemp old_mem
= newTemp(Ity_I32
);
2865 IRType ty
= mode64
? Ity_I64
: Ity_I32
;
/* Single (non-double) CAS: old_mem receives the value found in memory. */
2867 cas
= mkIRCAS(IRTemp_INVALID
, old_mem
,
2868 #if defined (_MIPSEL)
2869 Iend_LE
, mkexpr(op1addr
),
2871 Iend_BE
, mkexpr(op1addr
),
2873 NULL
, mkexpr(expd
), /* expected value */
2874 NULL
, mkexpr(new_val
) /* new value */);
2875 stmt(IRStmt_CAS(cas
));
2877 /* If old_mem contains the expected value, then the CAS succeeded.
2878 Otherwise, it did not */
2879 jump_back(binop(Iop_CmpNE32
, mkexpr(old_mem
), mkexpr(expd
)));
/* Deliver the pre-update value to rd, sign-extended in 64-bit mode. */
2882 putIReg(rd
, mkWidenFrom32(ty
, mkexpr(old_mem
), True
));
2885 /* Based on s390_irgen_load_and_add64. */
/* 64-bit twin of mips_load_store32 above: CAS the doubleword at
   *op1addr from 'expd' to new_val, retry via jump_back on mismatch,
   and optionally return the old value in GPR rd.
   NOTE(review): #else/#endif of the endianness selection and the
   putIntoRd guard are missing from this garbled extraction. */
2886 static void mips_load_store64(IRTemp op1addr
, IRTemp new_val
,
2887 IRTemp expd
, UChar rd
, Bool putIntoRd
)
2890 IRTemp old_mem
= newTemp(Ity_I64
);
2892 cas
= mkIRCAS(IRTemp_INVALID
, old_mem
,
2893 #if defined (_MIPSEL)
2894 Iend_LE
, mkexpr(op1addr
),
2896 Iend_BE
, mkexpr(op1addr
),
2898 NULL
, mkexpr(expd
), /* expected value */
2899 NULL
, mkexpr(new_val
) /* new value */);
2900 stmt(IRStmt_CAS(cas
));
2902 /* If old_mem contains the expected value, then the CAS succeeded.
2903 Otherwise, it did not */
2904 jump_back(binop(Iop_CmpNE64
, mkexpr(old_mem
), mkexpr(expd
)));
/* No widening needed at 64 bits. */
2907 putIReg(rd
, mkexpr(old_mem
));
2910 static Bool
dis_instr_CVM ( UInt theInstr
)
2912 UChar opc2
= get_function(theInstr
);
2913 UChar opc1
= get_opcode(theInstr
);
2914 UChar regRs
= get_rs(theInstr
);
2915 UChar regRt
= get_rt(theInstr
);
2916 UChar regRd
= get_rd(theInstr
);
2917 /* MIPS trap instructions extract code from theInstr[15:6].
2918 Cavium OCTEON instructions SNEI, SEQI extract immediate operands
2919 from the same bit field [15:6]. */
2920 UInt imm
= get_code(theInstr
);
2921 UChar lenM1
= get_msb(theInstr
);
2922 UChar p
= get_lsb(theInstr
);
2923 IRType ty
= mode64
? Ity_I64
: Ity_I32
;
2924 IRTemp tmp
= newTemp(ty
);
2925 IRTemp tmpRs
= newTemp(ty
);
2926 IRTemp tmpRt
= newTemp(ty
);
2927 IRTemp t1
= newTemp(ty
);
2929 assign(tmpRs
, getIReg(regRs
));
2934 case 0x03: { /* DMUL rd, rs, rt */
2935 DIP("dmul r%u, r%u, r%u", regRd
, regRs
, regRt
);
2936 IRTemp t0
= newTemp(Ity_I128
);
2937 assign(t0
, binop(Iop_MullU64
, getIReg(regRs
), getIReg(regRt
)));
2938 putIReg(regRd
, unop(Iop_128to64
, mkexpr(t0
)));
2942 case 0x18: { /* Store Atomic Add Word - SAA; Cavium OCTEON */
2943 DIP("saa r%u, (r%u)", regRt
, regRs
);
2944 IRTemp addr
= newTemp(Ity_I64
);
2945 IRTemp new_val
= newTemp(Ity_I32
);
2946 IRTemp old
= newTemp(Ity_I32
);
2947 assign(addr
, getIReg(regRs
));
2948 assign(old
, load(Ity_I32
, mkexpr(addr
)));
2949 assign(new_val
, binop(Iop_Add32
,
2951 mkNarrowTo32(ty
, getIReg(regRt
))));
2952 mips_load_store32(addr
, new_val
, old
, 0, False
);
2956 /* Store Atomic Add Doubleword - SAAD; Cavium OCTEON */
2958 DIP( "saad r%u, (r%u)", regRt
, regRs
);
2959 IRTemp addr
= newTemp(Ity_I64
);
2960 IRTemp new_val
= newTemp(Ity_I64
);
2961 IRTemp old
= newTemp(Ity_I64
);
2962 assign(addr
, getIReg(regRs
));
2963 assign(old
, load(Ity_I64
, mkexpr(addr
)));
2964 assign(new_val
, binop(Iop_Add64
,
2967 mips_load_store64(addr
, new_val
, old
, 0, False
);
2971 /* LAI, LAID, LAD, LADD, LAS, LASD,
2972 LAC, LACD, LAA, LAAD, LAW, LAWD */
2974 UInt opc3
= get_sa(theInstr
);
2975 IRTemp addr
= newTemp(Ity_I64
);
2978 /* Load Atomic Increment Word - LAI; Cavium OCTEON2 */
2980 DIP("lai r%u,(r%u)\n", regRd
, regRs
);
2981 IRTemp new_val
= newTemp(Ity_I32
);
2982 IRTemp old
= newTemp(Ity_I32
);
2983 assign(addr
, getIReg(regRs
));
2984 assign(old
, load(Ity_I32
, mkexpr(addr
)));
2985 assign(new_val
, binop(Iop_Add32
,
2988 mips_load_store32(addr
, new_val
, old
, regRd
, True
);
2992 /* Load Atomic Increment Doubleword - LAID; Cavium OCTEON2 */
2994 DIP("laid r%u,(r%u)\n", regRd
, regRs
);
2995 IRTemp new_val
= newTemp(Ity_I64
);
2996 IRTemp old
= newTemp(Ity_I64
);
2997 assign(addr
, getIReg(regRs
));
2998 assign(old
, load(Ity_I64
, mkexpr(addr
)));
2999 assign(new_val
, binop(Iop_Add64
,
3002 mips_load_store64(addr
, new_val
, old
, regRd
, True
);
3006 /* Load Atomic Decrement Word - LAD; Cavium OCTEON2 */
3008 DIP("lad r%u,(r%u)\n", regRd
, regRs
);
3009 IRTemp new_val
= newTemp(Ity_I32
);
3010 IRTemp old
= newTemp(Ity_I32
);
3011 assign(addr
, getIReg(regRs
));
3012 assign(old
, load(Ity_I32
, mkexpr(addr
)));
3013 assign(new_val
, binop(Iop_Sub32
,
3016 mips_load_store32(addr
, new_val
, old
, regRd
, True
);
3020 /* Load Atomic Decrement Doubleword - LADD; Cavium OCTEON2 */
3022 DIP("ladd r%u,(r%u)\n", regRd
, regRs
);
3023 IRTemp new_val
= newTemp(Ity_I64
);
3024 IRTemp old
= newTemp(Ity_I64
);
3025 assign(addr
, getIReg(regRs
));
3026 assign(old
, load(Ity_I64
, mkexpr(addr
)));
3027 assign(new_val
, binop(Iop_Sub64
,
3030 mips_load_store64(addr
, new_val
, old
, regRd
, True
);
3034 /* Load Atomic Set Word - LAS; Cavium OCTEON2 */
3036 DIP("las r%u,(r%u)\n", regRd
, regRs
);
3037 IRTemp new_val
= newTemp(Ity_I32
);
3038 IRTemp old
= newTemp(Ity_I32
);
3039 assign(addr
, getIReg(regRs
));
3040 assign(new_val
, mkU32(0xffffffff));
3041 assign(old
, load(Ity_I32
, mkexpr(addr
)));
3042 mips_load_store32(addr
, new_val
, old
, regRd
, True
);
3046 /* Load Atomic Set Doubleword - LASD; Cavium OCTEON2 */
3048 DIP("lasd r%u,(r%u)\n", regRd
, regRs
);
3049 IRTemp new_val
= newTemp(Ity_I64
);
3050 IRTemp old
= newTemp(Ity_I64
);
3051 assign(addr
, getIReg(regRs
));
3052 assign(new_val
, mkU64(0xffffffffffffffffULL
));
3053 assign(old
, load(Ity_I64
, mkexpr(addr
)));
3054 mips_load_store64(addr
, new_val
, old
, regRd
, True
);
3058 /* Load Atomic Clear Word - LAC; Cavium OCTEON2 */
3060 DIP("lac r%u,(r%u)\n", regRd
, regRs
);
3061 IRTemp new_val
= newTemp(Ity_I32
);
3062 IRTemp old
= newTemp(Ity_I32
);
3063 assign(addr
, getIReg(regRs
));
3064 assign(new_val
, mkU32(0));
3065 assign(old
, load(Ity_I32
, mkexpr(addr
)));
3066 mips_load_store32(addr
, new_val
, old
, regRd
, True
);
3070 /* Load Atomic Clear Doubleword - LACD; Cavium OCTEON2 */
3072 DIP("lacd r%u,(r%u)\n", regRd
, regRs
);
3073 IRTemp new_val
= newTemp(Ity_I64
);
3074 IRTemp old
= newTemp(Ity_I64
);
3075 assign(addr
, getIReg(regRs
));
3076 assign(new_val
, mkU64(0));
3077 assign(old
, load(Ity_I64
, mkexpr(addr
)));
3078 mips_load_store64(addr
, new_val
, old
, regRd
, True
);
3082 /* Load Atomic Add Word - LAA; Cavium OCTEON2 */
3084 DIP("laa r%u,(r%u),r%u\n", regRd
, regRs
, regRt
);
3085 IRTemp new_val
= newTemp(Ity_I32
);
3086 IRTemp old
= newTemp(Ity_I32
);
3087 assign(addr
, getIReg(regRs
));
3088 assign(old
, load(Ity_I32
, mkexpr(addr
)));
3089 assign(new_val
, binop(Iop_Add32
,
3091 mkNarrowTo32(ty
, getIReg(regRt
))));
3092 mips_load_store32(addr
, new_val
, old
, regRd
, True
);
3096 /* Load Atomic Add Doubleword - LAAD; Cavium OCTEON2 */
3098 DIP("laad r%u,(r%u),r%u\n", regRd
, regRs
, regRt
);
3099 IRTemp new_val
= newTemp(Ity_I64
);
3100 IRTemp old
= newTemp(Ity_I64
);
3101 assign(addr
, getIReg(regRs
));
3102 assign(old
, load(Ity_I64
, mkexpr(addr
)));
3103 assign(new_val
, binop(Iop_Add64
,
3104 load(Ity_I64
, mkexpr(addr
)),
3106 mips_load_store64(addr
, new_val
, old
, regRd
, True
);
3110 /* Load Atomic Swap Word - LAW; Cavium OCTEON2 */
3112 DIP("law r%u,(r%u)\n", regRd
, regRs
);
3113 IRTemp new_val
= newTemp(Ity_I32
);
3114 IRTemp old
= newTemp(Ity_I32
);
3115 assign(addr
, getIReg(regRs
));
3116 assign(new_val
, mkNarrowTo32(ty
, getIReg(regRt
)));
3117 assign(old
, load(Ity_I32
, mkexpr(addr
)));
3118 mips_load_store32(addr
, new_val
, old
, regRd
, True
);
3122 /* Load Atomic Swap Doubleword - LAWD; Cavium OCTEON2 */
3124 DIP("lawd r%u,(r%u)\n", regRd
, regRs
);
3125 IRTemp new_val
= newTemp(Ity_I64
);
3126 IRTemp old
= newTemp(Ity_I64
);
3127 assign(addr
, getIReg(regRs
));
3128 assign(new_val
, getIReg(regRt
));
3129 assign(old
, load(Ity_I64
, mkexpr(addr
)));
3130 mips_load_store64(addr
, new_val
, old
, regRd
, True
);
3135 vex_printf("Unknown laxx instruction, opc3=0x%x\n", opc3
);
3136 vex_printf("Instruction=0x%08x\n", theInstr
);
3143 /* Unsigned Byte Add - BADDU rd, rs, rt; Cavium OCTEON */
3145 DIP("BADDU r%u, r%u, r%u", regRs
, regRt
, regRd
);
3146 IRTemp t0
= newTemp(Ity_I8
);
3148 assign(t0
, binop(Iop_Add8
,
3149 mkNarrowTo8(ty
, getIReg(regRs
)),
3150 mkNarrowTo8(ty
, getIReg(regRt
))));
3153 putIReg(regRd
, binop(mkSzOp(ty
, Iop_And8
),
3154 unop(Iop_8Uto64
, mkexpr(t0
)),
3155 mkSzImm(ty
, 0xFF)));
3157 putIReg(regRd
, binop(mkSzOp(ty
, Iop_And8
),
3158 unop(Iop_8Uto32
, mkexpr(t0
)),
3159 mkSzImm(ty
, 0xFF)));
3164 case 0x2c: { /* Count Ones in a Word - POP; Cavium OCTEON */
3167 IRTemp old
= newTemp(ty
);
3168 IRTemp nyu
= IRTemp_INVALID
;
3169 assign(old
, getIReg(regRs
));
3170 DIP("pop r%u, r%u", regRd
, regRs
);
3172 for (i
= 0; i
< 5; i
++) {
3173 mask
[i
] = newTemp(ty
);
3178 assign(mask
[0], mkU64(0x0000000055555555));
3179 assign(mask
[1], mkU64(0x0000000033333333));
3180 assign(mask
[2], mkU64(0x000000000F0F0F0F));
3181 assign(mask
[3], mkU64(0x0000000000FF00FF));
3182 assign(mask
[4], mkU64(0x000000000000FFFF));
3184 for (i
= 0; i
< 5; i
++) {
3189 mkexpr(old
), mkexpr(mask
[i
])),
3192 mkexpr(old
), mkU8(shift
[i
])),
3197 assign(mask
[0], mkU32(0x55555555));
3198 assign(mask
[1], mkU32(0x33333333));
3199 assign(mask
[2], mkU32(0x0F0F0F0F));
3200 assign(mask
[3], mkU32(0x00FF00FF));
3201 assign(mask
[4], mkU32(0x0000FFFF));
3202 assign(old
, getIReg(regRs
));
3204 for (i
= 0; i
< 5; i
++) {
3209 mkexpr(old
), mkexpr(mask
[i
])),
3212 mkexpr(old
), mkU8(shift
[i
])),
3218 putIReg(regRd
, mkexpr(nyu
));
3222 /* Count Ones in a Doubleword - DPOP; Cavium OCTEON */
3226 IRTemp old
= newTemp(ty
);
3227 IRTemp nyu
= IRTemp_INVALID
;
3228 DIP("dpop r%u, r%u", regRd
, regRs
);
3230 for (i
= 0; i
< 6; i
++) {
3231 mask
[i
] = newTemp(ty
);
3235 vassert(mode64
); /*Caution! Only for Mode 64*/
3236 assign(mask
[0], mkU64(0x5555555555555555ULL
));
3237 assign(mask
[1], mkU64(0x3333333333333333ULL
));
3238 assign(mask
[2], mkU64(0x0F0F0F0F0F0F0F0FULL
));
3239 assign(mask
[3], mkU64(0x00FF00FF00FF00FFULL
));
3240 assign(mask
[4], mkU64(0x0000FFFF0000FFFFULL
));
3241 assign(mask
[5], mkU64(0x00000000FFFFFFFFULL
));
3242 assign(old
, getIReg(regRs
));
3244 for (i
= 0; i
< 6; i
++) {
3245 nyu
= newTemp(Ity_I64
);
3249 mkexpr(old
), mkexpr(mask
[i
])),
3252 mkexpr(old
), mkU8(shift
[i
])),
3257 putIReg(regRd
, mkexpr(nyu
));
3261 case 0x32: /* 5. CINS rd, rs, p, lenm1 */
3262 DIP("cins r%u, r%u, %u, %u\n", regRt
, regRs
, p
, lenM1
);
3263 assign ( tmp
, binop(Iop_Shl64
, mkexpr(tmpRs
),
3264 mkU8(64 - ( lenM1
+ 1 ))));
3265 assign ( tmpRt
, binop(Iop_Shr64
, mkexpr( tmp
),
3266 mkU8(64 - (p
+ lenM1
+ 1))));
3267 putIReg( regRt
, mkexpr(tmpRt
));
3270 case 0x33: /* 6. CINS32 rd, rs, p+32, lenm1 */
3271 DIP("cins32 r%u, r%u, %d, %d\n", regRt
, regRs
, p
+ 32, lenM1
);
3272 assign ( tmp
, binop(Iop_Shl64
, mkexpr(tmpRs
),
3273 mkU8(64 - ( lenM1
+ 1 ))));
3274 assign ( tmpRt
, binop(Iop_Shr64
, mkexpr( tmp
),
3275 mkU8(32 - (p
+ lenM1
+ 1))));
3276 putIReg( regRt
, mkexpr(tmpRt
));
3279 case 0x3A: /* 3. EXTS rt, rs, p len */
3280 DIP("exts r%u, r%u, %d, %d\n", regRt
, regRs
, p
, lenM1
);
3281 size
= lenM1
+ 1; /* lenm1+1 */
3282 UChar lsAmt
= 64 - (p
+ size
); /* p+lenm1+1 */
3283 UChar rsAmt
= 64 - size
; /* lenm1+1 */
3284 tmp
= newTemp(Ity_I64
);
3285 assign(tmp
, binop(Iop_Shl64
, mkexpr(tmpRs
), mkU8(lsAmt
)));
3286 putIReg(regRt
, binop(Iop_Sar64
, mkexpr(tmp
), mkU8(rsAmt
)));
3289 case 0x3B: /* 4. EXTS32 rt, rs, p len */
3290 DIP("exts32 r%u, r%u, %d, %d\n", regRt
, regRs
, p
, lenM1
);
3291 assign ( tmp
, binop(Iop_Shl64
, mkexpr(tmpRs
),
3292 mkU8(32 - (p
+ lenM1
+ 1))));
3293 assign ( tmpRt
, binop(Iop_Sar64
, mkexpr(tmp
),
3294 mkU8(64 - (lenM1
+ 1))) );
3295 putIReg( regRt
, mkexpr(tmpRt
));
3298 case 0x2B: /* 20. SNE rd, rs, rt */
3299 DIP("sne r%u, r%u, r%u", regRd
, regRs
, regRt
);
3302 putIReg(regRd
, unop(Iop_1Uto64
, binop(Iop_CmpNE64
,
3306 putIReg(regRd
, unop(Iop_1Uto32
, binop(Iop_CmpNE32
,
3312 case 0x2A: /* Set Equals - SEQ; Cavium OCTEON */
3313 DIP("seq r%u, r%u, %d", regRd
, regRs
, regRt
);
3316 putIReg(regRd
, unop(Iop_1Uto64
,
3317 binop(Iop_CmpEQ64
, getIReg(regRs
),
3320 putIReg(regRd
, unop(Iop_1Uto32
,
3321 binop(Iop_CmpEQ32
, getIReg(regRs
),
3326 case 0x2E: /* Set Equals Immediate - SEQI; Cavium OCTEON */
3327 DIP("seqi r%u, r%u, %u", regRt
, regRs
, imm
);
3330 putIReg(regRt
, unop(Iop_1Uto64
,
3331 binop(Iop_CmpEQ64
, getIReg(regRs
),
3332 mkU64(extend_s_10to64(imm
)))));
3334 putIReg(regRt
, unop(Iop_1Uto32
,
3335 binop(Iop_CmpEQ32
, getIReg(regRs
),
3336 mkU32(extend_s_10to32(imm
)))));
3340 case 0x2F: /* Set Not Equals Immediate - SNEI; Cavium OCTEON */
3341 DIP("snei r%u, r%u, %u", regRt
, regRs
, imm
);
3344 putIReg(regRt
, unop(Iop_1Uto64
,
3347 mkU64(extend_s_10to64(imm
)))));
3349 putIReg(regRt
, unop(Iop_1Uto32
,
3352 mkU32(extend_s_10to32(imm
)))));
3361 } /* opc1 0x1C ends here*/
3365 case 0x0A: { // lx - Load indexed instructions
3366 switch (get_sa(theInstr
)) {
3367 case 0x00: { // LWX rd, index(base)
3368 DIP("lwx r%u, r%u(r%u)", regRd
, regRt
, regRs
);
3369 LOADX_STORE_PATTERN
;
3370 putIReg(regRd
, mkWidenFrom32(ty
, load(Ity_I32
, mkexpr(t1
)),
3375 case 0x04: // LHX rd, index(base)
3376 DIP("lhx r%u, r%u(r%u)", regRd
, regRt
, regRs
);
3377 LOADX_STORE_PATTERN
;
3380 putIReg(regRd
, unop(Iop_16Sto64
, load(Ity_I16
,
3383 putIReg(regRd
, unop(Iop_16Sto32
, load(Ity_I16
,
3388 case 0x08: { // LDX rd, index(base)
3389 DIP("ldx r%u, r%u(r%u)", regRd
, regRt
, regRs
);
3390 vassert(mode64
); /* Currently Implemented only for n64 */
3391 LOADX_STORE_PATTERN
;
3392 putIReg(regRd
, load(Ity_I64
, mkexpr(t1
)));
3396 case 0x06: { // LBUX rd, index(base)
3397 DIP("lbux r%u, r%u(r%u)", regRd
, regRt
, regRs
);
3398 LOADX_STORE_PATTERN
;
3401 putIReg(regRd
, unop(Iop_8Uto64
, load(Ity_I8
,
3404 putIReg(regRd
, unop(Iop_8Uto32
, load(Ity_I8
,
3410 case 0x10: { // LWUX rd, index(base) (Cavium OCTEON)
3411 DIP("lwux r%u, r%u(r%u)", regRd
, regRt
, regRs
);
3412 LOADX_STORE_PATTERN
; /* same for both 32 and 64 modes*/
3413 putIReg(regRd
, mkWidenFrom32(ty
, load(Ity_I32
, mkexpr(t1
)),
3418 case 0x14: { // LHUX rd, index(base) (Cavium OCTEON)
3419 DIP("lhux r%u, r%u(r%u)", regRd
, regRt
, regRs
);
3420 LOADX_STORE_PATTERN
;
3424 unop(Iop_16Uto64
, load(Ity_I16
, mkexpr(t1
))));
3427 unop(Iop_16Uto32
, load(Ity_I16
, mkexpr(t1
))));
3432 case 0x16: { // LBX rd, index(base) (Cavium OCTEON)
3433 DIP("lbx r%u, r%u(r%u)", regRd
, regRs
, regRt
);
3434 LOADX_STORE_PATTERN
;
3438 unop(Iop_8Sto64
, load(Ity_I8
, mkexpr(t1
))));
3441 unop(Iop_8Sto32
, load(Ity_I8
, mkexpr(t1
))));
3447 vex_printf("\nUnhandled LX instruction opc3 = %x\n",
3454 } /* opc1 = 0x1F & opc2 = 0xA (LX) ends here*/
3457 } /* opc1 = 0x1F ends here*/
3461 } /* main opc1 switch ends here */
3466 static Int
msa_I8_logical(UInt cins
, UChar wd
, UChar ws
)
3472 operation
= (cins
>> 24) & 3;
3473 i8
= (cins
& 0x00FF0000) >> 16;
3475 switch (operation
) {
3476 case 0x00: { /* ANDI.B */
3477 DIP("ANDI.B w%d, w%d, %d", wd
, ws
, i8
);
3478 t1
= newTemp(Ity_V128
);
3479 t2
= newTemp(Ity_V128
);
3481 tmp
|= (tmp
<< 56) | (tmp
<< 48) | (tmp
<< 40) |
3482 (tmp
<< 32) | (tmp
<< 24) | (tmp
<< 16) |
3484 assign(t1
, getWReg(ws
));
3485 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
3486 putWReg(wd
, binop(Iop_AndV128
, mkexpr(t1
), mkexpr(t2
)));
3490 case 0x01: { /* ORI.B */
3491 DIP("ORI.B w%d, w%d, %d", wd
, ws
, i8
);
3492 t1
= newTemp(Ity_V128
);
3493 t2
= newTemp(Ity_V128
);
3495 tmp
|= (tmp
<< 56) | (tmp
<< 48) | (tmp
<< 40) |
3496 (tmp
<< 32) | (tmp
<< 24) | (tmp
<< 16) |
3498 assign(t1
, getWReg(ws
));
3499 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
3500 putWReg(wd
, binop(Iop_OrV128
, mkexpr(t1
), mkexpr(t2
)));
3504 case 0x02: { /* NORI.B */
3505 DIP("NORI.B w%d, w%d, %d", wd
, ws
, i8
);
3506 t1
= newTemp(Ity_V128
);
3507 t2
= newTemp(Ity_V128
);
3509 tmp
|= (tmp
<< 56) | (tmp
<< 48) | (tmp
<< 40) |
3510 (tmp
<< 32) | (tmp
<< 24) | (tmp
<< 16) |
3512 assign(t1
, getWReg(ws
));
3513 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
3514 putWReg(wd
, unop(Iop_NotV128
, binop(Iop_OrV128
,
3515 mkexpr(t1
), mkexpr(t2
))));
3519 case 0x03: { /* XORI.B */
3520 DIP("XORI.B w%d, w%d, %d", wd
, ws
, i8
);
3521 t1
= newTemp(Ity_V128
);
3522 t2
= newTemp(Ity_V128
);
3524 tmp
|= (tmp
<< 56) | (tmp
<< 48) | (tmp
<< 40) |
3525 (tmp
<< 32) | (tmp
<< 24) | (tmp
<< 16) |
3527 assign(t1
, getWReg(ws
));
3528 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
3529 putWReg(wd
, binop(Iop_XorV128
, mkexpr(t1
), mkexpr(t2
)));
3540 static Int
msa_I8_branch(UInt cins
, UChar wd
, UChar ws
)
3542 IRTemp t1
, t2
, t3
, t4
;
3546 operation
= (cins
>> 24) & 3;
3547 i8
= (cins
& 0x00FF0000) >> 16;
3549 switch (operation
) {
3550 case 0x00: { /* BMNZI.B */
3551 DIP("BMNZI.B w%d, w%d, %d", wd
, ws
, i8
);
3552 t1
= newTemp(Ity_V128
);
3553 t2
= newTemp(Ity_V128
);
3554 t3
= newTemp(Ity_V128
);
3555 t4
= newTemp(Ity_V128
);
3557 tmp
|= (tmp
<< 56) | (tmp
<< 48) | (tmp
<< 40) |
3558 (tmp
<< 32) | (tmp
<< 24) | (tmp
<< 16) |
3560 assign(t4
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
3561 assign(t1
, binop(Iop_AndV128
, getWReg(ws
), mkexpr(t4
)));
3562 assign(t2
, binop(Iop_AndV128
, getWReg(wd
),
3563 unop(Iop_NotV128
, mkexpr(t4
))));
3564 assign(t3
, binop(Iop_OrV128
, mkexpr(t1
), mkexpr(t2
)));
3565 putWReg(wd
, mkexpr(t3
));
3569 case 0x01: { /* BMZI.B */
3570 DIP("BMZI.B w%d, w%d, %d", wd
, ws
, i8
);
3571 t1
= newTemp(Ity_V128
);
3572 t2
= newTemp(Ity_V128
);
3573 t3
= newTemp(Ity_V128
);
3574 t4
= newTemp(Ity_V128
);
3576 tmp
|= (tmp
<< 56) | (tmp
<< 48) | (tmp
<< 40) |
3577 (tmp
<< 32) | (tmp
<< 24) | (tmp
<< 16) |
3579 assign(t4
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
3580 assign(t1
, binop(Iop_AndV128
, getWReg(wd
), mkexpr(t4
)));
3581 assign(t2
, binop(Iop_AndV128
, getWReg(ws
),
3582 unop(Iop_NotV128
, mkexpr(t4
))));
3583 assign(t3
, binop(Iop_OrV128
, mkexpr(t1
), mkexpr(t2
)));
3584 putWReg(wd
, mkexpr(t3
));
3588 case 0x02: { /* BSELI.B */
3589 DIP("BSELI.B w%d, w%d, %d", wd
, ws
, i8
);
3590 t1
= newTemp(Ity_V128
);
3591 t2
= newTemp(Ity_V128
);
3592 t3
= newTemp(Ity_V128
);
3593 t4
= newTemp(Ity_V128
);
3595 tmp
|= (tmp
<< 56) | (tmp
<< 48) | (tmp
<< 40) |
3596 (tmp
<< 32) | (tmp
<< 24) | (tmp
<< 16) |
3598 assign(t4
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
3599 assign(t1
, binop(Iop_AndV128
, getWReg(wd
), mkexpr(t4
)));
3600 assign(t2
, binop(Iop_AndV128
, getWReg(ws
),
3601 unop(Iop_NotV128
, getWReg(wd
))));
3602 assign(t3
, binop(Iop_OrV128
, mkexpr(t1
), mkexpr(t2
)));
3603 putWReg(wd
, mkexpr(t3
));
3614 static Int
msa_I8_shift(UInt cins
, UChar wd
, UChar ws
)
3620 operation
= (cins
>> 24) & 3;
3621 i8
= (cins
& 0x00FF0000) >> 16;
3623 switch (operation
) {
3624 case 0x00: { /* SHF.B */
3625 DIP("SHF.B w%d, w%d, %d", wd
, ws
, i8
);
3626 t1
= newTemp(Ity_V128
);
3627 t2
= newTemp(Ity_V128
);
3628 assign(t1
, getWReg(wd
));
3629 assign(t2
, getWReg(ws
));
3633 for (i
= 0; i
< 16; i
++) {
3634 tmp
[i
] = newTemp(Ity_I8
);
3636 binop(Iop_GetElem8x16
, mkexpr(t2
),
3638 ((i8
>> (i
% 4) * 2) & 0x03))));
3641 putWReg(wd
, binop(Iop_64HLtoV128
,
3671 mkexpr(tmp
[0]))))));
3675 case 0x01: { /* SHF.H */
3676 DIP("SHF.H w%d, w%d, %d", wd
, ws
, i8
);
3677 t1
= newTemp(Ity_V128
);
3678 t2
= newTemp(Ity_V128
);
3679 assign(t1
, getWReg(wd
));
3680 assign(t2
, getWReg(ws
));
3684 for (i
= 0; i
< 8; i
++) {
3685 tmp
[i
] = newTemp(Ity_I16
);
3687 binop(Iop_GetElem16x8
, mkexpr(t2
),
3689 ((i8
>> (i
% 4) * 2) & 0x03))));
3692 putWReg(wd
, binop(Iop_64HLtoV128
,
3695 mkexpr(tmp
[7]), mkexpr(tmp
[6])),
3697 mkexpr(tmp
[5]), mkexpr(tmp
[4]))),
3700 mkexpr(tmp
[3]), mkexpr(tmp
[2])),
3702 mkexpr(tmp
[1]), mkexpr(tmp
[0])))));
3706 case 0x02: { /* SHF.W */
3707 DIP("SHF.W w%d, w%d, %d", wd
, ws
, i8
);
3708 t1
= newTemp(Ity_V128
);
3709 t2
= newTemp(Ity_V128
);
3710 assign(t1
, getWReg(wd
));
3711 assign(t2
, getWReg(ws
));
3715 for (i
= 0; i
< 4; i
++) {
3716 tmp
[i
] = newTemp(Ity_I32
);
3718 binop(Iop_GetElem32x4
, mkexpr(t2
),
3720 ((i8
>> (i
% 4) * 2) & 0x03))));
3723 putWReg(wd
, binop(Iop_64HLtoV128
,
3725 mkexpr(tmp
[3]), mkexpr(tmp
[2])),
3727 mkexpr(tmp
[1]), mkexpr(tmp
[0]))));
3738 static Int
msa_I5_06(UInt cins
, UChar wd
, UChar ws
) /* I5 (0x06) */
3744 operation
= (cins
& 0x03800000) >> 23;
3745 df
= (cins
& 0x00600000) >> 21;
3746 wt
= (cins
& 0x001F0000) >> 16;
3748 switch (operation
) {
3749 case 0x00: { /* ADDVI */
3753 case 0x00: { /* ADDVI.B */
3754 DIP("ADDVI.B w%d, w%d, %d", wd
, ws
, wt
);
3755 t1
= newTemp(Ity_V128
);
3756 t2
= newTemp(Ity_V128
);
3757 t3
= newTemp(Ity_V128
);
3758 tmp
|= (tmp
<< 56) | (tmp
<< 48) | (tmp
<< 40) |
3759 (tmp
<< 32) | (tmp
<< 24) | (tmp
<< 16) |
3761 assign(t1
, getWReg(ws
));
3762 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
3763 assign(t3
, binop(Iop_Add8x16
, mkexpr(t1
), mkexpr(t2
)));
3764 putWReg(wd
, mkexpr(t3
));
3768 case 0x01: { /* ADDVI.H */
3769 DIP("ADDVI.H w%d, w%d, %d", wd
, ws
, wt
);
3770 t1
= newTemp(Ity_V128
);
3771 t2
= newTemp(Ity_V128
);
3772 t3
= newTemp(Ity_V128
);
3773 tmp
|= (tmp
<< 48) | (tmp
<< 32) | (tmp
<< 16);
3774 assign(t1
, getWReg(ws
));
3775 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
3776 assign(t3
, binop(Iop_Add16x8
, mkexpr(t1
), mkexpr(t2
)));
3777 putWReg(wd
, mkexpr(t3
));
3781 case 0x02: { /* ADDVI.W */
3782 DIP("ADDVI.W w%d, w%d, %d", wd
, ws
, wt
);
3783 t1
= newTemp(Ity_V128
);
3784 t2
= newTemp(Ity_V128
);
3785 t3
= newTemp(Ity_V128
);
3787 assign(t1
, getWReg(ws
));
3788 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
3789 assign(t3
, binop(Iop_Add32x4
, mkexpr(t1
), mkexpr(t2
)));
3790 putWReg(wd
, mkexpr(t3
));
3794 case 0x03: { /* ADDVI.D */
3795 DIP("ADDVI.D w%d, w%d, %d", wd
, ws
, wt
);
3796 t1
= newTemp(Ity_V128
);
3797 t2
= newTemp(Ity_V128
);
3798 t3
= newTemp(Ity_V128
);
3799 assign(t1
, getWReg(ws
));
3800 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
3801 assign(t3
, binop(Iop_Add64x2
, mkexpr(t1
), mkexpr(t2
)));
3802 putWReg(wd
, mkexpr(t3
));
3810 case 0x01: { /* SUBVI */
3814 case 0x00: { /* SUBVI.B */
3815 DIP("SUBVI.B w%d, w%d, %d", wd
, ws
, wt
);
3816 t1
= newTemp(Ity_V128
);
3817 t2
= newTemp(Ity_V128
);
3818 t3
= newTemp(Ity_V128
);
3819 tmp
|= (tmp
<< 56) | (tmp
<< 48) | (tmp
<< 40) |
3820 (tmp
<< 32) | (tmp
<< 24) | (tmp
<< 16) |
3822 assign(t1
, getWReg(ws
));
3823 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
3824 assign(t3
, binop(Iop_Sub8x16
, mkexpr(t1
), mkexpr(t2
)));
3825 putWReg(wd
, mkexpr(t3
));
3829 case 0x01: { /* SUBVI.H */
3830 DIP("SUBVI.H w%d, w%d, %d", wd
, ws
, wt
);
3831 t1
= newTemp(Ity_V128
);
3832 t2
= newTemp(Ity_V128
);
3833 t3
= newTemp(Ity_V128
);
3834 tmp
|= (tmp
<< 48) | (tmp
<< 32) | (tmp
<< 16);
3835 assign(t1
, getWReg(ws
));
3836 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
3837 assign(t3
, binop(Iop_Sub16x8
, mkexpr(t1
), mkexpr(t2
)));
3838 putWReg(wd
, mkexpr(t3
));
3842 case 0x02: { /* SUBVI.W */
3843 DIP("SUBVI.W w%d, w%d, %d", wd
, ws
, wt
);
3844 t1
= newTemp(Ity_V128
);
3845 t2
= newTemp(Ity_V128
);
3846 t3
= newTemp(Ity_V128
);
3848 assign(t1
, getWReg(ws
));
3849 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
3850 assign(t3
, binop(Iop_Sub32x4
, mkexpr(t1
), mkexpr(t2
)));
3851 putWReg(wd
, mkexpr(t3
));
3855 case 0x03: { /* SUBVI.D */
3856 DIP("SUBVI.D w%d, w%d, %d", wd
, ws
, wt
);
3857 t1
= newTemp(Ity_V128
);
3858 t2
= newTemp(Ity_V128
);
3859 t3
= newTemp(Ity_V128
);
3860 assign(t1
, getWReg(ws
));
3861 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
3862 assign(t3
, binop(Iop_Sub64x2
, mkexpr(t1
), mkexpr(t2
)));
3863 putWReg(wd
, mkexpr(t3
));
3871 case 0x02: { /* MAXI_S */
3875 case 0x00: { /* MAXI_S.B */
3876 DIP("MAXI_S.B w%d, w%d, %d", wd
, ws
, wt
);
3877 t1
= newTemp(Ity_V128
);
3878 t2
= newTemp(Ity_V128
);
3879 t3
= newTemp(Ity_V128
);
3880 char stemp
= ((int)tmp
<< 27) >> 27;
3882 tmp
|= (tmp
<< 56) | (tmp
<< 48) | (tmp
<< 40) |
3883 (tmp
<< 32) | (tmp
<< 24) | (tmp
<< 16) |
3885 assign(t1
, getWReg(ws
));
3886 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
3887 assign(t3
, binop(Iop_Max8Sx16
, mkexpr(t1
), mkexpr(t2
)));
3888 putWReg(wd
, mkexpr(t3
));
3892 case 0x01: { /* MAXI_S.H */
3893 DIP("MAXI_S.H w%d, w%d, %d", wd
, ws
, wt
);
3894 t1
= newTemp(Ity_V128
);
3895 t2
= newTemp(Ity_V128
);
3896 t3
= newTemp(Ity_V128
);
3897 short stemp
= ((int)tmp
<< 27) >> 27;
3898 tmp
= (UShort
)stemp
;
3899 tmp
|= (tmp
<< 48) | (tmp
<< 32) | (tmp
<< 16);
3900 assign(t1
, getWReg(ws
));
3901 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
3902 assign(t3
, binop(Iop_Max16Sx8
, mkexpr(t1
), mkexpr(t2
)));
3903 putWReg(wd
, mkexpr(t3
));
3907 case 0x02: { /* MAXI_S.W */
3908 DIP("MAXI_S.W w%d, w%d, %d", wd
, ws
, wt
);
3909 t1
= newTemp(Ity_V128
);
3910 t2
= newTemp(Ity_V128
);
3911 t3
= newTemp(Ity_V128
);
3912 int stemp
= ((int)tmp
<< 27) >> 27;
3915 assign(t1
, getWReg(ws
));
3916 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
3917 assign(t3
, binop(Iop_Max32Sx4
, mkexpr(t1
), mkexpr(t2
)));
3918 putWReg(wd
, mkexpr(t3
));
3922 case 0x03: { /* MAXI_S.D */
3923 DIP("MAXI_S.D w%d, w%d, %d", wd
, ws
, wt
);
3924 t1
= newTemp(Ity_V128
);
3925 t2
= newTemp(Ity_V128
);
3926 t3
= newTemp(Ity_V128
);
3927 Long stemp
= ((Long
)tmp
<< 59) >> 59;
3929 assign(t1
, getWReg(ws
));
3930 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
3931 assign(t3
, binop(Iop_Max64Sx2
, mkexpr(t1
), mkexpr(t2
)));
3932 putWReg(wd
, mkexpr(t3
));
3940 case 0x03: { /* MAXI_U */
3944 case 0x00: { /* MAXI_U.B */
3945 DIP("MAXI_U.B w%d, w%d, %d", wd
, ws
, wt
);
3946 t1
= newTemp(Ity_V128
);
3947 t2
= newTemp(Ity_V128
);
3948 t3
= newTemp(Ity_V128
);
3949 tmp
|= (tmp
<< 56) | (tmp
<< 48) | (tmp
<< 40) |
3950 (tmp
<< 32) | (tmp
<< 24) | (tmp
<< 16) |
3952 assign(t1
, getWReg(ws
));
3953 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
3954 assign(t3
, binop(Iop_Max8Ux16
, mkexpr(t1
), mkexpr(t2
)));
3955 putWReg(wd
, mkexpr(t3
));
3959 case 0x01: { /* MAXI_U.H */
3960 DIP("MAXI_U.H w%d, w%d, %d", wd
, ws
, wt
);
3961 t1
= newTemp(Ity_V128
);
3962 t2
= newTemp(Ity_V128
);
3963 t3
= newTemp(Ity_V128
);
3964 tmp
|= (tmp
<< 48) | (tmp
<< 32) | (tmp
<< 16);
3965 assign(t1
, getWReg(ws
));
3966 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
3967 assign(t3
, binop(Iop_Max16Ux8
, mkexpr(t1
), mkexpr(t2
)));
3968 putWReg(wd
, mkexpr(t3
));
3972 case 0x02: { /* MAXI_U.W */
3973 DIP("MAXI_U.W w%d, w%d, %d", wd
, ws
, wt
);
3974 t1
= newTemp(Ity_V128
);
3975 t2
= newTemp(Ity_V128
);
3976 t3
= newTemp(Ity_V128
);
3978 assign(t1
, getWReg(ws
));
3979 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
3980 assign(t3
, binop(Iop_Max32Ux4
, mkexpr(t1
), mkexpr(t2
)));
3981 putWReg(wd
, mkexpr(t3
));
3985 case 0x03: { /* MAXI_U.D */
3986 DIP("MAXI_U.D w%d, w%d, %d", wd
, ws
, wt
);
3987 t1
= newTemp(Ity_V128
);
3988 t2
= newTemp(Ity_V128
);
3989 t3
= newTemp(Ity_V128
);
3990 assign(t1
, getWReg(ws
));
3991 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
3992 assign(t3
, binop(Iop_Max64Ux2
, mkexpr(t1
), mkexpr(t2
)));
3993 putWReg(wd
, mkexpr(t3
));
4001 case 0x04: { /* MINI_S */
4005 case 0x00: { /* MINI_S.B */
4006 DIP("MINI_S.B w%d, w%d, %d", wd
, ws
, wt
);
4007 t1
= newTemp(Ity_V128
);
4008 t2
= newTemp(Ity_V128
);
4009 t3
= newTemp(Ity_V128
);
4010 char stemp
= ((int)tmp
<< 27) >> 27;
4012 tmp
|= (tmp
<< 56) | (tmp
<< 48) | (tmp
<< 40) |
4013 (tmp
<< 32) | (tmp
<< 24) | (tmp
<< 16) |
4015 assign(t1
, getWReg(ws
));
4016 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
4017 assign(t3
, binop(Iop_Min8Sx16
, mkexpr(t1
), mkexpr(t2
)));
4018 putWReg(wd
, mkexpr(t3
));
4022 case 0x01: { /* MINI_S.H */
4023 DIP("MINI_S.H w%d, w%d, %d", wd
, ws
, wt
);
4024 t1
= newTemp(Ity_V128
);
4025 t2
= newTemp(Ity_V128
);
4026 t3
= newTemp(Ity_V128
);
4027 short stemp
= ((int)tmp
<< 27) >> 27;
4028 tmp
= (UShort
)stemp
;
4029 tmp
|= (tmp
<< 48) | (tmp
<< 32) | (tmp
<< 16);
4030 assign(t1
, getWReg(ws
));
4031 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
4032 assign(t3
, binop(Iop_Min16Sx8
, mkexpr(t1
), mkexpr(t2
)));
4033 putWReg(wd
, mkexpr(t3
));
4037 case 0x02: { /* MINI_S.W */
4038 DIP("MINI_S.W w%d, w%d, %d", wd
, ws
, wt
);
4039 t1
= newTemp(Ity_V128
);
4040 t2
= newTemp(Ity_V128
);
4041 t3
= newTemp(Ity_V128
);
4042 int stemp
= ((int)tmp
<< 27) >> 27;
4045 assign(t1
, getWReg(ws
));
4046 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
4047 assign(t3
, binop(Iop_Min32Sx4
, mkexpr(t1
), mkexpr(t2
)));
4048 putWReg(wd
, mkexpr(t3
));
4052 case 0x03: { /* MINI_S.D */
4053 DIP("MINI_S.D w%d, w%d, %d", wd
, ws
, wt
);
4054 t1
= newTemp(Ity_V128
);
4055 t2
= newTemp(Ity_V128
);
4056 t3
= newTemp(Ity_V128
);
4057 Long stemp
= ((Long
)tmp
<< 59) >> 59;
4059 assign(t1
, getWReg(ws
));
4060 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
4061 assign(t3
, binop(Iop_Min64Sx2
, mkexpr(t1
), mkexpr(t2
)));
4062 putWReg(wd
, mkexpr(t3
));
4070 case 0x05: { /* MINI_U */
4074 case 0x00: { /* MINI_U.B */
4075 DIP("MINI_U.B w%d, w%d, %d", wd
, ws
, wt
);
4076 t1
= newTemp(Ity_V128
);
4077 t2
= newTemp(Ity_V128
);
4078 t3
= newTemp(Ity_V128
);
4079 tmp
|= (tmp
<< 56) | (tmp
<< 48) | (tmp
<< 40) |
4080 (tmp
<< 32) | (tmp
<< 24) | (tmp
<< 16) |
4082 assign(t1
, getWReg(ws
));
4083 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
4084 assign(t3
, binop(Iop_Min8Ux16
, mkexpr(t1
), mkexpr(t2
)));
4085 putWReg(wd
, mkexpr(t3
));
4089 case 0x01: { /* MINI_U.H */
4090 DIP("MINI_U.H w%d, w%d, %d", wd
, ws
, wt
);
4091 t1
= newTemp(Ity_V128
);
4092 t2
= newTemp(Ity_V128
);
4093 t3
= newTemp(Ity_V128
);
4094 tmp
|= (tmp
<< 48) | (tmp
<< 32) | (tmp
<< 16);
4095 assign(t1
, getWReg(ws
));
4096 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
4097 assign(t3
, binop(Iop_Min16Ux8
, mkexpr(t1
), mkexpr(t2
)));
4098 putWReg(wd
, mkexpr(t3
));
4102 case 0x02: { /* MINI_U.W */
4103 DIP("MINI_U.W w%d, w%d, %d", wd
, ws
, wt
);
4104 t1
= newTemp(Ity_V128
);
4105 t2
= newTemp(Ity_V128
);
4106 t3
= newTemp(Ity_V128
);
4108 assign(t1
, getWReg(ws
));
4109 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
4110 assign(t3
, binop(Iop_Min32Ux4
, mkexpr(t1
), mkexpr(t2
)));
4111 putWReg(wd
, mkexpr(t3
));
4115 case 0x03: { /* MINI_U.D */
4116 DIP("MINI_U.D w%d, w%d, %d", wd
, ws
, wt
);
4117 t1
= newTemp(Ity_V128
);
4118 t2
= newTemp(Ity_V128
);
4119 t3
= newTemp(Ity_V128
);
4120 assign(t1
, getWReg(ws
));
4121 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
4122 assign(t3
, binop(Iop_Min64Ux2
, mkexpr(t1
), mkexpr(t2
)));
4123 putWReg(wd
, mkexpr(t3
));
4139 static Int
msa_I5_07(UInt cins
, UChar wd
, UChar ws
) /* I5 (0x07) / I10 */
4145 operation
= (cins
& 0x03800000) >> 23;
4146 df
= (cins
& 0x00600000) >> 21;
4147 i5
= (cins
& 0x001F0000) >> 16;
4149 switch (operation
) {
4154 case 0x00: { /* CEQI.B */
4155 DIP("CEQI.B w%d, w%d, %d", wd
, ws
, i5
);
4156 t1
= newTemp(Ity_V128
);
4157 t2
= newTemp(Ity_V128
);
4158 t3
= newTemp(Ity_V128
);
4159 char stemp
= ((int)tmp
<< 27) >> 27;
4161 tmp
|= (tmp
<< 56) | (tmp
<< 48) | (tmp
<< 40) |
4162 (tmp
<< 32) | (tmp
<< 24) | (tmp
<< 16) |
4164 assign(t1
, getWReg(ws
));
4165 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
4166 assign(t3
, binop(Iop_CmpEQ8x16
, mkexpr(t1
), mkexpr(t2
)));
4167 putWReg(wd
, mkexpr(t3
));
4171 case 0x01: { /* CEQI.H */
4172 DIP("CEQI.H w%d, w%d, %d", wd
, ws
, i5
);
4173 t1
= newTemp(Ity_V128
);
4174 t2
= newTemp(Ity_V128
);
4175 t3
= newTemp(Ity_V128
);
4176 short stemp
= ((int)tmp
<< 27) >> 27;
4177 tmp
= (UShort
)stemp
;
4178 tmp
|= (tmp
<< 48) | (tmp
<< 32) | (tmp
<< 16);
4179 assign(t1
, getWReg(ws
));
4180 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
4181 assign(t3
, binop(Iop_CmpEQ16x8
, mkexpr(t1
), mkexpr(t2
)));
4182 putWReg(wd
, mkexpr(t3
));
4186 case 0x02: { /* CEQI.W */
4187 DIP("CEQI.W w%d, w%d, %d", wd
, ws
, i5
);
4188 t1
= newTemp(Ity_V128
);
4189 t2
= newTemp(Ity_V128
);
4190 t3
= newTemp(Ity_V128
);
4191 int stemp
= ((int)tmp
<< 27) >> 27;
4194 assign(t1
, getWReg(ws
));
4195 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
4196 assign(t3
, binop(Iop_CmpEQ32x4
, mkexpr(t1
), mkexpr(t2
)));
4197 putWReg(wd
, mkexpr(t3
));
4201 case 0x03: { /* CEQI.D */
4202 DIP("CEQI.D w%d, w%d, %d", wd
, ws
, i5
);
4203 t1
= newTemp(Ity_V128
);
4204 t2
= newTemp(Ity_V128
);
4205 t3
= newTemp(Ity_V128
);
4206 Long stemp
= ((Long
)tmp
<< 59) >> 59;
4208 assign(t1
, getWReg(ws
));
4209 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
4210 assign(t3
, binop(Iop_CmpEQ64x2
, mkexpr(t1
), mkexpr(t2
)));
4211 putWReg(wd
, mkexpr(t3
));
4219 case 0x02: { /* CLTI_S.df */
4223 case 0x00: { /* CLTI_S.B */
4224 DIP("CLTI_S.B w%d, w%d, %d", wd
, ws
, i5
);
4225 t1
= newTemp(Ity_V128
);
4226 t2
= newTemp(Ity_V128
);
4227 t3
= newTemp(Ity_V128
);
4228 char stemp
= ((int)tmp
<< 27) >> 27;
4230 tmp
|= (tmp
<< 56) | (tmp
<< 48) | (tmp
<< 40) |
4231 (tmp
<< 32) | (tmp
<< 24) | (tmp
<< 16) |
4233 assign(t1
, getWReg(ws
));
4234 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
4235 assign(t3
, binop(Iop_CmpGT8Sx16
, mkexpr(t2
), mkexpr(t1
)));
4236 putWReg(wd
, mkexpr(t3
));
4240 case 0x01: { /* CLTI_S.H */
4241 DIP("CLTI_S.H w%d, w%d, %d", wd
, ws
, i5
);
4242 t1
= newTemp(Ity_V128
);
4243 t2
= newTemp(Ity_V128
);
4244 t3
= newTemp(Ity_V128
);
4245 short stemp
= ((int)tmp
<< 27) >> 27;
4246 tmp
= (UShort
)stemp
;
4247 tmp
|= (tmp
<< 48) | (tmp
<< 32) | (tmp
<< 16);
4248 assign(t1
, getWReg(ws
));
4249 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
4250 assign(t3
, binop(Iop_CmpGT16Sx8
, mkexpr(t2
), mkexpr(t1
)));
4251 putWReg(wd
, mkexpr(t3
));
4255 case 0x02: { /* CLTI_S.W */
4256 DIP("CLTI_S.W w%d, w%d, %d", wd
, ws
, i5
);
4257 t1
= newTemp(Ity_V128
);
4258 t2
= newTemp(Ity_V128
);
4259 t3
= newTemp(Ity_V128
);
4260 int stemp
= ((int)tmp
<< 27) >> 27;
4263 assign(t1
, getWReg(ws
));
4264 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
4265 assign(t3
, binop(Iop_CmpGT32Sx4
, mkexpr(t2
), mkexpr(t1
)));
4266 putWReg(wd
, mkexpr(t3
));
4270 case 0x03: { /* CLTI_S.D */
4271 DIP("CLTI_S.D w%d, w%d, %d", wd
, ws
, i5
);
4272 t1
= newTemp(Ity_V128
);
4273 t2
= newTemp(Ity_V128
);
4274 t3
= newTemp(Ity_V128
);
4275 Long stemp
= ((Long
)tmp
<< 59) >> 59;
4277 assign(t1
, getWReg(ws
));
4278 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
4279 assign(t3
, binop(Iop_CmpGT64Sx2
, mkexpr(t2
), mkexpr(t1
)));
4280 putWReg(wd
, mkexpr(t3
));
4291 case 0x03: { /* CLTI_U.df */
4295 case 0x00: { /* CLTI_U.B */
4296 DIP("CLTI_U.B w%d, w%d, %d", wd
, ws
, i5
);
4297 t1
= newTemp(Ity_V128
);
4298 t2
= newTemp(Ity_V128
);
4299 t3
= newTemp(Ity_V128
);
4300 tmp
|= (tmp
<< 56) | (tmp
<< 48) | (tmp
<< 40) |
4301 (tmp
<< 32) | (tmp
<< 24) | (tmp
<< 16) |
4303 assign(t1
, getWReg(ws
));
4304 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
4305 assign(t3
, binop(Iop_CmpGT8Ux16
, mkexpr(t2
), mkexpr(t1
)));
4306 putWReg(wd
, mkexpr(t3
));
4310 case 0x01: { /* CLTI_U.H */
4311 DIP("CLTI_U.H w%d, w%d, %d", wd
, ws
, i5
);
4312 t1
= newTemp(Ity_V128
);
4313 t2
= newTemp(Ity_V128
);
4314 t3
= newTemp(Ity_V128
);
4315 tmp
|= (tmp
<< 48) | (tmp
<< 32) | (tmp
<< 16);
4316 assign(t1
, getWReg(ws
));
4317 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
4318 assign(t3
, binop(Iop_CmpGT16Ux8
, mkexpr(t2
), mkexpr(t1
)));
4319 putWReg(wd
, mkexpr(t3
));
4323 case 0x02: { /* CLTI_U.W */
4324 DIP("CLTI_U.W w%d, w%d, %d", wd
, ws
, i5
);
4325 t1
= newTemp(Ity_V128
);
4326 t2
= newTemp(Ity_V128
);
4327 t3
= newTemp(Ity_V128
);
4329 assign(t1
, getWReg(ws
));
4330 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
4331 assign(t3
, binop(Iop_CmpGT32Ux4
, mkexpr(t2
), mkexpr(t1
)));
4332 putWReg(wd
, mkexpr(t3
));
4336 case 0x03: { /* CLTI_U.D */
4337 DIP("CLTI_U.D w%d, w%d, %d", wd
, ws
, i5
);
4338 t1
= newTemp(Ity_V128
);
4339 t2
= newTemp(Ity_V128
);
4340 t3
= newTemp(Ity_V128
);
4341 assign(t1
, getWReg(ws
));
4342 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
4343 assign(t3
, binop(Iop_CmpGT64Ux2
, mkexpr(t2
), mkexpr(t1
)));
4344 putWReg(wd
, mkexpr(t3
));
4352 case 0x04: { /* CLEI_S.df */
4356 case 0x00: { /* CLEI_S.B */
4357 DIP("CLEI_S.B w%d, w%d, %d", wd
, ws
, i5
);
4358 t1
= newTemp(Ity_V128
);
4359 t2
= newTemp(Ity_V128
);
4360 t3
= newTemp(Ity_V128
);
4361 char stemp
= ((int)tmp
<< 27) >> 27;
4363 tmp
|= (tmp
<< 56) | (tmp
<< 48) | (tmp
<< 40) |
4364 (tmp
<< 32) | (tmp
<< 24) | (tmp
<< 16) |
4366 assign(t1
, getWReg(ws
));
4367 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
4368 assign(t3
, binop(Iop_OrV128
, binop(Iop_CmpGT8Sx16
,
4369 mkexpr(t2
), mkexpr(t1
)),
4370 binop(Iop_CmpEQ8x16
,
4371 mkexpr(t1
), mkexpr(t2
))));
4372 putWReg(wd
, mkexpr(t3
));
4376 case 0x01: { /* CLEI_S.H */
4377 DIP("CLEI_S.H w%d, w%d, %d", wd
, ws
, i5
);
4378 t1
= newTemp(Ity_V128
);
4379 t2
= newTemp(Ity_V128
);
4380 t3
= newTemp(Ity_V128
);
4381 short stemp
= ((int)tmp
<< 27) >> 27;
4382 tmp
= (UShort
)stemp
;
4383 tmp
|= (tmp
<< 48) | (tmp
<< 32) | (tmp
<< 16);
4384 assign(t1
, getWReg(ws
));
4385 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
4386 assign(t3
, binop(Iop_OrV128
, binop(Iop_CmpGT16Sx8
,
4387 mkexpr(t2
), mkexpr(t1
)),
4388 binop(Iop_CmpEQ16x8
,
4389 mkexpr(t1
), mkexpr(t2
))));
4390 putWReg(wd
, mkexpr(t3
));
4394 case 0x02: { /* CLEI_S.W */
4395 DIP("CLEI_S.W w%d, w%d, %d", wd
, ws
, i5
);
4396 t1
= newTemp(Ity_V128
);
4397 t2
= newTemp(Ity_V128
);
4398 t3
= newTemp(Ity_V128
);
4399 int stemp
= ((int)tmp
<< 27) >> 27;
4402 assign(t1
, getWReg(ws
));
4403 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
4404 assign(t3
, binop(Iop_OrV128
,
4405 binop(Iop_CmpGT32Sx4
,
4406 mkexpr(t2
), mkexpr(t1
)),
4407 binop(Iop_CmpEQ32x4
,
4408 mkexpr(t1
), mkexpr(t2
))));
4409 putWReg(wd
, mkexpr(t3
));
4413 case 0x03: { /* CLEI_S.D */
4414 DIP("CLEI_S.D w%d, w%d, %d", wd
, ws
, i5
);
4415 t1
= newTemp(Ity_V128
);
4416 t2
= newTemp(Ity_V128
);
4417 t3
= newTemp(Ity_V128
);
4418 Long stemp
= ((Long
)tmp
<< 59) >> 59;
4420 assign(t1
, getWReg(ws
));
4421 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
4422 assign(t3
, binop(Iop_OrV128
,
4423 binop(Iop_CmpGT64Sx2
,
4424 mkexpr(t2
), mkexpr(t1
)),
4425 binop(Iop_CmpEQ64x2
,
4426 mkexpr(t1
), mkexpr(t2
))));
4427 putWReg(wd
, mkexpr(t3
));
4438 case 0x05: { /* CLEI_U.df */
4442 case 0x00: { /* CLEI_U.B */
4443 DIP("CLEI_U.B w%d, w%d, %d", wd
, ws
, i5
);
4444 t1
= newTemp(Ity_V128
);
4445 t2
= newTemp(Ity_V128
);
4446 t3
= newTemp(Ity_V128
);
4447 tmp
|= (tmp
<< 56) | (tmp
<< 48) | (tmp
<< 40) |
4448 (tmp
<< 32) | (tmp
<< 24) | (tmp
<< 16) |
4450 assign(t1
, getWReg(ws
));
4451 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
4452 assign(t3
, binop(Iop_OrV128
,
4453 binop(Iop_CmpGT8Ux16
,
4454 mkexpr(t2
), mkexpr(t1
)),
4455 binop(Iop_CmpEQ8x16
,
4456 mkexpr(t1
), mkexpr(t2
))));
4457 putWReg(wd
, mkexpr(t3
));
4461 case 0x01: { /* CLEI_U.H */
4462 DIP("CLEI_U.H w%d, w%d, %d", wd
, ws
, i5
);
4463 t1
= newTemp(Ity_V128
);
4464 t2
= newTemp(Ity_V128
);
4465 t3
= newTemp(Ity_V128
);
4466 tmp
|= (tmp
<< 48) | (tmp
<< 32) | (tmp
<< 16);
4467 assign(t1
, getWReg(ws
));
4468 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
4469 assign(t3
, binop(Iop_OrV128
,
4470 binop(Iop_CmpGT16Ux8
,
4471 mkexpr(t2
), mkexpr(t1
)),
4472 binop(Iop_CmpEQ16x8
,
4473 mkexpr(t1
), mkexpr(t2
))));
4474 putWReg(wd
, mkexpr(t3
));
4478 case 0x02: { /* CLEI_U.W */
4479 DIP("CLEI_U.W w%d, w%d, %d", wd
, ws
, i5
);
4480 t1
= newTemp(Ity_V128
);
4481 t2
= newTemp(Ity_V128
);
4482 t3
= newTemp(Ity_V128
);
4484 assign(t1
, getWReg(ws
));
4485 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
4486 assign(t3
, binop(Iop_OrV128
,
4487 binop(Iop_CmpGT32Ux4
,
4488 mkexpr(t2
), mkexpr(t1
)),
4489 binop(Iop_CmpEQ32x4
,
4490 mkexpr(t1
), mkexpr(t2
))));
4491 putWReg(wd
, mkexpr(t3
));
4495 case 0x03: { /* CLEI_U.D */
4496 DIP("CLEI_U.D w%d, w%d, %d", wd
, ws
, i5
);
4497 t1
= newTemp(Ity_V128
);
4498 t2
= newTemp(Ity_V128
);
4499 t3
= newTemp(Ity_V128
);
4500 assign(t1
, getWReg(ws
));
4501 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
4502 assign(t3
, binop(Iop_OrV128
,
4503 binop(Iop_CmpGT64Ux2
,
4504 mkexpr(t2
), mkexpr(t1
)),
4505 binop(Iop_CmpEQ64x2
,
4506 mkexpr(t1
), mkexpr(t2
))));
4507 putWReg(wd
, mkexpr(t3
));
4515 case 0x06: { /* LDI.df */
4518 s10
= (cins
& 0x001FF800) >> 11;
4521 case 0x00: /* LDI.B */
4522 DIP("LDI.B w%d, %d", wd
, s10
);
4524 tmp
= tmp
| (tmp
<< 8) | (tmp
<< 16) | (tmp
<< 24)
4525 | (tmp
<< 32) | (tmp
<< 40) | (tmp
<< 48) |
4529 case 0x01: /* LDI.H */
4530 DIP("LDI.H w%d, %d", wd
, s10
);
4531 tmp
= extend_s_10to16(s10
);
4532 tmp
= tmp
| (tmp
<< 16) | (tmp
<< 32) | (tmp
<< 48);
4535 case 0x02: /* LDI.W */
4536 DIP("LDI.W w%d, %d", wd
, s10
);
4537 tmp
= extend_s_10to32(s10
);
4538 tmp
= tmp
| (tmp
<< 32);
4541 case 0x03: /* LDI.D */
4542 DIP("LDI.D w%d, %d", wd
, s10
);
4543 tmp
= extend_s_10to64(s10
);
4550 putWReg(wd
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
4561 static Int
msa_BIT_09(UInt cins
, UChar wd
, UChar ws
) /* BIT (0x09) */
4567 operation
= (cins
& 0x03800000) >> 23;
4568 df
= (cins
& 0x007F0000) >> 16;
4570 if ((df
& 0x70) == 0x70) { // 111mmmm; b
4573 } else if ((df
& 0x60) == 0x60) { // 110mmmm; h
4576 } else if ((df
& 0x40) == 0x40) { // 10mmmmm; w
4579 } else if ((df
& 0x00) == 0x00) { // 0mmmmmm; d
4584 switch (operation
) {
4585 case 0x00: { /* SLLI.df */
4587 case 0x00: { /* SLLI.B */
4588 DIP("SLLI.B w%d, w%d, %d", wd
, ws
, m
);
4589 putWReg(wd
, binop(Iop_ShlN8x16
, getWReg(ws
), mkU8(m
)));
4593 case 0x01: { /* SLLI.H */
4594 DIP("SLLI.H w%d, w%d, %d", wd
, ws
, m
);
4595 putWReg(wd
, binop(Iop_ShlN16x8
, getWReg(ws
), mkU8(m
)));
4599 case 0x02: { /* SLLI.W */
4600 DIP("SLLI.W w%d, w%d, %d", wd
, ws
, m
);
4601 putWReg(wd
, binop(Iop_ShlN32x4
, getWReg(ws
), mkU8(m
)));
4605 case 0x03: { /* SLLI.D */
4606 DIP("SLLI.D w%d, w%d, %d", wd
, ws
, m
);
4607 putWReg(wd
, binop(Iop_ShlN64x2
, getWReg(ws
), mkU8(m
)));
4615 case 0x01: { /* SRAI.df */
4617 case 0x00: { /* SRAI.B */
4618 DIP("SRAI.B w%d, w%d, %d", wd
, ws
, m
);
4619 putWReg(wd
, binop(Iop_SarN8x16
, getWReg(ws
), mkU8(m
)));
4623 case 0x01: { /* SRAI.H */
4624 DIP("SRAI.H w%d, w%d, %d", wd
, ws
, m
);
4625 putWReg(wd
, binop(Iop_SarN16x8
, getWReg(ws
), mkU8(m
)));
4629 case 0x02: { /* SRAI.W */
4630 DIP("SRAI.W w%d, w%d, %d", wd
, ws
, m
);
4631 putWReg(wd
, binop(Iop_SarN32x4
, getWReg(ws
), mkU8(m
)));
4635 case 0x03: { /* SRAI.D */
4636 DIP("SRAI.D w%d, w%d, %d", wd
, ws
, m
);
4637 putWReg(wd
, binop(Iop_SarN64x2
, getWReg(ws
), mkU8(m
)));
4645 case 0x02: { /* SRLI.df */
4647 case 0x00: { /* SRLI.B */
4648 DIP("SRLI.B w%d, w%d, %d", wd
, ws
, m
);
4649 putWReg(wd
, binop(Iop_ShrN8x16
, getWReg(ws
), mkU8(m
)));
4653 case 0x01: { /* SRLI.H */
4654 DIP("SRLI.H w%d, w%d, %d", wd
, ws
, m
);
4655 putWReg(wd
, binop(Iop_ShrN16x8
, getWReg(ws
), mkU8(m
)));
4659 case 0x02: { /* SRLI.W */
4660 DIP("SRLI.W w%d, w%d, %d", wd
, ws
, m
);
4661 putWReg(wd
, binop(Iop_ShrN32x4
, getWReg(ws
), mkU8(m
)));
4665 case 0x03: { /* SRLI.D */
4666 DIP("SRLI.D w%d, w%d, %d", wd
, ws
, m
);
4667 putWReg(wd
, binop(Iop_ShrN64x2
, getWReg(ws
), mkU8(m
)));
4675 case 0x03: { /* BCLRI.df */
4676 t1
= newTemp(Ity_V128
);
4677 t2
= newTemp(Ity_V128
);
4678 t3
= newTemp(Ity_V128
);
4680 assign(t1
, getWReg(ws
));
4683 case 0x00: { /* BCLRI.B */
4684 DIP("BCLRI.B w%d, w%d, %d", wd
, ws
, m
);
4685 tmp
|= (tmp
<< 56) | (tmp
<< 48) | (tmp
<< 40) |
4686 (tmp
<< 32) | (tmp
<< 24) | (tmp
<< 16) |
4688 assign(t2
, binop(Iop_ShlN8x16
,
4689 binop(Iop_64HLtoV128
,
4690 mkU64(tmp
), mkU64(tmp
)), mkU8(m
)));
4694 case 0x01: { /* BCLRI.H */
4695 DIP("BCLRI.H w%d, w%d, %d", wd
, ws
, m
);
4696 tmp
|= (tmp
<< 48) | (tmp
<< 32) | (tmp
<< 16);
4697 assign(t2
, binop(Iop_ShlN16x8
,
4698 binop(Iop_64HLtoV128
,
4699 mkU64(tmp
), mkU64(tmp
)), mkU8(m
)));
4703 case 0x02: { /* BCLRI.W */
4704 DIP("BCLRI.W w%d, w%d, %d", wd
, ws
, m
);
4706 assign(t2
, binop(Iop_ShlN32x4
,
4707 binop(Iop_64HLtoV128
,
4708 mkU64(tmp
), mkU64(tmp
)), mkU8(m
)));
4712 case 0x03: { /* BCLRI.D */
4713 DIP("BCLRI.D w%d, w%d, %d", wd
, ws
, m
);
4714 assign(t2
, binop(Iop_ShlN64x2
,
4715 binop(Iop_64HLtoV128
,
4716 mkU64(tmp
), mkU64(tmp
)), mkU8(m
)));
4721 assign(t3
, binop(Iop_AndV128
,
4722 mkexpr(t1
), unop(Iop_NotV128
, mkexpr(t2
))));
4723 putWReg(wd
, mkexpr(t3
));
4727 case 0x04: { /* BSETI */
4728 t1
= newTemp(Ity_V128
);
4729 t2
= newTemp(Ity_V128
);
4730 t3
= newTemp(Ity_V128
);
4732 assign(t1
, getWReg(ws
));
4735 case 0x00: { /* BSETI.B */
4736 DIP("BSETI.B w%d, w%d, %d", wd
, ws
, m
);
4737 tmp
|= (tmp
<< 56) | (tmp
<< 48) | (tmp
<< 40) |
4738 (tmp
<< 32) | (tmp
<< 24) | (tmp
<< 16) |
4740 assign(t2
, binop(Iop_ShlN8x16
,
4741 binop(Iop_64HLtoV128
,
4742 mkU64(tmp
), mkU64(tmp
)), mkU8(m
)));
4746 case 0x01: { /* BSETI.H */
4747 DIP("BSETI.H w%d, w%d, %d", wd
, ws
, m
);
4748 tmp
|= (tmp
<< 48) | (tmp
<< 32) | (tmp
<< 16);
4749 assign(t2
, binop(Iop_ShlN16x8
,
4750 binop(Iop_64HLtoV128
,
4751 mkU64(tmp
), mkU64(tmp
)), mkU8(m
)));
4755 case 0x02: { /* BSETI.W */
4756 DIP("BSETI.W w%d, w%d, %d", wd
, ws
, m
);
4758 assign(t2
, binop(Iop_ShlN32x4
,
4759 binop(Iop_64HLtoV128
,
4760 mkU64(tmp
), mkU64(tmp
)), mkU8(m
)));
4764 case 0x03: { /* BSETI.D */
4765 DIP("BSETI.D w%d, w%d, %d", wd
, ws
, m
);
4766 assign(t2
, binop(Iop_ShlN64x2
,
4767 binop(Iop_64HLtoV128
,
4768 mkU64(tmp
), mkU64(tmp
)), mkU8(m
)));
4773 assign(t3
, binop(Iop_OrV128
, mkexpr(t1
), mkexpr(t2
)));
4774 putWReg(wd
, mkexpr(t3
));
4778 case 0x05: { /* BNEGI.df */
4779 t1
= newTemp(Ity_V128
);
4780 t2
= newTemp(Ity_V128
);
4781 t3
= newTemp(Ity_V128
);
4783 assign(t1
, getWReg(ws
));
4786 case 0x00: { /* BNEGI.B */
4787 DIP("BNEGI.B w%d, w%d, %d", wd
, ws
, m
);
4788 tmp
|= (tmp
<< 56) | (tmp
<< 48) | (tmp
<< 40) |
4789 (tmp
<< 32) | (tmp
<< 24) | (tmp
<< 16) |
4791 assign(t2
, binop(Iop_ShlN8x16
,
4792 binop(Iop_64HLtoV128
,
4793 mkU64(tmp
), mkU64(tmp
)), mkU8(m
)));
4797 case 0x01: { /* BNEGI.H */
4798 DIP("BNEGI.H w%d, w%d, %d", wd
, ws
, m
);
4799 tmp
|= (tmp
<< 48) | (tmp
<< 32) | (tmp
<< 16);
4800 assign(t2
, binop(Iop_ShlN16x8
,
4801 binop(Iop_64HLtoV128
,
4802 mkU64(tmp
), mkU64(tmp
)), mkU8(m
)));
4806 case 0x02: { /* BNEGI.W */
4807 DIP("BNEGI.W w%d, w%d, %d", wd
, ws
, m
);
4809 assign(t2
, binop(Iop_ShlN32x4
,
4810 binop(Iop_64HLtoV128
,
4811 mkU64(tmp
), mkU64(tmp
)), mkU8(m
)));
4815 case 0x03: { /* BNEGI.D */
4816 DIP("BNEGI.D w%d, w%d, %d", wd
, ws
, m
);
4817 assign(t2
, binop(Iop_ShlN64x2
,
4818 binop(Iop_64HLtoV128
,
4819 mkU64(tmp
), mkU64(tmp
)), mkU8(m
)));
4824 assign(t3
, binop(Iop_XorV128
, mkexpr(t1
), mkexpr(t2
)));
4825 putWReg(wd
, mkexpr(t3
));
4829 case 0x06: { /* BINSLI.df */
4831 case 0x00: { /* BINSLI.B */
4832 DIP("BINSLI.B w%d, w%d, w%d", wd
, ws
, m
);
4833 t1
= newTemp(Ity_V128
);
4834 t2
= newTemp(Ity_V128
);
4835 t3
= newTemp(Ity_V128
);
4836 ULong tmp
= 0x8080808080808080ULL
;
4837 assign(t1
, binop(Iop_SarN8x16
,
4838 binop(Iop_64HLtoV128
,
4839 mkU64(tmp
), mkU64(tmp
)), mkU8(m
)));
4842 unop(Iop_NotV128
, mkexpr(t1
)), getWReg(wd
)));
4845 mkexpr(t1
), getWReg(ws
)));
4848 mkexpr(t2
), mkexpr(t3
)));
4852 case 0x01: { /* BINSLI.H */
4853 DIP("BINSLI.H w%d, w%d, w%d", wd
, ws
, m
);
4854 t1
= newTemp(Ity_V128
);
4855 t2
= newTemp(Ity_V128
);
4856 t3
= newTemp(Ity_V128
);
4857 ULong tmp
= 0x8000800080008000ULL
;
4860 binop(Iop_64HLtoV128
,
4861 mkU64(tmp
), mkU64(tmp
)), mkU8(m
)));
4864 unop(Iop_NotV128
, mkexpr(t1
)), getWReg(wd
)));
4867 mkexpr(t1
), getWReg(ws
)));
4870 mkexpr(t2
), mkexpr(t3
)));
4874 case 0x02: { /* BINSLI.W */
4875 DIP("BINSLI.W w%d, w%d, w%d", wd
, ws
, m
);
4876 t1
= newTemp(Ity_V128
);
4877 t2
= newTemp(Ity_V128
);
4878 t3
= newTemp(Ity_V128
);
4879 ULong tmp
= 0x8000000080000000ULL
;
4882 binop(Iop_64HLtoV128
,
4883 mkU64(tmp
), mkU64(tmp
)), mkU8(m
)));
4886 unop(Iop_NotV128
, mkexpr(t1
)), getWReg(wd
)));
4889 mkexpr(t1
), getWReg(ws
)));
4892 mkexpr(t2
), mkexpr(t3
)));
4896 case 0x03: { /* BINSLI.D */
4897 DIP("BINSLI.D w%d, w%d, w%d", wd
, ws
, m
);
4898 t1
= newTemp(Ity_V128
);
4899 t2
= newTemp(Ity_V128
);
4900 t3
= newTemp(Ity_V128
);
4901 ULong tmp
= 0x8000000000000000ULL
;
4904 binop(Iop_64HLtoV128
,
4905 mkU64(tmp
), mkU64(tmp
)), mkU8(m
)));
4908 unop(Iop_NotV128
, mkexpr(t1
)), getWReg(wd
)));
4911 mkexpr(t1
), getWReg(ws
)));
4914 mkexpr(t2
), mkexpr(t3
)));
4927 case 0x00: { /* BINSRI.B */
4928 DIP("BINSRI.B w%d, w%d, w%d", wd
, ws
, m
);
4929 t1
= newTemp(Ity_V128
);
4930 t2
= newTemp(Ity_V128
);
4931 t3
= newTemp(Ity_V128
);
4932 ULong tmp
= 0xFEFEFEFEFEFEFEFEULL
;
4935 binop(Iop_64HLtoV128
,
4936 mkU64(tmp
), mkU64(tmp
)), mkU8(m
)));
4939 unop(Iop_NotV128
, mkexpr(t1
)), getWReg(ws
)));
4942 mkexpr(t1
), getWReg(wd
)));
4945 mkexpr(t2
), mkexpr(t3
)));
4949 case 0x01: { /* BINSRI.H */
4950 DIP("BINSRI.H w%d, w%d, w%d", wd
, ws
, m
);
4951 t1
= newTemp(Ity_V128
);
4952 t2
= newTemp(Ity_V128
);
4953 t3
= newTemp(Ity_V128
);
4954 ULong tmp
= 0xFFFEFFFEFFFEFFFEULL
;
4957 binop(Iop_64HLtoV128
,
4958 mkU64(tmp
), mkU64(tmp
)),
4962 unop(Iop_NotV128
, mkexpr(t1
)),
4966 mkexpr(t1
), getWReg(wd
)));
4969 mkexpr(t2
), mkexpr(t3
)));
4973 case 0x02: { /* BINSRI.W */
4974 DIP("BINSRI.W w%d, w%d, w%d", wd
, ws
, m
);
4975 t1
= newTemp(Ity_V128
);
4976 t2
= newTemp(Ity_V128
);
4977 t3
= newTemp(Ity_V128
);
4978 ULong tmp
= 0xFFFFFFFEFFFFFFFEULL
;
4981 binop(Iop_64HLtoV128
,
4982 mkU64(tmp
), mkU64(tmp
)),
4986 unop(Iop_NotV128
, mkexpr(t1
)),
4990 mkexpr(t1
), getWReg(wd
)));
4993 mkexpr(t2
), mkexpr(t3
)));
4997 case 0x03: { /* BINSRI.D */
4998 DIP("BINSRI.D w%d, w%d, w%d", wd
, ws
, m
);
4999 t1
= newTemp(Ity_V128
);
5000 t2
= newTemp(Ity_V128
);
5001 t3
= newTemp(Ity_V128
);
5005 binop(Iop_64HLtoV128
,
5006 mkU64(tmp
), mkU64(tmp
)),
5010 unop(Iop_NotV128
, mkexpr(t1
)),
5014 mkexpr(t1
), getWReg(wd
)));
5017 mkexpr(t2
), mkexpr(t3
)));
5035 static Int
msa_BIT_0A(UInt cins
, UChar wd
, UChar ws
) /* BIT (0x0A) */
5041 operation
= (cins
& 0x03800000) >> 23;
5042 df
= (cins
& 0x007F0000) >> 16;
5044 if ((df
& 0x70) == 0x70) { // 111mmmm; b
5047 } else if ((df
& 0x60) == 0x60) { // 110mmmm; h
5050 } else if ((df
& 0x40) == 0x40) { // 10mmmmm; w
5053 } else if ((df
& 0x00) == 0x00) { // 0mmmmmm; d
5058 switch (operation
) {
5059 case 0x00: { /* SAT_S.df */
5061 case 0x00: { /* SAT_S.B */
5062 DIP("SAT_S.B w%d, w%d, %d", wd
, ws
, m
);
5063 t1
= newTemp(Ity_V128
);
5064 assign(t1
, binop(Iop_SarN8x16
, getWReg(ws
), mkU8(7)));
5067 putWReg(wd
, mkexpr(t1
));
5069 t2
= newTemp(Ity_V128
);
5071 binop(Iop_SarN8x16
, getWReg(ws
), mkU8(m
)));
5076 binop(Iop_CmpEQ8x16
,
5081 binop(Iop_CmpGT8Sx16
,
5086 binop(Iop_CmpGT8Sx16
,
5095 case 0x01: { /* SAT_S.H */
5096 DIP("SAT_S.H w%d, w%d, %d", wd
, ws
, m
);
5097 t1
= newTemp(Ity_V128
);
5098 assign(t1
, binop(Iop_SarN16x8
, getWReg(ws
), mkU8(15)));
5101 putWReg(wd
, mkexpr(t1
));
5103 t2
= newTemp(Ity_V128
);
5112 binop(Iop_CmpEQ16x8
,
5117 binop(Iop_CmpGT16Sx8
,
5122 binop(Iop_CmpGT16Sx8
,
5131 case 0x02: { /* SAT_S.W */
5132 DIP("SAT_S.W w%d, w%d, %d", wd
, ws
, m
);
5133 t1
= newTemp(Ity_V128
);
5134 assign(t1
, binop(Iop_SarN32x4
, getWReg(ws
), mkU8(31)));
5137 putWReg(wd
, mkexpr(t1
));
5139 t2
= newTemp(Ity_V128
);
5148 binop(Iop_CmpEQ32x4
,
5153 binop(Iop_CmpGT32Sx4
,
5158 binop(Iop_CmpGT32Sx4
,
5167 case 0x03: { /* SAT_S.D */
5168 DIP("SAT_S.D w%d, w%d, %d", wd
, ws
, m
);
5169 t1
= newTemp(Ity_V128
);
5170 assign(t1
, binop(Iop_SarN64x2
, getWReg(ws
), mkU8(63)));
5173 putWReg(wd
, mkexpr(t1
));
5175 t2
= newTemp(Ity_V128
);
5184 binop(Iop_CmpEQ64x2
,
5189 binop(Iop_CmpGT64Sx2
,
5194 binop(Iop_CmpGT64Sx2
,
5207 case 0x01: { /* SAT_U.df */
5209 case 0x00: { /* SAT_U.B */
5210 DIP("SAT_U.B w%d, w%d, %d", wd
, ws
, m
);
5213 putWReg(wd
, getWReg(ws
));
5215 t1
= newTemp(Ity_V128
);
5217 binop(Iop_CmpEQ8x16
,
5221 binop(Iop_64HLtoV128
,
5222 mkU64(0), mkU64(0))));
5237 case 0x01: { /* SAT_U.H */
5238 DIP("SAT_U.H w%d, w%d, %d", wd
, ws
, m
);
5241 putWReg(wd
, getWReg(ws
));
5243 t1
= newTemp(Ity_V128
);
5245 binop(Iop_CmpEQ16x8
,
5249 binop(Iop_64HLtoV128
,
5250 mkU64(0), mkU64(0))));
5265 case 0x02: { /* SAT_U.W */
5266 DIP("SAT_U.W w%d, w%d, %d", wd
, ws
, m
);
5269 putWReg(wd
, getWReg(ws
));
5271 t1
= newTemp(Ity_V128
);
5273 binop(Iop_CmpEQ32x4
,
5277 binop(Iop_64HLtoV128
,
5278 mkU64(0), mkU64(0))));
5293 case 0x03: { /* SAT_U.D */
5294 DIP("SAT_U.D w%d, w%d, %d", wd
, ws
, m
);
5297 putWReg(wd
, getWReg(ws
));
5299 t1
= newTemp(Ity_V128
);
5301 binop(Iop_CmpEQ64x2
,
5305 binop(Iop_64HLtoV128
,
5306 mkU64(0), mkU64(0))));
5325 case 0x02: { /* SRARI.df */
5327 case 0x00: { /* SRARI.B */
5328 DIP("SRARI.B w%d, w%d, %d", wd
, ws
, m
);
5329 t1
= newTemp(Ity_V128
);
5330 t2
= newTemp(Ity_V128
);
5342 if (m
) putWReg(wd
, binop(Iop_Add8x16
,
5345 else putWReg(wd
, mkexpr(t1
));
5350 case 0x01: { /* SRARI.H */
5351 DIP("SRARI.H w%d, w%d, %d", wd
, ws
, m
);
5352 t1
= newTemp(Ity_V128
);
5353 t2
= newTemp(Ity_V128
);
5368 mkexpr(t1
), mkexpr(t2
)));
5369 else putWReg(wd
, mkexpr(t1
));
5374 case 0x02: { /* SRARI.W */
5375 DIP("SRARI.W w%d, w%d, %d", wd
, ws
, m
);
5376 t1
= newTemp(Ity_V128
);
5377 t2
= newTemp(Ity_V128
);
5392 mkexpr(t1
), mkexpr(t2
)));
5393 else putWReg(wd
, mkexpr(t1
));
5398 case 0x03: { /* SRARI.D */
5399 DIP("SRARI.D w%d, w%d, %d", wd
, ws
, m
);
5400 t1
= newTemp(Ity_V128
);
5401 t2
= newTemp(Ity_V128
);
5416 mkexpr(t1
), mkexpr(t2
)));
5417 else putWReg(wd
, mkexpr(t1
));
5426 case 0x03: { /* SRLRI.df */
5428 case 0x00: { /* SRLRI.B */
5429 DIP("SRLRI.B w%d, w%d, %d", wd
, ws
, m
);
5430 t1
= newTemp(Ity_V128
);
5431 t2
= newTemp(Ity_V128
);
5446 mkexpr(t1
), mkexpr(t2
)));
5447 else putWReg(wd
, mkexpr(t1
));
5452 case 0x01: { /* SRLRI.H */
5453 DIP("SRLRI.H w%d, w%d, %d", wd
, ws
, m
);
5454 t1
= newTemp(Ity_V128
);
5455 t2
= newTemp(Ity_V128
);
5470 mkexpr(t1
), mkexpr(t2
)));
5471 else putWReg(wd
, mkexpr(t1
));
5476 case 0x02: { /* SRLRI.W */
5477 DIP("SRLRI.W w%d, w%d, %d", wd
, ws
, m
);
5478 t1
= newTemp(Ity_V128
);
5479 t2
= newTemp(Ity_V128
);
5494 mkexpr(t1
), mkexpr(t2
)));
5495 else putWReg(wd
, mkexpr(t1
));
5500 case 0x03: { /* SRLRI.D */
5501 DIP("SRLRI.D w%d, w%d, %d", wd
, ws
, m
);
5502 t1
= newTemp(Ity_V128
);
5503 t2
= newTemp(Ity_V128
);
5518 mkexpr(t1
), mkexpr(t2
)));
5519 else putWReg(wd
, mkexpr(t1
));
5535 static Int
msa_3R_0D(UInt cins
, UChar wd
, UChar ws
) /* 3R (0x0D) */
5541 operation
= (cins
& 0x03800000) >> 23;
5542 df
= (cins
& 0x00600000) >> 21;
5543 wt
= (cins
& 0x001F0000) >> 16;
5545 switch (operation
) {
5546 case 0x00: { /* SLL.df */
5548 case 0x00: { /* SLL.B */
5549 DIP("SLL.B w%d, w%d, w%d", wd
, ws
, wt
);
5550 t1
= newTemp(Ity_V128
);
5551 t2
= newTemp(Ity_V128
);
5552 t3
= newTemp(Ity_V128
);
5553 assign(t1
, getWReg(ws
));
5554 assign(t2
, getWReg(wt
));
5555 assign(t3
, binop(Iop_Shl8x16
, mkexpr(t1
), mkexpr(t2
)));
5556 putWReg(wd
, mkexpr(t3
));
5560 case 0x01: { /* SLL.H */
5561 DIP("SLL.H w%d, w%d, w%d", wd
, ws
, wt
);
5562 t1
= newTemp(Ity_V128
);
5563 t2
= newTemp(Ity_V128
);
5564 t3
= newTemp(Ity_V128
);
5565 assign(t1
, getWReg(ws
));
5566 assign(t2
, getWReg(wt
));
5567 assign(t3
, binop(Iop_Shl16x8
, mkexpr(t1
), mkexpr(t2
)));
5568 putWReg(wd
, mkexpr(t3
));
5572 case 0x02: { /* SLL.W */
5573 DIP("SLL.W w%d, w%d, w%d", wd
, ws
, wt
);
5574 t1
= newTemp(Ity_V128
);
5575 t2
= newTemp(Ity_V128
);
5576 t3
= newTemp(Ity_V128
);
5577 assign(t1
, getWReg(ws
));
5578 assign(t2
, getWReg(wt
));
5579 assign(t3
, binop(Iop_Shl32x4
, mkexpr(t1
), mkexpr(t2
)));
5580 putWReg(wd
, mkexpr(t3
));
5584 case 0x03: { /* SLL.D */
5585 DIP("SLL.D w%d, w%d, w%d", wd
, ws
, wt
);
5586 t1
= newTemp(Ity_V128
);
5587 t2
= newTemp(Ity_V128
);
5588 t3
= newTemp(Ity_V128
);
5589 assign(t1
, getWReg(ws
));
5590 assign(t2
, getWReg(wt
));
5591 assign(t3
, binop(Iop_Shl64x2
, mkexpr(t1
), mkexpr(t2
)));
5592 putWReg(wd
, mkexpr(t3
));
5603 case 0x01: { /* SRA.df */
5605 case 0x00: { /* SRA.B */
5606 DIP("SRA.B w%d, w%d, w%d", wd
, ws
, wt
);
5607 t1
= newTemp(Ity_V128
);
5608 t2
= newTemp(Ity_V128
);
5609 t3
= newTemp(Ity_V128
);
5610 assign(t1
, getWReg(ws
));
5611 assign(t2
, getWReg(wt
));
5612 assign(t3
, binop(Iop_Sar8x16
, mkexpr(t1
), mkexpr(t2
)));
5613 putWReg(wd
, mkexpr(t3
));
5617 case 0x01: { /* SRA.H */
5618 DIP("SRA.H w%d, w%d, w%d", wd
, ws
, wt
);
5619 t1
= newTemp(Ity_V128
);
5620 t2
= newTemp(Ity_V128
);
5621 t3
= newTemp(Ity_V128
);
5622 assign(t1
, getWReg(ws
));
5623 assign(t2
, getWReg(wt
));
5624 assign(t3
, binop(Iop_Sar16x8
, mkexpr(t1
), mkexpr(t2
)));
5625 putWReg(wd
, mkexpr(t3
));
5629 case 0x02: { /* SRA.W */
5630 DIP("SRA.W w%d, w%d, w%d", wd
, ws
, wt
);
5631 t1
= newTemp(Ity_V128
);
5632 t2
= newTemp(Ity_V128
);
5633 t3
= newTemp(Ity_V128
);
5634 assign(t1
, getWReg(ws
));
5635 assign(t2
, getWReg(wt
));
5636 assign(t3
, binop(Iop_Sar32x4
, mkexpr(t1
), mkexpr(t2
)));
5637 putWReg(wd
, mkexpr(t3
));
5641 case 0x03: { /* SRA.D */
5642 DIP("SRA.D w%d, w%d, w%d", wd
, ws
, wt
);
5643 t1
= newTemp(Ity_V128
);
5644 t2
= newTemp(Ity_V128
);
5645 t3
= newTemp(Ity_V128
);
5646 assign(t1
, getWReg(ws
));
5647 assign(t2
, getWReg(wt
));
5648 assign(t3
, binop(Iop_Sar64x2
, mkexpr(t1
), mkexpr(t2
)));
5649 putWReg(wd
, mkexpr(t3
));
5660 case 0x02: { /* SRL.df */
5662 case 0x00: { /* SRL.B */
5663 DIP("SRL.B w%d, w%d, w%d", wd
, ws
, wt
);
5664 t1
= newTemp(Ity_V128
);
5665 t2
= newTemp(Ity_V128
);
5666 t3
= newTemp(Ity_V128
);
5667 assign(t1
, getWReg(ws
));
5668 assign(t2
, getWReg(wt
));
5669 assign(t3
, binop(Iop_Shr8x16
, mkexpr(t1
), mkexpr(t2
)));
5670 putWReg(wd
, mkexpr(t3
));
5674 case 0x01: { /* SRL.H */
5675 DIP("SRL.H w%d, w%d, w%d", wd
, ws
, wt
);
5676 t1
= newTemp(Ity_V128
);
5677 t2
= newTemp(Ity_V128
);
5678 t3
= newTemp(Ity_V128
);
5679 assign(t1
, getWReg(ws
));
5680 assign(t2
, getWReg(wt
));
5681 assign(t3
, binop(Iop_Shr16x8
, mkexpr(t1
), mkexpr(t2
)));
5682 putWReg(wd
, mkexpr(t3
));
5686 case 0x02: { /* SRL.W */
5687 DIP("SRL.W w%d, w%d, w%d", wd
, ws
, wt
);
5688 t1
= newTemp(Ity_V128
);
5689 t2
= newTemp(Ity_V128
);
5690 t3
= newTemp(Ity_V128
);
5691 assign(t1
, getWReg(ws
));
5692 assign(t2
, getWReg(wt
));
5693 assign(t3
, binop(Iop_Shr32x4
, mkexpr(t1
), mkexpr(t2
)));
5694 putWReg(wd
, mkexpr(t3
));
5698 case 0x03: { /* SRL.D */
5699 DIP("SRL.D w%d, w%d, w%d", wd
, ws
, wt
);
5700 t1
= newTemp(Ity_V128
);
5701 t2
= newTemp(Ity_V128
);
5702 t3
= newTemp(Ity_V128
);
5703 assign(t1
, getWReg(ws
));
5704 assign(t2
, getWReg(wt
));
5705 assign(t3
, binop(Iop_Shr64x2
, mkexpr(t1
), mkexpr(t2
)));
5706 putWReg(wd
, mkexpr(t3
));
5717 case 0x03: { /* BCLR.df */
5718 t1
= newTemp(Ity_V128
);
5719 t2
= newTemp(Ity_V128
);
5720 t3
= newTemp(Ity_V128
);
5722 assign(t1
, getWReg(ws
));
5725 case 0x00: { /* BCLR.B */
5726 DIP("BCLR.B w%d, w%d, w%d", wd
, ws
, wt
);
5727 tmp
|= (tmp
<< 56) | (tmp
<< 48) | (tmp
<< 40) |
5728 (tmp
<< 32) | (tmp
<< 24) | (tmp
<< 16) |
5730 assign(t2
, binop(Iop_Shl8x16
,
5731 binop(Iop_64HLtoV128
,
5732 mkU64(tmp
), mkU64(tmp
)),
5737 case 0x01: { /* BCLR.H */
5738 DIP("BCLR.H w%d, w%d, w%d", wd
, ws
, wt
);
5739 tmp
|= (tmp
<< 48) | (tmp
<< 32) | (tmp
<< 16);
5742 binop(Iop_64HLtoV128
,
5743 mkU64(tmp
), mkU64(tmp
)),
5748 case 0x02: { /* BCLR.W */
5749 DIP("BCLR.W w%d, w%d, w%d", wd
, ws
, wt
);
5753 binop(Iop_64HLtoV128
,
5754 mkU64(tmp
), mkU64(tmp
)),
5759 case 0x03: { /* BCLR.D */
5760 DIP("BCLR.D w%d, w%d, w%d", wd
, ws
, wt
);
5763 binop(Iop_64HLtoV128
,
5764 mkU64(tmp
), mkU64(tmp
)),
5772 mkexpr(t1
), unop(Iop_NotV128
, mkexpr(t2
))));
5773 putWReg(wd
, mkexpr(t3
));
5777 case 0x04: { /* BSET.df */
5778 t1
= newTemp(Ity_V128
);
5779 t2
= newTemp(Ity_V128
);
5780 t3
= newTemp(Ity_V128
);
5782 assign(t1
, getWReg(ws
));
5785 case 0x00: { /* BSET.B */
5786 DIP("BSET.B w%d, w%d, w%d", wd
, ws
, wt
);
5787 tmp
|= (tmp
<< 56) | (tmp
<< 48) | (tmp
<< 40) |
5788 (tmp
<< 32) | (tmp
<< 24) | (tmp
<< 16) |
5792 binop(Iop_64HLtoV128
,
5793 mkU64(tmp
), mkU64(tmp
)),
5798 case 0x01: { /* BSET.H */
5799 DIP("BSET.H w%d, w%d, w%d", wd
, ws
, wt
);
5800 tmp
|= (tmp
<< 48) | (tmp
<< 32) | (tmp
<< 16);
5803 binop(Iop_64HLtoV128
,
5804 mkU64(tmp
), mkU64(tmp
)),
5809 case 0x02: { /* BSET.W */
5810 DIP("BSET.W w%d, w%d, w%d", wd
, ws
, wt
);
5814 binop(Iop_64HLtoV128
,
5815 mkU64(tmp
), mkU64(tmp
)),
5820 case 0x03: { /* BSET.D */
5821 DIP("BSET.D w%d, w%d, w%d", wd
, ws
, wt
);
5824 binop(Iop_64HLtoV128
,
5825 mkU64(tmp
), mkU64(tmp
)),
5831 assign(t3
, binop(Iop_OrV128
, mkexpr(t1
), mkexpr(t2
)));
5832 putWReg(wd
, mkexpr(t3
));
5836 case 0x05: { /* BNEG.df */
5837 t1
= newTemp(Ity_V128
);
5838 t2
= newTemp(Ity_V128
);
5839 t3
= newTemp(Ity_V128
);
5841 assign(t1
, getWReg(ws
));
5844 case 0x00: { /* BNEG.B */
5845 DIP("BNEG.B w%d, w%d, w%d", wd
, ws
, wt
);
5846 tmp
|= (tmp
<< 56) | (tmp
<< 48) | (tmp
<< 40) |
5847 (tmp
<< 32) | (tmp
<< 24) | (tmp
<< 16) |
5851 binop(Iop_64HLtoV128
,
5852 mkU64(tmp
), mkU64(tmp
)),
5857 case 0x01: { /* BNEG.H */
5858 DIP("BNEG.H w%d, w%d, w%d", wd
, ws
, wt
);
5859 tmp
|= (tmp
<< 48) | (tmp
<< 32) | (tmp
<< 16);
5862 binop(Iop_64HLtoV128
,
5863 mkU64(tmp
), mkU64(tmp
)),
5868 case 0x02: { /* BNEG.W */
5869 DIP("BNEG.W w%d, w%d, w%d", wd
, ws
, wt
);
5873 binop(Iop_64HLtoV128
,
5874 mkU64(tmp
), mkU64(tmp
)),
5879 case 0x03: { /* BNEG.D */
5880 DIP("BNEG.D w%d, w%d, w%d", wd
, ws
, wt
);
5883 binop(Iop_64HLtoV128
,
5884 mkU64(tmp
), mkU64(tmp
)),
5890 assign(t3
, binop(Iop_XorV128
, mkexpr(t1
), mkexpr(t2
)));
5891 putWReg(wd
, mkexpr(t3
));
5895 case 0x06: { /* BINSL.df */
5897 case 0x00: { /* BINSL.B */
5898 DIP("BINSL.B w%d, w%d, w%d", wd
, ws
, wt
);
5899 t1
= newTemp(Ity_V128
);
5900 t2
= newTemp(Ity_V128
);
5901 t3
= newTemp(Ity_V128
);
5902 ULong tmp
= 0x8080808080808080ULL
;
5905 binop(Iop_64HLtoV128
,
5906 mkU64(tmp
), mkU64(tmp
)),
5910 unop(Iop_NotV128
, mkexpr(t1
)),
5914 mkexpr(t1
), getWReg(ws
)));
5917 mkexpr(t2
), mkexpr(t3
)));
5921 case 0x01: { /* BINSL.H */
5922 DIP("BINSL.H w%d, w%d, w%d", wd
, ws
, wt
);
5923 t1
= newTemp(Ity_V128
);
5924 t2
= newTemp(Ity_V128
);
5925 t3
= newTemp(Ity_V128
);
5926 ULong tmp
= 0x8000800080008000ULL
;
5929 binop(Iop_64HLtoV128
,
5930 mkU64(tmp
), mkU64(tmp
)),
5934 unop(Iop_NotV128
, mkexpr(t1
)),
5938 mkexpr(t1
), getWReg(ws
)));
5941 mkexpr(t2
), mkexpr(t3
)));
5945 case 0x02: { /* BINSL.W */
5946 DIP("BINSL.W w%d, w%d, w%d", wd
, ws
, wt
);
5947 t1
= newTemp(Ity_V128
);
5948 t2
= newTemp(Ity_V128
);
5949 t3
= newTemp(Ity_V128
);
5950 ULong tmp
= 0x8000000080000000ULL
;
5953 binop(Iop_64HLtoV128
,
5954 mkU64(tmp
), mkU64(tmp
)),
5958 unop(Iop_NotV128
, mkexpr(t1
)),
5962 mkexpr(t1
), getWReg(ws
)));
5965 mkexpr(t2
), mkexpr(t3
)));
5969 case 0x03: { /* BINSL.D */
5970 DIP("BINSL.D w%d, w%d, w%d", wd
, ws
, wt
);
5971 t1
= newTemp(Ity_V128
);
5972 t2
= newTemp(Ity_V128
);
5973 t3
= newTemp(Ity_V128
);
5974 ULong tmp
= 0x8000000000000000ULL
;
5977 binop(Iop_64HLtoV128
,
5978 mkU64(tmp
), mkU64(tmp
)),
5982 unop(Iop_NotV128
, mkexpr(t1
)),
5986 mkexpr(t1
), getWReg(ws
)));
5989 mkexpr(t2
), mkexpr(t3
)));
6000 case 0x07: { /* BINSR.df */
6002 case 0x00: { /* BINSR.B */
6003 DIP("BINSR.B w%d, w%d, w%d", wd
, ws
, wt
);
6004 t1
= newTemp(Ity_V128
);
6005 t2
= newTemp(Ity_V128
);
6006 t3
= newTemp(Ity_V128
);
6007 ULong tmp
= 0xFEFEFEFEFEFEFEFEULL
;
6010 binop(Iop_64HLtoV128
,
6011 mkU64(tmp
), mkU64(tmp
)),
6015 unop(Iop_NotV128
, mkexpr(t1
)),
6019 mkexpr(t1
), getWReg(wd
)));
6022 mkexpr(t2
), mkexpr(t3
)));
6026 case 0x01: { /* BINSR.H */
6027 DIP("BINSR.H w%d, w%d, w%d", wd
, ws
, wt
);
6028 t1
= newTemp(Ity_V128
);
6029 t2
= newTemp(Ity_V128
);
6030 t3
= newTemp(Ity_V128
);
6031 ULong tmp
= 0xFFFEFFFEFFFEFFFEULL
;
6034 binop(Iop_64HLtoV128
,
6035 mkU64(tmp
), mkU64(tmp
)),
6039 unop(Iop_NotV128
, mkexpr(t1
)),
6043 mkexpr(t1
), getWReg(wd
)));
6046 mkexpr(t2
), mkexpr(t3
)));
6050 case 0x02: { /* BINSR.W */
6051 DIP("BINSR.W w%d, w%d, w%d", wd
, ws
, wt
);
6052 t1
= newTemp(Ity_V128
);
6053 t2
= newTemp(Ity_V128
);
6054 t3
= newTemp(Ity_V128
);
6055 ULong tmp
= 0xFFFFFFFEFFFFFFFEULL
;
6058 binop(Iop_64HLtoV128
,
6059 mkU64(tmp
), mkU64(tmp
)),
6063 unop(Iop_NotV128
, mkexpr(t1
)),
6067 mkexpr(t1
), getWReg(wd
)));
6070 mkexpr(t2
), mkexpr(t3
)));
6074 case 0x03: { /* BINSR.D */
6075 DIP("BINSR.D w%d, w%d, w%d", wd
, ws
, wt
);
6076 t1
= newTemp(Ity_V128
);
6077 t2
= newTemp(Ity_V128
);
6078 t3
= newTemp(Ity_V128
);
6082 binop(Iop_64HLtoV128
,
6083 mkU64(tmp
), mkU64(tmp
)),
6087 unop(Iop_NotV128
, mkexpr(t1
)),
6091 mkexpr(t1
), getWReg(wd
)));
6094 mkexpr(t2
), mkexpr(t3
)));
6112 static Int
msa_3R_0E(UInt cins
, UChar wd
, UChar ws
) /* 3R (0x0E) */
6114 IRTemp t1
, t2
, t3
, t4
;
6118 operation
= (cins
& 0x03800000) >> 23;
6119 df
= (cins
& 0x00600000) >> 21;
6120 wt
= (cins
& 0x001F0000) >> 16;
6122 switch (operation
) {
6123 case 0x00: { /* ADDV.df */
6125 case 0x00: { /* ADDV.B */
6126 DIP("ADDV.B w%d, w%d, w%d", wd
, ws
, wt
);
6127 t1
= newTemp(Ity_V128
);
6128 t2
= newTemp(Ity_V128
);
6129 t3
= newTemp(Ity_V128
);
6130 assign(t1
, getWReg(ws
));
6131 assign(t2
, getWReg(wt
));
6132 assign(t3
, binop(Iop_Add8x16
, mkexpr(t1
), mkexpr(t2
)));
6133 putWReg(wd
, mkexpr(t3
));
6137 case 0x01: { /* ADDV.H */
6138 DIP("ADDV.H w%d, w%d, w%d", wd
, ws
, wt
);
6139 t1
= newTemp(Ity_V128
);
6140 t2
= newTemp(Ity_V128
);
6141 t3
= newTemp(Ity_V128
);
6142 assign(t1
, getWReg(ws
));
6143 assign(t2
, getWReg(wt
));
6144 assign(t3
, binop(Iop_Add16x8
, mkexpr(t1
), mkexpr(t2
)));
6145 putWReg(wd
, mkexpr(t3
));
6149 case 0x02: { /* ADDV.W */
6150 DIP("ADDV.W w%d, w%d, w%d", wd
, ws
, wt
);
6151 t1
= newTemp(Ity_V128
);
6152 t2
= newTemp(Ity_V128
);
6153 t3
= newTemp(Ity_V128
);
6154 assign(t1
, getWReg(ws
));
6155 assign(t2
, getWReg(wt
));
6156 assign(t3
, binop(Iop_Add32x4
, mkexpr(t1
), mkexpr(t2
)));
6157 putWReg(wd
, mkexpr(t3
));
6161 case 0x03: { /* ADDV.D */
6162 DIP("ADDV.D w%d, w%d, w%d", wd
, ws
, wt
);
6163 t1
= newTemp(Ity_V128
);
6164 t2
= newTemp(Ity_V128
);
6165 t3
= newTemp(Ity_V128
);
6166 assign(t1
, getWReg(ws
));
6167 assign(t2
, getWReg(wt
));
6168 assign(t3
, binop(Iop_Add64x2
, mkexpr(t1
), mkexpr(t2
)));
6169 putWReg(wd
, mkexpr(t3
));
6180 case 0x01: { /* SUBV.df */
6182 case 0x00: { /* SUBV.B */
6183 DIP("SUBV.B w%d, w%d, w%d", wd
, ws
, wt
);
6184 t1
= newTemp(Ity_V128
);
6185 t2
= newTemp(Ity_V128
);
6186 t3
= newTemp(Ity_V128
);
6187 assign(t1
, getWReg(ws
));
6188 assign(t2
, getWReg(wt
));
6189 assign(t3
, binop(Iop_Sub8x16
, mkexpr(t1
), mkexpr(t2
)));
6190 putWReg(wd
, mkexpr(t3
));
6194 case 0x01: { /* SUBV.H */
6195 DIP("SUBV.H w%d, w%d, w%d", wd
, ws
, wt
);
6196 t1
= newTemp(Ity_V128
);
6197 t2
= newTemp(Ity_V128
);
6198 t3
= newTemp(Ity_V128
);
6199 assign(t1
, getWReg(ws
));
6200 assign(t2
, getWReg(wt
));
6201 assign(t3
, binop(Iop_Sub16x8
, mkexpr(t1
), mkexpr(t2
)));
6202 putWReg(wd
, mkexpr(t3
));
6206 case 0x02: { /* SUBV.W */
6207 DIP("SUBV.W w%d, w%d, w%d", wd
, ws
, wt
);
6208 t1
= newTemp(Ity_V128
);
6209 t2
= newTemp(Ity_V128
);
6210 t3
= newTemp(Ity_V128
);
6211 assign(t1
, getWReg(ws
));
6212 assign(t2
, getWReg(wt
));
6213 assign(t3
, binop(Iop_Sub32x4
, mkexpr(t1
), mkexpr(t2
)));
6214 putWReg(wd
, mkexpr(t3
));
6218 case 0x03: { /* SUBV.D */
6219 DIP("SUBV.D w%d, w%d, w%d", wd
, ws
, wt
);
6220 t1
= newTemp(Ity_V128
);
6221 t2
= newTemp(Ity_V128
);
6222 t3
= newTemp(Ity_V128
);
6223 assign(t1
, getWReg(ws
));
6224 assign(t2
, getWReg(wt
));
6225 assign(t3
, binop(Iop_Sub64x2
, mkexpr(t1
), mkexpr(t2
)));
6226 putWReg(wd
, mkexpr(t3
));
6237 case 0x02: { /* MAX_S.df */
6239 case 0x00: { /* MAX_S.B */
6240 DIP("MAX_S.B w%d, w%d, w%d", wd
, ws
, wt
);
6241 t1
= newTemp(Ity_V128
);
6242 t2
= newTemp(Ity_V128
);
6243 t3
= newTemp(Ity_V128
);
6244 assign(t1
, getWReg(ws
));
6245 assign(t2
, getWReg(wt
));
6246 assign(t3
, binop(Iop_Max8Sx16
, mkexpr(t1
), mkexpr(t2
)));
6247 putWReg(wd
, mkexpr(t3
));
6251 case 0x01: { /* MAX_S.H */
6252 DIP("MAX_S.H w%d, w%d, w%d", wd
, ws
, wt
);
6253 t1
= newTemp(Ity_V128
);
6254 t2
= newTemp(Ity_V128
);
6255 t3
= newTemp(Ity_V128
);
6256 assign(t1
, getWReg(ws
));
6257 assign(t2
, getWReg(wt
));
6258 assign(t3
, binop(Iop_Max16Sx8
, mkexpr(t1
), mkexpr(t2
)));
6259 putWReg(wd
, mkexpr(t3
));
6263 case 0x02: { /* MAX_S.W */
6264 DIP("MAX_S.W w%d, w%d, w%d", wd
, ws
, wt
);
6265 t1
= newTemp(Ity_V128
);
6266 t2
= newTemp(Ity_V128
);
6267 t3
= newTemp(Ity_V128
);
6268 assign(t1
, getWReg(ws
));
6269 assign(t2
, getWReg(wt
));
6270 assign(t3
, binop(Iop_Max32Sx4
, mkexpr(t1
), mkexpr(t2
)));
6271 putWReg(wd
, mkexpr(t3
));
6275 case 0x03: { /* MAX_S.D */
6276 DIP("MAX_S.D w%d, w%d, w%d", wd
, ws
, wt
);
6277 t1
= newTemp(Ity_V128
);
6278 t2
= newTemp(Ity_V128
);
6279 t3
= newTemp(Ity_V128
);
6280 assign(t1
, getWReg(ws
));
6281 assign(t2
, getWReg(wt
));
6282 assign(t3
, binop(Iop_Max64Sx2
, mkexpr(t1
), mkexpr(t2
)));
6283 putWReg(wd
, mkexpr(t3
));
6294 case 0x03: { /* MAX_U.df */
6296 case 0x00: { /* MAX_U.B */
6297 DIP("MAX_U.B w%d, w%d, w%d", wd
, ws
, wt
);
6298 t1
= newTemp(Ity_V128
);
6299 t2
= newTemp(Ity_V128
);
6300 t3
= newTemp(Ity_V128
);
6301 assign(t1
, getWReg(ws
));
6302 assign(t2
, getWReg(wt
));
6303 assign(t3
, binop(Iop_Max8Ux16
, mkexpr(t1
), mkexpr(t2
)));
6304 putWReg(wd
, mkexpr(t3
));
6308 case 0x01: { /* MAX_U.H */
6309 DIP("MAX_U.H w%d, w%d, w%d", wd
, ws
, wt
);
6310 t1
= newTemp(Ity_V128
);
6311 t2
= newTemp(Ity_V128
);
6312 t3
= newTemp(Ity_V128
);
6313 assign(t1
, getWReg(ws
));
6314 assign(t2
, getWReg(wt
));
6315 assign(t3
, binop(Iop_Max16Ux8
, mkexpr(t1
), mkexpr(t2
)));
6316 putWReg(wd
, mkexpr(t3
));
6320 case 0x02: { /* MAX_U.W */
6321 DIP("MAX_U.W w%d, w%d, w%d", wd
, ws
, wt
);
6322 t1
= newTemp(Ity_V128
);
6323 t2
= newTemp(Ity_V128
);
6324 t3
= newTemp(Ity_V128
);
6325 assign(t1
, getWReg(ws
));
6326 assign(t2
, getWReg(wt
));
6327 assign(t3
, binop(Iop_Max32Ux4
, mkexpr(t1
), mkexpr(t2
)));
6328 putWReg(wd
, mkexpr(t3
));
6332 case 0x03: { /* MAX_U.D */
6333 DIP("MAX_U.D w%d, w%d, w%d", wd
, ws
, wt
);
6334 t1
= newTemp(Ity_V128
);
6335 t2
= newTemp(Ity_V128
);
6336 t3
= newTemp(Ity_V128
);
6337 assign(t1
, getWReg(ws
));
6338 assign(t2
, getWReg(wt
));
6339 assign(t3
, binop(Iop_Max64Ux2
, mkexpr(t1
), mkexpr(t2
)));
6340 putWReg(wd
, mkexpr(t3
));
6351 case 0x04: { /* MIN_S.df */
6353 case 0x00: { /* MIN_S.B */
6354 DIP("MIN_S.B w%d, w%d, w%d", wd
, ws
, wt
);
6355 t1
= newTemp(Ity_V128
);
6356 t2
= newTemp(Ity_V128
);
6357 t3
= newTemp(Ity_V128
);
6358 assign(t1
, getWReg(ws
));
6359 assign(t2
, getWReg(wt
));
6360 assign(t3
, binop(Iop_Min8Sx16
, mkexpr(t1
), mkexpr(t2
)));
6361 putWReg(wd
, mkexpr(t3
));
6365 case 0x01: { /* MIN_S.H */
6366 DIP("MIN_S.H w%d, w%d, w%d", wd
, ws
, wt
);
6367 t1
= newTemp(Ity_V128
);
6368 t2
= newTemp(Ity_V128
);
6369 t3
= newTemp(Ity_V128
);
6370 assign(t1
, getWReg(ws
));
6371 assign(t2
, getWReg(wt
));
6372 assign(t3
, binop(Iop_Min16Sx8
, mkexpr(t1
), mkexpr(t2
)));
6373 putWReg(wd
, mkexpr(t3
));
6377 case 0x02: { /* MIN_S.W */
6378 DIP("MIN_S.W w%d, w%d, w%d", wd
, ws
, wt
);
6379 t1
= newTemp(Ity_V128
);
6380 t2
= newTemp(Ity_V128
);
6381 t3
= newTemp(Ity_V128
);
6382 assign(t1
, getWReg(ws
));
6383 assign(t2
, getWReg(wt
));
6384 assign(t3
, binop(Iop_Min32Sx4
, mkexpr(t1
), mkexpr(t2
)));
6385 putWReg(wd
, mkexpr(t3
));
6389 case 0x03: { /* MIN_S.D */
6390 DIP("MIN_S.D w%d, w%d, w%d", wd
, ws
, wt
);
6391 t1
= newTemp(Ity_V128
);
6392 t2
= newTemp(Ity_V128
);
6393 t3
= newTemp(Ity_V128
);
6394 assign(t1
, getWReg(ws
));
6395 assign(t2
, getWReg(wt
));
6396 assign(t3
, binop(Iop_Min64Sx2
, mkexpr(t1
), mkexpr(t2
)));
6397 putWReg(wd
, mkexpr(t3
));
6408 case 0x05: { /* MIN_U.df */
6410 case 0x00: { /* MIN_U.B */
6411 DIP("MIN_U.B w%d, w%d, w%d", wd
, ws
, wt
);
6412 t1
= newTemp(Ity_V128
);
6413 t2
= newTemp(Ity_V128
);
6414 t3
= newTemp(Ity_V128
);
6415 assign(t1
, getWReg(ws
));
6416 assign(t2
, getWReg(wt
));
6417 assign(t3
, binop(Iop_Min8Ux16
, mkexpr(t1
), mkexpr(t2
)));
6418 putWReg(wd
, mkexpr(t3
));
6422 case 0x01: { /* MIN_U.H */
6423 DIP("MIN_U.H w%d, w%d, w%d", wd
, ws
, wt
);
6424 t1
= newTemp(Ity_V128
);
6425 t2
= newTemp(Ity_V128
);
6426 t3
= newTemp(Ity_V128
);
6427 assign(t1
, getWReg(ws
));
6428 assign(t2
, getWReg(wt
));
6429 assign(t3
, binop(Iop_Min16Ux8
, mkexpr(t1
), mkexpr(t2
)));
6430 putWReg(wd
, mkexpr(t3
));
6434 case 0x02: { /* MIN_U.W */
6435 DIP("MIN_U.W w%d, w%d, w%d", wd
, ws
, wt
);
6436 t1
= newTemp(Ity_V128
);
6437 t2
= newTemp(Ity_V128
);
6438 t3
= newTemp(Ity_V128
);
6439 assign(t1
, getWReg(ws
));
6440 assign(t2
, getWReg(wt
));
6441 assign(t3
, binop(Iop_Min32Ux4
, mkexpr(t1
), mkexpr(t2
)));
6442 putWReg(wd
, mkexpr(t3
));
6446 case 0x03: { /* MIN_U.D */
6447 DIP("MIN_U.D w%d, w%d, w%d", wd
, ws
, wt
);
6448 t1
= newTemp(Ity_V128
);
6449 t2
= newTemp(Ity_V128
);
6450 t3
= newTemp(Ity_V128
);
6451 assign(t1
, getWReg(ws
));
6452 assign(t2
, getWReg(wt
));
6453 assign(t3
, binop(Iop_Min64Ux2
, mkexpr(t1
), mkexpr(t2
)));
6454 putWReg(wd
, mkexpr(t3
));
6465 case 0x06: { /* MAX_A.df */
6467 case 0x00: { /* MAX_A.B */
6468 DIP("MAX_A.B w%d, w%d, w%d", wd
, ws
, wt
);
6469 t1
= newTemp(Ity_V128
);
6470 t2
= newTemp(Ity_V128
);
6471 t3
= newTemp(Ity_V128
);
6472 t4
= newTemp(Ity_V128
);
6473 assign(t1
, unop(Iop_Abs8x16
, getWReg(ws
)));
6474 assign(t2
, unop(Iop_Abs8x16
, getWReg(wt
)));
6475 assign(t4
, binop(Iop_CmpGT8Ux16
, mkexpr(t1
), mkexpr(t2
)));
6476 assign(t3
, binop(Iop_OrV128
,
6481 unop(Iop_NotV128
, mkexpr(t4
)),
6483 putWReg(wd
, mkexpr(t3
));
6487 case 0x01: { /* MAX_A.H */
6488 DIP("MAX_A.H w%d, w%d, w%d", wd
, ws
, wt
);
6489 t1
= newTemp(Ity_V128
);
6490 t2
= newTemp(Ity_V128
);
6491 t3
= newTemp(Ity_V128
);
6492 t4
= newTemp(Ity_V128
);
6493 assign(t1
, unop(Iop_Abs16x8
, getWReg(ws
)));
6494 assign(t2
, unop(Iop_Abs16x8
, getWReg(wt
)));
6495 assign(t4
, binop(Iop_CmpGT16Ux8
, mkexpr(t1
), mkexpr(t2
)));
6496 assign(t3
, binop(Iop_OrV128
,
6501 unop(Iop_NotV128
, mkexpr(t4
)),
6503 putWReg(wd
, mkexpr(t3
));
6507 case 0x02: { /* MAX_A.W */
6508 DIP("MAX_A.W w%d, w%d, w%d", wd
, ws
, wt
);
6509 t1
= newTemp(Ity_V128
);
6510 t2
= newTemp(Ity_V128
);
6511 t3
= newTemp(Ity_V128
);
6512 t4
= newTemp(Ity_V128
);
6513 assign(t1
, unop(Iop_Abs32x4
, getWReg(ws
)));
6514 assign(t2
, unop(Iop_Abs32x4
, getWReg(wt
)));
6515 assign(t4
, binop(Iop_CmpGT32Ux4
, mkexpr(t1
), mkexpr(t2
)));
6516 assign(t3
, binop(Iop_OrV128
,
6521 unop(Iop_NotV128
, mkexpr(t4
)),
6523 putWReg(wd
, mkexpr(t3
));
6527 case 0x03: { /* MAX_A.D */
6528 DIP("MAX_A.D w%d, w%d, w%d", wd
, ws
, wt
);
6529 t1
= newTemp(Ity_V128
);
6530 t2
= newTemp(Ity_V128
);
6531 t3
= newTemp(Ity_V128
);
6532 t4
= newTemp(Ity_V128
);
6533 assign(t1
, unop(Iop_Abs64x2
, getWReg(ws
)));
6534 assign(t2
, unop(Iop_Abs64x2
, getWReg(wt
)));
6535 assign(t4
, binop(Iop_CmpGT64Ux2
, mkexpr(t1
), mkexpr(t2
)));
6536 assign(t3
, binop(Iop_OrV128
,
6541 unop(Iop_NotV128
, mkexpr(t4
)),
6543 putWReg(wd
, mkexpr(t3
));
6554 case 0x07: { /* MIN_A.df */
6556 case 0x00: { /* MIN_A.B */
6557 DIP("MIN_A.B w%d, w%d, w%d", wd
, ws
, wt
);
6558 t1
= newTemp(Ity_V128
);
6559 t2
= newTemp(Ity_V128
);
6560 t3
= newTemp(Ity_V128
);
6561 t4
= newTemp(Ity_V128
);
6562 assign(t1
, unop(Iop_Abs8x16
, getWReg(ws
)));
6563 assign(t2
, unop(Iop_Abs8x16
, getWReg(wt
)));
6564 assign(t4
, binop(Iop_OrV128
,
6565 binop(Iop_CmpGT8Ux16
,
6566 mkexpr(t1
), mkexpr(t2
)),
6567 binop(Iop_CmpEQ8x16
,
6568 mkexpr(t1
), mkexpr(t2
))));
6569 assign(t3
, binop(Iop_OrV128
,
6574 unop(Iop_NotV128
, mkexpr(t4
)),
6576 putWReg(wd
, mkexpr(t3
));
6580 case 0x01: { /* MIN_A.H */
6581 DIP("MIN_A.H w%d, w%d, w%d", wd
, ws
, wt
);
6582 t1
= newTemp(Ity_V128
);
6583 t2
= newTemp(Ity_V128
);
6584 t3
= newTemp(Ity_V128
);
6585 t4
= newTemp(Ity_V128
);
6586 assign(t1
, unop(Iop_Abs16x8
, getWReg(ws
)));
6587 assign(t2
, unop(Iop_Abs16x8
, getWReg(wt
)));
6588 assign(t4
, binop(Iop_OrV128
,
6589 binop(Iop_CmpGT16Ux8
,
6590 mkexpr(t1
), mkexpr(t2
)),
6591 binop(Iop_CmpEQ16x8
,
6592 mkexpr(t1
), mkexpr(t2
))));
6593 assign(t3
, binop(Iop_OrV128
,
6598 unop(Iop_NotV128
, mkexpr(t4
)),
6600 putWReg(wd
, mkexpr(t3
));
6604 case 0x02: { /* MIN_A.W */
6605 DIP("MIN_A.W w%d, w%d, w%d", wd
, ws
, wt
);
6606 t1
= newTemp(Ity_V128
);
6607 t2
= newTemp(Ity_V128
);
6608 t3
= newTemp(Ity_V128
);
6609 t4
= newTemp(Ity_V128
);
6610 assign(t1
, unop(Iop_Abs32x4
, getWReg(ws
)));
6611 assign(t2
, unop(Iop_Abs32x4
, getWReg(wt
)));
6612 assign(t4
, binop(Iop_OrV128
,
6613 binop(Iop_CmpGT32Ux4
,
6614 mkexpr(t1
), mkexpr(t2
)),
6615 binop(Iop_CmpEQ32x4
,
6616 mkexpr(t1
), mkexpr(t2
))));
6617 assign(t3
, binop(Iop_OrV128
,
6622 unop(Iop_NotV128
, mkexpr(t4
)),
6624 putWReg(wd
, mkexpr(t3
));
6628 case 0x03: { /* MIN_A.D */
6629 DIP("MIN_A.D w%d, w%d, w%d", wd
, ws
, wt
);
6630 t1
= newTemp(Ity_V128
);
6631 t2
= newTemp(Ity_V128
);
6632 t3
= newTemp(Ity_V128
);
6633 t4
= newTemp(Ity_V128
);
6634 assign(t1
, unop(Iop_Abs64x2
, getWReg(ws
)));
6635 assign(t2
, unop(Iop_Abs64x2
, getWReg(wt
)));
6636 assign(t4
, binop(Iop_OrV128
,
6637 binop(Iop_CmpGT64Ux2
,
6638 mkexpr(t1
), mkexpr(t2
)),
6639 binop(Iop_CmpEQ64x2
,
6640 mkexpr(t1
), mkexpr(t2
))));
6641 assign(t3
, binop(Iop_OrV128
,
6646 unop(Iop_NotV128
, mkexpr(t4
)),
6648 putWReg(wd
, mkexpr(t3
));
6666 static Int
msa_3R_0F(UInt cins
, UChar wd
, UChar ws
) /* 3R (0x0F) */
6672 operation
= (cins
& 0x03800000) >> 23;
6673 df
= (cins
& 0x00600000) >> 21;
6674 wt
= (cins
& 0x001F0000) >> 16;
6676 switch (operation
) {
6677 case 0x00: { /* CEQ.df */
6679 case 0x00: { /* CEQ.B */
6680 DIP("CEQ.B w%d, w%d, w%d", wd
, ws
, wt
);
6681 t1
= newTemp(Ity_V128
);
6682 t2
= newTemp(Ity_V128
);
6683 t3
= newTemp(Ity_V128
);
6684 assign(t1
, getWReg(ws
));
6685 assign(t2
, getWReg(wt
));
6686 assign(t3
, binop(Iop_CmpEQ8x16
, mkexpr(t1
), mkexpr(t2
)));
6687 putWReg(wd
, mkexpr(t3
));
6691 case 0x01: { /* CEQ.H */
6692 DIP("CEQ.H w%d, w%d, w%d", wd
, ws
, wt
);
6693 t1
= newTemp(Ity_V128
);
6694 t2
= newTemp(Ity_V128
);
6695 t3
= newTemp(Ity_V128
);
6696 assign(t1
, getWReg(ws
));
6697 assign(t2
, getWReg(wt
));
6698 assign(t3
, binop(Iop_CmpEQ16x8
, mkexpr(t1
), mkexpr(t2
)));
6699 putWReg(wd
, mkexpr(t3
));
6703 case 0x02: { /* CEQ.W */
6704 DIP("CEQ.W w%d, w%d, w%d", wd
, ws
, wt
);
6705 t1
= newTemp(Ity_V128
);
6706 t2
= newTemp(Ity_V128
);
6707 t3
= newTemp(Ity_V128
);
6708 assign(t1
, getWReg(ws
));
6709 assign(t2
, getWReg(wt
));
6710 assign(t3
, binop(Iop_CmpEQ32x4
, mkexpr(t1
), mkexpr(t2
)));
6711 putWReg(wd
, mkexpr(t3
));
6715 case 0x03: { /* CEQ.D */
6716 DIP("CEQ.D w%d, w%d, w%d", wd
, ws
, wt
);
6717 t1
= newTemp(Ity_V128
);
6718 t2
= newTemp(Ity_V128
);
6719 t3
= newTemp(Ity_V128
);
6720 assign(t1
, getWReg(ws
));
6721 assign(t2
, getWReg(wt
));
6722 assign(t3
, binop(Iop_CmpEQ64x2
, mkexpr(t1
), mkexpr(t2
)));
6723 putWReg(wd
, mkexpr(t3
));
6734 case 0x02: { /* CLT_S.df */
6736 case 0x00: { /* CLT_S.B */
6737 DIP("CLT_S.B w%d, w%d, w%d", wd
, ws
, wt
);
6738 t1
= newTemp(Ity_V128
);
6739 t2
= newTemp(Ity_V128
);
6740 t3
= newTemp(Ity_V128
);
6741 assign(t1
, getWReg(ws
));
6742 assign(t2
, getWReg(wt
));
6743 assign(t3
, binop(Iop_CmpGT8Sx16
, mkexpr(t2
), mkexpr(t1
)));
6744 putWReg(wd
, mkexpr(t3
));
6748 case 0x01: { /* CLT_S.H */
6749 DIP("CLT_S.H w%d, w%d, w%d", wd
, ws
, wt
);
6750 t1
= newTemp(Ity_V128
);
6751 t2
= newTemp(Ity_V128
);
6752 t3
= newTemp(Ity_V128
);
6753 assign(t1
, getWReg(ws
));
6754 assign(t2
, getWReg(wt
));
6755 assign(t3
, binop(Iop_CmpGT16Sx8
, mkexpr(t2
), mkexpr(t1
)));
6756 putWReg(wd
, mkexpr(t3
));
6760 case 0x02: { /* CLT_S.W */
6761 DIP("CLT_S.W w%d, w%d, w%d", wd
, ws
, wt
);
6762 t1
= newTemp(Ity_V128
);
6763 t2
= newTemp(Ity_V128
);
6764 t3
= newTemp(Ity_V128
);
6765 assign(t1
, getWReg(ws
));
6766 assign(t2
, getWReg(wt
));
6767 assign(t3
, binop(Iop_CmpGT32Sx4
, mkexpr(t2
), mkexpr(t1
)));
6768 putWReg(wd
, mkexpr(t3
));
6772 case 0x03: { /* CLT_S.D */
6773 DIP("CLT_S.D w%d, w%d, w%d", wd
, ws
, wt
);
6774 t1
= newTemp(Ity_V128
);
6775 t2
= newTemp(Ity_V128
);
6776 t3
= newTemp(Ity_V128
);
6777 assign(t1
, getWReg(ws
));
6778 assign(t2
, getWReg(wt
));
6779 assign(t3
, binop(Iop_CmpGT64Sx2
, mkexpr(t2
), mkexpr(t1
)));
6780 putWReg(wd
, mkexpr(t3
));
6791 case 0x03: { /* CLT_U.df */
6793 case 0x00: { /* CLT_U.B */
6794 DIP("CLT_U.B w%d, w%d, w%d", wd
, ws
, wt
);
6795 t1
= newTemp(Ity_V128
);
6796 t2
= newTemp(Ity_V128
);
6797 t3
= newTemp(Ity_V128
);
6798 assign(t1
, getWReg(ws
));
6799 assign(t2
, getWReg(wt
));
6800 assign(t3
, binop(Iop_CmpGT8Ux16
, mkexpr(t2
), mkexpr(t1
)));
6801 putWReg(wd
, mkexpr(t3
));
6805 case 0x01: { /* CLT_U.H */
6806 DIP("CLT_U.H w%d, w%d, w%d", wd
, ws
, wt
);
6807 t1
= newTemp(Ity_V128
);
6808 t2
= newTemp(Ity_V128
);
6809 t3
= newTemp(Ity_V128
);
6810 assign(t1
, getWReg(ws
));
6811 assign(t2
, getWReg(wt
));
6812 assign(t3
, binop(Iop_CmpGT16Ux8
, mkexpr(t2
), mkexpr(t1
)));
6813 putWReg(wd
, mkexpr(t3
));
6817 case 0x02: { /* CLT_U.W */
6818 DIP("CLT_U.W w%d, w%d, w%d", wd
, ws
, wt
);
6819 t1
= newTemp(Ity_V128
);
6820 t2
= newTemp(Ity_V128
);
6821 t3
= newTemp(Ity_V128
);
6822 assign(t1
, getWReg(ws
));
6823 assign(t2
, getWReg(wt
));
6824 assign(t3
, binop(Iop_CmpGT32Ux4
, mkexpr(t2
), mkexpr(t1
)));
6825 putWReg(wd
, mkexpr(t3
));
6829 case 0x03: { /* CLT_U.D */
6830 DIP("CLT_U.D w%d, w%d, w%d", wd
, ws
, wt
);
6831 t1
= newTemp(Ity_V128
);
6832 t2
= newTemp(Ity_V128
);
6833 t3
= newTemp(Ity_V128
);
6834 assign(t1
, getWReg(ws
));
6835 assign(t2
, getWReg(wt
));
6836 assign(t3
, binop(Iop_CmpGT64Ux2
, mkexpr(t2
), mkexpr(t1
)));
6837 putWReg(wd
, mkexpr(t3
));
6848 case 0x04: { /* CLE_S.df */
6850 case 0x00: { /* CLE_S.B */
6851 DIP("CLE_S.B w%d, w%d, w%d", wd
, ws
, wt
);
6852 t1
= newTemp(Ity_V128
);
6853 t2
= newTemp(Ity_V128
);
6854 t3
= newTemp(Ity_V128
);
6855 assign(t1
, getWReg(ws
));
6856 assign(t2
, getWReg(wt
));
6857 assign(t3
, binop(Iop_OrV128
,
6858 binop(Iop_CmpGT8Sx16
,
6859 mkexpr(t2
), mkexpr(t1
)),
6860 binop(Iop_CmpEQ8x16
,
6861 mkexpr(t1
), mkexpr(t2
))));
6862 putWReg(wd
, mkexpr(t3
));
6866 case 0x01: { /* CLE_S.H */
6867 DIP("CLE_S.H w%d, w%d, w%d", wd
, ws
, wt
);
6868 t1
= newTemp(Ity_V128
);
6869 t2
= newTemp(Ity_V128
);
6870 t3
= newTemp(Ity_V128
);
6871 assign(t1
, getWReg(ws
));
6872 assign(t2
, getWReg(wt
));
6873 assign(t3
, binop(Iop_OrV128
,
6874 binop(Iop_CmpGT16Sx8
,
6875 mkexpr(t2
), mkexpr(t1
)),
6876 binop(Iop_CmpEQ16x8
,
6877 mkexpr(t1
), mkexpr(t2
))));
6878 putWReg(wd
, mkexpr(t3
));
6882 case 0x02: { /* CLE_S.W */
6883 DIP("CLE_S.W w%d, w%d, w%d", wd
, ws
, wt
);
6884 t1
= newTemp(Ity_V128
);
6885 t2
= newTemp(Ity_V128
);
6886 t3
= newTemp(Ity_V128
);
6887 assign(t1
, getWReg(ws
));
6888 assign(t2
, getWReg(wt
));
6889 assign(t3
, binop(Iop_OrV128
,
6890 binop(Iop_CmpGT32Sx4
,
6891 mkexpr(t2
), mkexpr(t1
)),
6892 binop(Iop_CmpEQ32x4
,
6893 mkexpr(t1
), mkexpr(t2
))));
6894 putWReg(wd
, mkexpr(t3
));
6898 case 0x03: { /* CLE_S.D */
6899 DIP("CLE_S.D w%d, w%d, w%d", wd
, ws
, wt
);
6900 t1
= newTemp(Ity_V128
);
6901 t2
= newTemp(Ity_V128
);
6902 t3
= newTemp(Ity_V128
);
6903 assign(t1
, getWReg(ws
));
6904 assign(t2
, getWReg(wt
));
6905 assign(t3
, binop(Iop_OrV128
,
6906 binop(Iop_CmpGT64Sx2
,
6907 mkexpr(t2
), mkexpr(t1
)),
6908 binop(Iop_CmpEQ64x2
,
6909 mkexpr(t1
), mkexpr(t2
))));
6910 putWReg(wd
, mkexpr(t3
));
6921 case 0x05: { /* CLE_U.df */
6923 case 0x00: { /* CLE_U.B */
6924 DIP("CLE_U.B w%d, w%d, w%d", wd
, ws
, wt
);
6925 t1
= newTemp(Ity_V128
);
6926 t2
= newTemp(Ity_V128
);
6927 t3
= newTemp(Ity_V128
);
6928 assign(t1
, getWReg(ws
));
6929 assign(t2
, getWReg(wt
));
6930 assign(t3
, binop(Iop_OrV128
,
6931 binop(Iop_CmpGT8Ux16
,
6932 mkexpr(t2
), mkexpr(t1
)),
6933 binop(Iop_CmpEQ8x16
,
6934 mkexpr(t1
), mkexpr(t2
))));
6935 putWReg(wd
, mkexpr(t3
));
6939 case 0x01: { /* CLE_U.H */
6940 DIP("CLE_U.H w%d, w%d, w%d", wd
, ws
, wt
);
6941 t1
= newTemp(Ity_V128
);
6942 t2
= newTemp(Ity_V128
);
6943 t3
= newTemp(Ity_V128
);
6944 assign(t1
, getWReg(ws
));
6945 assign(t2
, getWReg(wt
));
6946 assign(t3
, binop(Iop_OrV128
,
6947 binop(Iop_CmpGT16Ux8
,
6948 mkexpr(t2
), mkexpr(t1
)),
6949 binop(Iop_CmpEQ16x8
,
6950 mkexpr(t1
), mkexpr(t2
))));
6951 putWReg(wd
, mkexpr(t3
));
6955 case 0x02: { /* CLE_U.W */
6956 DIP("CLE_U.W w%d, w%d, w%d", wd
, ws
, wt
);
6957 t1
= newTemp(Ity_V128
);
6958 t2
= newTemp(Ity_V128
);
6959 t3
= newTemp(Ity_V128
);
6960 assign(t1
, getWReg(ws
));
6961 assign(t2
, getWReg(wt
));
6962 assign(t3
, binop(Iop_OrV128
,
6963 binop(Iop_CmpGT32Ux4
,
6964 mkexpr(t2
), mkexpr(t1
)),
6965 binop(Iop_CmpEQ32x4
,
6966 mkexpr(t1
), mkexpr(t2
))));
6967 putWReg(wd
, mkexpr(t3
));
6971 case 0x03: { /* CLE_U.D */
6972 DIP("CLE_U.D w%d, w%d, w%d", wd
, ws
, wt
);
6973 t1
= newTemp(Ity_V128
);
6974 t2
= newTemp(Ity_V128
);
6975 t3
= newTemp(Ity_V128
);
6976 assign(t1
, getWReg(ws
));
6977 assign(t2
, getWReg(wt
));
6980 binop(Iop_CmpGT64Ux2
,
6981 mkexpr(t2
), mkexpr(t1
)),
6982 binop(Iop_CmpEQ64x2
,
6983 mkexpr(t1
), mkexpr(t2
))));
6984 putWReg(wd
, mkexpr(t3
));
7002 static Int
msa_3R_10(UInt cins
, UChar wd
, UChar ws
) /* 3R (0x10) */
7004 IRTemp t1
, t2
, t3
, t4
;
7008 operation
= (cins
& 0x03800000) >> 23;
7009 df
= (cins
& 0x00600000) >> 21;
7010 wt
= (cins
& 0x001F0000) >> 16;
7012 switch (operation
) {
7013 case 0x00: { /* ADD_A.df */
7015 case 0x00: { /* ADD_A.B */
7016 DIP("ADD_A.B w%d, w%d, w%d", wd
, ws
, wt
);
7017 t1
= newTemp(Ity_V128
);
7018 t2
= newTemp(Ity_V128
);
7019 t3
= newTemp(Ity_V128
);
7020 assign(t1
, unop(Iop_Abs8x16
, getWReg(ws
)));
7021 assign(t2
, unop(Iop_Abs8x16
, getWReg(wt
)));
7022 assign(t3
, binop(Iop_Add8x16
, mkexpr(t1
), mkexpr(t2
)));
7023 putWReg(wd
, mkexpr(t3
));
7027 case 0x01: { /* ADD_A.H */
7028 DIP("ADD_A.H w%d, w%d, w%d", wd
, ws
, wt
);
7029 t1
= newTemp(Ity_V128
);
7030 t2
= newTemp(Ity_V128
);
7031 t3
= newTemp(Ity_V128
);
7032 assign(t1
, unop(Iop_Abs16x8
, getWReg(ws
)));
7033 assign(t2
, unop(Iop_Abs16x8
, getWReg(wt
)));
7034 assign(t3
, binop(Iop_Add16x8
, mkexpr(t1
), mkexpr(t2
)));
7035 putWReg(wd
, mkexpr(t3
));
7039 case 0x02: { /* ADD_A.W */
7040 DIP("ADD_A.W w%d, w%d, w%d", wd
, ws
, wt
);
7041 t1
= newTemp(Ity_V128
);
7042 t2
= newTemp(Ity_V128
);
7043 t3
= newTemp(Ity_V128
);
7044 assign(t1
, unop(Iop_Abs32x4
, getWReg(ws
)));
7045 assign(t2
, unop(Iop_Abs32x4
, getWReg(wt
)));
7046 assign(t3
, binop(Iop_Add32x4
, mkexpr(t1
), mkexpr(t2
)));
7047 putWReg(wd
, mkexpr(t3
));
7051 case 0x03: { /* ADD_A.D */
7052 DIP("ADD_A.D w%d, w%d, w%d", wd
, ws
, wt
);
7053 t1
= newTemp(Ity_V128
);
7054 t2
= newTemp(Ity_V128
);
7055 t3
= newTemp(Ity_V128
);
7056 assign(t1
, unop(Iop_Abs64x2
, getWReg(ws
)));
7057 assign(t2
, unop(Iop_Abs64x2
, getWReg(wt
)));
7058 assign(t3
, binop(Iop_Add64x2
, mkexpr(t1
), mkexpr(t2
)));
7059 putWReg(wd
, mkexpr(t3
));
7070 case 0x01: { /* ADDS_A.df */
7072 case 0x00: { /* ADDS_A.B */
7073 DIP("ADDS_A.B w%d, w%d, w%d", wd
, ws
, wt
);
7074 t1
= newTemp(Ity_V128
);
7075 t2
= newTemp(Ity_V128
);
7076 t3
= newTemp(Ity_V128
);
7077 t4
= newTemp(Ity_V128
);
7078 assign(t1
, unop(Iop_Abs8x16
, getWReg(ws
)));
7079 assign(t2
, unop(Iop_Abs8x16
, getWReg(wt
)));
7080 assign(t3
, binop(Iop_SarN8x16
,
7085 assign(t4
, binop(Iop_SarN8x16
,
7090 putWReg(wd
, binop(Iop_QAdd8Sx16
,
7112 case 0x01: { /* ADDS_A.H */
7113 DIP("ADDS_A.H w%d, w%d, w%d", wd
, ws
, wt
);
7114 t1
= newTemp(Ity_V128
);
7115 t2
= newTemp(Ity_V128
);
7116 t3
= newTemp(Ity_V128
);
7117 t4
= newTemp(Ity_V128
);
7118 assign(t1
, unop(Iop_Abs16x8
, getWReg(ws
)));
7119 assign(t2
, unop(Iop_Abs16x8
, getWReg(wt
)));
7120 assign(t3
, binop(Iop_SarN16x8
,
7125 assign(t4
, binop(Iop_SarN16x8
,
7130 putWReg(wd
, binop(Iop_QAdd16Sx8
,
7152 case 0x02: { /* ADDS_A.W */
7153 DIP("ADDS_A.W w%d, w%d, w%d", wd
, ws
, wt
);
7154 t1
= newTemp(Ity_V128
);
7155 t2
= newTemp(Ity_V128
);
7156 t3
= newTemp(Ity_V128
);
7157 t4
= newTemp(Ity_V128
);
7158 assign(t1
, unop(Iop_Abs32x4
, getWReg(ws
)));
7159 assign(t2
, unop(Iop_Abs32x4
, getWReg(wt
)));
7160 assign(t3
, binop(Iop_SarN32x4
,
7165 assign(t4
, binop(Iop_SarN32x4
,
7170 putWReg(wd
, binop(Iop_QAdd32Sx4
,
7192 case 0x03: { /* ADDS_A.D */
7193 DIP("ADDS_A.D w%d, w%d, w%d", wd
, ws
, wt
);
7194 t1
= newTemp(Ity_V128
);
7195 t2
= newTemp(Ity_V128
);
7196 t3
= newTemp(Ity_V128
);
7197 t4
= newTemp(Ity_V128
);
7198 assign(t1
, unop(Iop_Abs64x2
, getWReg(ws
)));
7199 assign(t2
, unop(Iop_Abs64x2
, getWReg(wt
)));
7200 assign(t3
, binop(Iop_SarN64x2
,
7205 assign(t4
, binop(Iop_SarN64x2
,
7211 binop(Iop_QAdd64Sx2
,
7240 case 0x02: { /* ADDS_S.df */
7242 case 0x00: { /* ADDS_S.B */
7243 DIP("ADDS_S.B w%d, w%d, w%d", wd
, ws
, wt
);
7244 t1
= newTemp(Ity_V128
);
7245 t2
= newTemp(Ity_V128
);
7246 t3
= newTemp(Ity_V128
);
7247 assign(t1
, getWReg(ws
));
7248 assign(t2
, getWReg(wt
));
7249 assign(t3
, binop(Iop_QAdd8Sx16
, mkexpr(t1
), mkexpr(t2
)));
7250 putWReg(wd
, mkexpr(t3
));
7254 case 0x01: { /* ADDS_S.H */
7255 DIP("ADDS_S.H w%d, w%d, w%d", wd
, ws
, wt
);
7256 t1
= newTemp(Ity_V128
);
7257 t2
= newTemp(Ity_V128
);
7258 t3
= newTemp(Ity_V128
);
7259 assign(t1
, getWReg(ws
));
7260 assign(t2
, getWReg(wt
));
7261 assign(t3
, binop(Iop_QAdd16Sx8
, mkexpr(t1
), mkexpr(t2
)));
7262 putWReg(wd
, mkexpr(t3
));
7266 case 0x02: { /* ADDS_S.W */
7267 DIP("ADDS_S.W w%d, w%d, w%d", wd
, ws
, wt
);
7268 t1
= newTemp(Ity_V128
);
7269 t2
= newTemp(Ity_V128
);
7270 t3
= newTemp(Ity_V128
);
7271 assign(t1
, getWReg(ws
));
7272 assign(t2
, getWReg(wt
));
7273 assign(t3
, binop(Iop_QAdd32Sx4
, mkexpr(t1
), mkexpr(t2
)));
7274 putWReg(wd
, mkexpr(t3
));
7278 case 0x03: { /* ADDS_S.D */
7279 DIP("ADDS_S.D w%d, w%d, w%d", wd
, ws
, wt
);
7280 t1
= newTemp(Ity_V128
);
7281 t2
= newTemp(Ity_V128
);
7282 t3
= newTemp(Ity_V128
);
7283 assign(t1
, getWReg(ws
));
7284 assign(t2
, getWReg(wt
));
7285 assign(t3
, binop(Iop_QAdd64Sx2
, mkexpr(t1
), mkexpr(t2
)));
7286 putWReg(wd
, mkexpr(t3
));
7297 case 0x03: { /* ADDS_U.df */
7299 case 0x00: { /* ADDS_U.B */
7300 DIP("ADDS_U.B w%d, w%d, w%d", wd
, ws
, wt
);
7301 t1
= newTemp(Ity_V128
);
7302 t2
= newTemp(Ity_V128
);
7303 t3
= newTemp(Ity_V128
);
7304 assign(t1
, getWReg(ws
));
7305 assign(t2
, getWReg(wt
));
7306 assign(t3
, binop(Iop_QAdd8Ux16
, mkexpr(t1
), mkexpr(t2
)));
7307 putWReg(wd
, mkexpr(t3
));
7311 case 0x01: { /* ADDS_U.H */
7312 DIP("ADDS_U.H w%d, w%d, w%d", wd
, ws
, wt
);
7313 t1
= newTemp(Ity_V128
);
7314 t2
= newTemp(Ity_V128
);
7315 t3
= newTemp(Ity_V128
);
7316 assign(t1
, getWReg(ws
));
7317 assign(t2
, getWReg(wt
));
7318 assign(t3
, binop(Iop_QAdd16Ux8
, mkexpr(t1
), mkexpr(t2
)));
7319 putWReg(wd
, mkexpr(t3
));
7323 case 0x02: { /* ADDS_U.W */
7324 DIP("ADDS_U.W w%d, w%d, w%d", wd
, ws
, wt
);
7325 t1
= newTemp(Ity_V128
);
7326 t2
= newTemp(Ity_V128
);
7327 t3
= newTemp(Ity_V128
);
7328 assign(t1
, getWReg(ws
));
7329 assign(t2
, getWReg(wt
));
7330 assign(t3
, binop(Iop_QAdd32Ux4
, mkexpr(t1
), mkexpr(t2
)));
7331 putWReg(wd
, mkexpr(t3
));
7335 case 0x03: { /* ADDS_U.D */
7336 DIP("ADDS_U.D w%d, w%d, w%d", wd
, ws
, wt
);
7337 t1
= newTemp(Ity_V128
);
7338 t2
= newTemp(Ity_V128
);
7339 t3
= newTemp(Ity_V128
);
7340 assign(t1
, getWReg(ws
));
7341 assign(t2
, getWReg(wt
));
7342 assign(t3
, binop(Iop_QAdd64Ux2
, mkexpr(t1
), mkexpr(t2
)));
7343 putWReg(wd
, mkexpr(t3
));
7354 case 0x04: { /* AVE_S.df */
7356 case 0x00: { /* AVE_S.B */
7357 DIP("AVE_S.B w%d, w%d, w%d", wd
, ws
, wt
);
7358 t1
= newTemp(Ity_V128
);
7359 t2
= newTemp(Ity_V128
);
7360 t3
= newTemp(Ity_V128
);
7361 assign(t1
, getWReg(ws
));
7362 assign(t2
, getWReg(wt
));
7363 assign(t3
, binop(Iop_Add8x16
,
7366 mkexpr(t1
), mkU8(1)),
7368 mkexpr(t2
), mkU8(1))),
7376 putWReg(wd
, mkexpr(t3
));
7380 case 0x01: { /* AVE_S.H */
7381 DIP("AVE_S.H w%d, w%d, w%d", wd
, ws
, wt
);
7382 t1
= newTemp(Ity_V128
);
7383 t2
= newTemp(Ity_V128
);
7384 t3
= newTemp(Ity_V128
);
7385 assign(t1
, getWReg(ws
));
7386 assign(t2
, getWReg(wt
));
7391 mkexpr(t1
), mkU8(1)),
7393 mkexpr(t2
), mkU8(1))),
7401 putWReg(wd
, mkexpr(t3
));
7405 case 0x02: { /* AVE_S.W */
7406 DIP("AVE_S.W w%d, w%d, w%d", wd
, ws
, wt
);
7407 t1
= newTemp(Ity_V128
);
7408 t2
= newTemp(Ity_V128
);
7409 t3
= newTemp(Ity_V128
);
7410 assign(t1
, getWReg(ws
));
7411 assign(t2
, getWReg(wt
));
7412 assign(t3
, binop(Iop_Add32x4
,
7415 mkexpr(t1
), mkU8(1)),
7417 mkexpr(t2
), mkU8(1))),
7425 putWReg(wd
, mkexpr(t3
));
7429 case 0x03: { /* AVE_S.D */
7430 DIP("AVE_S.D w%d, w%d, w%d", wd
, ws
, wt
);
7431 t1
= newTemp(Ity_V128
);
7432 t2
= newTemp(Ity_V128
);
7433 t3
= newTemp(Ity_V128
);
7434 assign(t1
, getWReg(ws
));
7435 assign(t2
, getWReg(wt
));
7436 assign(t3
, binop(Iop_Add64x2
,
7439 mkexpr(t1
), mkU8(1)),
7441 mkexpr(t2
), mkU8(1))),
7449 putWReg(wd
, mkexpr(t3
));
7460 case 0x05: { /* AVE_U.df */
7462 case 0x00: { /* AVE_U.B */
7463 DIP("AVE_U.B w%d, w%d, w%d", wd
, ws
, wt
);
7464 t1
= newTemp(Ity_V128
);
7465 t2
= newTemp(Ity_V128
);
7466 t3
= newTemp(Ity_V128
);
7467 assign(t1
, getWReg(ws
));
7468 assign(t2
, getWReg(wt
));
7469 assign(t3
, binop(Iop_Add16x8
,
7472 mkexpr(t1
), mkU8(1)),
7474 mkexpr(t2
), mkU8(1))),
7482 putWReg(wd
, mkexpr(t3
));
7486 case 0x01: { /* AVE_U.H */
7487 DIP("AVE_U.H w%d, w%d, w%d", wd
, ws
, wt
);
7488 t1
= newTemp(Ity_V128
);
7489 t2
= newTemp(Ity_V128
);
7490 t3
= newTemp(Ity_V128
);
7491 assign(t1
, getWReg(ws
));
7492 assign(t2
, getWReg(wt
));
7493 assign(t3
, binop(Iop_Add16x8
,
7496 mkexpr(t1
), mkU8(1)),
7498 mkexpr(t2
), mkU8(1))),
7506 putWReg(wd
, mkexpr(t3
));
7510 case 0x02: { /* AVE_U.W */
7511 DIP("AVE_U.W w%d, w%d, w%d", wd
, ws
, wt
);
7512 t1
= newTemp(Ity_V128
);
7513 t2
= newTemp(Ity_V128
);
7514 t3
= newTemp(Ity_V128
);
7515 assign(t1
, getWReg(ws
));
7516 assign(t2
, getWReg(wt
));
7517 assign(t3
, binop(Iop_Add32x4
,
7520 mkexpr(t1
), mkU8(1)),
7522 mkexpr(t2
), mkU8(1))),
7530 putWReg(wd
, mkexpr(t3
));
7534 case 0x03: { /* AVE_U.D */
7535 DIP("AVE_U.D w%d, w%d, w%d", wd
, ws
, wt
);
7536 t1
= newTemp(Ity_V128
);
7537 t2
= newTemp(Ity_V128
);
7538 t3
= newTemp(Ity_V128
);
7539 assign(t1
, getWReg(ws
));
7540 assign(t2
, getWReg(wt
));
7541 assign(t3
, binop(Iop_Add64x2
,
7544 mkexpr(t1
), mkU8(1)),
7546 mkexpr(t2
), mkU8(1))),
7554 putWReg(wd
, mkexpr(t3
));
7565 case 0x06: { /* AVER_S.df */
7567 case 0x00: { /* AVER_S.B */
7568 DIP("AVER_S.B w%d, w%d, w%d", wd
, ws
, wt
);
7569 t1
= newTemp(Ity_V128
);
7570 t2
= newTemp(Ity_V128
);
7571 t3
= newTemp(Ity_V128
);
7572 assign(t1
, getWReg(ws
));
7573 assign(t2
, getWReg(wt
));
7574 assign(t3
, binop(Iop_Avg8Sx16
, mkexpr(t1
), mkexpr(t2
)));
7575 putWReg(wd
, mkexpr(t3
));
7579 case 0x01: { /* AVER_S.H */
7580 DIP("AVER_S.H w%d, w%d, w%d", wd
, ws
, wt
);
7581 t1
= newTemp(Ity_V128
);
7582 t2
= newTemp(Ity_V128
);
7583 t3
= newTemp(Ity_V128
);
7584 assign(t1
, getWReg(ws
));
7585 assign(t2
, getWReg(wt
));
7586 assign(t3
, binop(Iop_Avg16Sx8
, mkexpr(t1
), mkexpr(t2
)));
7587 putWReg(wd
, mkexpr(t3
));
7591 case 0x02: { /* AVER_S.W */
7592 DIP("AVER_S.W w%d, w%d, w%d", wd
, ws
, wt
);
7593 t1
= newTemp(Ity_V128
);
7594 t2
= newTemp(Ity_V128
);
7595 t3
= newTemp(Ity_V128
);
7596 assign(t1
, getWReg(ws
));
7597 assign(t2
, getWReg(wt
));
7598 assign(t3
, binop(Iop_Avg32Sx4
, mkexpr(t1
), mkexpr(t2
)));
7599 putWReg(wd
, mkexpr(t3
));
7603 case 0x03: { /* AVER_S.D */
7604 DIP("AVER_S.D w%d, w%d, w%d", wd
, ws
, wt
);
7605 t1
= newTemp(Ity_V128
);
7606 t2
= newTemp(Ity_V128
);
7607 t3
= newTemp(Ity_V128
);
7608 assign(t1
, getWReg(ws
));
7609 assign(t2
, getWReg(wt
));
7610 assign(t3
, binop(Iop_Add64x2
,
7613 mkexpr(t1
), mkU8(1)),
7615 mkexpr(t2
), mkU8(1))),
7623 putWReg(wd
, mkexpr(t3
));
7634 case 0x07: { /* AVER_U.df */
7636 case 0x00: { /* AVER_U.B */
7637 DIP("AVER_U.B w%d, w%d, w%d", wd
, ws
, wt
);
7638 t1
= newTemp(Ity_V128
);
7639 t2
= newTemp(Ity_V128
);
7640 t3
= newTemp(Ity_V128
);
7641 assign(t1
, getWReg(ws
));
7642 assign(t2
, getWReg(wt
));
7643 assign(t3
, binop(Iop_Avg8Ux16
, mkexpr(t1
), mkexpr(t2
)));
7644 putWReg(wd
, mkexpr(t3
));
7648 case 0x01: { /* AVER_U.H */
7649 DIP("AVER_U.H w%d, w%d, w%d", wd
, ws
, wt
);
7650 t1
= newTemp(Ity_V128
);
7651 t2
= newTemp(Ity_V128
);
7652 t3
= newTemp(Ity_V128
);
7653 assign(t1
, getWReg(ws
));
7654 assign(t2
, getWReg(wt
));
7655 assign(t3
, binop(Iop_Avg16Ux8
, mkexpr(t1
), mkexpr(t2
)));
7656 putWReg(wd
, mkexpr(t3
));
7660 case 0x02: { /* AVER_U.W */
7661 DIP("AVER_U.W w%d, w%d, w%d", wd
, ws
, wt
);
7662 t1
= newTemp(Ity_V128
);
7663 t2
= newTemp(Ity_V128
);
7664 t3
= newTemp(Ity_V128
);
7665 assign(t1
, getWReg(ws
));
7666 assign(t2
, getWReg(wt
));
7667 assign(t3
, binop(Iop_Avg32Ux4
, mkexpr(t1
), mkexpr(t2
)));
7668 putWReg(wd
, mkexpr(t3
));
7672 case 0x03: { /* AVER_U.D */
7673 DIP("AVER_U.D w%d, w%d, w%d", wd
, ws
, wt
);
7674 t1
= newTemp(Ity_V128
);
7675 t2
= newTemp(Ity_V128
);
7676 t3
= newTemp(Ity_V128
);
7677 assign(t1
, getWReg(ws
));
7678 assign(t2
, getWReg(wt
));
7679 assign(t3
, binop(Iop_Add64x2
,
7682 mkexpr(t1
), mkU8(1)),
7684 mkexpr(t2
), mkU8(1))),
7692 putWReg(wd
, mkexpr(t3
));
7710 static Int
msa_3R_11(UInt cins
, UChar wd
, UChar ws
) /* 3R (0x11) */
7716 operation
= (cins
& 0x03800000) >> 23;
7717 df
= (cins
& 0x00600000) >> 21;
7718 wt
= (cins
& 0x001F0000) >> 16;
7720 switch (operation
) {
7721 case 0x00: { /* SUBS_S.df */
7723 case 0x00: { /* SUBS_S.B */
7724 DIP("SUBS_S.B w%d, w%d, w%d", wd
, ws
, wt
);
7725 t1
= newTemp(Ity_V128
);
7726 t2
= newTemp(Ity_V128
);
7727 t3
= newTemp(Ity_V128
);
7728 assign(t1
, getWReg(ws
));
7729 assign(t2
, getWReg(wt
));
7730 assign(t3
, binop(Iop_QSub8Sx16
, mkexpr(t1
), mkexpr(t2
)));
7731 putWReg(wd
, mkexpr(t3
));
7735 case 0x01: { /* SUBS_S.H */
7736 DIP("SUBS_S.H w%d, w%d, w%d", wd
, ws
, wt
);
7737 t1
= newTemp(Ity_V128
);
7738 t2
= newTemp(Ity_V128
);
7739 t3
= newTemp(Ity_V128
);
7740 assign(t1
, getWReg(ws
));
7741 assign(t2
, getWReg(wt
));
7742 assign(t3
, binop(Iop_QSub16Sx8
, mkexpr(t1
), mkexpr(t2
)));
7743 putWReg(wd
, mkexpr(t3
));
7747 case 0x02: { /* SUBS_S.W */
7748 DIP("SUBS_S.W w%d, w%d, w%d", wd
, ws
, wt
);
7749 t1
= newTemp(Ity_V128
);
7750 t2
= newTemp(Ity_V128
);
7751 t3
= newTemp(Ity_V128
);
7752 assign(t1
, getWReg(ws
));
7753 assign(t2
, getWReg(wt
));
7754 assign(t3
, binop(Iop_QSub32Sx4
, mkexpr(t1
), mkexpr(t2
)));
7755 putWReg(wd
, mkexpr(t3
));
7759 case 0x03: { /* SUBS_S.D */
7760 DIP("SUBS_S.D w%d, w%d, w%d", wd
, ws
, wt
);
7761 t1
= newTemp(Ity_V128
);
7762 t2
= newTemp(Ity_V128
);
7763 t3
= newTemp(Ity_V128
);
7764 assign(t1
, getWReg(ws
));
7765 assign(t2
, getWReg(wt
));
7766 assign(t3
, binop(Iop_QSub64Sx2
, mkexpr(t1
), mkexpr(t2
)));
7767 putWReg(wd
, mkexpr(t3
));
7778 case 0x01: { /* SUBS_U.df */
7780 case 0x00: { /* SUBS_U.B */
7781 DIP("SUBS_U.B w%d, w%d, w%d", wd
, ws
, wt
);
7782 t1
= newTemp(Ity_V128
);
7783 t2
= newTemp(Ity_V128
);
7784 t3
= newTemp(Ity_V128
);
7785 assign(t1
, getWReg(ws
));
7786 assign(t2
, getWReg(wt
));
7787 assign(t3
, binop(Iop_QSub8Ux16
, mkexpr(t1
), mkexpr(t2
)));
7788 putWReg(wd
, mkexpr(t3
));
7792 case 0x01: { /* SUBS_U.H */
7793 DIP("SUBS_U.H w%d, w%d, w%d", wd
, ws
, wt
);
7794 t1
= newTemp(Ity_V128
);
7795 t2
= newTemp(Ity_V128
);
7796 t3
= newTemp(Ity_V128
);
7797 assign(t1
, getWReg(ws
));
7798 assign(t2
, getWReg(wt
));
7799 assign(t3
, binop(Iop_QSub16Ux8
, mkexpr(t1
), mkexpr(t2
)));
7800 putWReg(wd
, mkexpr(t3
));
7804 case 0x02: { /* SUBS_U.W */
7805 DIP("SUBS_U.W w%d, w%d, w%d", wd
, ws
, wt
);
7806 t1
= newTemp(Ity_V128
);
7807 t2
= newTemp(Ity_V128
);
7808 t3
= newTemp(Ity_V128
);
7809 assign(t1
, getWReg(ws
));
7810 assign(t2
, getWReg(wt
));
7811 assign(t3
, binop(Iop_QSub32Ux4
, mkexpr(t1
), mkexpr(t2
)));
7812 putWReg(wd
, mkexpr(t3
));
7816 case 0x03: { /* SUBS_U.D */
7817 DIP("SUBS_U.D w%d, w%d, w%d", wd
, ws
, wt
);
7818 t1
= newTemp(Ity_V128
);
7819 t2
= newTemp(Ity_V128
);
7820 t3
= newTemp(Ity_V128
);
7821 assign(t1
, getWReg(ws
));
7822 assign(t2
, getWReg(wt
));
7823 assign(t3
, binop(Iop_QSub64Ux2
, mkexpr(t1
), mkexpr(t2
)));
7824 putWReg(wd
, mkexpr(t3
));
7835 case 0x02: { /* SUBSUS_U.df */
7837 case 0x00: { /* SUBSUS_U.B */
7838 DIP("SUBSUS_U.B w%d, w%d, w%d", wd
, ws
, wt
);
7839 t1
= newTemp(Ity_V128
);
7840 t2
= newTemp(Ity_V128
);
7841 t3
= newTemp(Ity_V128
);
7842 assign(t1
, binop(Iop_Sub8x16
, getWReg(ws
), getWReg(wt
)));
7843 assign(t2
, binop(Iop_SarN8x16
, getWReg(wt
), mkU8(7)));
7844 assign(t3
, binop(Iop_OrV128
,
7845 binop(Iop_CmpGT8Ux16
,
7848 binop(Iop_CmpEQ8x16
,
7854 mkexpr(t3
), mkexpr(t2
)),
7863 case 0x01: { /* SUBSUS_U.H */
7864 DIP("SUBSUS_U.H w%d, w%d, w%d", wd
, ws
, wt
);
7865 t1
= newTemp(Ity_V128
);
7866 t2
= newTemp(Ity_V128
);
7867 t3
= newTemp(Ity_V128
);
7868 assign(t1
, binop(Iop_Sub16x8
, getWReg(ws
), getWReg(wt
)));
7869 assign(t2
, binop(Iop_SarN16x8
, getWReg(wt
), mkU8(15)));
7872 binop(Iop_CmpGT16Ux8
,
7875 binop(Iop_CmpEQ16x8
,
7881 mkexpr(t3
), mkexpr(t2
)),
7890 case 0x02: { /* SUBSUS_U.W */
7891 DIP("SUBSUS_U.W w%d, w%d, w%d", wd
, ws
, wt
);
7892 t1
= newTemp(Ity_V128
);
7893 t2
= newTemp(Ity_V128
);
7894 t3
= newTemp(Ity_V128
);
7895 assign(t1
, binop(Iop_Sub32x4
, getWReg(ws
), getWReg(wt
)));
7896 assign(t2
, binop(Iop_SarN32x4
, getWReg(wt
), mkU8(31)));
7899 binop(Iop_CmpGT32Ux4
,
7902 binop(Iop_CmpEQ32x4
,
7908 mkexpr(t3
), mkexpr(t2
)),
7917 case 0x03: { /* SUBSUS_U.D */
7918 DIP("SUBSUS_U.D w%d, w%d, w%d", wd
, ws
, wt
);
7919 t1
= newTemp(Ity_V128
);
7920 t2
= newTemp(Ity_V128
);
7921 t3
= newTemp(Ity_V128
);
7922 assign(t1
, binop(Iop_Sub64x2
, getWReg(ws
), getWReg(wt
)));
7923 assign(t2
, binop(Iop_SarN64x2
, getWReg(wt
), mkU8(63)));
7926 binop(Iop_CmpGT64Ux2
,
7929 binop(Iop_CmpEQ64x2
,
7935 mkexpr(t3
), mkexpr(t2
)),
7951 case 0x03: { /* SUBSUU_S.df */
7953 case 0x00: { /* SUBSUU_S.B */
7954 DIP("SUBSUU_S.B w%d, w%d, w%d", wd
, ws
, wt
);
7955 t1
= newTemp(Ity_V128
);
7956 t2
= newTemp(Ity_V128
);
7957 t3
= newTemp(Ity_V128
);
7958 assign(t1
, binop(Iop_Sub8x16
, getWReg(ws
), getWReg(wt
)));
7972 getWReg(ws
), mkU8(7)),
7982 mkexpr(t2
), mkU8(7)),
7987 case 0x01: { /* SUBSUU_S.H */
7988 DIP("SUBSUU_S.H w%d, w%d, w%d", wd
, ws
, wt
);
7989 t1
= newTemp(Ity_V128
);
7990 t2
= newTemp(Ity_V128
);
7991 t3
= newTemp(Ity_V128
);
7992 assign(t1
, binop(Iop_Sub16x8
, getWReg(ws
), getWReg(wt
)));
8017 mkexpr(t2
), mkU8(15)),
8022 case 0x02: { /* SUBSUU_S.W */
8023 DIP("SUBSUU_S.W w%d, w%d, w%d", wd
, ws
, wt
);
8024 t1
= newTemp(Ity_V128
);
8025 t2
= newTemp(Ity_V128
);
8026 t3
= newTemp(Ity_V128
);
8027 assign(t1
, binop(Iop_Sub32x4
, getWReg(ws
), getWReg(wt
)));
8058 case 0x03: { /* SUBSUU_S.D */
8059 DIP("SUBSUU_S.D w%d, w%d, w%d", wd
, ws
, wt
);
8060 t1
= newTemp(Ity_V128
);
8061 t2
= newTemp(Ity_V128
);
8062 t3
= newTemp(Ity_V128
);
8063 assign(t1
, binop(Iop_Sub64x2
, getWReg(ws
), getWReg(wt
)));
8088 mkexpr(t2
), mkU8(63)),
8100 case 0x04: { /* ASUB_S.df */
8102 case 0x00: { /* ASUB_S.B */
8103 DIP("ASUB_S.B w%d, w%d, w%d", wd
, ws
, wt
);
8104 t1
= newTemp(Ity_V128
);
8105 t2
= newTemp(Ity_V128
);
8106 t3
= newTemp(Ity_V128
);
8107 assign(t1
, binop(Iop_SarN8x16
, getWReg(ws
), mkU8(7)));
8108 assign(t2
, binop(Iop_SarN8x16
, getWReg(wt
), mkU8(7)));
8109 assign(t3
, binop(Iop_Sub8x16
, getWReg(ws
), getWReg(wt
)));
8137 case 0x01: { /* ASUB_S.H */
8138 DIP("ASUB_S.H w%d, w%d, w%d", wd
, ws
, wt
);
8139 t1
= newTemp(Ity_V128
);
8140 t2
= newTemp(Ity_V128
);
8141 t3
= newTemp(Ity_V128
);
8142 assign(t1
, binop(Iop_SarN16x8
, getWReg(ws
), mkU8(15)));
8143 assign(t2
, binop(Iop_SarN16x8
, getWReg(wt
), mkU8(15)));
8144 assign(t3
, binop(Iop_Sub16x8
, getWReg(ws
), getWReg(wt
)));
8172 case 0x02: { /* ASUB_S.W */
8173 DIP("ASUB_S.W w%d, w%d, w%d", wd
, ws
, wt
);
8174 t1
= newTemp(Ity_V128
);
8175 t2
= newTemp(Ity_V128
);
8176 t3
= newTemp(Ity_V128
);
8177 assign(t1
, binop(Iop_SarN32x4
, getWReg(ws
), mkU8(31)));
8178 assign(t2
, binop(Iop_SarN32x4
, getWReg(wt
), mkU8(31)));
8179 assign(t3
, binop(Iop_Sub32x4
, getWReg(ws
), getWReg(wt
)));
8207 case 0x03: { /* ASUB_S.D */
8208 DIP("ASUB_S.D w%d, w%d, w%d", wd
, ws
, wt
);
8209 t1
= newTemp(Ity_V128
);
8210 t2
= newTemp(Ity_V128
);
8211 t3
= newTemp(Ity_V128
);
8212 assign(t1
, binop(Iop_SarN64x2
, getWReg(ws
), mkU8(63)));
8213 assign(t2
, binop(Iop_SarN64x2
, getWReg(wt
), mkU8(63)));
8214 assign(t3
, binop(Iop_Sub64x2
, getWReg(ws
), getWReg(wt
)));
8249 case 0x05: { /* ASUB_U.df */
8251 case 0x00: { /* ASUB_U.B */
8252 DIP("ASUB_U.B w%d, w%d, w%d", wd
, ws
, wt
);
8253 t1
= newTemp(Ity_V128
);
8254 t2
= newTemp(Ity_V128
);
8255 t3
= newTemp(Ity_V128
);
8256 assign(t1
, getWReg(ws
));
8257 assign(t2
, getWReg(wt
));
8261 mkexpr(t1
), mkexpr(t2
)),
8266 unop(Iop_NotV128
, mkexpr(t3
)),
8271 binop(Iop_AndV128
, mkexpr(t3
),
8282 case 0x01: { /* ASUB_U.H */
8283 DIP("ASUB_U.H w%d, w%d, w%d", wd
, ws
, wt
);
8284 t1
= newTemp(Ity_V128
);
8285 t2
= newTemp(Ity_V128
);
8286 t3
= newTemp(Ity_V128
);
8287 assign(t1
, getWReg(ws
));
8288 assign(t2
, getWReg(wt
));
8292 mkexpr(t1
), mkexpr(t2
)),
8315 case 0x02: { /* ASUB_U.W */
8316 DIP("ASUB_U.W w%d, w%d, w%d", wd
, ws
, wt
);
8317 t1
= newTemp(Ity_V128
);
8318 t2
= newTemp(Ity_V128
);
8319 t3
= newTemp(Ity_V128
);
8320 assign(t1
, getWReg(ws
));
8321 assign(t2
, getWReg(wt
));
8325 mkexpr(t1
), mkexpr(t2
)),
8330 unop(Iop_NotV128
, mkexpr(t3
)),
8347 case 0x03: { /* ASUB_U.D */
8348 DIP("ASUB_U.D w%d, w%d, w%d", wd
, ws
, wt
);
8349 t1
= newTemp(Ity_V128
);
8350 t2
= newTemp(Ity_V128
);
8351 t3
= newTemp(Ity_V128
);
8352 assign(t1
, getWReg(ws
));
8353 assign(t2
, getWReg(wt
));
8357 mkexpr(t1
), mkexpr(t2
)),
8362 unop(Iop_NotV128
, mkexpr(t3
)),
8393 static Int
msa_3R_12(UInt cins
, UChar wd
, UChar ws
) /* 3R (0x12) */
8395 IRTemp t1
, t2
, t3
, t4
, t5
, t6
;
8399 operation
= (cins
& 0x03800000) >> 23;
8400 df
= (cins
& 0x00600000) >> 21;
8401 wt
= (cins
& 0x001F0000) >> 16;
8403 switch (operation
) {
8404 case 0x00: { /* MULV.df */
8406 case 0x00: { /* MULV.B */
8407 DIP("MULV.B w%d, w%d, w%d", wd
, ws
, wt
);
8408 putWReg(wd
, binop(Iop_Mul8x16
, getWReg(ws
), getWReg(wt
)));
8412 case 0x01: { /* MULV.H */
8413 DIP("MULV.H w%d, w%d, w%d", wd
, ws
, wt
);
8414 putWReg(wd
, binop(Iop_Mul16x8
, getWReg(ws
), getWReg(wt
)));
8418 case 0x02: { /* MULV.W */
8419 DIP("MULV.W w%d, w%d, w%d", wd
, ws
, wt
);
8420 putWReg(wd
, binop(Iop_Mul32x4
, getWReg(ws
), getWReg(wt
)));
8424 case 0x03: { /* MULV.D */
8425 DIP("MULV.D w%d, w%d, w%d", wd
, ws
, wt
);
8426 t1
= newTemp(Ity_V128
);
8427 t2
= newTemp(Ity_V128
);
8428 assign(t1
, getWReg(ws
));
8429 assign(t2
, getWReg(wt
));
8431 binop(Iop_64HLtoV128
,
8433 unop(Iop_V128HIto64
,
8435 unop(Iop_V128HIto64
,
8452 case 0x01: { /* MADDV.df */
8454 case 0x00: { /* MADDV.B */
8455 DIP("MADDV.B w%d, w%d, w%d", wd
, ws
, wt
);
8465 case 0x01: { /* MADDV.H */
8466 DIP("MADDV.H w%d, w%d, w%d", wd
, ws
, wt
);
8476 case 0x02: { /* MADDV.W */
8477 DIP("MADDV.W w%d, w%d, w%d", wd
, ws
, wt
);
8487 case 0x03: { /* MADDV.D */
8488 DIP("MADDV.D w%d, w%d, w%d", wd
, ws
, wt
);
8489 t1
= newTemp(Ity_V128
);
8490 t2
= newTemp(Ity_V128
);
8491 assign(t1
, getWReg(ws
));
8492 assign(t2
, getWReg(wt
));
8496 binop(Iop_64HLtoV128
,
8498 unop(Iop_V128HIto64
,
8500 unop(Iop_V128HIto64
,
8517 case 0x02: { /* MSUBV.df */
8519 case 0x00: { /* MSUBV.B */
8520 DIP("MSUBV.B w%d, w%d, w%d", wd
, ws
, wt
);
8530 case 0x01: { /* MSUBV.H */
8531 DIP("MSUBV.H w%d, w%d, w%d", wd
, ws
, wt
);
8541 case 0x02: { /* MSUBV.W */
8542 DIP("MSUBV.W w%d, w%d, w%d", wd
, ws
, wt
);
8552 case 0x03: { /* MSUBV.D */
8553 DIP("MSUBV.D w%d, w%d, w%d", wd
, ws
, wt
);
8554 t1
= newTemp(Ity_V128
);
8555 t2
= newTemp(Ity_V128
);
8556 assign(t1
, getWReg(ws
));
8557 assign(t2
, getWReg(wt
));
8561 binop(Iop_64HLtoV128
,
8563 unop(Iop_V128HIto64
,
8565 unop(Iop_V128HIto64
,
8582 case 0x04: { /* DIV_S.df */
8583 t1
= newTemp(Ity_V128
);
8584 t2
= newTemp(Ity_V128
);
8585 assign(t1
, getWReg(ws
));
8586 assign(t2
, getWReg(wt
));
8589 case 0x00: { /* DIV_S.B */
8590 DIP("DIV_S.B w%d, w%d, w%d", wd
, ws
, wt
);
8594 for (i
= 0; i
< 16; i
++) {
8595 tmp
[i
] = newTemp(Ity_I32
);
8602 binop(Iop_GetElem8x16
,
8606 binop(Iop_GetElem8x16
,
8609 mkU8((i
& 3) << 3)));
8613 binop(Iop_64HLtoV128
,
8648 case 0x01: { /* DIV_S.H */
8649 DIP("DIV_S.H w%d, w%d, w%d", wd
, ws
, wt
);
8653 for (i
= 0; i
< 8; i
++) {
8654 tmp
[i
] = newTemp(Ity_I32
);
8661 binop(Iop_GetElem16x8
,
8665 binop(Iop_GetElem16x8
,
8668 mkU8((i
& 1) << 4)));
8672 binop(Iop_64HLtoV128
,
8690 case 0x02: { /* DIV_S.W */
8691 DIP("DIV_S.W w%d, w%d, w%d", wd
, ws
, wt
);
8695 for (i
= 0; i
< 4; i
++) {
8696 tmp
[i
] = newTemp(Ity_I32
);
8699 binop(Iop_GetElem32x4
,
8700 mkexpr(t1
), mkU8(i
)),
8701 binop(Iop_GetElem32x4
,
8702 mkexpr(t2
), mkU8(i
))));
8706 binop(Iop_64HLtoV128
, \
8716 case 0x03: { /* DIV_S.D */
8717 DIP("DIV_S.D w%d, w%d, w%d", wd
, ws
, wt
);
8719 binop(Iop_64HLtoV128
,
8721 unop(Iop_V128HIto64
,
8723 unop(Iop_V128HIto64
,
8740 case 0x05: { /* DIV_U.df */
8741 t1
= newTemp(Ity_V128
);
8742 t2
= newTemp(Ity_V128
);
8743 assign(t1
, getWReg(ws
));
8744 assign(t2
, getWReg(wt
));
8747 case 0x00: { /* DIV_U.B */
8748 DIP("DIV_U.B w%d, w%d, w%d", wd
, ws
, wt
);
8752 for (i
= 0; i
< 16; i
++) {
8753 tmp
[i
] = newTemp(Ity_I32
);
8760 binop(Iop_GetElem8x16
,
8764 binop(Iop_GetElem8x16
,
8767 mkU8((i
& 3) << 3)));
8771 binop(Iop_64HLtoV128
,
8806 case 0x01: { /* DIV_U.H */
8807 DIP("DIV_U.H w%d, w%d, w%d", wd
, ws
, wt
);
8811 for (i
= 0; i
< 8; i
++) {
8812 tmp
[i
] = newTemp(Ity_I32
);
8819 binop(Iop_GetElem16x8
,
8823 binop(Iop_GetElem16x8
,
8826 mkU8((i
& 1) << 4)));
8830 binop(Iop_64HLtoV128
,
8848 case 0x02: { /* DIV_U.W */
8849 DIP("DIV_U.W w%d, w%d, w%d", wd
, ws
, wt
);
8853 for (i
= 0; i
< 4; i
++) {
8854 tmp
[i
] = newTemp(Ity_I32
);
8857 binop(Iop_GetElem32x4
,
8858 mkexpr(t1
), mkU8(i
)),
8859 binop(Iop_GetElem32x4
,
8860 mkexpr(t2
), mkU8(i
))));
8864 binop(Iop_64HLtoV128
,
8874 case 0x03: { /* DIV_U.D */
8875 DIP("DIV_U.D w%d, w%d, w%d", wd
, ws
, wt
);
8877 binop(Iop_64HLtoV128
,
8879 unop(Iop_V128HIto64
,
8881 unop(Iop_V128HIto64
,
8898 case 0x06: { /* MOD_S.df */
8899 t1
= newTemp(Ity_V128
);
8900 t2
= newTemp(Ity_V128
);
8901 assign(t1
, getWReg(ws
));
8902 assign(t2
, getWReg(wt
));
8905 case 0x00: { /* MOD_S.B */
8906 DIP("MOD_S.B w%d, w%d, w%d", wd
, ws
, wt
);
8910 for (i
= 0; i
< 16; i
++) {
8911 tmp
[i
] = newTemp(Ity_I32
);
8917 binop(Iop_DivModS32to32
,
8919 binop(Iop_GetElem8x16
,
8923 binop(Iop_GetElem8x16
,
8926 mkU8((i
& 3) << 3)));
8930 binop(Iop_64HLtoV128
,
8960 mkexpr(tmp
[0])))))));
8964 case 0x01: { /* MOD_S.H */
8965 DIP("MOD_S.H w%d, w%d, w%d", wd
, ws
, wt
);
8969 for (i
= 0; i
< 8; i
++) {
8970 tmp
[i
] = newTemp(Ity_I32
);
8976 binop(Iop_DivModS32to32
,
8978 binop(Iop_GetElem16x8
,
8982 binop(Iop_GetElem16x8
,
8985 mkU8((i
& 1) << 4)));
8989 binop(Iop_64HLtoV128
,
9007 case 0x02: { /* MOD_S.W */
9008 DIP("MOD_S.W w%d, w%d, w%d", wd
, ws
, wt
);
9012 for (i
= 0; i
< 4; i
++) {
9013 tmp
[i
] = newTemp(Ity_I32
);
9016 binop(Iop_DivModS32to32
,
9017 binop(Iop_GetElem32x4
,
9020 binop(Iop_GetElem32x4
,
9026 binop(Iop_64HLtoV128
,
9036 case 0x03: { /* MOD_S.D */
9037 DIP("MOD_S.D w%d, w%d, w%d", wd
, ws
, wt
);
9038 t3
= newTemp(Ity_I64
);
9039 t4
= newTemp(Ity_I64
);
9040 t5
= newTemp(Ity_I64
);
9041 t6
= newTemp(Ity_I64
);
9042 assign(t3
, unop(Iop_V128HIto64
, mkexpr(t1
)));
9043 assign(t4
, unop(Iop_V128HIto64
, mkexpr(t2
)));
9044 assign(t5
, unop(Iop_V128to64
, mkexpr(t1
)));
9045 assign(t6
, unop(Iop_V128to64
, mkexpr(t2
)));
9047 binop(Iop_64HLtoV128
,
9072 case 0x07: { /* MOD_U.df */
9073 t1
= newTemp(Ity_V128
);
9074 t2
= newTemp(Ity_V128
);
9075 assign(t1
, getWReg(ws
));
9076 assign(t2
, getWReg(wt
));
9079 case 0x00: { /* MOD_U.B */
9080 DIP("MOD_U.B w%d, w%d, w%d", wd
, ws
, wt
);
9084 for (i
= 0; i
< 16; i
++) {
9085 tmp
[i
] = newTemp(Ity_I32
);
9091 binop(Iop_DivModU32to32
,
9093 binop(Iop_GetElem8x16
,
9097 binop(Iop_GetElem8x16
,
9100 mkU8((i
& 3) << 3)));
9104 binop(Iop_64HLtoV128
,
9134 mkexpr(tmp
[0])))))));
9138 case 0x01: { /* MOD_U.H */
9139 DIP("MOD_U.H w%d, w%d, w%d", wd
, ws
, wt
);
9143 for (i
= 0; i
< 8; i
++) {
9144 tmp
[i
] = newTemp(Ity_I32
);
9150 binop(Iop_DivModU32to32
,
9152 binop(Iop_GetElem16x8
,
9156 binop(Iop_GetElem16x8
,
9159 mkU8((i
& 1) << 4)));
9163 binop(Iop_64HLtoV128
,
9181 case 0x02: { /* MOD_U.W */
9182 DIP("MOD_U.W w%d, w%d, w%d", wd
, ws
, wt
);
9186 for (i
= 0; i
< 4; i
++) {
9187 tmp
[i
] = newTemp(Ity_I32
);
9190 binop(Iop_DivModU32to32
,
9191 binop(Iop_GetElem32x4
,
9194 binop(Iop_GetElem32x4
,
9200 binop(Iop_64HLtoV128
,
9210 case 0x03: { /* MOD_U.D */
9211 DIP("MOD_U.D w%d, w%d, w%d", wd
, ws
, wt
);
9212 t3
= newTemp(Ity_I64
);
9213 t4
= newTemp(Ity_I64
);
9214 t5
= newTemp(Ity_I64
);
9215 t6
= newTemp(Ity_I64
);
9216 assign(t3
, unop(Iop_V128HIto64
, mkexpr(t1
)));
9217 assign(t4
, unop(Iop_V128HIto64
, mkexpr(t2
)));
9218 assign(t5
, unop(Iop_V128to64
, mkexpr(t1
)));
9219 assign(t6
, unop(Iop_V128to64
, mkexpr(t2
)));
9221 binop(Iop_64HLtoV128
,
9253 static Int
msa_3R_13(UInt cins
, UChar wd
, UChar ws
) /* 3R (0x13) */
9259 operation
= (cins
& 0x03800000) >> 23;
9260 df
= (cins
& 0x00600000) >> 21;
9261 wt
= (cins
& 0x001F0000) >> 16;
9263 switch (operation
) {
9264 case 0x00: { /* DOTP_S.df */
9265 t1
= newTemp(Ity_V128
);
9266 t2
= newTemp(Ity_V128
);
9267 assign(t1
, getWReg(ws
));
9268 assign(t2
, getWReg(wt
));
9271 case 0x01: { /* DOTP_S.H */
9272 DIP("DOTP_S.H w%d, w%d, w%d", wd
, ws
, wt
);
9276 for (i
= 0; i
< 8; i
++) {
9277 tmp
[i
] = newTemp(Ity_I16
);
9281 binop(Iop_GetElem8x16
,
9284 binop(Iop_GetElem8x16
,
9288 binop(Iop_GetElem8x16
,
9291 binop(Iop_GetElem8x16
,
9293 mkU8(2 * i
+ 1)))));
9297 binop(Iop_64HLtoV128
,
9315 case 0x02: { /* DOTP_S.W */
9316 DIP("DOTP_S.W w%d, w%d, w%d", wd
, ws
, wt
);
9320 for (i
= 0; i
< 4; i
++) {
9321 tmp
[i
] = newTemp(Ity_I32
);
9325 binop(Iop_GetElem16x8
,
9328 binop(Iop_GetElem16x8
,
9332 binop(Iop_GetElem16x8
,
9335 binop(Iop_GetElem16x8
,
9337 mkU8(2 * i
+ 1)))));
9341 binop(Iop_64HLtoV128
,
9351 case 0x03: { /* DOTP_S.D */
9352 DIP("DOTP_S.D w%d, w%d, w%d", wd
, ws
, wt
);
9356 for (i
= 0; i
< 2; i
++) {
9357 tmp
[i
] = newTemp(Ity_I64
);
9361 binop(Iop_GetElem32x4
,
9364 binop(Iop_GetElem32x4
,
9368 binop(Iop_GetElem32x4
,
9371 binop(Iop_GetElem32x4
,
9373 mkU8(2 * i
+ 1)))));
9377 binop(Iop_64HLtoV128
,
9378 mkexpr(tmp
[1]), mkexpr(tmp
[0])));
9389 case 0x01: { /* DOTP_U.df */
9390 t1
= newTemp(Ity_V128
);
9391 t2
= newTemp(Ity_V128
);
9392 assign(t1
, getWReg(ws
));
9393 assign(t2
, getWReg(wt
));
9396 case 0x01: { /* DOTP_U.H */
9397 DIP("DOTP_U.H w%d, w%d, w%d", wd
, ws
, wt
);
9401 for (i
= 0; i
< 8; i
++) {
9402 tmp
[i
] = newTemp(Ity_I16
);
9406 binop(Iop_GetElem8x16
,
9409 binop(Iop_GetElem8x16
,
9413 binop(Iop_GetElem8x16
,
9416 binop(Iop_GetElem8x16
,
9418 mkU8(2 * i
+ 1)))));
9422 binop(Iop_64HLtoV128
,
9440 case 0x02: { /* DOTP_U.W */
9441 DIP("DOTP_U.W w%d, w%d, w%d", wd
, ws
, wt
);
9445 for (i
= 0; i
< 4; i
++) {
9446 tmp
[i
] = newTemp(Ity_I32
);
9450 binop(Iop_GetElem16x8
,
9453 binop(Iop_GetElem16x8
,
9457 binop(Iop_GetElem16x8
,
9460 binop(Iop_GetElem16x8
,
9462 mkU8(2 * i
+ 1)))));
9466 binop(Iop_64HLtoV128
,
9476 case 0x03: { /* DOTP_U.D */
9477 DIP("DOTP_U.D w%d, w%d, w%d", wd
, ws
, wt
);
9481 for (i
= 0; i
< 2; i
++) {
9482 tmp
[i
] = newTemp(Ity_I64
);
9486 binop(Iop_GetElem32x4
,
9489 binop(Iop_GetElem32x4
,
9493 binop(Iop_GetElem32x4
,
9496 binop(Iop_GetElem32x4
,
9498 mkU8(2 * i
+ 1)))));
9502 binop(Iop_64HLtoV128
,
9503 mkexpr(tmp
[1]), mkexpr(tmp
[0])));
9514 case 0x02: { /* DPADD_S.df */
9515 t1
= newTemp(Ity_V128
);
9516 t2
= newTemp(Ity_V128
);
9517 assign(t1
, getWReg(ws
));
9518 assign(t2
, getWReg(wt
));
9521 case 0x01: { /* DPADD_S.H */
9522 DIP("DPADD_S.H w%d, w%d, w%d", wd
, ws
, wt
);
9526 for (i
= 0; i
< 8; i
++) {
9527 tmp
[i
] = newTemp(Ity_I16
);
9531 binop(Iop_GetElem8x16
,
9534 binop(Iop_GetElem8x16
,
9538 binop(Iop_GetElem8x16
,
9541 binop(Iop_GetElem8x16
,
9543 mkU8(2 * i
+ 1)))));
9549 binop(Iop_64HLtoV128
,
9563 mkexpr(tmp
[0]))))));
9567 case 0x02: { /* DPADD_S.W */
9568 DIP("DPADD_S.W w%d, w%d, w%d", wd
, ws
, wt
);
9572 for (i
= 0; i
< 4; i
++) {
9573 tmp
[i
] = newTemp(Ity_I32
);
9577 binop(Iop_GetElem16x8
,
9580 binop(Iop_GetElem16x8
,
9584 binop(Iop_GetElem16x8
,
9587 binop(Iop_GetElem16x8
,
9589 mkU8(2 * i
+ 1)))));
9595 binop(Iop_64HLtoV128
,
9605 case 0x03: { /* DPADD_S.D */
9606 DIP("DPADD_S.D w%d, w%d, w%d", wd
, ws
, wt
);
9610 for (i
= 0; i
< 2; i
++) {
9611 tmp
[i
] = newTemp(Ity_I64
);
9615 binop(Iop_GetElem32x4
,
9618 binop(Iop_GetElem32x4
,
9622 binop(Iop_GetElem32x4
,
9625 binop(Iop_GetElem32x4
,
9627 mkU8(2 * i
+ 1)))));
9633 binop(Iop_64HLtoV128
,
9646 case 0x03: { /* DPADD_U.df */
9647 t1
= newTemp(Ity_V128
);
9648 t2
= newTemp(Ity_V128
);
9649 assign(t1
, getWReg(ws
));
9650 assign(t2
, getWReg(wt
));
9653 case 0x01: { /* DPADD_U.H */
9654 DIP("DPADD_U.H w%d, w%d, w%d", wd
, ws
, wt
);
9658 for (i
= 0; i
< 8; i
++) {
9659 tmp
[i
] = newTemp(Ity_I16
);
9663 binop(Iop_GetElem8x16
,
9666 binop(Iop_GetElem8x16
,
9670 binop(Iop_GetElem8x16
,
9673 binop(Iop_GetElem8x16
,
9675 mkU8(2 * i
+ 1)))));
9681 binop(Iop_64HLtoV128
,
9695 mkexpr(tmp
[0]))))));
9699 case 0x02: { /* DPADD_U.W */
9700 DIP("DPADD_U.W w%d, w%d, w%d", wd
, ws
, wt
);
9704 for (i
= 0; i
< 4; i
++) {
9705 tmp
[i
] = newTemp(Ity_I32
);
9709 binop(Iop_GetElem16x8
,
9712 binop(Iop_GetElem16x8
,
9716 binop(Iop_GetElem16x8
,
9719 binop(Iop_GetElem16x8
,
9721 mkU8(2 * i
+ 1)))));
9727 binop(Iop_64HLtoV128
,
9737 case 0x03: { /* DPADD_U.D */
9738 DIP("DPADD_U.D w%d, w%d, w%d", wd
, ws
, wt
);
9742 for (i
= 0; i
< 2; i
++) {
9743 tmp
[i
] = newTemp(Ity_I64
);
9747 binop(Iop_GetElem32x4
,
9750 binop(Iop_GetElem32x4
,
9754 binop(Iop_GetElem32x4
,
9757 binop(Iop_GetElem32x4
,
9759 mkU8(2 * i
+ 1)))));
9765 binop(Iop_64HLtoV128
,
9778 case 0x04: { /* DPSUB_S.df */
9779 t1
= newTemp(Ity_V128
);
9780 t2
= newTemp(Ity_V128
);
9781 assign(t1
, getWReg(ws
));
9782 assign(t2
, getWReg(wt
));
9785 case 0x01: { /* DPSUB_S.H */
9786 DIP("DPSUB_S.H w%d, w%d, w%d", wd
, ws
, wt
);
9790 for (i
= 0; i
< 8; i
++) {
9791 tmp
[i
] = newTemp(Ity_I16
);
9795 binop(Iop_GetElem8x16
,
9798 binop(Iop_GetElem8x16
,
9802 binop(Iop_GetElem8x16
,
9805 binop(Iop_GetElem8x16
,
9807 mkU8(2 * i
+ 1)))));
9813 binop(Iop_64HLtoV128
,
9827 mkexpr(tmp
[0]))))));
9831 case 0x02: { /* DPSUB_S.W */
9832 DIP("DPSUB_S.W w%d, w%d, w%d", wd
, ws
, wt
);
9836 for (i
= 0; i
< 4; i
++) {
9837 tmp
[i
] = newTemp(Ity_I32
);
9841 binop(Iop_GetElem16x8
,
9844 binop(Iop_GetElem16x8
,
9848 binop(Iop_GetElem16x8
,
9851 binop(Iop_GetElem16x8
,
9853 mkU8(2 * i
+ 1)))));
9859 binop(Iop_64HLtoV128
,
9869 case 0x03: { /* DPSUB_S.D */
9870 DIP("DPSUB_S.D w%d, w%d, w%d", wd
, ws
, wt
);
9874 for (i
= 0; i
< 2; i
++) {
9875 tmp
[i
] = newTemp(Ity_I64
);
9879 binop(Iop_GetElem32x4
,
9882 binop(Iop_GetElem32x4
,
9886 binop(Iop_GetElem32x4
,
9889 binop(Iop_GetElem32x4
,
9891 mkU8(2 * i
+ 1)))));
9897 binop(Iop_64HLtoV128
,
9910 case 0x05: { /* DPSUB_U.df */
9911 t1
= newTemp(Ity_V128
);
9912 t2
= newTemp(Ity_V128
);
9913 assign(t1
, getWReg(ws
));
9914 assign(t2
, getWReg(wt
));
9917 case 0x01: { /* DPSUB_U.H */
9918 DIP("DPSUB_U.H w%d, w%d, w%d", wd
, ws
, wt
);
9922 for (i
= 0; i
< 8; i
++) {
9923 tmp
[i
] = newTemp(Ity_I16
);
9927 binop(Iop_GetElem8x16
,
9930 binop(Iop_GetElem8x16
,
9934 binop(Iop_GetElem8x16
,
9937 binop(Iop_GetElem8x16
,
9939 mkU8(2 * i
+ 1)))));
9945 binop(Iop_64HLtoV128
,
9959 mkexpr(tmp
[0]))))));
9963 case 0x02: { /* DPSUB_U.W */
9964 DIP("DPSUB_U.W w%d, w%d, w%d", wd
, ws
, wt
);
9968 for (i
= 0; i
< 4; i
++) {
9969 tmp
[i
] = newTemp(Ity_I32
);
9973 binop(Iop_GetElem16x8
,
9976 binop(Iop_GetElem16x8
,
9980 binop(Iop_GetElem16x8
,
9983 binop(Iop_GetElem16x8
,
9985 mkU8(2 * i
+ 1)))));
9991 binop(Iop_64HLtoV128
,
10001 case 0x03: { /* DPSUB_U.D */
10002 DIP("DPSUB_U.D w%d, w%d, w%d", wd
, ws
, wt
);
10006 for (i
= 0; i
< 2; i
++) {
10007 tmp
[i
] = newTemp(Ity_I64
);
10011 binop(Iop_GetElem32x4
,
10014 binop(Iop_GetElem32x4
,
10018 binop(Iop_GetElem32x4
,
10021 binop(Iop_GetElem32x4
,
10023 mkU8(2 * i
+ 1)))));
10029 binop(Iop_64HLtoV128
,
10049 static Int
msa_3R_14(UInt cins
, UChar wd
, UChar ws
) /* 3R (0x14) */
10051 IRTemp t1
, t2
, t3
, t4
;
10056 operation
= (cins
& 0x03800000) >> 23;
10057 df
= (cins
& 0x00600000) >> 21;
10058 wt
= (cins
& 0x001F0000) >> 16;
10059 ty
= mode64
? Ity_I64
: Ity_I32
;
10061 switch (operation
) {
10062 case 0x00: { /* SLD.df */
10065 DIP("SLD.B w%d, w%d[%d]", wd
, ws
, wt
);
10066 t1
= newTemp(Ity_I32
);
10067 t2
= newTemp(Ity_V128
);
10068 t3
= newTemp(Ity_V128
);
10079 unop(Iop_32to8
, mkexpr(t1
))));
10089 mkexpr(t2
), mkexpr(t3
)));
10093 case 0x01: {/* SLD.H */
10094 DIP("SLD.H w%d, w%d[%d]", wd
, ws
, wt
);
10095 t1
= newTemp(Ity_I32
);
10096 t2
= newTemp(Ity_I64
);
10097 t3
= newTemp(Ity_V128
);
10098 t4
= newTemp(Ity_V128
);
10107 binop(Iop_32HLto64
, mkU32(0), mkexpr(t1
)));
10111 binop(Iop_64HLtoV128
,
10112 mkexpr(t2
), mkexpr(t2
))));
10117 binop(Iop_64HLtoV128
,
10120 binop(Iop_64HLtoV128
,
10128 mkexpr(t1
), mkU32(0)),
10130 binop(Iop_64HLtoV128
,
10131 mkU64(0), mkU64(0)))));
10135 case 0x02: {/* SLD.W */
10136 DIP("SLD.W w%d, w%d[%d]", wd
, ws
, wt
);
10137 t1
= newTemp(Ity_I32
);
10138 t2
= newTemp(Ity_I64
);
10139 t3
= newTemp(Ity_V128
);
10140 t4
= newTemp(Ity_V128
);
10149 binop(Iop_32HLto64
,
10150 mkexpr(t1
), mkexpr(t1
)));
10154 binop(Iop_64HLtoV128
,
10155 mkexpr(t2
), mkexpr(t2
))));
10160 binop(Iop_64HLtoV128
,
10161 mkU64(0x2000000020ul
),
10162 mkU64(0x2000000020ul
)),
10163 binop(Iop_64HLtoV128
,
10171 mkexpr(t1
), mkU32(0)),
10173 binop(Iop_64HLtoV128
,
10174 mkU64(0), mkU64(0)))));
10178 case 0x03: { /* SLD.D */
10179 DIP("SLD.D w%d, w%d[%d]", wd
, ws
, wt
);
10180 t1
= newTemp(Ity_I32
);
10181 t2
= newTemp(Ity_I64
);
10182 t3
= newTemp(Ity_V128
);
10183 t4
= newTemp(Ity_V128
);
10192 binop(Iop_32HLto64
,
10196 mkexpr(t1
), mkU8(16))),
10200 mkexpr(t1
), mkU8(16)))));
10204 binop(Iop_64HLtoV128
,
10205 mkexpr(t2
), mkexpr(t2
))));
10210 binop(Iop_64HLtoV128
,
10211 mkU64(0x10001000100010ul
),
10212 mkU64(0x10001000100010ul
)),
10213 binop(Iop_64HLtoV128
,
10221 mkexpr(t1
), mkU32(0)),
10223 binop(Iop_64HLtoV128
,
10224 mkU64(0), mkU64(0)))));
10232 case 0x01: { /* SPLAT.df */
10236 case 0x00: { /* SPLAT.B */
10237 DIP("SPLAT.B w%d, w%d, w%d", wd
, ws
, wt
);
10238 t1
= newTemp(Ity_V128
);
10239 t2
= newTemp(Ity_I32
);
10240 assign(t1
, getWReg(ws
));
10242 mkNarrowTo32(ty
, getIReg(wt
)));
10245 for (i
= 0; i
< 16; i
++) {
10246 tmp
[i
] = newTemp(Ity_I8
);
10248 binop(Iop_GetElem8x16
,
10250 unop(Iop_32to8
, mkexpr(t2
))));
10254 binop(Iop_64HLtoV128
,
10255 binop(Iop_32HLto64
,
10256 binop(Iop_16HLto32
,
10263 binop(Iop_16HLto32
,
10270 binop(Iop_32HLto64
,
10271 binop(Iop_16HLto32
,
10278 binop(Iop_16HLto32
,
10284 mkexpr(tmp
[0]))))));
10288 case 0x01: { /* SPLAT.H */
10289 DIP("SPLAT.H w%d, w%d, w%d", wd
, ws
, wt
);
10290 t1
= newTemp(Ity_V128
);
10291 t2
= newTemp(Ity_I32
);
10292 assign(t1
, getWReg(ws
));
10294 mkNarrowTo32(ty
, getIReg(wt
)));
10297 for (i
= 0; i
< 8; i
++) {
10298 tmp
[i
] = newTemp(Ity_I16
);
10300 binop(Iop_GetElem16x8
,
10302 unop(Iop_32to8
, mkexpr(t2
))));
10306 binop(Iop_64HLtoV128
,
10307 binop(Iop_32HLto64
,
10308 binop(Iop_16HLto32
,
10311 binop(Iop_16HLto32
,
10314 binop(Iop_32HLto64
,
10315 binop(Iop_16HLto32
,
10318 binop(Iop_16HLto32
,
10320 mkexpr(tmp
[0])))));
10324 case 0x02: { /* SPLAT.W */
10325 DIP("SPLAT.W w%d, w%d, w%d", wd
, ws
, wt
);
10326 t1
= newTemp(Ity_V128
);
10327 t2
= newTemp(Ity_I32
);
10328 assign(t1
, getWReg(ws
));
10330 mkNarrowTo32(ty
, getIReg(wt
)));
10333 for (i
= 0; i
< 4; i
++) {
10334 tmp
[i
] = newTemp(Ity_I32
);
10336 binop(Iop_GetElem32x4
,
10338 unop(Iop_32to8
, mkexpr(t2
))));
10342 binop(Iop_64HLtoV128
,
10343 binop(Iop_32HLto64
,
10346 binop(Iop_32HLto64
,
10352 case 0x03: { /* SPLAT.D */
10353 DIP("SPLAT.D w%d, w%d, w%d", wd
, ws
, wt
);
10354 t1
= newTemp(Ity_V128
);
10355 t2
= newTemp(Ity_I32
);
10356 assign(t1
, getWReg(ws
));
10358 mkNarrowTo32(ty
, getIReg(wt
)));
10361 for (i
= 0; i
< 2; i
++) {
10362 tmp
[i
] = newTemp(Ity_I64
);
10364 binop(Iop_GetElem64x2
,
10366 unop(Iop_32to8
, mkexpr(t2
))));
10370 binop(Iop_64HLtoV128
,
10371 mkexpr(tmp
[1]), mkexpr(tmp
[0])));
10379 case 0x02: { /* PCKEV.df */
10381 case 0x00: { /* PCKEV.B */
10382 DIP("PCKEV.B w%d, w%d, w%d", wd
, ws
, wt
);
10383 t1
= newTemp(Ity_V128
);
10384 t2
= newTemp(Ity_V128
);
10385 t3
= newTemp(Ity_V128
);
10386 assign(t1
, getWReg(ws
));
10387 assign(t2
, getWReg(wt
));
10389 binop(Iop_PackEvenLanes8x16
,
10390 mkexpr(t1
), mkexpr(t2
)));
10391 putWReg(wd
, mkexpr(t3
));
10395 case 0x01: { /* PCKEV.H */
10396 DIP("PCKEV.H w%d, w%d, w%d", wd
, ws
, wt
);
10397 t1
= newTemp(Ity_V128
);
10398 t2
= newTemp(Ity_V128
);
10399 t3
= newTemp(Ity_V128
);
10400 assign(t1
, getWReg(ws
));
10401 assign(t2
, getWReg(wt
));
10403 binop(Iop_PackEvenLanes16x8
,
10404 mkexpr(t1
), mkexpr(t2
)));
10405 putWReg(wd
, mkexpr(t3
));
10409 case 0x02: { /* PCKEV.W */
10410 DIP("PCKEV.W w%d, w%d, w%d", wd
, ws
, wt
);
10411 t1
= newTemp(Ity_V128
);
10412 t2
= newTemp(Ity_V128
);
10413 t3
= newTemp(Ity_V128
);
10414 assign(t1
, getWReg(ws
));
10415 assign(t2
, getWReg(wt
));
10417 binop(Iop_PackEvenLanes32x4
,
10418 mkexpr(t1
), mkexpr(t2
)));
10419 putWReg(wd
, mkexpr(t3
));
10423 case 0x03: { /* PCKEV.D */
10424 DIP("PCKEV.D w%d, w%d, w%d", wd
, ws
, wt
);
10425 t1
= newTemp(Ity_V128
);
10426 t2
= newTemp(Ity_V128
);
10427 t3
= newTemp(Ity_V128
);
10428 assign(t1
, getWReg(ws
));
10429 assign(t2
, getWReg(wt
));
10431 binop(Iop_InterleaveLO64x2
,
10432 mkexpr(t1
), mkexpr(t2
)));
10433 putWReg(wd
, mkexpr(t3
));
10444 case 0x03: { /* PCKOD.df */
10446 case 0x00: { /* PCKOD.B */
10447 DIP("PCKOD.B w%d, w%d, w%d", wd
, ws
, wt
);
10448 t1
= newTemp(Ity_V128
);
10449 t2
= newTemp(Ity_V128
);
10450 t3
= newTemp(Ity_V128
);
10451 assign(t1
, getWReg(ws
));
10452 assign(t2
, getWReg(wt
));
10454 binop(Iop_PackOddLanes8x16
,
10455 mkexpr(t1
), mkexpr(t2
)));
10456 putWReg(wd
, mkexpr(t3
));
10460 case 0x01: { /* PCKOD.H */
10461 DIP("PCKOD.H w%d, w%d, w%d", wd
, ws
, wt
);
10462 t1
= newTemp(Ity_V128
);
10463 t2
= newTemp(Ity_V128
);
10464 t3
= newTemp(Ity_V128
);
10465 assign(t1
, getWReg(ws
));
10466 assign(t2
, getWReg(wt
));
10468 binop(Iop_PackOddLanes16x8
,
10469 mkexpr(t1
), mkexpr(t2
)));
10470 putWReg(wd
, mkexpr(t3
));
10474 case 0x02: { /* PCKOD.W */
10475 DIP("PCKOD.W w%d, w%d, w%d", wd
, ws
, wt
);
10476 t1
= newTemp(Ity_V128
);
10477 t2
= newTemp(Ity_V128
);
10478 t3
= newTemp(Ity_V128
);
10479 assign(t1
, getWReg(ws
));
10480 assign(t2
, getWReg(wt
));
10482 binop(Iop_PackOddLanes32x4
,
10483 mkexpr(t1
), mkexpr(t2
)));
10484 putWReg(wd
, mkexpr(t3
));
10488 case 0x03: { /* PCKOD.D */
10489 DIP("PCKOD.D w%d, w%d, w%d", wd
, ws
, wt
);
10490 t1
= newTemp(Ity_V128
);
10491 t2
= newTemp(Ity_V128
);
10492 t3
= newTemp(Ity_V128
);
10493 assign(t1
, getWReg(ws
));
10494 assign(t2
, getWReg(wt
));
10496 binop(Iop_InterleaveHI64x2
,
10497 mkexpr(t1
), mkexpr(t2
)));
10498 putWReg(wd
, mkexpr(t3
));
10509 case 0x04: { /* ILVL.df */
10511 case 0x00: { /* ILVL.B */
10512 DIP("ILVL.B w%d, w%d, w%d", wd
, ws
, wt
);
10513 t1
= newTemp(Ity_V128
);
10514 t2
= newTemp(Ity_V128
);
10515 t3
= newTemp(Ity_V128
);
10516 assign(t1
, getWReg(ws
));
10517 assign(t2
, getWReg(wt
));
10519 binop(Iop_InterleaveHI8x16
,
10520 mkexpr(t1
), mkexpr(t2
)));
10521 putWReg(wd
, mkexpr(t3
));
10525 case 0x01: { /* ILVL.H */
10526 DIP("ILVL.H w%d, w%d, w%d", wd
, ws
, wt
);
10527 t1
= newTemp(Ity_V128
);
10528 t2
= newTemp(Ity_V128
);
10529 t3
= newTemp(Ity_V128
);
10530 assign(t1
, getWReg(ws
));
10531 assign(t2
, getWReg(wt
));
10533 binop(Iop_InterleaveHI16x8
,
10534 mkexpr(t1
), mkexpr(t2
)));
10535 putWReg(wd
, mkexpr(t3
));
10539 case 0x02: { /* ILVL.W */
10540 DIP("ILVL.W w%d, w%d, w%d", wd
, ws
, wt
);
10541 t1
= newTemp(Ity_V128
);
10542 t2
= newTemp(Ity_V128
);
10543 t3
= newTemp(Ity_V128
);
10544 assign(t1
, getWReg(ws
));
10545 assign(t2
, getWReg(wt
));
10547 binop(Iop_InterleaveHI32x4
,
10548 mkexpr(t1
), mkexpr(t2
)));
10549 putWReg(wd
, mkexpr(t3
));
10553 case 0x03: { /* ILVL.D */
10554 DIP("ILVL.D w%d, w%d, w%d", wd
, ws
, wt
);
10555 t1
= newTemp(Ity_V128
);
10556 t2
= newTemp(Ity_V128
);
10557 t3
= newTemp(Ity_V128
);
10558 assign(t1
, getWReg(ws
));
10559 assign(t2
, getWReg(wt
));
10561 binop(Iop_InterleaveHI64x2
,
10562 mkexpr(t1
), mkexpr(t2
)));
10563 putWReg(wd
, mkexpr(t3
));
10574 case 0x05: { /* ILVR.df */
10576 case 0x00: { /* ILVL.B */
10577 DIP("ILVL.B w%d, w%d, w%d", wd
, ws
, wt
);
10578 t1
= newTemp(Ity_V128
);
10579 t2
= newTemp(Ity_V128
);
10580 t3
= newTemp(Ity_V128
);
10581 assign(t1
, getWReg(ws
));
10582 assign(t2
, getWReg(wt
));
10584 binop(Iop_InterleaveLO8x16
,
10585 mkexpr(t1
), mkexpr(t2
)));
10586 putWReg(wd
, mkexpr(t3
));
10590 case 0x01: { /* ILVL.H */
10591 DIP("ILVL.H w%d, w%d, w%d", wd
, ws
, wt
);
10592 t1
= newTemp(Ity_V128
);
10593 t2
= newTemp(Ity_V128
);
10594 t3
= newTemp(Ity_V128
);
10595 assign(t1
, getWReg(ws
));
10596 assign(t2
, getWReg(wt
));
10598 binop(Iop_InterleaveLO16x8
,
10599 mkexpr(t1
), mkexpr(t2
)));
10600 putWReg(wd
, mkexpr(t3
));
10604 case 0x02: { /* ILVL.W */
10605 DIP("ILVL.W w%d, w%d, w%d", wd
, ws
, wt
);
10606 t1
= newTemp(Ity_V128
);
10607 t2
= newTemp(Ity_V128
);
10608 t3
= newTemp(Ity_V128
);
10609 assign(t1
, getWReg(ws
));
10610 assign(t2
, getWReg(wt
));
10612 binop(Iop_InterleaveLO32x4
,
10613 mkexpr(t1
), mkexpr(t2
)));
10614 putWReg(wd
, mkexpr(t3
));
10618 case 0x03: { /* ILVL.D */
10619 DIP("ILVL.D w%d, w%d, w%d", wd
, ws
, wt
);
10620 t1
= newTemp(Ity_V128
);
10621 t2
= newTemp(Ity_V128
);
10622 t3
= newTemp(Ity_V128
);
10623 assign(t1
, getWReg(ws
));
10624 assign(t2
, getWReg(wt
));
10626 binop(Iop_InterleaveLO64x2
,
10627 mkexpr(t1
), mkexpr(t2
)));
10628 putWReg(wd
, mkexpr(t3
));
10636 case 0x06: { /* ILVEV.df */
10638 case 0x00: { /* ILVEV.B */
10639 DIP("ILVEV.B w%d, w%d, w%d", wd
, ws
, wt
);
10640 t1
= newTemp(Ity_V128
);
10641 t2
= newTemp(Ity_V128
);
10642 t3
= newTemp(Ity_V128
);
10643 assign(t1
, getWReg(ws
));
10644 assign(t2
, getWReg(wt
));
10646 binop(Iop_InterleaveEvenLanes8x16
,
10647 mkexpr(t1
), mkexpr(t2
)));
10648 putWReg(wd
, mkexpr(t3
));
10652 case 0x01: { /* ILVEV.H */
10653 DIP("ILVEV.H w%d, w%d, w%d", wd
, ws
, wt
);
10654 t1
= newTemp(Ity_V128
);
10655 t2
= newTemp(Ity_V128
);
10656 t3
= newTemp(Ity_V128
);
10657 assign(t1
, getWReg(ws
));
10658 assign(t2
, getWReg(wt
));
10660 binop(Iop_InterleaveEvenLanes16x8
,
10661 mkexpr(t1
), mkexpr(t2
)));
10662 putWReg(wd
, mkexpr(t3
));
10666 case 0x02: { /* ILVEV.W */
10667 DIP("ILVEV.W w%d, w%d, w%d", wd
, ws
, wt
);
10668 t1
= newTemp(Ity_V128
);
10669 t2
= newTemp(Ity_V128
);
10670 t3
= newTemp(Ity_V128
);
10671 assign(t1
, getWReg(ws
));
10672 assign(t2
, getWReg(wt
));
10674 binop(Iop_InterleaveEvenLanes32x4
,
10675 mkexpr(t1
), mkexpr(t2
)));
10676 putWReg(wd
, mkexpr(t3
));
10680 case 0x03: { /* ILVEV.D */
10681 DIP("ILVEV.D w%d, w%d, w%d", wd
, ws
, wt
);
10682 t1
= newTemp(Ity_V128
);
10683 t2
= newTemp(Ity_V128
);
10684 t3
= newTemp(Ity_V128
);
10685 assign(t1
, getWReg(ws
));
10686 assign(t2
, getWReg(wt
));
10688 binop(Iop_InterleaveLO64x2
,
10689 mkexpr(t1
), mkexpr(t2
)));
10690 putWReg(wd
, mkexpr(t3
));
10701 case 0x07: { /* ILVOD.df */
10703 case 0x00: { /* ILVOD.B */
10704 DIP("ILVOD.B w%d, w%d, w%d", wd
, ws
, wt
);
10705 t1
= newTemp(Ity_V128
);
10706 t2
= newTemp(Ity_V128
);
10707 t3
= newTemp(Ity_V128
);
10708 assign(t1
, getWReg(ws
));
10709 assign(t2
, getWReg(wt
));
10711 binop(Iop_InterleaveOddLanes8x16
,
10712 mkexpr(t1
), mkexpr(t2
)));
10713 putWReg(wd
, mkexpr(t3
));
10717 case 0x01: { /* ILVOD.H */
10718 DIP("ILVOD.H w%d, w%d, w%d", wd
, ws
, wt
);
10719 t1
= newTemp(Ity_V128
);
10720 t2
= newTemp(Ity_V128
);
10721 t3
= newTemp(Ity_V128
);
10722 assign(t1
, getWReg(ws
));
10723 assign(t2
, getWReg(wt
));
10725 binop(Iop_InterleaveOddLanes16x8
,
10726 mkexpr(t1
), mkexpr(t2
)));
10727 putWReg(wd
, mkexpr(t3
));
10731 case 0x02: { /* ILVOD.W */
10732 DIP("ILVOD.W w%d, w%d, w%d", wd
, ws
, wt
);
10733 t1
= newTemp(Ity_V128
);
10734 t2
= newTemp(Ity_V128
);
10735 t3
= newTemp(Ity_V128
);
10736 assign(t1
, getWReg(ws
));
10737 assign(t2
, getWReg(wt
));
10739 binop(Iop_InterleaveOddLanes32x4
,
10740 mkexpr(t1
), mkexpr(t2
)));
10741 putWReg(wd
, mkexpr(t3
));
10745 case 0x03: { /* ILVOD.D */
10746 DIP("ILVOD.D w%d, w%d, w%d", wd
, ws
, wt
);
10747 t1
= newTemp(Ity_V128
);
10748 t2
= newTemp(Ity_V128
);
10749 t3
= newTemp(Ity_V128
);
10750 assign(t1
, getWReg(ws
));
10751 assign(t2
, getWReg(wt
));
10753 binop(Iop_InterleaveHI64x2
,
10754 mkexpr(t1
), mkexpr(t2
)));
10755 putWReg(wd
, mkexpr(t3
));
10773 static Int
msa_3R_15(UInt cins
, UChar wd
, UChar ws
) /* 3R (0x15) */
10775 IRTemp t1
, t2
, t3
, t4
;
10779 operation
= (cins
& 0x03800000) >> 23;
10780 df
= (cins
& 0x00600000) >> 21;
10781 wt
= (cins
& 0x001F0000) >> 16;
10783 switch (operation
) {
10784 case 0x00: { /* VSHF.df */
10785 t1
= newTemp(Ity_V128
);
10786 t2
= newTemp(Ity_V128
);
10787 t3
= newTemp(Ity_V128
);
10788 assign(t1
, getWReg(wd
));
10789 assign(t2
, getWReg(ws
));
10790 assign(t3
, getWReg(wt
));
10793 case 0x00: { /* VSHF.B */
10794 DIP("VSHF.B w%d, w%d, w%d", wd
, ws
, wt
);
10798 for (i
= 0; i
< 16; i
++) {
10799 tmp
[i
] = newTemp(Ity_I8
);
10804 binop(Iop_GetElem8x16
,
10812 binop(Iop_GetElem8x16
,
10817 binop(Iop_GetElem8x16
,
10819 binop(Iop_GetElem8x16
,
10822 binop(Iop_GetElem8x16
,
10824 binop(Iop_GetElem8x16
,
10831 binop(Iop_64HLtoV128
,
10832 binop(Iop_32HLto64
,
10833 binop(Iop_16HLto32
,
10840 binop(Iop_16HLto32
,
10847 binop(Iop_32HLto64
,
10848 binop(Iop_16HLto32
,
10855 binop(Iop_16HLto32
,
10861 mkexpr(tmp
[0]))))));
10865 case 0x01: { /* VSHF.H */
10866 DIP("VSHF.H w%d, w%d, w%d", wd
, ws
, wt
);
10870 for (i
= 0; i
< 8; i
++) {
10871 tmp
[i
] = newTemp(Ity_I16
);
10876 binop(Iop_GetElem16x8
,
10884 binop(Iop_GetElem16x8
,
10889 binop(Iop_GetElem16x8
,
10892 binop(Iop_GetElem16x8
,
10895 binop(Iop_GetElem16x8
,
10898 binop(Iop_GetElem16x8
,
10905 binop(Iop_64HLtoV128
,
10906 binop(Iop_32HLto64
,
10907 binop(Iop_16HLto32
,
10910 binop(Iop_16HLto32
,
10913 binop(Iop_32HLto64
,
10914 binop(Iop_16HLto32
,
10917 binop(Iop_16HLto32
,
10919 mkexpr(tmp
[0])))));
10923 case 0x02: { /* VSHF.W */
10924 DIP("VSHF.W w%d, w%d, w%d", wd
, ws
, wt
);
10928 for (i
= 0; i
< 4; i
++) {
10929 tmp
[i
] = newTemp(Ity_I32
);
10934 binop(Iop_GetElem32x4
,
10942 binop(Iop_GetElem32x4
,
10947 binop(Iop_GetElem32x4
,
10950 binop(Iop_GetElem32x4
,
10953 binop(Iop_GetElem32x4
,
10956 binop(Iop_GetElem32x4
,
10963 binop(Iop_64HLtoV128
,
10964 binop(Iop_32HLto64
,
10967 binop(Iop_32HLto64
,
10973 case 0x03: { /* VSHF.D */
10974 DIP("VSHF.D w%d, w%d, w%d", wd
, ws
, wt
);
10978 for (i
= 0; i
< 2; i
++) {
10979 tmp
[i
] = newTemp(Ity_I64
);
10984 binop(Iop_GetElem64x2
,
10992 binop(Iop_GetElem64x2
,
10997 binop(Iop_GetElem64x2
,
11000 binop(Iop_GetElem64x2
,
11003 binop(Iop_GetElem64x2
,
11006 binop(Iop_GetElem64x2
,
11013 binop(Iop_64HLtoV128
,
11014 mkexpr(tmp
[1]), mkexpr(tmp
[0])));
11025 case 0x01: { /* SRAR.df */
11027 case 0x00: { /* SRAR.B */
11028 DIP("SRAR.B w%d, w%d, w%d", wd
, ws
, wt
);
11029 t1
= newTemp(Ity_V128
);
11030 t2
= newTemp(Ity_V128
);
11031 t3
= newTemp(Ity_V128
);
11032 t4
= newTemp(Ity_V128
);
11039 binop(Iop_64HLtoV128
,
11040 mkU64(0x808080808080808ull
),
11041 mkU64(0x808080808080808ull
)),
11045 binop(Iop_CmpEQ8x16
,
11046 binop(Iop_ShlN8x16
,
11049 binop(Iop_64HLtoV128
,
11050 mkU64(0), mkU64(0)))));
11052 binop(Iop_ShrN8x16
,
11061 mkexpr(t1
), mkexpr(t3
)));
11065 case 0x01: { /* SRAR.H */
11066 DIP("SRAR.H w%d, w%d, w%d", wd
, ws
, wt
);
11067 t1
= newTemp(Ity_V128
);
11068 t2
= newTemp(Ity_V128
);
11069 t3
= newTemp(Ity_V128
);
11070 t4
= newTemp(Ity_V128
);
11077 binop(Iop_64HLtoV128
,
11078 mkU64(0x10001000100010ul
),
11079 mkU64(0x10001000100010ul
)),
11083 binop(Iop_CmpEQ16x8
,
11084 binop(Iop_ShlN16x8
,
11087 binop(Iop_64HLtoV128
,
11088 mkU64(0), mkU64(0)))));
11090 binop(Iop_ShrN16x8
,
11099 mkexpr(t1
), mkexpr(t3
)));
11103 case 0x02: { /* SRAR.W */
11104 DIP("SRAR.W w%d, w%d, w%d", wd
, ws
, wt
);
11105 t1
= newTemp(Ity_V128
); // shifted
11106 t2
= newTemp(Ity_V128
); // 32 - wt
11107 t3
= newTemp(Ity_V128
); // rv
11108 t4
= newTemp(Ity_V128
); // wt % 32 == 0
11115 binop(Iop_64HLtoV128
,
11116 mkU64(0x2000000020ul
),
11117 mkU64(0x2000000020ul
)),
11121 binop(Iop_CmpEQ32x4
,
11122 binop(Iop_ShlN32x4
,
11125 binop(Iop_64HLtoV128
,
11126 mkU64(0), mkU64(0)))));
11128 binop(Iop_ShrN32x4
,
11137 mkexpr(t1
), mkexpr(t3
)));
11141 case 0x03: { /* SRAR.D */
11142 DIP("SRAR.D w%d, w%d, w%d", wd
, ws
, wt
);
11143 t1
= newTemp(Ity_V128
);
11144 t2
= newTemp(Ity_V128
);
11145 t3
= newTemp(Ity_V128
);
11146 t4
= newTemp(Ity_V128
);
11153 binop(Iop_64HLtoV128
,
11154 mkU64(64ul), mkU64(64ul)),
11158 binop(Iop_CmpEQ64x2
,
11159 binop(Iop_ShlN64x2
,
11162 binop(Iop_64HLtoV128
,
11163 mkU64(0), mkU64(0)))));
11165 binop(Iop_ShrN64x2
,
11174 mkexpr(t1
), mkexpr(t3
)));
11185 case 0x02: { /* SRLR.df */
11187 case 0x00: { /* SRLR.B */
11188 DIP("SRLR.B w%d, w%d, w%d", wd
, ws
, wt
);
11189 t1
= newTemp(Ity_V128
);
11190 t2
= newTemp(Ity_V128
);
11191 t3
= newTemp(Ity_V128
);
11192 t4
= newTemp(Ity_V128
);
11199 binop(Iop_64HLtoV128
,
11200 mkU64(0x808080808080808ull
),
11201 mkU64(0x808080808080808ull
)),
11205 binop(Iop_CmpEQ8x16
,
11206 binop(Iop_ShlN8x16
,
11209 binop(Iop_64HLtoV128
,
11210 mkU64(0), mkU64(0)))));
11212 binop(Iop_ShrN8x16
,
11221 mkexpr(t1
), mkexpr(t3
)));
11225 case 0x01: { /* SRLR.H */
11226 DIP("SRLR.H w%d, w%d, w%d", wd
, ws
, wt
);
11227 t1
= newTemp(Ity_V128
);
11228 t2
= newTemp(Ity_V128
);
11229 t3
= newTemp(Ity_V128
);
11230 t4
= newTemp(Ity_V128
);
11237 binop(Iop_64HLtoV128
,
11238 mkU64(0x10001000100010ul
),
11239 mkU64(0x10001000100010ul
)),
11243 binop(Iop_CmpEQ16x8
,
11244 binop(Iop_ShlN16x8
,
11247 binop(Iop_64HLtoV128
,
11248 mkU64(0), mkU64(0)))));
11250 binop(Iop_ShrN16x8
,
11259 mkexpr(t1
), mkexpr(t3
)));
11263 case 0x02: { /* SRLR.W */
11264 DIP("SRLR.W w%d, w%d, w%d", wd
, ws
, wt
);
11265 t1
= newTemp(Ity_V128
);
11266 t2
= newTemp(Ity_V128
);
11267 t3
= newTemp(Ity_V128
);
11268 t4
= newTemp(Ity_V128
);
11275 binop(Iop_64HLtoV128
,
11276 mkU64(0x2000000020ul
),
11277 mkU64(0x2000000020ul
)),
11281 binop(Iop_CmpEQ32x4
,
11282 binop(Iop_ShlN32x4
,
11285 binop(Iop_64HLtoV128
,
11286 mkU64(0), mkU64(0)))));
11288 binop(Iop_ShrN32x4
,
11297 mkexpr(t1
), mkexpr(t3
)));
11301 case 0x03: { /* SRLR.D */
11302 DIP("SRLR.D w%d, w%d, w%d", wd
, ws
, wt
);
11303 t1
= newTemp(Ity_V128
);
11304 t2
= newTemp(Ity_V128
);
11305 t3
= newTemp(Ity_V128
);
11306 t4
= newTemp(Ity_V128
);
11313 binop(Iop_64HLtoV128
,
11314 mkU64(64ul), mkU64(64ul)),
11318 binop(Iop_CmpEQ64x2
,
11319 binop(Iop_ShlN64x2
,
11322 binop(Iop_64HLtoV128
,
11323 mkU64(0), mkU64(0)))));
11325 binop(Iop_ShrN64x2
,
11334 mkexpr(t1
), mkexpr(t3
)));
11345 case 0x04: { /* HADD_S.df */
11347 case 0x01: { /* HADD_S.H */
11348 DIP("HADD_S.H w%d, w%d, w%d", wd
, ws
, wt
);
11349 t1
= newTemp(Ity_V128
);
11350 t2
= newTemp(Ity_V128
);
11351 t3
= newTemp(Ity_V128
);
11352 assign(t1
, getWReg(ws
));
11353 assign(t2
, getWReg(wt
));
11356 binop(Iop_SarN16x8
,
11357 mkexpr(t1
), mkU8(8)),
11358 binop(Iop_SarN16x8
,
11359 binop(Iop_ShlN16x8
,
11360 mkexpr(t2
), mkU8(8)),
11362 putWReg(wd
, mkexpr(t3
));
11366 case 0x02: { /* HADD_S.W */
11367 DIP("HADD_S.W w%d, w%d, w%d", wd
, ws
, wt
);
11368 t1
= newTemp(Ity_V128
);
11369 t2
= newTemp(Ity_V128
);
11370 t3
= newTemp(Ity_V128
);
11371 assign(t1
, getWReg(ws
));
11372 assign(t2
, getWReg(wt
));
11375 binop(Iop_SarN32x4
,
11376 mkexpr(t1
), mkU8(16)),
11377 binop(Iop_SarN32x4
,
11378 binop(Iop_ShlN32x4
,
11379 mkexpr(t2
), mkU8(16)),
11381 putWReg(wd
, mkexpr(t3
));
11385 case 0x03: { /* HADD_S.D */
11386 DIP("HADD_S.D w%d, w%d, w%d", wd
, ws
, wt
);
11387 t1
= newTemp(Ity_V128
);
11388 t2
= newTemp(Ity_V128
);
11389 t3
= newTemp(Ity_V128
);
11390 assign(t1
, getWReg(ws
));
11391 assign(t2
, getWReg(wt
));
11394 binop(Iop_SarN64x2
,
11395 mkexpr(t1
), mkU8(32)),
11396 binop(Iop_SarN64x2
,
11397 binop(Iop_ShlN64x2
,
11398 mkexpr(t2
), mkU8(32)),
11400 putWReg(wd
, mkexpr(t3
));
11411 case 0x05: { /* HADD_U.df */
11413 case 0x01: { /* HADD_U.H */
11414 DIP("HADD_U.H w%d, w%d, w%d", wd
, ws
, wt
);
11415 t1
= newTemp(Ity_V128
);
11416 t2
= newTemp(Ity_V128
);
11417 t3
= newTemp(Ity_V128
);
11418 assign(t1
, getWReg(ws
));
11419 assign(t2
, getWReg(wt
));
11422 binop(Iop_ShrN16x8
,
11423 mkexpr(t1
), mkU8(8)),
11424 binop(Iop_ShrN16x8
,
11425 binop(Iop_ShlN16x8
,
11426 mkexpr(t2
), mkU8(8)),
11428 putWReg(wd
, mkexpr(t3
));
11432 case 0x02: { /* HADD_U.W */
11433 DIP("HADD_U.W w%d, w%d, w%d", wd
, ws
, wt
);
11434 t1
= newTemp(Ity_V128
);
11435 t2
= newTemp(Ity_V128
);
11436 t3
= newTemp(Ity_V128
);
11437 assign(t1
, getWReg(ws
));
11438 assign(t2
, getWReg(wt
));
11441 binop(Iop_ShrN32x4
,
11442 mkexpr(t1
), mkU8(16)),
11443 binop(Iop_ShrN32x4
,
11444 binop(Iop_ShlN32x4
,
11445 mkexpr(t2
), mkU8(16)),
11447 putWReg(wd
, mkexpr(t3
));
11451 case 0x03: { /* HADD_U.D */
11452 DIP("HADD_U.D w%d, w%d, w%d", wd
, ws
, wt
);
11453 t1
= newTemp(Ity_V128
);
11454 t2
= newTemp(Ity_V128
);
11455 t3
= newTemp(Ity_V128
);
11456 assign(t1
, getWReg(ws
));
11457 assign(t2
, getWReg(wt
));
11460 binop(Iop_ShrN64x2
,
11461 mkexpr(t1
), mkU8(32)),
11462 binop(Iop_ShrN64x2
,
11463 binop(Iop_ShlN64x2
,
11464 mkexpr(t2
), mkU8(32)),
11466 putWReg(wd
, mkexpr(t3
));
11477 case 0x06: { /* HSUB_S.df */
11479 case 0x01: { /* HSUB_S.H */
11480 DIP("HSUB_S.H w%d, w%d, w%d", wd
, ws
, wt
);
11481 t1
= newTemp(Ity_V128
);
11482 t2
= newTemp(Ity_V128
);
11483 t3
= newTemp(Ity_V128
);
11484 assign(t1
, getWReg(ws
));
11485 assign(t2
, getWReg(wt
));
11488 binop(Iop_SarN16x8
,
11489 mkexpr(t1
), mkU8(8)),
11490 binop(Iop_SarN16x8
,
11491 binop(Iop_ShlN16x8
,
11492 mkexpr(t2
), mkU8(8)),
11494 putWReg(wd
, mkexpr(t3
));
11498 case 0x02: { /* HSUB_S.W */
11499 DIP("HSUB_S.W w%d, w%d, w%d", wd
, ws
, wt
);
11500 t1
= newTemp(Ity_V128
);
11501 t2
= newTemp(Ity_V128
);
11502 t3
= newTemp(Ity_V128
);
11503 assign(t1
, getWReg(ws
));
11504 assign(t2
, getWReg(wt
));
11507 binop(Iop_SarN32x4
,
11508 mkexpr(t1
), mkU8(16)),
11509 binop(Iop_SarN32x4
,
11510 binop(Iop_ShlN32x4
,
11511 mkexpr(t2
), mkU8(16)),
11513 putWReg(wd
, mkexpr(t3
));
11517 case 0x03: { /* HSUB_S.D */
11518 DIP("HSUB_S.D w%d, w%d, w%d", wd
, ws
, wt
);
11519 t1
= newTemp(Ity_V128
);
11520 t2
= newTemp(Ity_V128
);
11521 t3
= newTemp(Ity_V128
);
11522 assign(t1
, getWReg(ws
));
11523 assign(t2
, getWReg(wt
));
11526 binop(Iop_SarN64x2
,
11527 mkexpr(t1
), mkU8(32)),
11528 binop(Iop_SarN64x2
,
11529 binop(Iop_ShlN64x2
,
11530 mkexpr(t2
), mkU8(32)),
11532 putWReg(wd
, mkexpr(t3
));
11543 case 0x07: { /* HSUB_U.df */
11545 case 0x01: { /* HSUB_U.H */
11546 DIP("HSUB_U.H w%d, w%d, w%d", wd
, ws
, wt
);
11547 t1
= newTemp(Ity_V128
);
11548 t2
= newTemp(Ity_V128
);
11549 t3
= newTemp(Ity_V128
);
11550 assign(t1
, getWReg(ws
));
11551 assign(t2
, getWReg(wt
));
11554 binop(Iop_ShrN16x8
,
11555 mkexpr(t1
), mkU8(8)),
11556 binop(Iop_ShrN16x8
,
11557 binop(Iop_ShlN16x8
,
11558 mkexpr(t2
), mkU8(8)),
11560 putWReg(wd
, mkexpr(t3
));
11564 case 0x02: { /* HSUB_U.W */
11565 DIP("HSUB_U.W w%d, w%d, w%d", wd
, ws
, wt
);
11566 t1
= newTemp(Ity_V128
);
11567 t2
= newTemp(Ity_V128
);
11568 t3
= newTemp(Ity_V128
);
11569 assign(t1
, getWReg(ws
));
11570 assign(t2
, getWReg(wt
));
11573 binop(Iop_ShrN32x4
,
11574 mkexpr(t1
), mkU8(16)),
11575 binop(Iop_ShrN32x4
,
11576 binop(Iop_ShlN32x4
,
11577 mkexpr(t2
), mkU8(16)),
11579 putWReg(wd
, mkexpr(t3
));
11583 case 0x03: { /* HSUB_U.D */
11584 DIP("HSUB_U.D w%d, w%d, w%d", wd
, ws
, wt
);
11585 t1
= newTemp(Ity_V128
);
11586 t2
= newTemp(Ity_V128
);
11587 t3
= newTemp(Ity_V128
);
11588 assign(t1
, getWReg(ws
));
11589 assign(t2
, getWReg(wt
));
11592 binop(Iop_ShrN64x2
,
11593 mkexpr(t1
), mkU8(32)),
11594 binop(Iop_ShrN64x2
,
11595 binop(Iop_ShlN64x2
,
11596 mkexpr(t2
), mkU8(32)),
11598 putWReg(wd
, mkexpr(t3
));
11616 static Int
msa_3R_1A(UInt cins
, UChar wd
, UChar ws
) /* 3R (0x1A) */
11621 operation
= (cins
& 0x03C00000) >> 22;
11622 df
= (cins
& 0x00200000) >> 21;
11623 wt
= (cins
& 0x001F0000) >> 16;
11625 switch (operation
) {
11626 case 0x00: { /* FCAF.df */
11628 case 0x00: { /* FCAF.W */
11629 DIP("FCAF.W w%d, w%d, w%d", wd
, ws
, wt
);
11630 calculateMSACSR(ws
, wt
, FCAFW
, 2);
11631 putWReg(wd
, binop(Iop_64HLtoV128
, mkU64(0ul), mkU64(0ul)));
11635 case 0x01: { /* FCAF.D */
11636 DIP("FCAF.D w%d, w%d, w%d", wd
, ws
, wt
);
11637 calculateMSACSR(ws
, wt
, FCAFD
, 2);
11638 putWReg(wd
, binop(Iop_64HLtoV128
, mkU64(0ul), mkU64(0ul)));
11649 case 0x01: { /* FCUN.df */
11651 case 0x00: { /* FCUN.W */
11652 DIP("FCUN.W w%d, w%d, w%d", wd
, ws
, wt
);
11653 calculateMSACSR(ws
, wt
, FCUNW
, 2);
11654 putWReg(wd
, binop(Iop_CmpUN32Fx4
,
11660 case 0x01: { /* FCUN.D */
11661 DIP("FCUN.D w%d, w%d, w%d", wd
, ws
, wt
);
11662 calculateMSACSR(ws
, wt
, FCUND
, 2);
11663 putWReg(wd
, binop(Iop_CmpUN64Fx2
,
11676 case 0x02: { /* FCEQ.df */
11678 case 0x00: { /* FCEQ.W */
11679 DIP("FCEQ.W w%d, w%d, w%d", wd
, ws
, wt
);
11680 calculateMSACSR(ws
, wt
, FCEQW
, 2);
11681 putWReg(wd
, binop(Iop_CmpEQ32Fx4
,
11687 case 0x01: { /* FCEQ.D */
11688 DIP("FCEQ.D w%d, w%d, w%d", wd
, ws
, wt
);
11689 calculateMSACSR(ws
, wt
, FCEQD
, 2);
11690 putWReg(wd
, binop(Iop_CmpEQ64Fx2
,
11703 case 0x03: { /* FCUEQ.df */
11705 case 0x00: { /* FCUEQ.W */
11706 DIP("FCUEQ.W w%d, w%d, w%d", wd
, ws
, wt
);
11707 calculateMSACSR(ws
, wt
, FCUEQW
, 2);
11710 binop(Iop_CmpEQ32Fx4
,
11713 binop(Iop_CmpUN32Fx4
,
11719 case 0x01: { /* FCUEQ.D */
11720 DIP("FCUEQ.D w%d, w%d, w%d", wd
, ws
, wt
);
11721 calculateMSACSR(ws
, wt
, FCUEQD
, 2);
11724 binop(Iop_CmpEQ64Fx2
,
11727 binop(Iop_CmpUN64Fx2
,
11740 case 0x04: { /* FCLT.df */
11742 case 0x00: { /* FCLT.W */
11743 DIP("FCLT.W w%d, w%d, w%d", wd
, ws
, wt
);
11744 calculateMSACSR(ws
, wt
, FCLTW
, 2);
11746 binop(Iop_CmpLT32Fx4
,
11752 case 0x01: { /* FCLT.D */
11753 DIP("FCLT.D w%d, w%d, w%d", wd
, ws
, wt
);
11754 calculateMSACSR(ws
, wt
, FCLTD
, 2);
11756 binop(Iop_CmpLT64Fx2
,
11769 case 0x05: { /* FCULT.df */
11771 case 0x00: { /* FCULT.W */
11772 DIP("FCULT.W w%d, w%d, w%d", wd
, ws
, wt
);
11773 calculateMSACSR(ws
, wt
, FCULTW
, 2);
11776 binop(Iop_CmpLT32Fx4
,
11779 binop(Iop_CmpUN32Fx4
,
11785 case 0x01: { /* FCULT.D */
11786 DIP("FCULT.D w%d, w%d, w%d", wd
, ws
, wt
);
11787 calculateMSACSR(ws
, wt
, FCULTD
, 2);
11790 binop(Iop_CmpLT64Fx2
,
11793 binop(Iop_CmpUN64Fx2
,
11806 case 0x06: { /* FCLE.df */
11808 case 0x00: { /* FCLE.W */
11809 DIP("FCLE.W w%d, w%d, w%d", wd
, ws
, wt
);
11810 calculateMSACSR(ws
, wt
, FCLEW
, 2);
11812 binop(Iop_CmpLE32Fx4
,
11818 case 0x01: { /* FCLE.D */
11819 DIP("FCLE.D w%d, w%d, w%d", wd
, ws
, wt
);
11820 calculateMSACSR(ws
, wt
, FCLED
, 2);
11822 binop(Iop_CmpLE64Fx2
,
11835 case 0x07: { /* FCULE.df */
11837 case 0x00: { /* FCULE.W */
11838 DIP("FCULE.W w%d, w%d, w%d", wd
, ws
, wt
);
11839 calculateMSACSR(ws
, wt
, FCULEW
, 2);
11842 binop(Iop_CmpLE32Fx4
,
11845 binop(Iop_CmpUN32Fx4
,
11851 case 0x01: { /* FCULE.D */
11852 DIP("FCULE.D w%d, w%d, w%d", wd
, ws
, wt
);
11853 calculateMSACSR(ws
, wt
, FCULED
, 2);
11856 binop(Iop_CmpLE64Fx2
,
11859 binop(Iop_CmpUN64Fx2
,
11872 case 0x08: { /* FSAF.df */
11874 case 0x00: { /* FSAF.W */
11875 DIP("FSAF.W w%d, w%d, w%d", wd
, ws
, wt
);
11876 calculateMSACSR(ws
, wt
, FSAFW
, 2);
11878 binop(Iop_64HLtoV128
,
11879 mkU64(0ul), mkU64(0ul)));
11883 case 0x01: { /* FSAF.D */
11884 DIP("FSAF.D w%d, w%d, w%d", wd
, ws
, wt
);
11885 calculateMSACSR(ws
, wt
, FSAFD
, 2);
11887 binop(Iop_64HLtoV128
,
11888 mkU64(0ul), mkU64(0ul)));
11899 case 0x09: { /* FSUN.df */
11901 case 0x00: { /* FSUN.W */
11902 DIP("FSUN.W w%d, w%d, w%d", wd
, ws
, wt
);
11903 calculateMSACSR(ws
, wt
, FSUNW
, 2);
11905 binop(Iop_CmpUN32Fx4
,
11911 case 0x01: { /* FSUN.D */
11912 DIP("FSUN.D w%d, w%d, w%d", wd
, ws
, wt
);
11913 calculateMSACSR(ws
, wt
, FSUND
, 2);
11915 binop(Iop_CmpUN64Fx2
,
11928 case 0x0A: { /* FSEQ.df */
11930 case 0x00: { /* FSEQ.W */
11931 DIP("FSEQ.W w%d, w%d, w%d", wd
, ws
, wt
);
11932 calculateMSACSR(ws
, wt
, FSEQW
, 2);
11934 binop(Iop_CmpEQ32Fx4
,
11940 case 0x01: { /* FSEQ.D */
11941 DIP("FSEQ.D w%d, w%d, w%d", wd
, ws
, wt
);
11942 calculateMSACSR(ws
, wt
, FSEQD
, 2);
11944 binop(Iop_CmpEQ64Fx2
,
11957 case 0x0B: { /* FSUEQ.df */
11959 case 0x00: { /* FSUEQ.W */
11960 DIP("FSUEQ.W w%d, w%d, w%d", wd
, ws
, wt
);
11961 calculateMSACSR(ws
, wt
, FSUEQW
, 2);
11964 binop(Iop_CmpEQ32Fx4
,
11967 binop(Iop_CmpUN32Fx4
,
11973 case 0x01: { /* FSUEQ.D */
11974 DIP("FSUEQ.D w%d, w%d, w%d", wd
, ws
, wt
);
11975 calculateMSACSR(ws
, wt
, FSUEQD
, 2);
11978 binop(Iop_CmpEQ64Fx2
,
11981 binop(Iop_CmpUN64Fx2
,
11994 case 0x0C: { /* FSLT.df */
11996 case 0x00: { /* FSLT.W */
11997 DIP("FSLT.W w%d, w%d, w%d", wd
, ws
, wt
);
11998 calculateMSACSR(ws
, wt
, FSLTW
, 2);
12000 binop(Iop_CmpLT32Fx4
,
12006 case 0x01: { /* FSLT.D */
12007 DIP("FSLT.D w%d, w%d, w%d", wd
, ws
, wt
);
12008 calculateMSACSR(ws
, wt
, FSLTD
, 2);
12010 binop(Iop_CmpLT64Fx2
,
12023 case 0x0D: { /* FSULT.df */
12025 case 0x00: { /* FSULT.W */
12026 DIP("FSULT.W w%d, w%d, w%d", wd
, ws
, wt
);
12027 calculateMSACSR(ws
, wt
, FSULTW
, 2);
12030 binop(Iop_CmpLT32Fx4
,
12033 binop(Iop_CmpUN32Fx4
,
12039 case 0x01: { /* FSULT.D */
12040 DIP("FSULT.D w%d, w%d, w%d", wd
, ws
, wt
);
12041 calculateMSACSR(ws
, wt
, FSULTD
, 2);
12044 binop(Iop_CmpLT64Fx2
,
12047 binop(Iop_CmpUN64Fx2
,
12060 case 0x0E: { /* FSLE.df */
12062 case 0x00: { /* FSLE.W */
12063 DIP("FSLE.W w%d, w%d, w%d", wd
, ws
, wt
);
12064 calculateMSACSR(ws
, wt
, FSLEW
, 2);
12066 binop(Iop_CmpLE32Fx4
,
12072 case 0x01: { /* FSLE.D */
12073 DIP("FSLE.D w%d, w%d, w%d", wd
, ws
, wt
);
12074 calculateMSACSR(ws
, wt
, FSLED
, 2);
12076 binop(Iop_CmpLE64Fx2
,
12089 case 0x0F: { /* FSULE.df */
12091 case 0x00: { /* FSULE.W */
12092 DIP("FSULE.W w%d, w%d, w%d", wd
, ws
, wt
);
12093 calculateMSACSR(ws
, wt
, FSULEW
, 2);
12096 binop(Iop_CmpLE32Fx4
,
12099 binop(Iop_CmpUN32Fx4
,
12105 case 0x01: { /* FSULE.D */
12106 DIP("FSULE.D w%d, w%d, w%d", wd
, ws
, wt
);
12107 calculateMSACSR(ws
, wt
, FSULED
, 2);
12110 binop(Iop_CmpLE64Fx2
,
12113 binop(Iop_CmpUN64Fx2
,
12133 static Int
msa_3R_1B(UInt cins
, UChar wd
, UChar ws
) /* 3R (0x1B) */
12135 IRTemp t1
, t2
, t3
, t4
;
12139 operation
= (cins
& 0x03C00000) >> 22;
12140 df
= (cins
& 0x00200000) >> 21;
12141 wt
= (cins
& 0x001F0000) >> 16;
12143 switch (operation
) {
12144 case 0x00: { /* FADD.df */
12146 case 0x00: { /* FADD.W */
12147 DIP("FADD.W w%d, w%d, w%d", wd
, ws
, wt
);
12148 calculateMSACSR(ws
, wt
, FADDW
, 2);
12149 IRExpr
*rm
= get_IR_roundingmode_MSA();
12151 triop(Iop_Add32Fx4
, rm
,
12157 case 0x01: { /* FADD.D */
12158 DIP("FADD.D w%d, w%d, w%d", wd
, ws
, wt
);
12159 calculateMSACSR(ws
, wt
, FADDD
, 2);
12160 IRExpr
*rm
= get_IR_roundingmode_MSA();
12162 triop(Iop_Add64Fx2
, rm
,
12175 case 0x01: { /* FSUB.df */
12177 case 0x00: { /* FSUB.W */
12178 DIP("FSUB.W w%d, w%d, w%d", wd
, ws
, wt
);
12179 calculateMSACSR(ws
, wt
, FSUBW
, 2);
12180 IRExpr
*rm
= get_IR_roundingmode_MSA();
12182 triop(Iop_Sub32Fx4
, rm
,
12188 case 0x01: { /* FSUB.D */
12189 DIP("FSUB.D w%d, w%d, w%d", wd
, ws
, wt
);
12190 calculateMSACSR(ws
, wt
, FSUBD
, 2);
12191 IRExpr
*rm
= get_IR_roundingmode_MSA();
12193 triop(Iop_Sub64Fx2
, rm
,
12206 case 0x02: { /* FMUL.df */
12208 case 0x00: { /* FMUL.W */
12209 DIP("FMUL.W w%d, w%d, w%d", wd
, ws
, wt
);
12210 calculateMSACSR(ws
, wt
, FMULW
, 2);
12211 IRExpr
*rm
= get_IR_roundingmode_MSA();
12213 triop(Iop_Mul32Fx4
, rm
,
12219 case 0x01: { /* FMUL.D */
12220 DIP("FMUL.D w%d, w%d, w%d", wd
, ws
, wt
);
12221 calculateMSACSR(ws
, wt
, FMULW
, 2);
12222 IRExpr
*rm
= get_IR_roundingmode_MSA();
12224 triop(Iop_Mul64Fx2
, rm
,
12237 case 0x03: { /* FDIV.df */
12239 case 0x00: { /* FDIV.W */
12240 DIP("FDIV.W w%d, w%d, w%d", wd
, ws
, wt
);
12241 calculateMSACSR(ws
, wt
, FDIVW
, 2);
12242 IRExpr
*rm
= get_IR_roundingmode_MSA();
12244 triop(Iop_Div32Fx4
, rm
,
12250 case 0x01: { /* FDIV.D */
12251 DIP("FDIV.D w%d, w%d, w%d", wd
, ws
, wt
);
12252 calculateMSACSR(ws
, wt
, FDIVD
, 2);
12253 IRExpr
*rm
= get_IR_roundingmode_MSA();
12255 triop(Iop_Div64Fx2
, rm
,
12268 case 0x04: { /* FMADD.df */
12270 case 0x00: { /* FMADD.W */
12271 DIP("FMADD.W w%d, w%d, w%d", wd
, ws
, wt
);
12272 calculateMSACSR(ws
, wt
, FMADDW
, 2);
12273 IRExpr
*rm
= get_IR_roundingmode_MSA();
12277 for (i
= 0; i
< 4; i
++) {
12278 tmp
[i
] = newTemp(Ity_F32
);
12280 qop(Iop_MAddF32
, rm
,
12281 unop(Iop_ReinterpI32asF32
,
12282 binop(Iop_GetElem32x4
,
12285 unop(Iop_ReinterpI32asF32
,
12286 binop(Iop_GetElem32x4
,
12289 unop(Iop_ReinterpI32asF32
,
12290 binop(Iop_GetElem32x4
,
12296 binop(Iop_64HLtoV128
,
12297 binop(Iop_32HLto64
,
12298 unop(Iop_ReinterpF32asI32
,
12300 unop(Iop_ReinterpF32asI32
,
12302 binop(Iop_32HLto64
,
12303 unop(Iop_ReinterpF32asI32
,
12305 unop(Iop_ReinterpF32asI32
,
12306 mkexpr(tmp
[0])))));
12310 case 0x01: { /* FMADD.D */
12311 DIP("FMADD.D w%d, w%d, w%d", wd
, ws
, wt
);
12312 calculateMSACSR(ws
, wt
, FMADDW
, 2);
12313 IRExpr
*rm
= get_IR_roundingmode_MSA();
12317 for (i
= 0; i
< 2; i
++) {
12318 tmp
[i
] = newTemp(Ity_F64
);
12320 qop(Iop_MAddF64
, rm
,
12321 unop(Iop_ReinterpI64asF64
,
12322 binop(Iop_GetElem64x2
,
12325 unop(Iop_ReinterpI64asF64
,
12326 binop(Iop_GetElem64x2
,
12329 unop(Iop_ReinterpI64asF64
,
12330 binop(Iop_GetElem64x2
,
12336 binop(Iop_64HLtoV128
,
12337 unop(Iop_ReinterpF64asI64
,
12339 unop(Iop_ReinterpF64asI64
,
12351 case 0x05: { /* FMSUB.df */
12353 case 0x00: { /* FMSUB.W */
12354 DIP("FMSUB.W w%d, w%d, w%d", wd
, ws
, wt
);
12355 calculateMSACSR(ws
, wt
, FMADDW
, 2);
12356 IRExpr
*rm
= get_IR_roundingmode_MSA();
12360 for (i
= 0; i
< 4; i
++) {
12361 tmp
[i
] = newTemp(Ity_F32
);
12363 qop(Iop_MSubF32
, rm
,
12364 unop(Iop_ReinterpI32asF32
,
12365 binop(Iop_GetElem32x4
,
12368 unop(Iop_ReinterpI32asF32
,
12369 binop(Iop_GetElem32x4
,
12372 unop(Iop_ReinterpI32asF32
,
12373 binop(Iop_GetElem32x4
,
12379 binop(Iop_64HLtoV128
,
12380 binop(Iop_32HLto64
,
12381 unop(Iop_ReinterpF32asI32
,
12383 unop(Iop_ReinterpF32asI32
,
12385 binop(Iop_32HLto64
,
12386 unop(Iop_ReinterpF32asI32
,
12388 unop(Iop_ReinterpF32asI32
,
12389 mkexpr(tmp
[0])))));
12393 case 0x01: { /* FMSUB.D */
12394 DIP("FMSUB.D w%d, w%d, w%d", wd
, ws
, wt
);
12395 calculateMSACSR(ws
, wt
, FMADDD
, 2);
12396 IRExpr
*rm
= get_IR_roundingmode_MSA();
12400 for (i
= 0; i
< 2; i
++) {
12401 tmp
[i
] = newTemp(Ity_F64
);
12403 qop(Iop_MSubF64
, rm
,
12404 unop(Iop_ReinterpI64asF64
,
12405 binop(Iop_GetElem64x2
,
12408 unop(Iop_ReinterpI64asF64
,
12409 binop(Iop_GetElem64x2
,
12412 unop(Iop_ReinterpI64asF64
,
12413 binop(Iop_GetElem64x2
,
12419 binop(Iop_64HLtoV128
,
12420 unop(Iop_ReinterpF64asI64
,
12422 unop(Iop_ReinterpF64asI64
,
12434 case 0x07: { /* FEXP2.df */
12436 case 0x00: { /* FEXP2.W */
12437 DIP("FEXP2.W w%d, w%d, w%d", wd
, ws
, wt
);
12438 calculateMSACSR(ws
, wt
, FEXP2W
, 2);
12439 IRExpr
*rm
= get_IR_roundingmode_MSA();
12441 triop(Iop_Scale2_32Fx4
, rm
,
12447 case 0x01: { /* FEXP2.D */
12448 DIP("FEXP2.D w%d, w%d, w%d", wd
, ws
, wt
);
12449 calculateMSACSR(ws
, wt
, FEXP2D
, 2);
12450 IRExpr
*rm
= get_IR_roundingmode_MSA();
12452 triop(Iop_Scale2_64Fx2
, rm
,
12465 case 0x08: { /* FEXDO.df */
12467 case 0x00: { /* FEXDO.H */
12468 DIP("FEXDO.H w%d, w%d, w%d", wd
, ws
, wt
);
12469 calculateMSACSR(ws
, wt
, FEXDOH
, 2);
12470 t1
= newTemp(Ity_I64
);
12471 t2
= newTemp(Ity_I64
);
12473 unop(Iop_F32toF16x4_DEP
,
12476 unop(Iop_F32toF16x4_DEP
,
12479 binop(Iop_64HLtoV128
,
12480 mkexpr(t1
), mkexpr(t2
)));
12484 case 0x01: { /* FEXDO.W */
12485 DIP("FEXDO.W w%d, w%d, w%d", wd
, ws
, wt
);
12486 calculateMSACSR(ws
, wt
, FEXDOW
, 2);
12487 t1
= newTemp(Ity_I32
);
12488 t2
= newTemp(Ity_I32
);
12489 t3
= newTemp(Ity_I32
);
12490 t4
= newTemp(Ity_I32
);
12491 IRExpr
*rm
= get_IR_roundingmode_MSA();
12493 unop(Iop_ReinterpF32asI32
,
12494 binop(Iop_F64toF32
, rm
,
12495 unop(Iop_ReinterpI64asF64
,
12499 unop(Iop_ReinterpF32asI32
,
12500 binop(Iop_F64toF32
, rm
,
12501 unop(Iop_ReinterpI64asF64
,
12502 unop(Iop_V128HIto64
,
12505 unop(Iop_ReinterpF32asI32
,
12506 binop(Iop_F64toF32
, rm
,
12507 unop(Iop_ReinterpI64asF64
,
12511 unop(Iop_ReinterpF32asI32
,
12512 binop(Iop_F64toF32
, rm
,
12513 unop(Iop_ReinterpI64asF64
,
12514 unop(Iop_V128HIto64
,
12517 binop(Iop_64HLtoV128
,
12518 binop(Iop_32HLto64
,
12519 mkexpr(t2
), mkexpr(t1
)),
12520 binop(Iop_32HLto64
,
12521 mkexpr(t4
), mkexpr(t3
))));
12532 case 0x0A: { /* FTQ.df */
12534 case 0x00: { /* FTQ.H */
12535 DIP("FTQ.H w%d, w%d, w%d", wd
, ws
, wt
);
12536 calculateMSACSR(ws
, wt
, FTQH
, 2);
12537 IRExpr
*rm
= get_IR_roundingmode_MSA();
12539 triop(Iop_F32x4_2toQ16x8
, rm
,
12545 case 0x01: { /* FTQ.W */
12546 DIP("FTQ.W w%d, w%d, w%d", wd
, ws
, wt
);
12547 calculateMSACSR(ws
, wt
, FTQW
, 2);
12548 IRExpr
*rm
= get_IR_roundingmode_MSA();
12550 triop(Iop_F64x2_2toQ32x4
, rm
,
12563 case 0x0C: { /* FMIN.df */
12565 case 0x00: { /* FMIN.W */
12566 DIP("FMIN.W w%d, w%d, w%d", wd
, ws
, wt
);
12567 calculateMSACSR(ws
, wt
, FMINW
, 2);
12569 binop(Iop_Min32Fx4
,
12575 case 0x01: { /* FMIN.D */
12576 DIP("FMIN.D w%d, w%d, w%d", wd
, ws
, wt
);
12577 calculateMSACSR(ws
, wt
, FMINW
, 2);
12579 binop(Iop_Min64Fx2
,
12592 case 0x0D: { /* FMIN_A.df */
12594 case 0x00: { /* FMIN_A.W */
12595 DIP("FMIN_A.W w%d, w%d, w%d", wd
, ws
, wt
);
12596 calculateMSACSR(ws
, wt
, FMINAW
, 2);
12597 t1
= newTemp(Ity_V128
);
12598 t2
= newTemp(Ity_V128
);
12599 t3
= newTemp(Ity_V128
);
12600 t4
= newTemp(Ity_V128
);
12604 binop(Iop_64HLtoV128
,
12605 mkU64(0x7FFFFFFF7FFFFFFF),
12606 mkU64(0x7FFFFFFF7FFFFFFF))));
12610 binop(Iop_64HLtoV128
,
12611 mkU64(0x7FFFFFFF7FFFFFFF),
12612 mkU64(0x7FFFFFFF7FFFFFFF))));
12614 binop(Iop_Min32Fx4
,
12615 mkexpr(t2
), mkexpr(t1
)));
12620 binop(Iop_CmpUN32Fx4
,
12625 binop(Iop_CmpEQ32Fx4
,
12634 binop(Iop_CmpUN32Fx4
,
12637 binop(Iop_CmpLT32Fx4
,
12643 binop(Iop_CmpUN32Fx4
,
12646 binop(Iop_CmpLT32Fx4
,
12650 binop(Iop_64HLtoV128
,
12651 mkU64(0x8000000080000000),
12652 mkU64(0x8000000080000000))));
12655 mkexpr(t3
), mkexpr(t4
)));
12659 case 0x01: { /* FMIN_A.D */
12660 DIP("FMIN_A.D w%d, w%d, w%d", wd
, ws
, wt
);
12661 calculateMSACSR(ws
, wt
, FMINAD
, 2);
12662 t1
= newTemp(Ity_V128
);
12663 t2
= newTemp(Ity_V128
);
12664 t3
= newTemp(Ity_V128
);
12665 t4
= newTemp(Ity_V128
);
12669 binop(Iop_64HLtoV128
,
12670 mkU64(0x7FFFFFFFFFFFFFFF),
12671 mkU64(0x7FFFFFFFFFFFFFFF))));
12675 binop(Iop_64HLtoV128
,
12676 mkU64(0x7FFFFFFFFFFFFFFF),
12677 mkU64(0x7FFFFFFFFFFFFFFF))));
12679 binop(Iop_Min64Fx2
,
12680 mkexpr(t2
), mkexpr(t1
)));
12685 binop(Iop_CmpUN64Fx2
,
12690 binop(Iop_CmpEQ64Fx2
,
12699 binop(Iop_CmpUN64Fx2
,
12702 binop(Iop_CmpLT64Fx2
,
12708 binop(Iop_CmpUN64Fx2
,
12711 binop(Iop_CmpLT64Fx2
,
12715 binop(Iop_64HLtoV128
,
12716 mkU64(0x8000000000000000),
12717 mkU64(0x8000000000000000))));
12720 mkexpr(t3
), mkexpr(t4
)));
12731 case 0x0E: { /* FMAX.df */
12733 case 0x00: { /* FMAX.W */
12734 DIP("FMAX.W w%d, w%d, w%d", wd
, ws
, wt
);
12735 calculateMSACSR(ws
, wt
, FMAXW
, 2);
12737 binop(Iop_Max32Fx4
,
12743 case 0x01: { /* FMAX.D */
12744 DIP("FMAX.D w%d, w%d, w%d", wd
, ws
, wt
);
12745 calculateMSACSR(ws
, wt
, FMAXW
, 2);
12747 binop(Iop_Max64Fx2
,
12760 case 0x0F: { /* FMAX_A.df */
12762 case 0x00: { /* FMAX_A.W */
12763 DIP("FMAX_A.W w%d, w%d, w%d", wd
, ws
, wt
);
12764 calculateMSACSR(ws
, wt
, FMAXAW
, 2);
12765 t1
= newTemp(Ity_V128
);
12766 t2
= newTemp(Ity_V128
);
12767 t3
= newTemp(Ity_V128
);
12768 t4
= newTemp(Ity_V128
);
12772 binop(Iop_64HLtoV128
,
12773 mkU64(0x7FFFFFFF7FFFFFFF),
12774 mkU64(0x7FFFFFFF7FFFFFFF))));
12778 binop(Iop_64HLtoV128
,
12779 mkU64(0x7FFFFFFF7FFFFFFF),
12780 mkU64(0x7FFFFFFF7FFFFFFF))));
12782 binop(Iop_Max32Fx4
,
12783 mkexpr(t2
), mkexpr(t1
)));
12788 binop(Iop_CmpUN32Fx4
,
12793 binop(Iop_CmpEQ32Fx4
,
12802 binop(Iop_CmpUN32Fx4
,
12805 binop(Iop_CmpLT32Fx4
,
12811 binop(Iop_CmpUN32Fx4
,
12814 binop(Iop_CmpLT32Fx4
,
12818 binop(Iop_64HLtoV128
,
12819 mkU64(0x8000000080000000),
12820 mkU64(0x8000000080000000))));
12823 mkexpr(t3
), mkexpr(t4
)));
12827 case 0x01: { /* FMAX_A.D */
12828 DIP("FMAX_A.D w%d, w%d, w%d", wd
, ws
, wt
);
12829 calculateMSACSR(ws
, wt
, FMAXAD
, 2);
12830 t1
= newTemp(Ity_V128
);
12831 t2
= newTemp(Ity_V128
);
12832 t3
= newTemp(Ity_V128
);
12833 t4
= newTemp(Ity_V128
);
12837 binop(Iop_64HLtoV128
,
12838 mkU64(0x7FFFFFFFFFFFFFFF),
12839 mkU64(0x7FFFFFFFFFFFFFFF))));
12843 binop(Iop_64HLtoV128
,
12844 mkU64(0x7FFFFFFFFFFFFFFF),
12845 mkU64(0x7FFFFFFFFFFFFFFF))));
12847 binop(Iop_Max64Fx2
,
12848 mkexpr(t2
), mkexpr(t1
)));
12853 binop(Iop_CmpUN64Fx2
,
12858 binop(Iop_CmpEQ64Fx2
,
12867 binop(Iop_CmpUN64Fx2
,
12870 binop(Iop_CmpLT64Fx2
,
12876 binop(Iop_CmpUN64Fx2
,
12879 binop(Iop_CmpLT64Fx2
,
12883 binop(Iop_64HLtoV128
,
12884 mkU64(0x8000000000000000),
12885 mkU64(0x8000000000000000))));
12888 mkexpr(t3
), mkexpr(t4
)));
12906 static Int
msa_3R_1C(UInt cins
, UChar wd
, UChar ws
) /* 3R (0x1C) */
12908 IRTemp t1
, t2
, t3
, t4
, t5
, t6
;
12912 operation
= (cins
& 0x03C00000) >> 22;
12913 df
= (cins
& 0x00200000) >> 21;
12914 wt
= (cins
& 0x001F0000) >> 16;
12916 switch (operation
) {
12917 case 0x01: { /* FCOR.df */
12919 case 0x00: { /* FCOR.W */
12920 DIP("FCOR.W w%d, w%d, w%d", wd
, ws
, wt
);
12921 calculateMSACSR(ws
, wt
, FCORW
, 2);
12924 binop(Iop_CmpUN32Fx4
,
12930 case 0x01: { /* FCOR.D */
12931 DIP("FCOR.D w%d, w%d, w%d", wd
, ws
, wt
);
12932 calculateMSACSR(ws
, wt
, FCORD
, 2);
12935 binop(Iop_CmpUN64Fx2
,
12948 case 0x02: { /* FCUNE.df */
12950 case 0x00: { /* FCUNE.W */
12951 DIP("FCUNE.W w%d, w%d, w%d", wd
, ws
, wt
);
12952 calculateMSACSR(ws
, wt
, FCUNEW
, 2);
12955 binop(Iop_CmpEQ32Fx4
,
12961 case 0x01: { /* FCUNE.D */
12962 DIP("FCUNE.D w%d, w%d, w%d", wd
, ws
, wt
);
12963 calculateMSACSR(ws
, wt
, FCUNED
, 2);
12966 binop(Iop_CmpEQ64Fx2
,
12979 case 0x03: { /* FCNE.df */
12981 case 0x00: { /* FCNE.W */
12982 DIP("FCNE.W w%d, w%d, w%d", wd
, ws
, wt
);
12983 calculateMSACSR(ws
, wt
, FCNEW
, 2);
12987 binop(Iop_CmpEQ32Fx4
,
12990 binop(Iop_CmpUN32Fx4
,
12996 case 0x01: { /* FCNE.D */
12997 DIP("FCNE.D w%d, w%d, w%d", wd
, ws
, wt
);
12998 calculateMSACSR(ws
, wt
, FCNED
, 2);
13002 binop(Iop_CmpEQ64Fx2
,
13005 binop(Iop_CmpUN64Fx2
,
13018 case 0x04: { /* MUL_Q.df */
13020 case 0x00: { /* MUL_Q.H */
13021 DIP("MUL_Q.H w%d, w%d, w%d", wd
, ws
, wt
);
13022 t1
= newTemp(Ity_V128
);
13023 t2
= newTemp(Ity_V128
);
13024 t3
= newTemp(Ity_V128
);
13025 assign(t1
, getWReg(ws
));
13026 assign(t2
, getWReg(wt
));
13028 binop(Iop_QDMulHi16Sx8
,
13029 mkexpr(t1
), mkexpr(t2
)));
13030 putWReg(wd
, mkexpr(t3
));
13034 case 0x01: { /* MUL_Q.W */
13035 DIP("MUL_Q.W w%d, w%d, w%d", wd
, ws
, wt
);
13036 t1
= newTemp(Ity_V128
);
13037 t2
= newTemp(Ity_V128
);
13038 t3
= newTemp(Ity_V128
);
13039 assign(t1
, getWReg(ws
));
13040 assign(t2
, getWReg(wt
));
13042 binop(Iop_QDMulHi32Sx4
,
13043 mkexpr(t1
), mkexpr(t2
)));
13044 putWReg(wd
, mkexpr(t3
));
13055 case 0x05: { /* MADD_Q.df */
13057 case 0x00: { /* MADD_Q.W */
13058 DIP("MADD_Q.W w%d, w%d, w%d", wd
, ws
, wt
);
13059 t1
= newTemp(Ity_V128
);
13060 t2
= newTemp(Ity_V128
);
13061 t3
= newTemp(Ity_V128
);
13062 t4
= newTemp(Ity_V128
);
13063 t5
= newTemp(Ity_V128
);
13064 t6
= newTemp(Ity_V128
);
13066 binop(Iop_SarN32x4
,
13067 binop(Iop_InterleaveEvenLanes16x8
,
13072 binop(Iop_SarN32x4
,
13073 getWReg(ws
), mkU8(16)));
13075 binop(Iop_SarN32x4
,
13076 binop(Iop_InterleaveEvenLanes16x8
,
13081 binop(Iop_SarN32x4
,
13082 getWReg(wt
), mkU8(16)));
13085 binop(Iop_ShlN32x4
,
13086 binop(Iop_SarN32x4
,
13087 binop(Iop_InterleaveEvenLanes16x8
,
13093 mkexpr(t1
), mkexpr(t3
))));
13096 binop(Iop_ShlN32x4
,
13097 binop(Iop_SarN32x4
,
13102 mkexpr(t2
), mkexpr(t4
))));
13104 binop(Iop_InterleaveEvenLanes16x8
,
13105 binop(Iop_QandQSarNnarrow32Sto16Sx4
,
13106 mkexpr(t6
), mkU8(15)),
13107 binop(Iop_QandQSarNnarrow32Sto16Sx4
,
13108 mkexpr(t5
), mkU8(15))));
13112 case 0x01: { /* MADD_Q.W */
13113 DIP("MADD_Q.W w%d, w%d, w%d", wd
, ws
, wt
);
13114 t1
= newTemp(Ity_V128
);
13115 t2
= newTemp(Ity_V128
);
13116 t3
= newTemp(Ity_V128
);
13117 t4
= newTemp(Ity_V128
);
13118 t5
= newTemp(Ity_V128
);
13119 t6
= newTemp(Ity_V128
);
13121 binop(Iop_SarN64x2
,
13122 binop(Iop_InterleaveEvenLanes32x4
,
13127 binop(Iop_SarN64x2
,
13128 getWReg(ws
), mkU8(32)));
13130 binop(Iop_SarN64x2
,
13131 binop(Iop_InterleaveEvenLanes32x4
,
13136 binop(Iop_SarN64x2
,
13137 getWReg(wt
), mkU8(32)));
13140 binop(Iop_ShlN64x2
,
13141 binop(Iop_SarN64x2
,
13142 binop(Iop_InterleaveEvenLanes32x4
,
13147 binop(Iop_64HLtoV128
,
13149 unop(Iop_V128HIto64
,
13151 unop(Iop_V128HIto64
,
13160 binop(Iop_ShlN64x2
,
13161 binop(Iop_SarN64x2
,
13165 binop(Iop_64HLtoV128
,
13167 unop(Iop_V128HIto64
,
13169 unop(Iop_V128HIto64
,
13177 binop(Iop_InterleaveEvenLanes32x4
,
13178 binop(Iop_QandQSarNnarrow64Sto32Sx2
,
13179 mkexpr(t6
), mkU8(31)),
13180 binop(Iop_QandQSarNnarrow64Sto32Sx2
,
13181 mkexpr(t5
), mkU8(31))));
13192 case 0x06: { /* MSUB_Q.df */
13194 case 0x00: { /* MSUB_Q.H */
13195 DIP("MSUB_Q.H w%d, w%d, w%d", wd
, ws
, wt
);
13196 t1
= newTemp(Ity_V128
);
13197 t2
= newTemp(Ity_V128
);
13198 t3
= newTemp(Ity_V128
);
13199 t4
= newTemp(Ity_V128
);
13200 t5
= newTemp(Ity_V128
);
13201 t6
= newTemp(Ity_V128
);
13203 binop(Iop_SarN32x4
,
13204 binop(Iop_InterleaveEvenLanes16x8
,
13209 binop(Iop_SarN32x4
,
13210 getWReg(ws
), mkU8(16)));
13212 binop(Iop_SarN32x4
,
13213 binop(Iop_InterleaveEvenLanes16x8
,
13218 binop(Iop_SarN32x4
,
13219 getWReg(wt
), mkU8(16)));
13222 binop(Iop_ShlN32x4
,
13223 binop(Iop_SarN32x4
,
13224 binop(Iop_InterleaveEvenLanes16x8
,
13230 mkexpr(t1
), mkexpr(t3
))));
13233 binop(Iop_ShlN32x4
,
13234 binop(Iop_SarN32x4
,
13239 mkexpr(t2
), mkexpr(t4
))));
13241 binop(Iop_InterleaveEvenLanes16x8
,
13242 binop(Iop_QandQSarNnarrow32Sto16Sx4
,
13243 mkexpr(t6
), mkU8(15)),
13244 binop(Iop_QandQSarNnarrow32Sto16Sx4
,
13245 mkexpr(t5
), mkU8(15))));
13249 case 0x01: { /* MSUB_Q.W */
13250 DIP("MSUB_Q.W w%d, w%d, w%d", wd
, ws
, wt
);
13251 t1
= newTemp(Ity_V128
);
13252 t2
= newTemp(Ity_V128
);
13253 t3
= newTemp(Ity_V128
);
13254 t4
= newTemp(Ity_V128
);
13255 t5
= newTemp(Ity_V128
);
13256 t6
= newTemp(Ity_V128
);
13258 binop(Iop_SarN64x2
,
13259 binop(Iop_InterleaveEvenLanes32x4
,
13264 binop(Iop_SarN64x2
,
13265 getWReg(ws
), mkU8(32)));
13267 binop(Iop_SarN64x2
,
13268 binop(Iop_InterleaveEvenLanes32x4
,
13273 binop(Iop_SarN64x2
,
13274 getWReg(wt
), mkU8(32)));
13277 binop(Iop_ShlN64x2
,
13278 binop(Iop_SarN64x2
,
13279 binop(Iop_InterleaveEvenLanes32x4
,
13284 binop(Iop_64HLtoV128
,
13286 unop(Iop_V128HIto64
,
13288 unop(Iop_V128HIto64
,
13297 binop(Iop_ShlN64x2
,
13298 binop(Iop_SarN64x2
,
13302 binop(Iop_64HLtoV128
,
13304 unop(Iop_V128HIto64
,
13306 unop(Iop_V128HIto64
,
13314 binop(Iop_InterleaveEvenLanes32x4
,
13315 binop(Iop_QandQSarNnarrow64Sto32Sx2
,
13316 mkexpr(t6
), mkU8(31)),
13317 binop(Iop_QandQSarNnarrow64Sto32Sx2
,
13318 mkexpr(t5
), mkU8(31))));
13329 case 0x09: { /* FSOR.df */
13331 case 0x00: { /* FSOR.W */
13332 DIP("FSOR.W w%d, w%d, w%d", wd
, ws
, wt
);
13333 calculateMSACSR(ws
, wt
, FSORW
, 2);
13336 binop(Iop_CmpUN32Fx4
,
13342 case 0x01: { /* FSOR.D */
13343 DIP("FSOR.D w%d, w%d, w%d", wd
, ws
, wt
);
13344 calculateMSACSR(ws
, wt
, FSORD
, 2);
13347 binop(Iop_CmpUN64Fx2
,
13360 case 0x0A: { /* FSUNE.df */
13362 case 0x00: { /* FSUNE.W */
13363 DIP("FSUNE.W w%d, w%d, w%d", wd
, ws
, wt
);
13364 calculateMSACSR(ws
, wt
, FSUNEW
, 2);
13367 binop(Iop_CmpEQ32Fx4
,
13373 case 0x01: { /* FSUNE.D */
13374 DIP("FSUNE.D w%d, w%d, w%d", wd
, ws
, wt
);
13375 calculateMSACSR(ws
, wt
, FSUNED
, 2);
13378 binop(Iop_CmpEQ64Fx2
,
13391 case 0x0B: { /* FSNE.df */
13393 case 0x00: { /* FSNE.W */
13394 DIP("FSNE.W w%d, w%d, w%d", wd
, ws
, wt
);
13395 calculateMSACSR(ws
, wt
, FSNEW
, 2);
13399 binop(Iop_CmpEQ32Fx4
,
13402 binop(Iop_CmpUN32Fx4
,
13408 case 0x01: { /* FSNE.D */
13409 DIP("FSNE.D w%d, w%d, w%d", wd
, ws
, wt
);
13410 calculateMSACSR(ws
, wt
, FSNED
, 2);
13414 binop(Iop_CmpEQ64Fx2
,
13417 binop(Iop_CmpUN64Fx2
,
13430 case 0x0C: { /* MULR_Q.df */
13432 case 0x00: { /* MULR_Q.H */
13433 DIP("MULR_Q.H w%d, w%d, w%d", wd
, ws
, wt
);
13434 t1
= newTemp(Ity_V128
);
13435 t2
= newTemp(Ity_V128
);
13436 t3
= newTemp(Ity_V128
);
13437 assign(t1
, getWReg(ws
));
13438 assign(t2
, getWReg(wt
));
13439 assign(t3
, binop(Iop_QRDMulHi16Sx8
,
13440 mkexpr(t1
), mkexpr(t2
)));
13441 putWReg(wd
, mkexpr(t3
));
13445 case 0x01: { /* MULR_Q.W */
13446 DIP("MULR_Q.W w%d, w%d, w%d", wd
, ws
, wt
);
13447 t1
= newTemp(Ity_V128
);
13448 t2
= newTemp(Ity_V128
);
13449 t3
= newTemp(Ity_V128
);
13450 assign(t1
, getWReg(ws
));
13451 assign(t2
, getWReg(wt
));
13452 assign(t3
, binop(Iop_QRDMulHi32Sx4
,
13453 mkexpr(t1
), mkexpr(t2
)));
13454 putWReg(wd
, mkexpr(t3
));
13465 case 0x0D: { /* MADDR_Q.df */
13467 case 0x00: { /* MADDR_Q.W */
13468 DIP("MADDR_Q.W w%d, w%d, w%d", wd
, ws
, wt
);
13469 t1
= newTemp(Ity_V128
);
13470 t2
= newTemp(Ity_V128
);
13471 t3
= newTemp(Ity_V128
);
13472 t4
= newTemp(Ity_V128
);
13473 t5
= newTemp(Ity_V128
);
13474 t6
= newTemp(Ity_V128
);
13476 binop(Iop_SarN32x4
,
13477 binop(Iop_InterleaveEvenLanes16x8
,
13482 binop(Iop_SarN32x4
,
13483 getWReg(ws
), mkU8(16)));
13485 binop(Iop_SarN32x4
,
13486 binop(Iop_InterleaveEvenLanes16x8
,
13491 binop(Iop_SarN32x4
,
13492 getWReg(wt
), mkU8(16)));
13495 binop(Iop_ShlN32x4
,
13496 binop(Iop_SarN32x4
,
13497 binop(Iop_InterleaveEvenLanes16x8
,
13503 mkexpr(t1
), mkexpr(t3
))));
13506 binop(Iop_ShlN32x4
,
13507 binop(Iop_SarN32x4
,
13512 mkexpr(t2
), mkexpr(t4
))));
13514 binop(Iop_InterleaveEvenLanes16x8
,
13515 binop(Iop_QandQRSarNnarrow32Sto16Sx4
,
13516 mkexpr(t6
), mkU8(15)),
13517 binop(Iop_QandQRSarNnarrow32Sto16Sx4
,
13518 mkexpr(t5
), mkU8(15))));
13522 case 0x01: { /* MADDR_Q.D */
13523 DIP("MADDR_Q.D w%d, w%d, w%d", wd
, ws
, wt
);
13524 t1
= newTemp(Ity_V128
);
13525 t2
= newTemp(Ity_V128
);
13526 t3
= newTemp(Ity_V128
);
13527 t4
= newTemp(Ity_V128
);
13528 t5
= newTemp(Ity_V128
);
13529 t6
= newTemp(Ity_V128
);
13531 binop(Iop_SarN64x2
,
13532 binop(Iop_InterleaveEvenLanes32x4
,
13537 binop(Iop_SarN64x2
,
13538 getWReg(ws
), mkU8(32)));
13540 binop(Iop_SarN64x2
,
13541 binop(Iop_InterleaveEvenLanes32x4
,
13546 binop(Iop_SarN64x2
,
13547 getWReg(wt
), mkU8(32)));
13550 binop(Iop_ShlN64x2
,
13551 binop(Iop_SarN64x2
,
13552 binop(Iop_InterleaveEvenLanes32x4
,
13557 binop(Iop_64HLtoV128
,
13559 unop(Iop_V128HIto64
,
13561 unop(Iop_V128HIto64
,
13570 binop(Iop_ShlN64x2
,
13571 binop(Iop_SarN64x2
,
13575 binop(Iop_64HLtoV128
,
13577 unop(Iop_V128HIto64
,
13579 unop(Iop_V128HIto64
,
13587 binop(Iop_InterleaveEvenLanes32x4
,
13588 binop(Iop_QandQRSarNnarrow64Sto32Sx2
,
13589 mkexpr(t6
), mkU8(31)),
13590 binop(Iop_QandQRSarNnarrow64Sto32Sx2
,
13591 mkexpr(t5
), mkU8(31))));
13602 case 0x0E: { /* MSUBR_Q.df */
13604 case 0x00: { /* MSUBR_Q.W */
13605 DIP("MSUBR_Q.W w%d, w%d, w%d", wd
, ws
, wt
);
13606 t1
= newTemp(Ity_V128
);
13607 t2
= newTemp(Ity_V128
);
13608 t3
= newTemp(Ity_V128
);
13609 t4
= newTemp(Ity_V128
);
13610 t5
= newTemp(Ity_V128
);
13611 t6
= newTemp(Ity_V128
);
13613 binop(Iop_SarN32x4
,
13614 binop(Iop_InterleaveEvenLanes16x8
,
13619 binop(Iop_SarN32x4
,
13620 getWReg(ws
), mkU8(16)));
13622 binop(Iop_SarN32x4
,
13623 binop(Iop_InterleaveEvenLanes16x8
,
13628 binop(Iop_SarN32x4
,
13629 getWReg(wt
), mkU8(16)));
13632 binop(Iop_ShlN32x4
,
13633 binop(Iop_SarN32x4
,
13634 binop(Iop_InterleaveEvenLanes16x8
,
13640 mkexpr(t1
), mkexpr(t3
))));
13643 binop(Iop_ShlN32x4
,
13644 binop(Iop_SarN32x4
,
13649 mkexpr(t2
), mkexpr(t4
))));
13651 binop(Iop_InterleaveEvenLanes16x8
,
13652 binop(Iop_QandQRSarNnarrow32Sto16Sx4
,
13653 mkexpr(t6
), mkU8(15)),
13654 binop(Iop_QandQRSarNnarrow32Sto16Sx4
,
13655 mkexpr(t5
), mkU8(15))));
13659 case 0x01: { /* MSUBR_Q.D */
13660 DIP("MSUBR_Q.D w%d, w%d, w%d", wd
, ws
, wt
);
13661 t1
= newTemp(Ity_V128
);
13662 t2
= newTemp(Ity_V128
);
13663 t3
= newTemp(Ity_V128
);
13664 t4
= newTemp(Ity_V128
);
13665 t5
= newTemp(Ity_V128
);
13666 t6
= newTemp(Ity_V128
);
13668 binop(Iop_SarN64x2
,
13669 binop(Iop_InterleaveEvenLanes32x4
,
13674 binop(Iop_SarN64x2
,
13675 getWReg(ws
), mkU8(32)));
13677 binop(Iop_SarN64x2
,
13678 binop(Iop_InterleaveEvenLanes32x4
,
13683 binop(Iop_SarN64x2
,
13684 getWReg(wt
), mkU8(32)));
13687 binop(Iop_ShlN64x2
,
13688 binop(Iop_SarN64x2
,
13689 binop(Iop_InterleaveEvenLanes32x4
,
13694 binop(Iop_64HLtoV128
,
13696 unop(Iop_V128HIto64
,
13698 unop(Iop_V128HIto64
,
13707 binop(Iop_ShlN64x2
,
13708 binop(Iop_SarN64x2
,
13712 binop(Iop_64HLtoV128
,
13714 unop(Iop_V128HIto64
,
13716 unop(Iop_V128HIto64
,
13724 binop(Iop_InterleaveEvenLanes32x4
,
13725 binop(Iop_QandQRSarNnarrow64Sto32Sx2
,
13726 mkexpr(t6
), mkU8(31)),
13727 binop(Iop_QandQRSarNnarrow64Sto32Sx2
,
13728 mkexpr(t5
), mkU8(31))));
13746 static Int
msa_ELM(UInt cins
, UChar wd
, UChar ws
) /* ELM (0x19) */
13748 IRTemp t1
, t2
, t3
, t4
, t5
;
13753 operation
= (cins
& 0x03C00000) >> 22;
13754 ty
= mode64
? Ity_I64
: Ity_I32
;
13756 switch ((cins
& 0x03FF0000) >> 16) {
13757 case 0x07E: /* CFCMSA */
13758 DIP("CFCMSA r%d, c%d", wd
, ws
);
13761 case 0: { /* MSAIR */
13763 t1
= newTemp(Ity_I32
);
13764 /* IRExpr_BBPTR() =>
13765 Need to pass pointer to
13766 guest state to helper. */
13767 d
= unsafeIRDirty_1_N(t1
, 0,
13768 "mips_dirtyhelper_get_MSAIR",
13769 &mips_dirtyhelper_get_MSAIR
,
13771 /* d->nFxState = 0; */
13772 stmt(IRStmt_Dirty(d
));
13774 mkWidenFrom32(ty
, mkexpr(t1
), True
));
13778 case 1: /* MSACSR */
13780 mkWidenFrom32(ty
, getMSACSR(), True
));
13785 mkWidenFrom32(ty
, mkU32(0), False
));
13791 case 0x03E: /* CTCMSA */
13792 DIP("CTCMSA r%d, c%d", ws
, wd
);
13794 if (wd
== 1) { /* MSACSR */
13796 binop(Iop_And32
, mkNarrowTo32(ty
, getIReg(ws
)),
13797 mkU32(0x1FFFFFF)));
13802 case 0x0BE: /* MOVE.V */
13803 DIP("MOVE.V w%d, w%d", ws
, wd
);
13804 putWReg(wd
, getWReg(ws
));
13808 df
= (cins
& 0x003F0000) >> 16;
13810 if ((df
& 0x38) == 0x38) { // 11100n; dw
13813 } else if ((df
& 0x30) == 0x30) { // 1100nn; w
13816 } else if ((df
& 0x20) == 0x20) { // 100nnn; hw
13819 } else if ((df
& 0x00) == 0x00) { // 00nnnn; b
13824 switch (operation
) {
13825 case 0x00: /* SLDI.df */
13827 case 0x00: /* SLDI.B */
13828 DIP("SLDI.B w%d, w%d[%d]", wd
, ws
, n
);
13829 t1
= newTemp(Ity_V128
);
13830 t2
= newTemp(Ity_V128
);
13839 (16 - n
) << 3 : 0)));
13841 binop(Iop_OrV128
, mkexpr(t1
), mkexpr(t2
)));
13844 case 0x20: /* SLDI.H */
13845 DIP("SLDI.H w%d, w%d[%d]", wd
, ws
, n
);
13848 putWReg(wd
, getWReg(ws
));
13850 t1
= newTemp(Ity_V128
);
13851 t2
= newTemp(Ity_V128
);
13853 binop(Iop_ShrN64x2
,
13857 binop(Iop_ShlN64x2
,
13859 mkU8((8 - n
) << 3)));
13868 case 0x30: /* SLDI.W */
13869 DIP("SLDI.W w%d, w%d[%d]", wd
, ws
, n
);
13872 putWReg(wd
, getWReg(ws
));
13874 t1
= newTemp(Ity_V128
);
13875 t2
= newTemp(Ity_V128
);
13877 binop(Iop_ShrN32x4
,
13881 binop(Iop_ShlN32x4
,
13883 mkU8((4 - n
) << 3)));
13892 case 0x38: /* SLDI.D */
13893 DIP("SLDI.D w%d, w%d[%d]", wd
, ws
, n
);
13896 putWReg(wd
, getWReg(ws
));
13898 t1
= newTemp(Ity_V128
);
13899 t2
= newTemp(Ity_V128
);
13901 binop(Iop_ShrN16x8
,
13905 binop(Iop_ShlN16x8
,
13907 mkU8((2 - n
) << 3)));
13922 case 0x01: /* SPLATI.df */
13924 case 0x00: { /* SPLATI.B */
13925 DIP("SPLATI.B w%d, w%d[%d]", wd
, ws
, n
);
13926 t1
= newTemp(Ity_V128
);
13927 t2
= newTemp(Ity_V128
);
13928 t3
= newTemp(Ity_V128
);
13929 t4
= newTemp(Ity_V128
);
13933 binop(Iop_InterleaveOddLanes8x16
,
13938 binop(Iop_InterleaveEvenLanes8x16
,
13946 binop(Iop_InterleaveOddLanes16x8
,
13947 mkexpr(t1
), mkexpr(t1
)));
13950 binop(Iop_InterleaveEvenLanes16x8
,
13951 mkexpr(t1
), mkexpr(t1
)));
13957 binop(Iop_InterleaveOddLanes32x4
,
13958 mkexpr(t2
), mkexpr(t2
)));
13961 binop(Iop_InterleaveEvenLanes32x4
,
13962 mkexpr(t2
), mkexpr(t2
)));
13968 binop(Iop_InterleaveHI64x2
,
13969 mkexpr(t3
), mkexpr(t3
)));
13972 binop(Iop_InterleaveLO64x2
,
13973 mkexpr(t3
), mkexpr(t3
)));
13975 putWReg(wd
, mkexpr(t4
));
13979 case 0x20: { /* SPLATI.H */
13980 DIP("SPLATI.H w%d, w%d[%d]", wd
, ws
, n
);
13981 t1
= newTemp(Ity_V128
);
13982 t2
= newTemp(Ity_V128
);
13983 t3
= newTemp(Ity_V128
);
13987 binop(Iop_InterleaveOddLanes16x8
,
13992 binop(Iop_InterleaveEvenLanes16x8
,
14000 binop(Iop_InterleaveOddLanes32x4
,
14001 mkexpr(t1
), mkexpr(t1
)));
14004 binop(Iop_InterleaveEvenLanes32x4
,
14005 mkexpr(t1
), mkexpr(t1
)));
14011 binop(Iop_InterleaveHI64x2
,
14012 mkexpr(t2
), mkexpr(t2
)));
14015 binop(Iop_InterleaveLO64x2
,
14016 mkexpr(t2
), mkexpr(t2
)));
14018 putWReg(wd
, mkexpr(t3
));
14022 case 0x30: { /* SPLATI.W */
14023 DIP("SPLATI.W w%d, w%d[%d]", wd
, ws
, n
);
14024 t1
= newTemp(Ity_V128
);
14025 t2
= newTemp(Ity_V128
);
14026 t3
= newTemp(Ity_V128
);
14027 assign(t1
, getWReg(ws
));
14031 binop(Iop_InterleaveOddLanes32x4
,
14032 mkexpr(t1
), mkexpr(t1
)));
14035 binop(Iop_InterleaveEvenLanes32x4
,
14036 mkexpr(t1
), mkexpr(t1
)));
14042 binop(Iop_InterleaveHI64x2
,
14043 mkexpr(t2
), mkexpr(t2
)));
14046 binop(Iop_InterleaveLO64x2
,
14047 mkexpr(t2
), mkexpr(t2
)));
14049 putWReg(wd
, mkexpr(t3
));
14053 case 0x38: /* SPLATI.D */
14054 DIP("SPLATI.D w%d, w%d[%d]", wd
, ws
, n
);
14055 t1
= newTemp(Ity_V128
);
14056 t3
= newTemp(Ity_V128
);
14057 assign(t1
, getWReg(ws
));
14061 binop(Iop_InterleaveHI64x2
,
14062 mkexpr(t1
), mkexpr(t1
)));
14065 binop(Iop_InterleaveLO64x2
,
14066 mkexpr(t1
), mkexpr(t1
)));
14068 putWReg(wd
, mkexpr(t3
));
14077 case 0x02: /* COPY_S.df */
14079 case 0x00: /* COPY_S.B */
14080 DIP("COPY_S.B r%d, w%d[%d]", wd
, ws
, n
);
14081 t1
= newTemp(Ity_I8
);
14158 unop(Iop_V128HIto64
,
14167 unop(Iop_V128HIto64
,
14176 unop(Iop_V128HIto64
,
14185 unop(Iop_V128HIto64
,
14194 unop(Iop_V128HIto64
,
14203 unop(Iop_V128HIto64
,
14212 unop(Iop_V128HIto64
,
14221 unop(Iop_V128HIto64
,
14227 unop(mode64
? Iop_8Sto64
: Iop_8Sto32
,
14231 case 0x20: /* COPY_S.H */
14232 DIP("COPY_S.H r%d, w%d[%d]", wd
, ws
, n
);
14233 t1
= newTemp(Ity_I16
);
14272 unop(Iop_V128HIto64
,
14280 unop(Iop_V128HIto64
,
14288 unop(Iop_V128HIto64
,
14296 unop(Iop_V128HIto64
,
14302 unop(mode64
? Iop_16Sto64
: Iop_16Sto32
,
14306 case 0x30: /* COPY_S.W */
14307 DIP("COPY_S.W r%d, w%d[%d]", wd
, ws
, n
);
14319 t2
= newTemp(Ity_I64
);
14321 unop(Iop_V128to64
, getWReg(ws
)));
14330 t2
= newTemp(Ity_I64
);
14332 unop(Iop_V128HIto64
,
14342 t2
= newTemp(Ity_I64
);
14344 unop(Iop_V128HIto64
,
14359 case 0x38: /* COPY_S.D */
14361 DIP("COPY_S.D r%d, w%d[%d]", wd
, ws
, n
);
14372 unop(Iop_V128HIto64
,
14388 case 0x03: { /* COPY_U.df */
14390 case 0x00: /* COPY_U.B */
14391 DIP("COPY_U.B r%d, w%d[%d]", wd
, ws
, n
);
14392 t1
= newTemp(Ity_I8
);
14472 unop(Iop_V128HIto64
,
14481 unop(Iop_V128HIto64
,
14490 unop(Iop_V128HIto64
,
14499 unop(Iop_V128HIto64
,
14508 unop(Iop_V128HIto64
,
14517 unop(Iop_V128HIto64
,
14526 unop(Iop_V128HIto64
,
14535 unop(Iop_V128HIto64
,
14541 unop(mode64
? Iop_8Uto64
: Iop_8Uto32
,
14545 case 0x20: /* COPY_U.H */
14546 DIP("COPY_U.H r%d, w%d[%d]", wd
, ws
, n
);
14547 t1
= newTemp(Ity_I16
);
14586 unop(Iop_V128HIto64
,
14594 unop(Iop_V128HIto64
,
14602 unop(Iop_V128HIto64
,
14610 unop(Iop_V128HIto64
,
14616 unop(mode64
? Iop_16Uto64
: Iop_16Uto32
,
14620 case 0x30: /* COPY_U.W */
14621 DIP("COPY_U.W r%d, w%d[%d]", wd
, ws
, n
);
14633 t2
= newTemp(Ity_I64
);
14645 t2
= newTemp(Ity_I64
);
14647 unop(Iop_V128HIto64
,
14657 t2
= newTemp(Ity_I64
);
14659 unop(Iop_V128HIto64
,
14681 case 0x04: { /* INSERT.df */
14682 t5
= newTemp(Ity_I64
);
14686 assign(t5
, mode64
? getIReg(ws
) :
14687 unop(Iop_32Uto64
, getIReg(ws
)));
14689 if (df
== 0x38) { /* INSERT.D */
14691 DIP("INSERT.D w%d[%d], r%d", wd
, n
, ws
);
14695 binop(Iop_64HLtoV128
,
14696 unop(Iop_V128HIto64
,
14701 binop(Iop_64HLtoV128
,
14712 t1
= newTemp(Ity_I64
);
14713 t2
= newTemp(Ity_I64
);
14714 assign(t1
, unop(Iop_V128to64
, getWReg(wd
)));
14715 assign(t2
, unop(Iop_V128HIto64
, getWReg(wd
)));
14719 case 0x00: /* INSERT.B */
14720 DIP("INSERT.B w%d[%d], r%d", wd
, n
, ws
);
14732 case 0x20: /* INSERT.H */
14733 DIP("INSERT.H w%d[%d], r%d", wd
, n
, ws
);
14745 case 0x30: /* INSERT.W */
14746 DIP("INSERT.W w%d[%d], r%d", wd
, n
, ws
);
14755 mask
= 0xFFFFFFFFull
;
14763 t4
= newTemp(Ity_I64
);
14768 t3
= newTemp(Ity_I64
);
14777 binop(Iop_And64
, mkexpr(*src
), mkU64(~mask
)),
14779 binop(Iop_Shl64
, mkexpr(t5
), mkU8(n
)),
14782 binop(Iop_64HLtoV128
, mkexpr(t4
), mkexpr(t3
)));
14786 case 0x05: { /* INSVE.df */
14788 case 0x00: { /* INSVE.B */
14789 DIP("INSVE.B w%d[%d], w%d[0]", wd
, n
, ws
);
14790 t1
= newTemp(Ity_V128
);
14791 t2
= newTemp(Ity_V128
);
14792 assign(t1
, getWReg(wd
));
14793 assign(t2
, getWReg(ws
));
14797 for (i
= 0; i
< 16; i
++) {
14798 tmp
[i
] = newTemp(Ity_I8
);
14802 binop(Iop_GetElem8x16
,
14803 mkexpr(t2
), mkU8(0x0)));
14806 binop(Iop_GetElem8x16
,
14807 mkexpr(t1
), mkU8(i
)));
14811 binop(Iop_64HLtoV128
,
14812 binop(Iop_32HLto64
,
14813 binop(Iop_16HLto32
,
14820 binop(Iop_16HLto32
,
14827 binop(Iop_32HLto64
,
14828 binop(Iop_16HLto32
,
14835 binop(Iop_16HLto32
,
14841 mkexpr(tmp
[0]))))));
14845 case 0x20: { /* INSVE.H */
14846 DIP("INSVE.H w%d[%d], r%d[0]", wd
, n
, ws
);
14847 t1
= newTemp(Ity_V128
);
14848 t2
= newTemp(Ity_V128
);
14849 assign(t1
, getWReg(wd
));
14850 assign(t2
, getWReg(ws
));
14854 for (i
= 0; i
< 8; i
++) {
14855 tmp
[i
] = newTemp(Ity_I16
);
14859 binop(Iop_GetElem16x8
,
14860 mkexpr(t2
), mkU8(0x0)));
14863 binop(Iop_GetElem16x8
,
14864 mkexpr(t1
), mkU8(i
)));
14868 binop(Iop_64HLtoV128
,
14869 binop(Iop_32HLto64
,
14870 binop(Iop_16HLto32
,
14873 binop(Iop_16HLto32
,
14876 binop(Iop_32HLto64
,
14877 binop(Iop_16HLto32
,
14880 binop(Iop_16HLto32
,
14882 mkexpr(tmp
[0])))));
14886 case 0x30: { /* INSVE.W */
14887 DIP("INSVE.W w%d[%d], r%d[0]", wd
, n
, ws
);
14888 t1
= newTemp(Ity_V128
);
14889 t2
= newTemp(Ity_V128
);
14890 assign(t1
, getWReg(wd
));
14891 assign(t2
, getWReg(ws
));
14895 for (i
= 0; i
< 4; i
++) {
14896 tmp
[i
] = newTemp(Ity_I32
);
14900 binop(Iop_GetElem32x4
,
14901 mkexpr(t2
), mkU8(0x0)));
14904 binop(Iop_GetElem32x4
,
14905 mkexpr(t1
), mkU8(i
)));
14909 binop(Iop_64HLtoV128
,
14910 binop(Iop_32HLto64
,
14913 binop(Iop_32HLto64
,
14919 case 0x38: { /* INSVE.D */
14920 DIP("INSVE.D w%d[%d], r%d[0]", wd
, n
, ws
);
14921 t1
= newTemp(Ity_V128
);
14922 t2
= newTemp(Ity_V128
);
14923 assign(t1
, getWReg(wd
));
14924 assign(t2
, getWReg(ws
));
14928 for (i
= 0; i
< 2; i
++) {
14929 tmp
[i
] = newTemp(Ity_I64
);
14933 binop(Iop_GetElem64x2
,
14934 mkexpr(t2
), mkU8(0x0)));
14937 binop(Iop_GetElem64x2
,
14938 mkexpr(t1
), mkU8(i
)));
14942 binop(Iop_64HLtoV128
,
14943 mkexpr(tmp
[1]), mkexpr(tmp
[0])));
14959 static Int
msa_VEC(UInt cins
, UChar wd
, UChar ws
) /* VEC */
14965 vassert((cins
& 0x03000000) == 0);
14967 operation
= (cins
& 0x03E00000) >> 21;
14968 wt
= (cins
& 0x001F0000) >> 16;
14970 switch (operation
) {
14971 case 0x00: { /* AND.V */
14972 DIP("AND.V w%d, w%d, w%d", wd
, ws
, wt
);
14973 t1
= newTemp(Ity_V128
);
14974 t2
= newTemp(Ity_V128
);
14975 t3
= newTemp(Ity_V128
);
14976 assign(t1
, getWReg(ws
));
14977 assign(t2
, getWReg(wt
));
14978 assign(t3
, binop(Iop_AndV128
, mkexpr(t1
), mkexpr(t2
)));
14979 putWReg(wd
, mkexpr(t3
));
14983 case 0x01: { /* OR.V */
14984 DIP("OR.V w%d, w%d, w%d", wd
, ws
, wt
);
14985 t1
= newTemp(Ity_V128
);
14986 t2
= newTemp(Ity_V128
);
14987 t3
= newTemp(Ity_V128
);
14988 assign(t1
, getWReg(ws
));
14989 assign(t2
, getWReg(wt
));
14990 assign(t3
, binop(Iop_OrV128
, mkexpr(t1
), mkexpr(t2
)));
14991 putWReg(wd
, mkexpr(t3
));
14995 case 0x02: { /* NOR.V */
14996 DIP("NOR.V w%d, w%d, w%d", wd
, ws
, wt
);
14997 t1
= newTemp(Ity_V128
);
14998 t2
= newTemp(Ity_V128
);
14999 t3
= newTemp(Ity_V128
);
15000 assign(t1
, getWReg(ws
));
15001 assign(t2
, getWReg(wt
));
15004 binop(Iop_OrV128
, mkexpr(t1
), mkexpr(t2
))));
15005 putWReg(wd
, mkexpr(t3
));
15009 case 0x03: { /* XOR.V */
15010 DIP("XOR.V w%d, w%d, w%d", wd
, ws
, wt
);
15011 t1
= newTemp(Ity_V128
);
15012 t2
= newTemp(Ity_V128
);
15013 t3
= newTemp(Ity_V128
);
15014 assign(t1
, getWReg(ws
));
15015 assign(t2
, getWReg(wt
));
15016 assign(t3
, binop(Iop_XorV128
, mkexpr(t1
), mkexpr(t2
)));
15017 putWReg(wd
, mkexpr(t3
));
15021 case 0x04: { /* BMNZ (ws AND wt) OR (wd AND NOT wt) */
15022 DIP("BMNZ.V w%d, w%d, w%d", wd
, ws
, wt
);
15023 t1
= newTemp(Ity_V128
);
15024 t2
= newTemp(Ity_V128
);
15025 t3
= newTemp(Ity_V128
);
15028 getWReg(ws
), getWReg(wt
)));
15032 unop(Iop_NotV128
, getWReg(wt
))));
15033 assign(t3
, binop(Iop_OrV128
, mkexpr(t1
), mkexpr(t2
)));
15034 putWReg(wd
, mkexpr(t3
));
15038 case 0x05: { /* BMZ.V (ws AND NOT wt) OR (wd AND wt) */
15039 DIP("BMZ.V w%d, w%d, w%d", wd
, ws
, wt
);
15040 t1
= newTemp(Ity_V128
);
15041 t2
= newTemp(Ity_V128
);
15042 t3
= newTemp(Ity_V128
);
15045 getWReg(wd
), getWReg(wt
)));
15049 unop(Iop_NotV128
, getWReg(wt
))));
15050 assign(t3
, binop(Iop_OrV128
, mkexpr(t1
), mkexpr(t2
)));
15051 putWReg(wd
, mkexpr(t3
));
15055 case 0x06: { /* BSEL (ws AND NOT wd) OR (wt AND wd) */
15056 DIP("BSEL.V w%d, w%d, w%d", wd
, ws
, wt
);
15057 t1
= newTemp(Ity_V128
);
15058 t2
= newTemp(Ity_V128
);
15059 t3
= newTemp(Ity_V128
);
15062 getWReg(wd
), getWReg(wt
)));
15066 unop(Iop_NotV128
, getWReg(wd
))));
15067 assign(t3
, binop(Iop_OrV128
, mkexpr(t1
), mkexpr(t2
)));
15068 putWReg(wd
, mkexpr(t3
));
15079 static Int
msa_2R(UInt cins
, UChar wd
, UChar ws
) /* 2R */
15081 IRTemp t1
, t2
, t3
, t4
;
15086 vassert((cins
& 0x00200000) == 0);
15088 operation
= (cins
& 0x03FC0000) >> 18;
15089 df
= (cins
& 0x00030000) >> 16;
15090 ty
= mode64
? Ity_I64
: Ity_I32
;
15092 switch (operation
) {
15093 case 0xC0: { /* FILL.df */
15094 t1
= newTemp(Ity_I64
);
15097 case 0x00: /* FILL.B */
15098 DIP("FILL.B w%d, r%d", wd
, ws
);
15099 t2
= newTemp(Ity_I32
);
15100 t3
= newTemp(Ity_I16
);
15101 t4
= newTemp(Ity_I8
);
15102 assign(t4
, mkNarrowTo8(ty
, getIReg(ws
)));
15104 binop(Iop_8HLto16
, mkexpr(t4
), mkexpr(t4
)));
15106 binop(Iop_16HLto32
, mkexpr(t3
), mkexpr(t3
)));
15108 binop(Iop_32HLto64
, mkexpr(t2
), mkexpr(t2
)));
15111 case 0x01: /* FILL.H */
15112 DIP("FILL.H w%d, r%d", wd
, ws
);
15113 t2
= newTemp(Ity_I32
);
15114 t3
= newTemp(Ity_I16
);
15115 assign(t3
, mkNarrowTo16(ty
, getIReg(ws
)));
15117 binop(Iop_16HLto32
, mkexpr(t3
), mkexpr(t3
)));
15119 binop(Iop_32HLto64
, mkexpr(t2
), mkexpr(t2
)));
15122 case 0x02: /* FILL.W */
15123 DIP("FILL.W w%d, r%d", wd
, ws
);
15124 t2
= newTemp(Ity_I32
);
15125 assign(t2
, mkNarrowTo32(ty
, getIReg(ws
)));
15127 binop(Iop_32HLto64
, mkexpr(t2
), mkexpr(t2
)));
15130 case 0x03: /* FILL.D */
15132 DIP("FILL.W w%d, r%d", wd
, ws
);
15133 t2
= newTemp(Ity_I32
);
15134 assign(t1
, getIReg(ws
));
15146 binop(Iop_64HLtoV128
, mkexpr(t1
), mkexpr(t1
)));
15150 case 0xC1: { /* PCNT.df */
15152 case 0x00: /* PCNT.B */
15153 DIP("PCNT.B w%d, r%d", wd
, ws
);
15155 unop(Iop_Cnt8x16
, getWReg(ws
)));
15158 case 0x01: /* PCNT.H */
15159 DIP("PCNT.H w%d, r%d", wd
, ws
);
15160 t1
= newTemp(Ity_V128
);
15161 t2
= newTemp(Ity_V128
);
15162 assign(t1
, unop(Iop_Cnt8x16
, getWReg(ws
)));
15167 binop(Iop_64HLtoV128
,
15168 mkU64(0x00FF00FF00FF00FFULL
),
15169 mkU64(0x00FF00FF00FF00FFULL
))),
15171 binop(Iop_ShrN16x8
,
15172 mkexpr(t1
), mkU8(8)),
15173 binop(Iop_64HLtoV128
,
15174 mkU64(0x00FF00FF00FF00FFULL
),
15175 mkU64(0x00FF00FF00FF00FFULL
)))));
15176 putWReg(wd
, mkexpr(t2
));
15179 case 0x02: /* PCNT.W */
15180 DIP("PCNT.W w%d, r%d", wd
, ws
);
15181 t1
= newTemp(Ity_V128
);
15182 t2
= newTemp(Ity_V128
);
15183 t3
= newTemp(Ity_V128
);
15184 assign(t1
, unop(Iop_Cnt8x16
, getWReg(ws
)));
15189 binop(Iop_64HLtoV128
,
15190 mkU64(0x00FF00FF00FF00FFULL
),
15191 mkU64(0x00FF00FF00FF00FFULL
))),
15193 binop(Iop_ShrN32x4
,
15194 mkexpr(t1
), mkU8(8)),
15195 binop(Iop_64HLtoV128
,
15196 mkU64(0x00FF00FF00FF00FFULL
),
15197 mkU64(0x00FF00FF00FF00FFULL
)))));
15202 binop(Iop_64HLtoV128
,
15203 mkU64(0x0000FFFF0000FFFFULL
),
15204 mkU64(0x0000FFFF0000FFFFULL
))),
15206 binop(Iop_ShrN32x4
,
15207 mkexpr(t2
), mkU8(16)),
15208 binop(Iop_64HLtoV128
,
15209 mkU64(0x0000FFFF0000FFFFULL
),
15210 mkU64(0x0000FFFF0000FFFFULL
)))));
15211 putWReg(wd
, mkexpr(t3
));
15214 case 0x03: /* PCNT.D */
15215 DIP("PCNT.D w%d, r%d", wd
, ws
);
15216 t1
= newTemp(Ity_V128
);
15217 t2
= newTemp(Ity_V128
);
15218 t3
= newTemp(Ity_V128
);
15219 t4
= newTemp(Ity_V128
);;
15220 assign(t1
, unop(Iop_Cnt8x16
, getWReg(ws
)));
15225 binop(Iop_64HLtoV128
,
15226 mkU64(0x00FF00FF00FF00FFULL
),
15227 mkU64(0x00FF00FF00FF00FFULL
))),
15229 binop(Iop_ShrN64x2
,
15230 mkexpr(t1
), mkU8(8)),
15231 binop(Iop_64HLtoV128
,
15232 mkU64(0x00FF00FF00FF00FFULL
),
15233 mkU64(0x00FF00FF00FF00FFULL
)))));
15238 binop(Iop_64HLtoV128
,
15239 mkU64(0x0000FFFF0000FFFFULL
),
15240 mkU64(0x0000FFFF0000FFFFULL
))),
15242 binop(Iop_ShrN64x2
,
15243 mkexpr(t2
), mkU8(16)),
15244 binop(Iop_64HLtoV128
,
15245 mkU64(0x0000FFFF0000FFFFULL
),
15246 mkU64(0x0000FFFF0000FFFFULL
)))));
15251 binop(Iop_64HLtoV128
,
15252 mkU64(0x00000000FFFFFFFFULL
),
15253 mkU64(0x00000000FFFFFFFFULL
))),
15255 binop(Iop_ShrN64x2
,
15256 mkexpr(t3
), mkU8(32)),
15257 binop(Iop_64HLtoV128
,
15258 mkU64(0x00000000FFFFFFFFULL
),
15259 mkU64(0x00000000FFFFFFFFULL
)))));
15260 putWReg(wd
, mkexpr(t4
));
15270 case 0xC2: { /* NLOC.df */
15272 case 0x00: /* NLOC.B */
15273 DIP("NLOC.B w%d, w%d", wd
, ws
);
15275 unop(Iop_Cls8x16
, getWReg(ws
)));
15278 case 0x01: /* NLOC.H */
15279 DIP("NLOC.H w%d, w%d", wd
, ws
);
15281 unop(Iop_Cls16x8
, getWReg(ws
)));
15284 case 0x02: /* NLOC.W */
15285 DIP("NLOC.W w%d, w%d", wd
, ws
);
15287 unop(Iop_Cls32x4
, getWReg(ws
)));
15290 case 0x03: /* NLOC.D */
15291 DIP("NLOC.D w%d, w%d", wd
, ws
);
15292 t1
= newTemp(Ity_V128
);
15293 assign(t1
, unop(Iop_NotV128
, getWReg(ws
)));
15294 putWReg(wd
, unop(Iop_Clz64x2
, mkexpr(t1
)));
15304 case 0xC3: { /* NLZC.df */
15306 case 0x00: /* NLZC.B */
15307 DIP("NLZC.W w%d, w%d", wd
, ws
);
15309 unop(Iop_Clz8x16
, getWReg(ws
)));
15312 case 0x01: /* NLZC.H */
15313 DIP("NLZC.H w%d, w%d", wd
, ws
);
15315 unop(Iop_Clz16x8
, getWReg(ws
)));
15318 case 0x02: /* NLZC.W */
15319 DIP("NLZC.W w%d, w%d", wd
, ws
);
15321 unop(Iop_Clz32x4
, getWReg(ws
)));
15324 case 0x03: {/* NLZC.D */
15326 unop(Iop_Clz64x2
, getWReg(ws
)));
15344 static Int
msa_2RF(UInt cins
, UChar wd
, UChar ws
) /* 2RF */
15346 IRTemp t1
, t2
, t3
, t4
, t5
;
15350 operation
= (cins
& 0x03FE0000) >> 17;
15351 df
= (cins
& 0x00010000) >> 16;
15352 wt
= (cins
& 0x001F0000) >> 16;
15354 switch (operation
) {
15356 case 0x190: { /* FCLASS.df */
15357 IRTemp t0
= newTemp(Ity_V128
);
15358 t1
= newTemp(Ity_V128
);
15359 t2
= newTemp(Ity_V128
);
15360 t3
= newTemp(Ity_V128
);
15361 t4
= newTemp(Ity_V128
);
15362 t5
= newTemp(Ity_V128
);
15365 case 0x00: { /* FCLASS.W */
15366 DIP("FCLASS.W w%d, w%d", wd
, ws
);
15368 binop(Iop_CmpEQ32x4
,
15371 binop(Iop_64HLtoV128
,
15372 mkU64(0x7F8000007F800000ull
),
15373 mkU64(0x7F8000007F800000ull
))),
15374 binop(Iop_64HLtoV128
,
15375 mkU64(0ull), mkU64(0ull))));
15377 binop(Iop_CmpEQ32x4
,
15380 binop(Iop_64HLtoV128
,
15381 mkU64(0x7F8000007F800000ull
),
15382 mkU64(0x7F8000007F800000ull
))),
15383 binop(Iop_64HLtoV128
,
15384 mkU64(0x7F8000007F800000ull
),
15385 mkU64(0x7F8000007F800000ull
))));
15387 binop(Iop_SarN32x4
,
15388 getWReg(ws
), mkU8(31)));
15390 binop(Iop_CmpEQ32x4
,
15393 binop(Iop_64HLtoV128
,
15394 mkU64(0x0040000000400000ull
),
15395 mkU64(0x0040000000400000ull
))),
15396 binop(Iop_64HLtoV128
,
15397 mkU64(0x0040000000400000ull
),
15398 mkU64(0x0040000000400000ull
))));
15400 binop(Iop_CmpEQ32x4
,
15403 binop(Iop_64HLtoV128
,
15404 mkU64(0x007FFFFF007FFFFFULL
),
15405 mkU64(0x007FFFFF007FFFFFULL
))),
15406 binop(Iop_64HLtoV128
,
15407 mkU64(0ull), mkU64(0ull))));
15415 binop(Iop_64HLtoV128
,
15416 mkU64(0x100000001ull
),
15417 mkU64(0x100000001ull
)))),
15424 binop(Iop_64HLtoV128
,
15425 mkU64(0x800000008ull
),
15426 mkU64(0x800000008ull
))),
15430 binop(Iop_64HLtoV128
,
15431 mkU64(0x400000004ull
),
15432 mkU64(0x400000004ull
))))),
15439 binop(Iop_64HLtoV128
,
15440 mkU64(0x200000002ull
),
15441 mkU64(0x200000002ull
)))))),
15445 binop(Iop_64HLtoV128
,
15446 mkU64(0x200000002ull
),
15447 mkU64(0x200000002ull
))),
15451 binop(Iop_64HLtoV128
,
15452 mkU64(0x600000006ull
),
15453 mkU64(0x600000006ull
))))));
15458 binop(Iop_CmpEQ32x4
,
15460 binop(Iop_64HLtoV128
,
15466 binop(Iop_64HLtoV128
,
15467 mkU64(0x100000001ull
),
15468 mkU64(0x100000001ull
))),
15470 unop(Iop_NotV128
, mkexpr(t3
)),
15471 binop(Iop_64HLtoV128
,
15472 mkU64(0x200000002ull
),
15473 mkU64(0x200000002ull
)))))));
15477 case 0x01: { /* FCLASS.D */
15478 DIP("FCLASS.D w%d, w%d", wd
, ws
);
15480 binop(Iop_CmpEQ64x2
,
15483 binop(Iop_64HLtoV128
,
15484 mkU64(0x7FF0000000000000ull
),
15485 mkU64(0x7FF0000000000000ull
))),
15486 binop(Iop_64HLtoV128
,
15487 mkU64(0ull), mkU64(0ull))));
15489 binop(Iop_CmpEQ64x2
,
15492 binop(Iop_64HLtoV128
,
15493 mkU64(0x7FF0000000000000ull
),
15494 mkU64(0x7FF0000000000000ull
))),
15495 binop(Iop_64HLtoV128
,
15496 mkU64(0x7FF0000000000000ull
),
15497 mkU64(0x7FF0000000000000ull
))));
15499 binop(Iop_SarN64x2
,
15500 getWReg(ws
), mkU8(63)));
15502 binop(Iop_CmpEQ64x2
,
15505 binop(Iop_64HLtoV128
,
15506 mkU64(0x0008000000000000ull
),
15507 mkU64(0x0008000000000000ull
))),
15508 binop(Iop_64HLtoV128
,
15509 mkU64(0x0008000000000000ull
),
15510 mkU64(0x0008000000000000ull
))));
15512 binop(Iop_CmpEQ64x2
,
15515 binop(Iop_64HLtoV128
,
15516 mkU64(0x000FFFFFFFFFFFFFULL
),
15517 mkU64(0x000FFFFFFFFFFFFFULL
))),
15518 binop(Iop_64HLtoV128
,
15519 mkU64(0ull), mkU64(0ull))));
15527 binop(Iop_64HLtoV128
,
15536 binop(Iop_64HLtoV128
,
15542 binop(Iop_64HLtoV128
,
15551 binop(Iop_64HLtoV128
,
15557 binop(Iop_64HLtoV128
,
15563 binop(Iop_64HLtoV128
,
15570 binop(Iop_CmpEQ64x2
,
15572 binop(Iop_64HLtoV128
,
15578 binop(Iop_64HLtoV128
,
15584 binop(Iop_64HLtoV128
,
15597 case 0x191: { /* FTRUNC_S.df */
15599 case 0x00: { /* FTRUNC_S.W */
15600 DIP("FTRUNC_S.W w%d, w%d", wd
, ws
);
15601 calculateMSACSR(ws
, wd
, FTRUNCSW
, 1);
15602 putWReg(wd
, unop(Iop_F32toI32Sx4_RZ
, getWReg(ws
)));
15606 case 0x01: { /* FTRUNC_S.D */
15607 DIP("FTRUNC_S.D w%d, w%d", wd
, ws
);
15608 calculateMSACSR(ws
, wd
, FTRUNCSD
, 1);
15609 t1
= newTemp(Ity_I64
);
15610 t2
= newTemp(Ity_I64
);
15611 t3
= newTemp(Ity_V128
);
15615 binop(Iop_CmpUN64Fx2
,
15618 binop(Iop_Max64Fx2
,
15620 binop(Iop_64HLtoV128
,
15621 mkU64(0xC3E0000000000000),
15622 mkU64(0xC3E0000000000000)))));
15624 binop(Iop_F64toI64S
, mkU32(0x3),
15625 unop(Iop_ReinterpI64asF64
,
15626 unop(Iop_V128to64
, mkexpr(t3
)))));
15628 binop(Iop_F64toI64S
, mkU32(0x3),
15629 unop(Iop_ReinterpI64asF64
,
15630 unop(Iop_V128HIto64
, mkexpr(t3
)))));
15632 binop(Iop_64HLtoV128
,
15633 mkexpr(t2
), mkexpr(t1
)));
15644 case 0x192: { /* FTRUNC_U.df */
15646 case 0x00: { /* FTRUNC_U.W */
15647 DIP("FTRUNC_U.W w%d, w%d", wd
, ws
);
15648 calculateMSACSR(ws
, wd
, FTRUNCUW
, 1);
15649 putWReg(wd
, unop(Iop_F32toI32Ux4_RZ
, getWReg(ws
)));
15653 case 0x01: { /* FTRUNC_U.D */
15654 DIP("FTRUNC_U.D w%d, w%d", wd
, ws
);
15655 calculateMSACSR(ws
, wd
, FTRUNCUD
, 1);
15656 t1
= newTemp(Ity_I64
);
15657 t2
= newTemp(Ity_I64
);
15659 binop(Iop_F64toI64U
,
15661 unop(Iop_ReinterpI64asF64
,
15665 binop(Iop_F64toI64U
,
15667 unop(Iop_ReinterpI64asF64
,
15668 unop(Iop_V128HIto64
,
15671 binop(Iop_64HLtoV128
,
15672 mkexpr(t2
), mkexpr(t1
)));
15683 case 0x193: { /* FSQRT.df */
15685 case 0x00: { /* FSQRT.W */
15686 DIP("FSQRT.W w%d, w%d", wd
, ws
);
15687 IRExpr
*rm
= get_IR_roundingmode_MSA();
15688 calculateMSACSR(ws
, wd
, FSQRTW
, 1);
15689 putWReg(wd
, binop(Iop_Sqrt32Fx4
, rm
, getWReg(ws
)));
15693 case 0x01: { /* FSQRT.D */
15694 DIP("FSQRT.D w%d, w%d", wd
, ws
);
15695 IRExpr
*rm
= get_IR_roundingmode_MSA();
15696 calculateMSACSR(ws
, wd
, FSQRTD
, 1);
15697 putWReg(wd
, binop(Iop_Sqrt64Fx2
, rm
, getWReg(ws
)));
15708 case 0x194: { /* FRSQRT.df */
15710 case 0x00: { /* FRSQRT.W */
15711 DIP("FRSQRT.W w%d, w%d", wd
, ws
);
15712 calculateMSACSR(ws
, wd
, FRSQRTW
, 1);
15713 putWReg(wd
, unop(Iop_RSqrtEst32Fx4
, getWReg(ws
)));
15717 case 0x01: { /* FRSQRT.D */
15718 DIP("FRSQRT.D w%d, w%d", wd
, ws
);
15719 calculateMSACSR(ws
, wd
, FRSQRTD
, 1);
15720 putWReg(wd
, unop(Iop_RSqrtEst64Fx2
, getWReg(ws
)));
15731 case 0x195: { /* FRCP.df */
15732 switch (df
) { /* FRCP.W */
15734 DIP("FRCP.W w%d, w%d", wd
, ws
);
15735 calculateMSACSR(ws
, wd
, FRCPW
, 1);
15736 putWReg(wd
, unop(Iop_RecipEst32Fx4
, getWReg(ws
)));
15740 case 0x01: { /* FRCP.D */
15741 DIP("FRCP.D w%d, w%d", wd
, ws
);
15742 calculateMSACSR(ws
, wd
, FRCPD
, 1);
15743 putWReg(wd
, unop(Iop_RecipEst64Fx2
, getWReg(ws
)));
15754 case 0x196: { /* FRINT.df */
15755 t1
= newTemp(Ity_V128
);
15756 t2
= newTemp(Ity_V128
);
15757 t3
= newTemp(Ity_V128
);
15758 t4
= newTemp(Ity_V128
);
15759 IRExpr
*rm
= get_IR_roundingmode_MSA();
15760 assign(t1
, getWReg(ws
));
15763 case 0x00: { /* FRINT.W */
15764 DIP("FRINT.W w%d, w%d", wd
, ws
);
15765 calculateMSACSR(ws
, wt
, FRINTW
, 1);
15768 binop(Iop_CmpLT32Fx4
,
15770 binop(Iop_64HLtoV128
,
15771 mkU64(0xCF000000CF000000ull
),
15772 mkU64(0xCF000000CF000000ull
))),
15773 binop(Iop_CmpLT32Fx4
,
15774 binop(Iop_64HLtoV128
,
15775 mkU64(0x4F0000004F000000ull
),
15776 mkU64(0x4F0000004F000000ull
)),
15779 binop(Iop_CmpEQ32x4
,
15782 binop(Iop_64HLtoV128
,
15783 mkU64(0x0040000000400000ull
),
15784 mkU64(0x0040000000400000ull
))),
15785 binop(Iop_64HLtoV128
,
15786 mkU64(0x0040000000400000ull
),
15787 mkU64(0x0040000000400000ull
))));
15789 binop(Iop_CmpUN32Fx4
,
15790 mkexpr(t1
), mkexpr(t1
)));
15794 for (i
= 0; i
< 4; i
++) {
15795 tmp
[i
] = newTemp(Ity_I32
);
15797 unop(Iop_ReinterpF32asI32
,
15798 binop(Iop_RoundF32toInt
, rm
,
15799 unop(Iop_ReinterpI32asF32
,
15800 binop(Iop_GetElem32x4
,
15801 mkexpr(t1
), mkU8(i
))))));
15819 binop(Iop_64HLtoV128
,
15820 mkU64(0x7FBFFFFF7FBFFFFF),
15821 mkU64(0x7FBFFFFF7FBFFFFF)))),
15828 binop(Iop_64HLtoV128
,
15829 binop(Iop_32HLto64
,
15832 binop(Iop_32HLto64
,
15837 binop(Iop_64HLtoV128
,
15838 mkU64(0x8000000080000000ull
),
15839 mkU64(0x8000000080000000ull
)))
15844 case 0x01: { /* FRINT.D */
15845 DIP("FRINT.D w%d, w%d", wd
, ws
);
15846 calculateMSACSR(ws
, wt
, FRINTD
, 1);
15849 binop(Iop_CmpLT64Fx2
,
15851 binop(Iop_64HLtoV128
,
15852 mkU64(0xC3E0000000000000ull
),
15853 mkU64(0xC3E0000000000000ull
))),
15854 binop(Iop_CmpLT64Fx2
,
15855 binop(Iop_64HLtoV128
,
15856 mkU64(0x43E0000000000000ull
),
15857 mkU64(0x43E0000000000000ull
)),
15860 binop(Iop_CmpEQ64x2
,
15863 binop(Iop_64HLtoV128
,
15864 mkU64(0x0008000000000000ull
),
15865 mkU64(0x0008000000000000ull
))),
15866 binop(Iop_64HLtoV128
,
15867 mkU64(0x0008000000000000ull
),
15868 mkU64(0x0008000000000000ull
))));
15870 binop(Iop_CmpUN64Fx2
,
15871 mkexpr(t1
), mkexpr(t1
)));
15875 for (i
= 0; i
< 2; i
++) {
15876 tmp
[i
] = newTemp(Ity_I64
);
15878 unop(Iop_ReinterpF64asI64
,
15879 binop(Iop_RoundF64toInt
, rm
,
15880 unop(Iop_ReinterpI64asF64
,
15881 binop(Iop_GetElem64x2
,
15882 mkexpr(t1
), mkU8(i
))))));
15900 binop(Iop_64HLtoV128
,
15901 mkU64(0x7FF7FFFFFFFFFFFF),
15902 mkU64(0x7FF7FFFFFFFFFFFF)))),
15909 binop(Iop_64HLtoV128
,
15914 binop(Iop_64HLtoV128
,
15915 mkU64(0x8000000000000000ull
),
15916 mkU64(0x8000000000000000ull
))
15928 case 0x197: { /* FLOG2.df */
15931 case 0x00: { /* FLOG2.W */
15932 DIP("FLOG2.W w%d, w%d", wd
, ws
);
15933 calculateMSACSR(ws
, wt
, FLOG2W
, 1);
15934 putWReg(wd
, unop(Iop_Log2_32Fx4
, getWReg(ws
)));
15938 case 0x01: { /* FLOG2.D */
15939 DIP("FLOG2.D w%d, w%d", wd
, ws
);
15940 calculateMSACSR(ws
, wt
, FLOG2D
, 1);
15941 putWReg(wd
, unop(Iop_Log2_64Fx2
, getWReg(ws
)));
15952 case 0x198: { /* FEXUPL.df */
15954 case 0x00: { /* FEXUPL.W */
15955 DIP("FEXUPL.W w%d, w%d", wd
, ws
);
15956 calculateMSACSR(ws
, wt
, FEXUPLW
, 1);
15958 unop(Iop_F16toF32x4
,
15959 unop(Iop_V128HIto64
,
15964 case 0x01: { /* FEXUPL.D */
15965 DIP("FEXUPL.D w%d, w%d", wd
, ws
);
15966 calculateMSACSR(ws
, wt
, FEXUPLD
, 1);
15967 t1
= newTemp(Ity_I64
);
15968 t2
= newTemp(Ity_I64
);
15970 unop(Iop_ReinterpF64asI64
,
15972 unop(Iop_ReinterpI32asF32
,
15974 unop(Iop_V128HIto64
,
15977 unop(Iop_ReinterpF64asI64
,
15979 unop(Iop_ReinterpI32asF32
,
15981 unop(Iop_V128HIto64
,
15984 binop(Iop_64HLtoV128
,
15985 mkexpr(t2
), mkexpr(t1
)));
15996 case 0x199: { /* FEXUPR.df */
15998 case 0x00: { /* FEXUPR.W */
15999 DIP("FEXUPR.W w%d, w%d", wd
, ws
);
16000 calculateMSACSR(ws
, wt
, FEXUPRW
, 1);
16002 unop(Iop_F16toF32x4
,
16008 case 0x01: { /* FEXUPR.D */
16009 DIP("FEXUPR.D w%d, w%d", wd
, ws
);
16010 calculateMSACSR(ws
, wt
, FEXUPRD
, 1);
16011 t1
= newTemp(Ity_I64
);
16012 t2
= newTemp(Ity_I64
);
16014 unop(Iop_ReinterpF64asI64
,
16016 unop(Iop_ReinterpI32asF32
,
16021 unop(Iop_ReinterpF64asI64
,
16023 unop(Iop_ReinterpI32asF32
,
16028 binop(Iop_64HLtoV128
,
16029 mkexpr(t2
), mkexpr(t1
)));
16040 case 0x19A: { /* FFQL.df */
16042 case 0x00: { /* FFQL.W */
16043 DIP("FFQL.W w%d, w%d", wd
, ws
);
16044 calculateMSACSR(ws
, wt
, FFQLW
, 1);
16045 t1
= newTemp(Ity_V128
);
16046 t2
= newTemp(Ity_I64
);
16047 t3
= newTemp(Ity_I64
);
16048 IRExpr
*rm
= get_IR_roundingmode_MSA();
16050 binop(Iop_SarN32x4
,
16051 binop(Iop_InterleaveHI16x8
,
16056 binop(Iop_32HLto64
,
16057 unop(Iop_ReinterpF32asI32
,
16058 binop(Iop_I32StoF32
, rm
,
16059 binop(Iop_GetElem32x4
,
16062 unop(Iop_ReinterpF32asI32
,
16063 binop(Iop_I32StoF32
, rm
,
16064 binop(Iop_GetElem32x4
,
16068 binop(Iop_32HLto64
,
16069 unop(Iop_ReinterpF32asI32
,
16070 binop(Iop_I32StoF32
, rm
,
16071 binop(Iop_GetElem32x4
,
16074 unop(Iop_ReinterpF32asI32
,
16075 binop(Iop_I32StoF32
, rm
,
16076 binop(Iop_GetElem32x4
,
16080 triop(Iop_Div32Fx4
, rm
,
16081 binop(Iop_64HLtoV128
,
16082 mkexpr(t3
), mkexpr(t2
)),
16083 binop(Iop_64HLtoV128
,
16084 mkU64(0x4700000047000000),
16085 mkU64(0x4700000047000000))));
16089 case 0x01: { /* FFQL.D */
16090 DIP("FFQL.D w%d, w%d", wd
, ws
);
16091 calculateMSACSR(ws
, wt
, FFQLD
, 1);
16092 t1
= newTemp(Ity_V128
);
16093 t2
= newTemp(Ity_I64
);
16094 t3
= newTemp(Ity_I64
);
16095 IRExpr
*rm
= get_IR_roundingmode_MSA();
16097 binop(Iop_SarN64x2
,
16098 binop(Iop_InterleaveHI32x4
,
16103 unop(Iop_ReinterpF64asI64
,
16104 binop(Iop_I64StoF64
, rm
,
16108 unop(Iop_ReinterpF64asI64
,
16109 binop(Iop_I64StoF64
, rm
,
16110 unop(Iop_V128HIto64
,
16113 triop(Iop_Div64Fx2
, rm
,
16114 binop(Iop_64HLtoV128
,
16115 mkexpr(t3
), mkexpr(t2
)),
16116 binop(Iop_64HLtoV128
,
16117 mkU64(0x41E0000000000000),
16118 mkU64(0x41E0000000000000))));
16129 case 0x19B: { /* FFQR.df */
16131 case 0x00: { /* FFQR.W */
16132 DIP("FFQR.W w%d, w%d", wd
, ws
);
16133 calculateMSACSR(ws
, wt
, FFQRW
, 1);
16134 t1
= newTemp(Ity_V128
);
16135 t2
= newTemp(Ity_I64
);
16136 t3
= newTemp(Ity_I64
);
16137 IRExpr
*rm
= get_IR_roundingmode_MSA();
16139 binop(Iop_SarN32x4
,
16140 binop(Iop_InterleaveLO16x8
,
16145 binop(Iop_32HLto64
,
16146 unop(Iop_ReinterpF32asI32
,
16147 binop(Iop_I32StoF32
, rm
,
16148 binop(Iop_GetElem32x4
,
16151 unop(Iop_ReinterpF32asI32
,
16152 binop(Iop_I32StoF32
, rm
,
16153 binop(Iop_GetElem32x4
,
16157 binop(Iop_32HLto64
,
16158 unop(Iop_ReinterpF32asI32
,
16159 binop(Iop_I32StoF32
, rm
,
16160 binop(Iop_GetElem32x4
,
16163 unop(Iop_ReinterpF32asI32
,
16164 binop(Iop_I32StoF32
, rm
,
16165 binop(Iop_GetElem32x4
,
16169 triop(Iop_Div32Fx4
, rm
,
16170 binop(Iop_64HLtoV128
,
16171 mkexpr(t3
), mkexpr(t2
)),
16172 binop(Iop_64HLtoV128
,
16173 mkU64(0x4700000047000000),
16174 mkU64(0x4700000047000000))));
16178 case 0x01: { /* FFQR.D */
16179 DIP("FFQR.D w%d, w%d", wd
, ws
);
16180 calculateMSACSR(ws
, wt
, FFQRD
, 1);
16181 t1
= newTemp(Ity_V128
);
16182 t2
= newTemp(Ity_I64
);
16183 t3
= newTemp(Ity_I64
);
16184 IRExpr
*rm
= get_IR_roundingmode_MSA();
16186 binop(Iop_SarN64x2
,
16187 binop(Iop_InterleaveLO32x4
,
16192 unop(Iop_ReinterpF64asI64
,
16193 binop(Iop_I64StoF64
, rm
,
16197 unop(Iop_ReinterpF64asI64
,
16198 binop(Iop_I64StoF64
, rm
,
16199 unop(Iop_V128HIto64
,
16202 triop(Iop_Div64Fx2
, rm
,
16203 binop(Iop_64HLtoV128
,
16204 mkexpr(t3
), mkexpr(t2
)),
16205 binop(Iop_64HLtoV128
,
16206 mkU64(0x41E0000000000000),
16207 mkU64(0x41E0000000000000))));
16218 case 0x19C: { /* FTINT_S.df */
16219 switch (df
) { /* FTINT_S.W */
16221 DIP("FTINT_S.W w%d, w%d", wd
, ws
);
16222 calculateMSACSR(ws
, wd
, FTINT_SW
, 1);
16223 t1
= newTemp(Ity_I64
);
16224 t2
= newTemp(Ity_I64
);
16225 t3
= newTemp(Ity_V128
);
16226 t4
= newTemp(Ity_I32
);
16230 binop(Iop_CmpUN32Fx4
,
16233 binop(Iop_Max32Fx4
,
16235 binop(Iop_64HLtoV128
,
16236 mkU64(0xCF000000CF000000),
16237 mkU64(0xCF000000CF000000)))));
16238 IRExpr
*rm
= get_IR_roundingmode_MSA();
16240 binop(Iop_32HLto64
,
16241 binop(Iop_F32toI32S
, rm
,
16242 unop(Iop_ReinterpI32asF32
,
16243 binop(Iop_GetElem32x4
,
16244 mkexpr(t3
), mkU8(1)))),
16245 binop(Iop_F32toI32S
, rm
,
16246 unop(Iop_ReinterpI32asF32
,
16247 binop(Iop_GetElem32x4
,
16248 mkexpr(t3
), mkU8(0))))));
16250 binop(Iop_32HLto64
,
16251 binop(Iop_F32toI32S
, rm
,
16252 unop(Iop_ReinterpI32asF32
,
16253 binop(Iop_GetElem32x4
,
16254 mkexpr(t3
), mkU8(3)))),
16255 binop(Iop_F32toI32S
, rm
,
16256 unop(Iop_ReinterpI32asF32
,
16257 binop(Iop_GetElem32x4
,
16258 mkexpr(t3
), mkU8(2))))));
16260 binop(Iop_64HLtoV128
,
16261 mkexpr(t2
), mkexpr(t1
)));
16265 case 0x01: { /* FTINT_S.D */
16266 DIP("FTINT_S.D w%d, w%d", wd
, ws
);
16267 calculateMSACSR(ws
, wd
, FTINT_SD
, 1);
16268 t1
= newTemp(Ity_I64
);
16269 t2
= newTemp(Ity_I64
);
16270 t3
= newTemp(Ity_V128
);
16274 binop(Iop_CmpUN64Fx2
,
16277 binop(Iop_Max64Fx2
,
16279 binop(Iop_64HLtoV128
,
16280 mkU64(0xC3E0000000000000),
16281 mkU64(0xC3E0000000000000)))));
16282 IRExpr
*rm
= get_IR_roundingmode_MSA();
16284 binop(Iop_F64toI64S
, rm
,
16285 unop(Iop_ReinterpI64asF64
,
16286 unop(Iop_V128to64
, mkexpr(t3
)))));
16288 binop(Iop_F64toI64S
, rm
,
16289 unop(Iop_ReinterpI64asF64
,
16290 unop(Iop_V128HIto64
, mkexpr(t3
)))));
16292 binop(Iop_64HLtoV128
,
16293 mkexpr(t2
), mkexpr(t1
)));
16304 case 0x19D: {/* FTINT_U.df */
16305 switch (df
) { /* FTINT_U.W */
16307 DIP("FTINT_U.W w%d, w%d", wd
, ws
);
16308 calculateMSACSR(ws
, wd
, FTINT_UW
, 1);
16309 t1
= newTemp(Ity_I64
);
16310 t2
= newTemp(Ity_I64
);
16311 t3
= newTemp(Ity_V128
);
16312 t4
= newTemp(Ity_V128
);
16313 IRExpr
*rm
= get_IR_roundingmode_MSA();
16315 binop(Iop_32HLto64
,
16316 binop(Iop_F32toI32U
, rm
,
16317 unop(Iop_ReinterpI32asF32
,
16318 binop(Iop_GetElem32x4
,
16319 getWReg(ws
), mkU8(1)))),
16320 binop(Iop_F32toI32U
, rm
,
16321 unop(Iop_ReinterpI32asF32
,
16322 binop(Iop_GetElem32x4
,
16323 getWReg(ws
), mkU8(0))))));
16325 binop(Iop_32HLto64
,
16326 binop(Iop_F32toI32U
, rm
,
16327 unop(Iop_ReinterpI32asF32
,
16328 binop(Iop_GetElem32x4
,
16329 getWReg(ws
), mkU8(3)))),
16330 binop(Iop_F32toI32U
, rm
,
16331 unop(Iop_ReinterpI32asF32
,
16332 binop(Iop_GetElem32x4
,
16333 getWReg(ws
), mkU8(2))))));
16336 binop(Iop_SarN32x4
,
16340 binop(Iop_CmpLT32Fx4
,
16342 binop(Iop_64HLtoV128
,
16343 mkU64(0x4EFFFFFF4EFFFFFF),
16344 mkU64(0x4EFFFFFF4EFFFFFF))));
16350 binop(Iop_64HLtoV128
,
16355 unop(Iop_NotV128
, mkexpr(t4
)),
16356 unop(Iop_F32toI32Ux4_RZ
,
16361 case 0x01: { /* FTINT_U.D */
16362 DIP("FTINT_U.D w%d, w%d", wd
, ws
);
16363 calculateMSACSR(ws
, wd
, FTINT_UD
, 1);
16364 t1
= newTemp(Ity_I64
);
16365 t2
= newTemp(Ity_I64
);
16366 IRExpr
*rm
= get_IR_roundingmode_MSA();
16368 binop(Iop_F64toI64U
, rm
,
16369 unop(Iop_ReinterpI64asF64
,
16373 binop(Iop_F64toI64U
, rm
,
16374 unop(Iop_ReinterpI64asF64
,
16375 unop(Iop_V128HIto64
,
16378 binop(Iop_64HLtoV128
,
16379 mkexpr(t2
), mkexpr(t1
)));
16390 case 0x19E: { /* FFINT_S.df */
16391 t1
= newTemp(Ity_V128
);
16392 assign(t1
, getWReg(ws
));
16393 IRExpr
*rm
= get_IR_roundingmode_MSA();
16396 case 0x00: { /* FFINT_S.W */
16397 DIP("FFINT_S.W w%d, w%d", wd
, ws
);
16398 calculateMSACSR(ws
, wt
, FFINTSW
, 1);
16402 for (i
= 0; i
< 4; i
++) {
16403 tmp
[i
] = newTemp(Ity_F32
);
16405 binop(Iop_I32StoF32
, rm
,
16406 binop(Iop_GetElem32x4
,
16407 mkexpr(t1
), mkU8(i
))));
16411 binop(Iop_64HLtoV128
,
16412 binop(Iop_32HLto64
,
16413 unop(Iop_ReinterpF32asI32
,
16415 unop(Iop_ReinterpF32asI32
,
16417 binop(Iop_32HLto64
,
16418 unop(Iop_ReinterpF32asI32
,
16420 unop(Iop_ReinterpF32asI32
,
16421 mkexpr(tmp
[0])))));
16425 case 0x01: { /* FFINT_S.D */
16426 DIP("FFINT_S.D w%d, w%d", wd
, ws
);
16427 calculateMSACSR(ws
, wt
, FFINTSD
, 1);
16431 for (i
= 0; i
< 2; i
++) {
16432 tmp
[i
] = newTemp(Ity_F64
);
16434 binop(Iop_I64StoF64
, rm
,
16435 binop(Iop_GetElem64x2
,
16436 mkexpr(t1
), mkU8(i
))));
16440 binop(Iop_64HLtoV128
,
16441 unop(Iop_ReinterpF64asI64
,
16443 unop(Iop_ReinterpF64asI64
,
16455 case 0x19F: { /* FFINT_U.df */
16456 IRExpr
*rm
= get_IR_roundingmode_MSA();
16459 case 0x00: { /* FFINT_U.W */
16460 DIP("FFINT_U.W w%d, w%d", wd
, ws
);
16461 calculateMSACSR(ws
, wt
, FFINT_UW
, 1);
16462 putWReg(wd
, unop(Iop_I32UtoF32x4_DEP
, getWReg(ws
)));
16466 case 0x01: { /* FFINT_U.D */
16467 DIP("FFINT_U.D w%d, w%d",
16469 calculateMSACSR(ws
, wt
,
16471 t1
= newTemp(Ity_I64
);
16472 t2
= newTemp(Ity_I64
);
16474 unop(Iop_ReinterpF64asI64
,
16475 binop(Iop_I64UtoF64
, rm
,
16479 unop(Iop_ReinterpF64asI64
,
16480 binop(Iop_I64UtoF64
, rm
,
16481 unop(Iop_V128HIto64
,
16484 binop(Iop_64HLtoV128
,
16485 mkexpr(t2
), mkexpr(t1
)));
16503 static Int
/* Translate an MSA MI10-format vector load (minor opcode 0x20):
   LD.B / LD.H / LD.W / LD.D.  Loads a 128-bit vector into register wd
   from memory at base register ws plus a scaled 10-bit immediate.
   NOTE(review): this extraction is lossy (intermediate source lines are
   missing between fragments); comments describe only the visible code. */
msa_MI10_load(UInt cins
, UChar wd
, UChar ws
) /* MI10 (0x20) */
/* i10: 10-bit immediate offset, instruction bits 25:16. */
16509 i10
= (cins
& 0x03FF0000) >> 16;
/* df: data-format (element width) selector, instruction bits 1:0. */
16510 df
= cins
& 0x00000003;
/* df == 0: byte elements; offset used unscaled.  Endianness is
   irrelevant for a byte-granularity vector load. */
16513 case 0x00: { /* LD.B */
16514 DIP("LD.B w%d, %d(r%d)", wd
, ws
, i10
);
16515 LOAD_STORE_PATTERN_MSA(i10
);
16516 putWReg(wd
, load(Ity_V128
, mkexpr(t1
)));
/* df == 1: halfword elements; offset scaled by 2 (i10 << 1).  On a
   big-endian host the loaded vector is byte-reversed within each
   16-bit lane (Iop_Reverse8sIn16_x8) so lane contents match the
   guest's view. */
16520 case 0x01: { /* LD.H */
16521 DIP("LD.H w%d, %d(r%d)", wd
, ws
, i10
);
16522 LOAD_STORE_PATTERN_MSA(i10
<< 1);
16523 #if defined (_MIPSEL)
16524 putWReg(wd
, load(Ity_V128
, mkexpr(t1
)));
16525 #elif defined (_MIPSEB)
16527 unop(Iop_Reverse8sIn16_x8
,
16528 load(Ity_V128
, mkexpr(t1
))));
/* df == 2: word elements; offset scaled by 4, 32-bit lanes
   byte-reversed on big-endian hosts. */
16533 case 0x02: { /* LD.W */
16534 DIP("LD.W w%d, %d(r%d)", wd
, ws
, i10
);
16535 LOAD_STORE_PATTERN_MSA(i10
<< 2);
16536 #if defined (_MIPSEL)
16537 putWReg(wd
, load(Ity_V128
, mkexpr(t1
)));
16538 #elif defined (_MIPSEB)
16540 unop(Iop_Reverse8sIn32_x4
,
16541 load(Ity_V128
, mkexpr(t1
))));
/* df == 3: doubleword elements; offset scaled by 8, 64-bit lanes
   byte-reversed on big-endian hosts. */
16546 case 0x03: { /* LD.D */
16547 DIP("LD.D w%d, %d(r%d)", wd
, ws
, i10
);
16548 LOAD_STORE_PATTERN_MSA(i10
<< 3);
16549 #if defined (_MIPSEL)
16550 putWReg(wd
, load(Ity_V128
, mkexpr(t1
)));
16551 #elif defined (_MIPSEB)
16553 unop(Iop_Reverse8sIn64_x2
,
16554 load(Ity_V128
, mkexpr(t1
))));
16566 static Int
/* Translate an MSA MI10-format vector store (minor opcode 0x24):
   ST.B / ST.H / ST.W / ST.D.  Stores the 128-bit vector register wd to
   memory at base register ws plus a scaled 10-bit immediate.  Mirror
   image of msa_MI10_load above.
   NOTE(review): this extraction is lossy (intermediate source lines are
   missing between fragments); comments describe only the visible code. */
msa_MI10_store(UInt cins
, UChar wd
, UChar ws
) /* MI10 (0x24) */
/* df: data-format (element width) selector, instruction bits 1:0. */
16572 df
= cins
& 0x00000003;
/* i10: 10-bit immediate offset, instruction bits 25:16. */
16573 i10
= (cins
& 0x03FF0000) >> 16;
/* df == 0: byte elements; offset unscaled, no endianness fixup
   needed for byte granularity. */
16576 case 0x00: { /* ST.B */
16577 DIP("ST.B w%d, %d(r%d)", wd
, ws
, i10
);
16578 LOAD_STORE_PATTERN_MSA(i10
);
16579 store(mkexpr(t1
), getWReg(wd
));
/* df == 1: halfword elements; offset scaled by 2.  On big-endian
   hosts the register value is byte-reversed within each 16-bit lane
   before being stored. */
16583 case 0x01: { /* ST.H */
16584 DIP("ST.H w%d, %d(r%d)", wd
, ws
, i10
);
16585 LOAD_STORE_PATTERN_MSA(i10
<< 1);
16586 #if defined (_MIPSEL)
16587 store(mkexpr(t1
), getWReg(wd
));
16588 #elif defined (_MIPSEB)
16590 unop(Iop_Reverse8sIn16_x8
, getWReg(wd
)));
/* df == 2: word elements; offset scaled by 4, 32-bit lanes
   byte-reversed on big-endian hosts before the store. */
16595 case 0x02: { /* ST.W */
16596 DIP("ST.W w%d, %d(r%d)", wd
, ws
, i10
);
16597 LOAD_STORE_PATTERN_MSA(i10
<< 2);
16598 #if defined (_MIPSEL)
16599 store(mkexpr(t1
), getWReg(wd
));
16600 #elif defined (_MIPSEB)
16602 unop(Iop_Reverse8sIn32_x4
, getWReg(wd
)));
/* df == 3: doubleword elements; offset scaled by 8, 64-bit lanes
   byte-reversed on big-endian hosts before the store. */
16607 case 0x03: { /* ST.D */
16608 DIP("ST.D w%d, %d(r%d)", wd
, ws
, i10
);
16609 LOAD_STORE_PATTERN_MSA(i10
<< 3);
16610 #if defined (_MIPSEL)
16611 store(mkexpr(t1
), getWReg(wd
));
16612 #elif defined (_MIPSEB)
16614 unop(Iop_Reverse8sIn64_x2
, getWReg(wd
)));
16626 /*------------------------------------------------------------*/
16627 /*--- Disassemble a single MIPS MSA (SIMD) instruction ---*/
16628 /*--- Return values: ---*/
16629 /*--- 0: Success ---*/
16630 /*--- -1: Decode failure (unknown instruction) ---*/
16631 /*--- -2: Illegal instruction ---*/
16632 /*------------------------------------------------------------*/
16633 static Int
/* Top-level decoder for a single MIPS MSA (SIMD) instruction.  Per the
   banner above: returns 0 on success, -1 on decode failure (unknown
   instruction), -2 for an illegal instruction.  Extracts the common
   wd/ws register fields and the minor opcode, then dispatches to the
   per-format helper (I8 / I5 / BIT / 3R / ELM / VEC / 2R / 2RF / MI10).
   NOTE(review): this extraction is lossy — the switch's case labels are
   among the dropped lines; comments describe only the visible code. */
disMSAInstr_MIPS_WRK ( UInt cins
)
16635 UChar minor_opcode
, wd
, ws
;
/* All MSA instructions live in major opcode 0x1E (top six bits
   0b011110 == 0x78000000 when shifted into place). */
16638 vassert((cins
& 0xFC000000) == 0x78000000);
/* Minor opcode: if bit 5 is set the low two bits are part of the df
   field, so mask with 0x3C; otherwise all six low bits decode. */
16640 minor_opcode
= (cins
& 0x20) > 0 ? (cins
& 0x3C) : (cins
& 0x3F);
/* wd: destination vector register, instruction bits 10:6. */
16641 wd
= (cins
& 0x000007C0) >> 6;
/* ws: source vector register, instruction bits 15:11. */
16642 ws
= (cins
& 0x0000F800) >> 11;
/* Dispatch on the minor opcode.  (Case labels were lost in this
   extraction; each helper handles one instruction format.) */
16644 switch (minor_opcode
) {
16646 return msa_I8_logical(cins
, wd
, ws
);
16649 return msa_I8_branch(cins
, wd
, ws
);
16652 return msa_I8_shift(cins
, wd
, ws
);
16655 return msa_I5_06(cins
, wd
, ws
);
16658 return msa_I5_07(cins
, wd
, ws
);
16661 return msa_BIT_09(cins
, wd
, ws
);
16664 return msa_BIT_0A(cins
, wd
, ws
);
16667 return msa_3R_0D(cins
, wd
, ws
);
16670 return msa_3R_0E(cins
, wd
, ws
);
16673 return msa_3R_0F(cins
, wd
, ws
);
16676 return msa_3R_10(cins
, wd
, ws
);
16679 return msa_3R_11(cins
, wd
, ws
);
16682 return msa_3R_12(cins
, wd
, ws
);
16685 return msa_3R_13(cins
, wd
, ws
);
16688 return msa_3R_14(cins
, wd
, ws
);
16691 return msa_3R_15(cins
, wd
, ws
);
16694 return msa_ELM(cins
, wd
, ws
);
16697 return msa_3R_1A(cins
, wd
, ws
);
16700 return msa_3R_1B(cins
, wd
, ws
);
16703 return msa_3R_1C(cins
, wd
, ws
);
/* This minor opcode is shared: bits 25:24 distinguish the VEC
   format, then bit 21 splits 2R from 2RF (two-register FP). */
16706 if ((cins
& 0x03000000) == 0)
16707 return msa_VEC(cins
, wd
, ws
);
16708 else if ((cins
& 0x00200000) == 0)
16709 return msa_2R(cins
, wd
, ws
);
16711 return msa_2RF(cins
, wd
, ws
);
/* Memory formats: MI10 loads and stores (see helpers above). */
16714 return msa_MI10_load(cins
, wd
, ws
);
16717 return msa_MI10_store(cins
, wd
, ws
);
16723 /*------------------------------------------------------------*/
16724 /*--- DSP to IR function ---*/
16725 /*------------------------------------------------------------*/
16727 extern UInt
disDSPInstr_MIPS_WRK ( UInt
);
16729 /*------------------------------------------------------------*/
16730 /*--- Disassemble a single instruction ---*/
16731 /*------------------------------------------------------------*/
16733 /* Disassemble a single instruction into IR. The instruction is
16734 located in host memory at guest_instr, and has guest IP of
16735 guest_PC_curr_instr, which will have been set before the call
16739 static UInt
disInstr_MIPS_WRK_Special(UInt cins
, const VexArchInfo
* archinfo
,
16740 const VexAbiInfo
* abiinfo
, DisResult
* dres
,
16741 IRStmt
** bstmt
, IRExpr
** lastn
)
16743 IRTemp t0
, t1
= 0, t2
, t3
, t4
, t5
;
16744 UInt rs
, rt
, rd
, sa
, tf
, function
, trap_code
, imm
, instr_index
, rot
, sel
;
16745 /* Additional variables for instruction fields in DSP ASE insructions */
16748 imm
= get_imm(cins
);
16754 sel
= get_sel(cins
);
16755 instr_index
= get_instr_index(cins
);
16756 trap_code
= get_code(cins
);
16757 function
= get_function(cins
);
16758 IRType ty
= mode64
? Ity_I64
: Ity_I32
;
16760 ac
= get_acNo(cins
);
16762 switch (function
) {
16763 case 0x00: { /* SLL */
16764 DIP("sll r%u, r%u, %u", rd
, rt
, sa
);
16765 IRTemp tmpRt32
= newTemp(Ity_I32
);
16766 IRTemp tmpSh32
= newTemp(Ity_I32
);
16767 IRTemp tmpRd
= newTemp(Ity_I64
);
16770 assign(tmpRt32
, mkNarrowTo32(ty
, getIReg(rt
)));
16771 assign(tmpSh32
, binop(Iop_Shl32
, mkexpr(tmpRt32
), mkU8(sa
)));
16772 assign(tmpRd
, mkWidenFrom32(ty
, mkexpr(tmpSh32
), True
));
16773 putIReg(rd
, mkexpr(tmpRd
));
16775 SXX_PATTERN(Iop_Shl32
);
16780 case 0x01: { /* MOVCI */
16781 UInt mov_cc
= get_mov_cc(cins
);
16783 if (tf
== 0) { /* MOVF */
16784 DIP("movf r%u, r%u, %u", rd
, rs
, mov_cc
);
16785 t1
= newTemp(Ity_I1
);
16786 t2
= newTemp(Ity_I32
);
16787 t3
= newTemp(Ity_I1
);
16789 assign(t1
, binop(Iop_CmpEQ32
, mkU32(0), mkU32(mov_cc
)));
16790 assign(t2
, IRExpr_ITE(mkexpr(t1
),
16792 binop(Iop_Shr32
, getFCSR(),
16796 binop(Iop_Shr32
, getFCSR(),
16797 mkU8(24 + mov_cc
)),
16800 assign(t3
, binop(Iop_CmpEQ32
, mkU32(0), mkexpr(t2
)));
16801 putIReg(rd
, IRExpr_ITE(mkexpr(t3
), getIReg(rs
), getIReg(rd
)));
16802 } else if (tf
== 1) { /* MOVT */
16803 DIP("movt r%u, r%u, %u", rd
, rs
, mov_cc
);
16804 t1
= newTemp(Ity_I1
);
16805 t2
= newTemp(Ity_I32
);
16806 t3
= newTemp(Ity_I1
);
16808 assign(t1
, binop(Iop_CmpEQ32
, mkU32(0), mkU32(mov_cc
)));
16809 assign(t2
, IRExpr_ITE(mkexpr(t1
),
16811 binop(Iop_Shr32
, getFCSR(),
16815 binop(Iop_Shr32
, getFCSR(),
16816 mkU8(24 + mov_cc
)),
16819 assign(t3
, binop(Iop_CmpEQ32
, mkU32(1), mkexpr(t2
)));
16820 putIReg(rd
, IRExpr_ITE(mkexpr(t3
), getIReg(rs
), getIReg(rd
)));
16826 case 0x02: { /* SRL */
16827 rot
= get_rot(cins
);
16830 DIP("rotr r%u, r%u, %u", rd
, rt
, sa
);
16831 putIReg(rd
, mkWidenFrom32(ty
, genROR32(mkNarrowTo32(ty
,
16832 getIReg(rt
)), sa
), True
));
16834 DIP("srl r%u, r%u, %u", rd
, rt
, sa
);
16837 IRTemp tmpSh32
= newTemp(Ity_I32
);
16838 IRTemp tmpRt32
= newTemp(Ity_I32
);
16840 assign(tmpRt32
, mkNarrowTo32(ty
, getIReg(rt
)));
16841 assign(tmpSh32
, binop(Iop_Shr32
, mkexpr(tmpRt32
), mkU8(sa
)));
16842 putIReg(rd
, mkWidenFrom32(ty
, mkexpr(tmpSh32
), True
));
16844 SXX_PATTERN(Iop_Shr32
);
16851 case 0x03: /* SRA */
16852 DIP("sra r%u, r%u, %u", rd
, rt
, sa
);
16855 IRTemp tmpRt32
= newTemp(Ity_I32
);
16856 IRTemp tmpSh32
= newTemp(Ity_I32
);
16858 t1
= newTemp(Ity_I64
);
16859 t2
= newTemp(Ity_I64
);
16860 t3
= newTemp(Ity_I64
);
16862 assign(t1
, binop(Iop_And64
, getIReg(rt
), /* hi */
16863 mkU64(0xFFFFFFFF00000000ULL
)));
16865 assign(t2
, binop(Iop_Sar64
, mkexpr(t1
), mkU8(sa
)));
16867 assign(tmpRt32
, mkNarrowTo32(ty
, getIReg(rt
)));
16868 assign(tmpSh32
, binop(Iop_Sar32
, mkexpr(tmpRt32
), mkU8(sa
)));
16870 putIReg(rd
, mkWidenFrom32(ty
, mkexpr(tmpSh32
), True
));
16872 SXX_PATTERN(Iop_Sar32
);
16877 case 0x04: { /* SLLV */
16878 DIP("sllv r%u, r%u, r%u", rd
, rt
, rs
);
16881 IRTemp tmpRs8
= newTemp(Ity_I8
);
16882 IRTemp tmpRt32
= newTemp(Ity_I32
);
16883 IRTemp tmpSh32
= newTemp(Ity_I32
);
16884 IRTemp tmp
= newTemp(ty
);
16885 assign(tmp
, binop(mkSzOp(ty
, Iop_And8
), getIReg(rs
),
16887 assign(tmpRs8
, mkNarrowTo8(ty
, mkexpr(tmp
)));
16888 assign(tmpRt32
, mkNarrowTo32(ty
, getIReg(rt
)));
16889 assign(tmpSh32
, binop(Iop_Shl32
, mkexpr(tmpRt32
), mkexpr(tmpRs8
)));
16890 putIReg(rd
, mkWidenFrom32(ty
, mkexpr(tmpSh32
), True
));
16892 SXXV_PATTERN(Iop_Shl32
);
16898 case 0x05: { /* LSA */
16899 UInt imm2
= (imm
& 0xC0) >> 6;
16901 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
) || has_msa
) {
16902 DIP("lsa r%u, r%u, r%u, imm: 0x%x", rd
, rs
, rt
, imm2
);
16905 DIP("lsa r%u, r%u, r%u, imm: 0x%x", rd
, rs
, rt
, imm2
);
16906 putIReg(rd
, unop(Iop_32Sto64
,
16909 unop(Iop_64to32
, getIReg(rs
)),
16911 unop(Iop_64to32
, getIReg(rt
)))));
16914 DIP("lsa r%u, r%u, r%u, imm: 0x%x", rd
, rs
, rt
, imm2
);
16915 putIReg(rd
, binop(Iop_Add32
,
16917 getIReg(rs
), mkU8(imm2
+ 1)), getIReg(rt
)));
16921 ILLEGAL_INSTRUCTON
;
16926 case 0x06: { /* SRLV */
16927 rot
= get_rotv(cins
);
16930 DIP("rotrv r%u, r%u, r%u", rd
, rt
, rs
);
16931 putIReg(rd
, mkWidenFrom32(ty
, genRORV32(mkNarrowTo32(ty
,
16932 getIReg(rt
)), mkNarrowTo32(ty
, getIReg(rs
))), True
));
16934 } else { /* SRLV */
16935 DIP("srlv r%u, r%u, r%u", rd
, rt
, rs
);
16938 SXXV_PATTERN64(Iop_Shr32
);
16940 SXXV_PATTERN(Iop_Shr32
);
16947 case 0x07: /* SRAV */
16948 DIP("srav r%u, r%u, r%u", rd
, rt
, rs
);
16951 IRTemp tmpRt32
= newTemp(Ity_I32
);
16952 IRTemp tmpSh32
= newTemp(Ity_I32
);
16954 t1
= newTemp(Ity_I64
);
16955 t2
= newTemp(Ity_I64
);
16956 t3
= newTemp(Ity_I64
);
16957 t4
= newTemp(Ity_I8
);
16959 assign(t4
, unop(Iop_32to8
, binop(Iop_And32
,
16960 mkNarrowTo32(ty
, getIReg(rs
)), mkU32(0x0000001F))));
16962 assign(t1
, binop(Iop_And64
, getIReg(rt
), /* hi */
16963 mkU64(0xFFFFFFFF00000000ULL
)));
16965 assign(t2
, binop(Iop_Sar64
, mkexpr(t1
), mkexpr(t4
)));
16967 assign(tmpRt32
, mkNarrowTo32(ty
, getIReg(rt
)));
16968 assign(tmpSh32
, binop(Iop_Sar32
, mkexpr(tmpRt32
), mkexpr(t4
)));
16970 putIReg(rd
, mkWidenFrom32(ty
, mkexpr(tmpSh32
), True
));
16972 SXXV_PATTERN(Iop_Sar32
);
16977 case 0x08: /* JR */
16980 assign(t0
, getIReg(rs
));
16981 *lastn
= mkexpr(t0
);
16984 case 0x09: /* JALR */
16985 DIP("jalr r%u r%u", rd
, rs
);
16988 putIReg(rd
, mkU64(guest_PC_curr_instr
+ 8));
16989 t0
= newTemp(Ity_I64
);
16990 assign(t0
, getIReg(rs
));
16991 *lastn
= mkexpr(t0
);
16993 putIReg(rd
, mkU32(guest_PC_curr_instr
+ 8));
16994 t0
= newTemp(Ity_I32
);
16995 assign(t0
, getIReg(rs
));
16996 *lastn
= mkexpr(t0
);
17001 case 0x0A: { /* MOVZ */
17002 DIP("movz r%u, r%u, r%u", rd
, rs
, rt
);
17007 assign(t1
, unop(Iop_32Sto64
, unop(Iop_1Sto32
, binop(Iop_CmpEQ64
,
17008 getIReg(rt
), mkU64(0x0)))));
17009 assign(t2
, unop(Iop_32Sto64
, unop(Iop_1Sto32
, binop(Iop_CmpNE64
,
17010 getIReg(rt
), mkU64(0x0)))));
17011 putIReg(rd
, binop(Iop_Add64
, binop(Iop_And64
, getIReg(rs
),
17012 mkexpr(t1
)), binop(Iop_And64
, getIReg(rd
), mkexpr(t2
))));
17014 assign(t1
, unop(Iop_1Sto32
, binop(Iop_CmpEQ32
, getIReg(rt
),
17016 assign(t2
, unop(Iop_1Sto32
, binop(Iop_CmpNE32
, getIReg(rt
),
17018 putIReg(rd
, binop(Iop_Add32
, binop(Iop_And32
, getIReg(rs
),
17019 mkexpr(t1
)), binop(Iop_And32
, getIReg(rd
),
17026 case 0x0B: { /* MOVN */
17027 DIP("movn r%u, r%u, r%u", rd
, rs
, rt
);
17032 assign(t1
, unop(Iop_32Sto64
, unop(Iop_1Sto32
, binop(Iop_CmpEQ64
,
17033 getIReg(rt
), mkU64(0x0)))));
17034 assign(t2
, unop(Iop_32Sto64
, unop(Iop_1Sto32
, binop(Iop_CmpNE64
,
17035 getIReg(rt
), mkU64(0x0)))));
17036 putIReg(rd
, binop(Iop_Add64
, binop(Iop_And64
, getIReg(rs
),
17037 mkexpr(t2
)), binop(Iop_And64
, getIReg(rd
),
17040 assign(t1
, unop(Iop_1Sto32
, binop(Iop_CmpEQ32
, getIReg(rt
),
17042 assign(t2
, unop(Iop_1Sto32
, binop(Iop_CmpNE32
, getIReg(rt
),
17044 putIReg(rd
, binop(Iop_Add32
, binop(Iop_And32
, getIReg(rs
),
17045 mkexpr(t2
)), binop(Iop_And32
, getIReg(rd
),
17052 case 0x0C: /* SYSCALL */
17056 putPC(mkU64(guest_PC_curr_instr
+ 4));
17058 putPC(mkU32(guest_PC_curr_instr
+ 4));
17060 dres
->jk_StopHere
= Ijk_Sys_syscall
;
17061 dres
->whatNext
= Dis_StopHere
;
17064 case 0x0D: /* BREAK */
17065 DIP("break 0x%x", trap_code
);
17068 jmp_lit64(dres
, Ijk_SigTRAP
, (guest_PC_curr_instr
+ 4));
17070 jmp_lit32(dres
, Ijk_SigTRAP
, (guest_PC_curr_instr
+ 4));
17072 vassert(dres
->whatNext
== Dis_StopHere
);
17075 case 0x0F: /* SYNC */
17076 DIP("sync 0x%x", sel
);
17077 /* Just ignore it. */
17080 case 0x10: { /* MFHI, CLZ R6 */
17081 if (((instr_index
>> 6) & 0x1f) == 1) { /* CLZ */
17082 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
17083 DIP("clz r%u, r%u", rd
, rs
);
17086 IRTemp tmpClz32
= newTemp(Ity_I32
);
17087 IRTemp tmpRs32
= newTemp(Ity_I32
);
17089 assign(tmpRs32
, mkNarrowTo32(ty
, getIReg(rs
)));
17090 assign(tmpClz32
, unop(Iop_Clz32
, mkexpr(tmpRs32
)));
17091 putIReg(rd
, mkWidenFrom32(ty
, mkexpr(tmpClz32
), True
));
17093 t1
= newTemp(Ity_I1
);
17094 assign(t1
, binop(Iop_CmpEQ32
, getIReg(rs
), mkU32(0)));
17095 putIReg(rd
, IRExpr_ITE(mkexpr(t1
),
17097 unop(Iop_Clz32
, getIReg(rs
))));
17100 ILLEGAL_INSTRUCTON
;
17104 } else if (VEX_MIPS_PROC_DSP(archinfo
->hwcaps
)) {
17105 /* If DSP is present -> DSP ASE MFHI */
17106 UInt retVal
= disDSPInstr_MIPS_WRK ( cins
);
17108 if (0 != retVal
) {
17114 DIP("mfhi r%u", rd
);
17115 putIReg(rd
, getHI());
17120 case 0x11: { /* MTHI, CLO R6 */
17121 if (((instr_index
>> 6) & 0x1f) == 1) { /* CLO */
17122 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
17123 DIP("clo r%u, r%u", rd
, rs
);
17126 IRTemp tmpClo32
= newTemp(Ity_I32
);
17127 IRTemp tmpRs32
= newTemp(Ity_I32
);
17128 assign(tmpRs32
, mkNarrowTo32(ty
, getIReg(rs
)));
17130 t1
= newTemp(Ity_I1
);
17131 assign(t1
, binop(Iop_CmpEQ32
, mkexpr(tmpRs32
), mkU32(0xffffffff)));
17132 assign(tmpClo32
, IRExpr_ITE(mkexpr(t1
),
17134 unop(Iop_Clz32
, unop(Iop_Not32
, mkexpr(tmpRs32
)))));
17136 putIReg(rd
, mkWidenFrom32(ty
, mkexpr(tmpClo32
), True
));
17139 t1
= newTemp(Ity_I1
);
17140 assign(t1
, binop(Iop_CmpEQ32
, getIReg(rs
), mkU32(0xffffffff)));
17141 putIReg(rd
, IRExpr_ITE(mkexpr(t1
),
17144 unop(Iop_Not32
, getIReg(rs
)))));
17147 ILLEGAL_INSTRUCTON
;
17151 } else if (VEX_MIPS_PROC_DSP(archinfo
->hwcaps
)) {
17152 /* If DSP is present -> DSP ASE MTHI */
17153 UInt retVal
= disDSPInstr_MIPS_WRK ( cins
);
17155 if (0 != retVal
) {
17161 DIP("mthi r%u", rs
);
17162 putHI(getIReg(rs
));
17167 case 0x12: { /* MFLO */
17168 if (VEX_MIPS_PROC_DSP(archinfo
->hwcaps
)) {
17169 /* If DSP is present -> DSP ASE MFLO */
17170 UInt retVal
= disDSPInstr_MIPS_WRK ( cins
);
17172 if (0 != retVal
) {
17180 DIP("mflo r%u", rd
);
17182 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
) &&
17183 !VEX_MIPS_CPU_HAS_MIPS32R2(archinfo
->hwcaps
)) {
17187 putIReg(rd
, getLO());
17191 DIP("dclz r%u, r%u", rd
, rs
);
17192 t1
= newTemp(Ity_I1
);
17193 assign(t1
, binop(Iop_CmpEQ64
, getIReg(rs
), mkU64(0)));
17194 putIReg(rd
, IRExpr_ITE(mkexpr(t1
),
17196 unop(Iop_Clz64
, getIReg(rs
))));
17204 case 0x13: { /* MTLO */
17205 if (VEX_MIPS_PROC_DSP(archinfo
->hwcaps
)) {
17206 /* If DSP is present -> DSP ASE MTLO */
17207 UInt retVal
= disDSPInstr_MIPS_WRK ( cins
);
17209 if (0 != retVal
) {
17217 DIP("mtlo r%u", rs
);
17219 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
) &&
17220 !VEX_MIPS_CPU_HAS_MIPS32R2(archinfo
->hwcaps
)) {
17224 putLO(getIReg(rs
));
17228 DIP("dclo r%u, r%u", rd
, rs
);
17229 t1
= newTemp(Ity_I1
);
17230 assign(t1
, binop(Iop_CmpEQ64
, getIReg(rs
),
17231 mkU64(0xffffffffffffffffULL
)));
17232 putIReg(rd
, IRExpr_ITE(mkexpr(t1
),
17234 unop(Iop_Clz64
, unop(Iop_Not64
,
17243 case 0x15: { /* DLSA */
17244 UInt imm2
= (imm
& 0xC0) >> 6;
17246 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
) || has_msa
) {
17247 DIP("dlsa r%u, r%u, r%u, imm: 0x%x", rd
, rs
, rt
, imm2
);
17248 putIReg(rd
, binop(Iop_Add64
,
17249 binop(Iop_Shl64
, getIReg(rs
), mkU8(imm2
+ 1)),
17258 case 0x18: { /* MULT */
17259 switch (sa
& 0x3) {
17261 if ((1 <= ac
) && ( 3 >= ac
)) {
17262 if (VEX_MIPS_PROC_DSP(archinfo
->hwcaps
)) {
17263 /* If DSP is present -> DSP ASE MULT */
17264 UInt retVal
= disDSPInstr_MIPS_WRK(cins
);
17275 DIP("mult r%u, r%u", rs
, rt
);
17277 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
) &&
17278 !VEX_MIPS_CPU_HAS_MIPS32R2(archinfo
->hwcaps
)) {
17282 t2
= newTemp(Ity_I64
);
17284 assign(t2
, binop(Iop_MullS32
, mkNarrowTo32(ty
, getIReg(rs
)),
17285 mkNarrowTo32(ty
, getIReg(rt
))));
17287 putHI(mkWidenFrom32(ty
, unop(Iop_64HIto32
, mkexpr(t2
)), True
));
17288 putLO(mkWidenFrom32(ty
, unop(Iop_64to32
, mkexpr(t2
)), True
));
17293 case 2: { /* MUL R6 */
17294 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
17295 DIP("mul r%u, r%u, r%u", rs
, rt
, rd
);
17298 putIReg(rd
, unop(Iop_32Sto64
,
17301 unop(Iop_64to32
, getIReg(rs
)),
17302 unop(Iop_64to32
, getIReg(rt
))))));
17304 putIReg(rd
, unop(Iop_64to32
,
17306 getIReg(rs
), getIReg(rt
))));
17309 ILLEGAL_INSTRUCTON
;
17315 case 3: { /* MUH R6 */
17316 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
17317 DIP("muh r%u, r%u, r%u", rs
, rt
, rd
);
17320 putIReg(rd
, unop(Iop_32Sto64
,
17323 unop(Iop_64to32
, getIReg(rs
)),
17324 unop(Iop_64to32
, getIReg(rt
))))));
17326 putIReg(rd
, unop(Iop_64HIto32
,
17328 getIReg(rs
), getIReg(rt
))));
17331 ILLEGAL_INSTRUCTON
;
17341 case 0x19: { /* MULTU */
17342 switch (sa
& 0x3) {
17344 if ((1 <= ac
) && ( 3 >= ac
)) {
17345 if (VEX_MIPS_PROC_DSP(archinfo
->hwcaps
)) {
17346 /* If DSP is present -> DSP ASE MULTU */
17347 UInt retVal
= disDSPInstr_MIPS_WRK ( cins
);
17358 DIP("multu r%u, r%u", rs
, rt
);
17360 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
) &&
17361 !VEX_MIPS_CPU_HAS_MIPS32R2(archinfo
->hwcaps
)) {
17365 t2
= newTemp(Ity_I64
);
17367 assign(t2
, binop(Iop_MullU32
, mkNarrowTo32(ty
, getIReg(rs
)),
17368 mkNarrowTo32(ty
, getIReg(rt
))));
17370 putHI(mkWidenFrom32(ty
, unop(Iop_64HIto32
, mkexpr(t2
)), True
));
17371 putLO(mkWidenFrom32(ty
, unop(Iop_64to32
, mkexpr(t2
)), True
));
17376 case 2: { /* MULU R6 */
17377 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
17378 DIP("mulu r%u, r%u, r%u", rs
, rt
, rd
);
17381 putIReg(rd
, unop(Iop_32Uto64
,
17384 unop(Iop_64to32
, getIReg(rs
)),
17385 unop(Iop_64to32
, getIReg(rt
))))));
17387 putIReg(rd
, unop(Iop_64to32
,
17389 getIReg(rs
), getIReg(rt
))));
17392 ILLEGAL_INSTRUCTON
;
17398 case 3: { /* MUHU R6 */
17399 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
17400 DIP("muhu r%u, r%u, r%u", rs
, rt
, rd
);
17403 putIReg(rd
, unop(Iop_32Uto64
,
17406 unop(Iop_64to32
, getIReg(rs
)),
17407 unop(Iop_64to32
, getIReg(rt
))))));
17409 putIReg(rd
, unop(Iop_64HIto32
,
17411 getIReg(rs
), getIReg(rt
))));
17414 ILLEGAL_INSTRUCTON
;
17424 case 0x1A: /* DIV */
17425 switch (sa
& 0x3) {
17427 DIP("div r%u, r%u", rs
, rt
);
17429 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
) &&
17430 !VEX_MIPS_CPU_HAS_MIPS32R2(archinfo
->hwcaps
)) {
17435 t2
= newTemp(Ity_I64
);
17437 assign(t2
, binop(Iop_DivModS32to32
,
17438 mkNarrowTo32(ty
, getIReg(rs
)),
17439 mkNarrowTo32(ty
, getIReg(rt
))));
17441 putHI(mkWidenFrom32(ty
, unop(Iop_64HIto32
, mkexpr(t2
)), True
));
17442 putLO(mkWidenFrom32(ty
, unop(Iop_64to32
, mkexpr(t2
)), True
));
17444 t1
= newTemp(Ity_I64
);
17446 assign(t1
, binop(Iop_DivModS32to32
, getIReg(rs
), getIReg(rt
)));
17448 putHI(unop(Iop_64HIto32
, mkexpr(t1
)));
17449 putLO(unop(Iop_64to32
, mkexpr(t1
)));
17455 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
17456 DIP("div r%u, r%u, r%u", rs
, rt
, rd
);
17459 putIReg(rd
, unop(Iop_32Sto64
,
17461 unop(Iop_64to32
, getIReg(rs
)),
17462 unop(Iop_64to32
, getIReg(rt
)))));
17464 putIReg(rd
, binop(Iop_DivS32
, getIReg(rs
), getIReg(rt
)));
17473 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
17474 DIP("mod r%u, r%u, r%u", rs
, rt
, rd
);
17477 putIReg(rd
, unop(Iop_32Sto64
,
17479 binop(Iop_DivModS32to32
,
17480 unop(Iop_64to32
, getIReg(rs
)),
17481 unop(Iop_64to32
, getIReg(rt
))))));
17483 t1
= newTemp(Ity_I64
);
17485 assign(t1
, binop(Iop_DivModS32to32
, getIReg(rs
), getIReg(rt
)));
17486 putIReg(rd
, unop(Iop_64HIto32
, mkexpr(t1
)));
17497 case 0x1B: /* DIVU */
17498 switch (sa
& 0x3) {
17500 DIP("divu r%u, r%u", rs
, rt
);
17502 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
) &&
17503 !VEX_MIPS_CPU_HAS_MIPS32R2(archinfo
->hwcaps
)) {
17508 t1
= newTemp(Ity_I64
);
17510 assign(t1
, binop(Iop_DivModU32to32
,
17511 mkNarrowTo32(ty
, getIReg(rs
)),
17512 mkNarrowTo32(ty
, getIReg(rt
))));
17514 putHI(mkWidenFrom32(ty
, unop(Iop_64HIto32
, mkexpr(t1
)), True
));
17515 putLO(mkWidenFrom32(ty
, unop(Iop_64to32
, mkexpr(t1
)), True
));
17517 t1
= newTemp(Ity_I64
);
17519 assign(t1
, binop(Iop_DivModU32to32
, getIReg(rs
), getIReg(rt
)));
17520 putHI(unop(Iop_64HIto32
, mkexpr(t1
)));
17521 putLO(unop(Iop_64to32
, mkexpr(t1
)));
17527 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
17528 DIP("divu r%u, r%u, r%u", rs
, rt
, rd
);
17531 putIReg(rd
, unop(Iop_32Sto64
,
17533 unop(Iop_64to32
, getIReg(rs
)),
17534 unop(Iop_64to32
, getIReg(rt
)))));
17536 putIReg(rd
, binop(Iop_DivU32
, getIReg(rs
), getIReg(rt
)));
17547 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
17548 DIP("modu r%u, r%u, r%u", rs
, rt
, rd
);
17551 putIReg(rd
, unop(Iop_32Uto64
,
17553 binop(Iop_DivModU32to32
,
17554 unop(Iop_64to32
, getIReg(rs
)),
17555 unop(Iop_64to32
, getIReg(rt
))))));
17557 t1
= newTemp(Ity_I64
);
17559 assign(t1
, binop(Iop_DivModU32to32
, getIReg(rs
), getIReg(rt
)));
17560 putIReg(rd
, unop(Iop_64HIto32
, mkexpr(t1
)));
17571 case 0x1C: /* Doubleword Multiply - DMULT; MIPS64 */
17574 DIP("dmult r%u, r%u", rs
, rt
);
17576 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
) &&
17577 !VEX_MIPS_CPU_HAS_MIPS32R2(archinfo
->hwcaps
)) {
17581 t0
= newTemp(Ity_I128
);
17583 assign(t0
, binop(Iop_MullS64
, getIReg(rs
), getIReg(rt
)));
17585 putHI(unop(Iop_128HIto64
, mkexpr(t0
)));
17586 putLO(unop(Iop_128to64
, mkexpr(t0
)));
17590 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
17591 DIP("dmul r%u, r%u, r%u", rd
, rs
, rt
);
17592 putIReg(rd
, unop(Iop_128to64
,
17593 binop(Iop_MullS64
, getIReg(rs
), getIReg(rt
))));
17601 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
17602 DIP("dmuh r%u, r%u, r%u", rd
, rs
, rt
);
17603 putIReg(rd
, unop(Iop_128HIto64
,
17604 binop(Iop_MullS64
, getIReg(rs
), getIReg(rt
))));
17614 case 0x1D: /* Doubleword Multiply Unsigned - DMULTU; MIPS64 */
17617 DIP("dmultu r%u, r%u", rs
, rt
);
17619 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
) &&
17620 !VEX_MIPS_CPU_HAS_MIPS32R2(archinfo
->hwcaps
)) {
17624 t0
= newTemp(Ity_I128
);
17626 assign(t0
, binop(Iop_MullU64
, getIReg(rs
), getIReg(rt
)));
17628 putHI(unop(Iop_128HIto64
, mkexpr(t0
)));
17629 putLO(unop(Iop_128to64
, mkexpr(t0
)));
17632 case 2: /* DMULU */
17633 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
17634 DIP("dmulu r%u, r%u, r%u", rd
, rs
, rt
);
17635 putIReg(rd
, unop(Iop_128to64
,
17636 binop(Iop_MullU64
, getIReg(rs
), getIReg(rt
))));
17643 case 3: /* DMUHU */
17644 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
17645 DIP("dmuhu r%u, r%u, r%u", rd
, rs
, rt
);
17646 putIReg(rd
, unop(Iop_128HIto64
,
17647 binop(Iop_MullU64
, getIReg(rs
), getIReg(rt
))));
17657 case 0x1E: /* Doubleword Divide DDIV; MIPS64 */
17660 DIP("ddiv r%u, r%u", rs
, rt
);
17662 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
) &&
17663 !VEX_MIPS_CPU_HAS_MIPS32R2(archinfo
->hwcaps
)) {
17667 t1
= newTemp(Ity_I128
);
17669 assign(t1
, binop(Iop_DivModS64to64
, getIReg(rs
), getIReg(rt
)));
17671 putHI(unop(Iop_128HIto64
, mkexpr(t1
)));
17672 putLO(unop(Iop_128to64
, mkexpr(t1
)));
17675 case 2: /* DDIV r6 */
17676 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
17677 DIP("ddiv r%u, r%u, r%u", rs
, rt
, rd
);
17678 putIReg(rd
, unop(Iop_128to64
,
17679 binop(Iop_DivModS64to64
,
17680 getIReg(rs
), getIReg(rt
))));
17687 case 3: /* DMOD r6 */
17688 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
17689 DIP("dmod r%u, r%u, r%u", rs
, rt
, rd
);
17690 t2
= newTemp(Ity_I128
);
17691 assign(t2
, binop(Iop_DivModS64to64
, getIReg(rs
), getIReg(rt
)));
17692 putIReg(rd
, unop(Iop_128HIto64
, mkexpr(t2
)));
17702 case 0x1F: /* Doubleword Divide Unsigned DDIVU; MIPS64 check this */
17705 DIP("ddivu r%u, r%u", rs
, rt
);
17707 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
) &&
17708 !VEX_MIPS_CPU_HAS_MIPS32R2(archinfo
->hwcaps
)) {
17712 t1
= newTemp(Ity_I128
);
17714 assign(t1
, binop(Iop_DivModU64to64
, getIReg(rs
), getIReg(rt
)));
17716 putHI(unop(Iop_128HIto64
, mkexpr(t1
)));
17717 putLO(unop(Iop_128to64
, mkexpr(t1
)));
17721 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
17722 DIP("ddivu r%u, r%u, r%u", rs
, rt
, rd
);
17723 putIReg(rd
, unop(Iop_128to64
, binop(Iop_DivModU64to64
,
17724 getIReg(rs
), getIReg(rt
))));
17732 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
17733 DIP("dmodu r%u, r%u, r%u", rs
, rt
, rd
);
17734 putIReg(rd
, unop(Iop_128HIto64
, binop(Iop_DivModU64to64
,
17735 getIReg(rs
), getIReg(rt
))));
17745 case 0x20: { /* ADD */
17746 DIP("add r%u, r%u, r%u", rd
, rs
, rt
);
17747 IRTemp tmpRs32
= newTemp(Ity_I32
);
17748 IRTemp tmpRt32
= newTemp(Ity_I32
);
17750 assign(tmpRs32
, mkNarrowTo32(ty
, getIReg(rs
)));
17751 assign(tmpRt32
, mkNarrowTo32(ty
, getIReg(rt
)));
17753 t0
= newTemp(Ity_I32
);
17754 t1
= newTemp(Ity_I32
);
17755 t2
= newTemp(Ity_I32
);
17756 t3
= newTemp(Ity_I32
);
17757 t4
= newTemp(Ity_I32
);
17758 /* dst = src0 + src1
17759 if (sign(src0 ) != sign(src1 ))
17761 if (sign(dst) == sign(src0 ))
17763 we have overflow! */
17765 assign(t0
, binop(Iop_Add32
, mkexpr(tmpRs32
), mkexpr(tmpRt32
)));
17766 assign(t1
, binop(Iop_Xor32
, mkexpr(tmpRs32
), mkexpr(tmpRt32
)));
17767 assign(t2
, unop(Iop_1Uto32
,
17769 binop(Iop_And32
, mkexpr(t1
), mkU32(0x80000000)),
17770 mkU32(0x80000000))));
17772 assign(t3
, binop(Iop_Xor32
, mkexpr(t0
), mkexpr(tmpRs32
)));
17773 assign(t4
, unop(Iop_1Uto32
,
17775 binop(Iop_And32
, mkexpr(t3
), mkU32(0x80000000)),
17776 mkU32(0x80000000))));
17778 stmt(IRStmt_Exit(binop(Iop_CmpEQ32
,
17779 binop(Iop_Or32
, mkexpr(t2
), mkexpr(t4
)),
17782 mode64
? IRConst_U64(guest_PC_curr_instr
+ 4) :
17783 IRConst_U32(guest_PC_curr_instr
+ 4),
17786 putIReg(rd
, mkWidenFrom32(ty
, mkexpr(t0
), True
));
17790 case 0x21: /* ADDU */
17791 DIP("addu r%u, r%u, r%u", rd
, rs
, rt
);
17794 ALU_PATTERN64(Iop_Add32
);
17796 ALU_PATTERN(Iop_Add32
);
17801 case 0x22: { /* SUB */
17802 DIP("sub r%u, r%u, r%u", rd
, rs
, rt
);
17803 IRTemp tmpRs32
= newTemp(Ity_I32
);
17804 IRTemp tmpRt32
= newTemp(Ity_I32
);
17806 assign(tmpRs32
, mkNarrowTo32(ty
, getIReg(rs
)));
17807 assign(tmpRt32
, mkNarrowTo32(ty
, getIReg(rt
)));
17808 t0
= newTemp(Ity_I32
);
17809 t1
= newTemp(Ity_I32
);
17810 t2
= newTemp(Ity_I32
);
17811 t3
= newTemp(Ity_I32
);
17812 t4
= newTemp(Ity_I32
);
17813 t5
= newTemp(Ity_I32
);
17814 /* dst = src0 + (-1 * src1)
17815 if(sign(src0 ) != sign((-1 * src1) ))
17817 if(sign(dst) == sign(src0 ))
17819 we have overflow! */
17821 assign(t5
, binop(Iop_Mul32
, mkexpr(tmpRt32
), mkU32(-1)));
17822 assign(t0
, binop(Iop_Add32
, mkexpr(tmpRs32
), mkexpr(t5
)));
17823 assign(t1
, binop(Iop_Xor32
, mkexpr(tmpRs32
), mkexpr(t5
)));
17824 assign(t2
, unop(Iop_1Sto32
, binop(Iop_CmpEQ32
, binop(Iop_And32
,
17825 mkexpr(t1
), mkU32(0x80000000)), mkU32(0x80000000))));
17827 assign(t3
, binop(Iop_Xor32
, mkexpr(t0
), mkexpr(tmpRs32
)));
17828 assign(t4
, unop(Iop_1Sto32
, binop(Iop_CmpNE32
, binop(Iop_And32
,
17829 mkexpr(t3
), mkU32(0x80000000)), mkU32(0x80000000))));
17831 stmt(IRStmt_Exit(binop(Iop_CmpEQ32
, binop(Iop_Or32
, mkexpr(t2
),
17832 mkexpr(t4
)), mkU32(0)), Ijk_SigFPE_IntOvf
,
17833 mode64
? IRConst_U64(guest_PC_curr_instr
+ 4) :
17834 IRConst_U32(guest_PC_curr_instr
+ 4),
17837 putIReg(rd
, mkWidenFrom32(ty
, mkexpr(t0
), True
));
17841 case 0x23: /* SUBU */
17842 DIP("subu r%u, r%u, r%u", rd
, rs
, rt
);
17845 ALU_PATTERN64(Iop_Sub32
);
17847 ALU_PATTERN(Iop_Sub32
);
17852 case 0x24: /* AND */
17853 DIP("and r%u, r%u, r%u", rd
, rs
, rt
);
17856 ALU_PATTERN(Iop_And64
);
17858 ALU_PATTERN(Iop_And32
);
17863 case 0x25: /* OR */
17864 DIP("or r%u, r%u, r%u", rd
, rs
, rt
);
17867 ALU_PATTERN(Iop_Or64
);
17869 ALU_PATTERN(Iop_Or32
);
17874 case 0x26: /* XOR */
17875 DIP("xor r%u, r%u, r%u", rd
, rs
, rt
);
17878 ALU_PATTERN(Iop_Xor64
);
17880 ALU_PATTERN(Iop_Xor32
);
17885 case 0x27: /* NOR */
17886 DIP("nor r%u, r%u, r%u", rd
, rs
, rt
);
17889 putIReg(rd
, unop(Iop_Not64
, binop(Iop_Or64
, getIReg(rs
),
17892 putIReg(rd
, unop(Iop_Not32
, binop(Iop_Or32
, getIReg(rs
),
17897 case 0x2A: /* SLT */
17898 DIP("slt r%u, r%u, r%u", rd
, rs
, rt
);
17901 putIReg(rd
, unop(Iop_1Uto64
, binop(Iop_CmpLT64S
, getIReg(rs
),
17904 putIReg(rd
, unop(Iop_1Uto32
, binop(Iop_CmpLT32S
, getIReg(rs
),
17909 case 0x2B: /* SLTU */
17910 DIP("sltu r%u, r%u, r%u", rd
, rs
, rt
);
17913 putIReg(rd
, unop(Iop_1Uto64
, binop(Iop_CmpLT64U
, getIReg(rs
),
17916 putIReg(rd
, unop(Iop_1Uto32
, binop(Iop_CmpLT32U
, getIReg(rs
),
17921 case 0x2C: { /* Doubleword Add - DADD; MIPS64 */
17922 DIP("dadd r%u, r%u, r%u", rd
, rs
, rt
);
17923 IRTemp tmpRs64
= newTemp(Ity_I64
);
17924 IRTemp tmpRt64
= newTemp(Ity_I64
);
17926 assign(tmpRs64
, getIReg(rs
));
17927 assign(tmpRt64
, getIReg(rt
));
17929 t0
= newTemp(Ity_I64
);
17930 t1
= newTemp(Ity_I64
);
17931 t2
= newTemp(Ity_I64
);
17932 t3
= newTemp(Ity_I64
);
17933 t4
= newTemp(Ity_I64
);
17934 /* dst = src0 + src1
17935 if(sign(src0 ) != sign(src1 ))
17937 if(sign(dst) == sign(src0 ))
17939 we have overflow! */
17941 assign(t0
, binop(Iop_Add64
, mkexpr(tmpRs64
), mkexpr(tmpRt64
)));
17942 assign(t1
, binop(Iop_Xor64
, mkexpr(tmpRs64
), mkexpr(tmpRt64
)));
17943 assign(t2
, unop(Iop_1Uto64
,
17945 binop(Iop_And64
, mkexpr(t1
),
17946 mkU64(0x8000000000000000ULL
)),
17947 mkU64(0x8000000000000000ULL
))));
17949 assign(t3
, binop(Iop_Xor64
, mkexpr(t0
), mkexpr(tmpRs64
)));
17950 assign(t4
, unop(Iop_1Uto64
,
17952 binop(Iop_And64
, mkexpr(t3
),
17953 mkU64(0x8000000000000000ULL
)),
17954 mkU64(0x8000000000000000ULL
))));
17956 stmt(IRStmt_Exit(binop(Iop_CmpEQ64
,
17957 binop(Iop_Or64
, mkexpr(t2
), mkexpr(t4
)),
17960 IRConst_U64(guest_PC_curr_instr
+ 4),
17963 putIReg(rd
, mkexpr(t0
));
17967 case 0x2D: /* Doubleword Add Unsigned - DADDU; MIPS64 */
17968 DIP("daddu r%u, r%u, r%u", rd
, rs
, rt
);
17969 ALU_PATTERN(Iop_Add64
);
17972 case 0x2E: { /* Doubleword Subtract - DSUB; MIPS64 */
17973 DIP("dsub r%u, r%u, r%u", rd
, rs
, rt
);
17974 IRTemp tmpRs64
= newTemp(Ity_I64
);
17975 IRTemp tmpRt64
= newTemp(Ity_I64
);
17977 assign(tmpRs64
, getIReg(rs
));
17978 assign(tmpRt64
, getIReg(rt
));
17979 t0
= newTemp(Ity_I64
);
17980 t1
= newTemp(Ity_I64
);
17981 t2
= newTemp(Ity_I64
);
17982 t3
= newTemp(Ity_I64
);
17983 t4
= newTemp(Ity_I64
);
17984 t5
= newTemp(Ity_I64
);
17985 /* dst = src0 + (-1 * src1)
17986 if(sign(src0 ) != sign((-1 * src1) ))
17988 if(sign(dst) == sign(src0 ))
17990 we have overflow! */
17992 assign(t5
, binop(Iop_Mul64
,
17994 mkU64(0xffffffffffffffffULL
)));
17995 assign(t0
, binop(Iop_Add64
, mkexpr(tmpRs64
), mkexpr(t5
)));
17996 assign(t1
, binop(Iop_Xor64
, mkexpr(tmpRs64
), mkexpr(t5
)));
17997 assign(t2
, unop(Iop_1Sto64
,
18001 mkU64(0x8000000000000000ULL
)),
18002 mkU64(0x8000000000000000ULL
))));
18004 assign(t3
, binop(Iop_Xor64
, mkexpr(t0
), mkexpr(tmpRs64
)));
18005 assign(t4
, unop(Iop_1Sto64
,
18009 mkU64(0x8000000000000000ULL
)),
18010 mkU64(0x8000000000000000ULL
))));
18012 stmt(IRStmt_Exit(binop(Iop_CmpEQ64
, binop(Iop_Or64
, mkexpr(t2
),
18013 mkexpr(t4
)), mkU64(0)), Ijk_SigFPE_IntOvf
,
18014 IRConst_U64(guest_PC_curr_instr
+ 4),
18017 putIReg(rd
, binop(Iop_Sub64
, getIReg(rs
), getIReg(rt
)));
18021 case 0x2F: /* Doubleword Subtract Unsigned - DSUBU; MIPS64 */
18022 DIP("dsub r%u, r%u,r%u", rd
, rt
, rt
);
18023 ALU_PATTERN(Iop_Sub64
);
18026 case 0x30: { /* TGE */
18027 DIP("tge r%u, r%u %u", rs
, rt
, trap_code
);
18030 if (trap_code
== 7)
18031 stmt (IRStmt_Exit (unop (Iop_Not1
,
18032 binop (Iop_CmpLT64S
,
18036 IRConst_U64(guest_PC_curr_instr
+ 4),
18038 else if (trap_code
== 6)
18039 stmt (IRStmt_Exit (unop (Iop_Not1
,
18040 binop (Iop_CmpLT64S
,
18044 IRConst_U64(guest_PC_curr_instr
+ 4),
18047 stmt (IRStmt_Exit (unop (Iop_Not1
,
18048 binop (Iop_CmpLT64S
,
18052 IRConst_U64(guest_PC_curr_instr
+ 4),
18055 if (trap_code
== 7)
18056 stmt (IRStmt_Exit (unop (Iop_Not1
,
18057 binop (Iop_CmpLT32S
,
18061 IRConst_U32(guest_PC_curr_instr
+ 4),
18063 else if (trap_code
== 6)
18064 stmt (IRStmt_Exit (unop (Iop_Not1
,
18065 binop (Iop_CmpLT32S
,
18069 IRConst_U32(guest_PC_curr_instr
+ 4),
18072 stmt (IRStmt_Exit (unop (Iop_Not1
,
18073 binop (Iop_CmpLT32S
,
18077 IRConst_U32(guest_PC_curr_instr
+ 4),
18084 case 0x31: { /* TGEU */
18085 DIP("tgeu r%u, r%u %u", rs
, rt
, trap_code
);
18088 if (trap_code
== 7)
18089 stmt (IRStmt_Exit (unop (Iop_Not1
,
18090 binop (Iop_CmpLT64U
,
18094 IRConst_U64(guest_PC_curr_instr
+ 4),
18096 else if (trap_code
== 6)
18097 stmt (IRStmt_Exit (unop (Iop_Not1
,
18098 binop (Iop_CmpLT64U
,
18102 IRConst_U64(guest_PC_curr_instr
+ 4),
18105 stmt (IRStmt_Exit (unop (Iop_Not1
,
18106 binop (Iop_CmpLT64U
,
18110 IRConst_U64(guest_PC_curr_instr
+ 4),
18113 if (trap_code
== 7)
18114 stmt (IRStmt_Exit (unop (Iop_Not1
,
18115 binop (Iop_CmpLT32U
,
18119 IRConst_U32(guest_PC_curr_instr
+ 4),
18121 else if (trap_code
== 6)
18122 stmt (IRStmt_Exit (unop (Iop_Not1
,
18123 binop (Iop_CmpLT32U
,
18127 IRConst_U32(guest_PC_curr_instr
+ 4),
18130 stmt (IRStmt_Exit (unop (Iop_Not1
,
18131 binop (Iop_CmpLT32U
,
18135 IRConst_U32(guest_PC_curr_instr
+ 4),
18142 case 0x32: { /* TLT */
18143 DIP("tlt r%u, r%u %u", rs
, rt
, trap_code
);
18146 if (trap_code
== 7)
18147 stmt(IRStmt_Exit(binop(Iop_CmpLT64S
, getIReg(rs
),
18148 getIReg(rt
)), Ijk_SigFPE_IntDiv
,
18149 IRConst_U64(guest_PC_curr_instr
+ 4),
18151 else if (trap_code
== 6)
18152 stmt(IRStmt_Exit(binop(Iop_CmpLT64S
, getIReg(rs
),
18153 getIReg(rt
)), Ijk_SigFPE_IntOvf
,
18154 IRConst_U64(guest_PC_curr_instr
+ 4),
18157 stmt(IRStmt_Exit(binop(Iop_CmpLT64S
, getIReg(rs
),
18158 getIReg(rt
)), Ijk_SigTRAP
,
18159 IRConst_U64(guest_PC_curr_instr
+ 4),
18162 if (trap_code
== 7)
18163 stmt(IRStmt_Exit(binop(Iop_CmpLT32S
, getIReg(rs
),
18164 getIReg(rt
)), Ijk_SigFPE_IntDiv
,
18165 IRConst_U32(guest_PC_curr_instr
+ 4),
18167 else if (trap_code
== 6)
18168 stmt(IRStmt_Exit(binop(Iop_CmpLT32S
, getIReg(rs
),
18169 getIReg(rt
)), Ijk_SigFPE_IntOvf
,
18170 IRConst_U32(guest_PC_curr_instr
+ 4),
18173 stmt(IRStmt_Exit(binop(Iop_CmpLT32S
, getIReg(rs
),
18174 getIReg(rt
)), Ijk_SigTRAP
,
18175 IRConst_U32(guest_PC_curr_instr
+ 4),
18182 case 0x33: { /* TLTU */
18183 DIP("tltu r%u, r%u %u", rs
, rt
, trap_code
);
18186 if (trap_code
== 7)
18187 stmt(IRStmt_Exit(binop(Iop_CmpLT64U
, getIReg(rs
),
18188 getIReg(rt
)), Ijk_SigFPE_IntDiv
,
18189 IRConst_U64(guest_PC_curr_instr
+ 4),
18191 else if (trap_code
== 6)
18192 stmt(IRStmt_Exit(binop(Iop_CmpLT64U
, getIReg(rs
),
18193 getIReg(rt
)), Ijk_SigFPE_IntOvf
,
18194 IRConst_U64(guest_PC_curr_instr
+ 4),
18197 stmt(IRStmt_Exit(binop(Iop_CmpLT64U
, getIReg(rs
),
18198 getIReg(rt
)), Ijk_SigTRAP
,
18199 IRConst_U64(guest_PC_curr_instr
+ 4),
18202 if (trap_code
== 7)
18203 stmt(IRStmt_Exit(binop(Iop_CmpLT32U
, getIReg(rs
),
18204 getIReg(rt
)), Ijk_SigFPE_IntDiv
,
18205 IRConst_U32(guest_PC_curr_instr
+ 4),
18207 else if (trap_code
== 6)
18208 stmt(IRStmt_Exit(binop(Iop_CmpLT32U
, getIReg(rs
),
18209 getIReg(rt
)), Ijk_SigFPE_IntOvf
,
18210 IRConst_U32(guest_PC_curr_instr
+ 4),
18213 stmt(IRStmt_Exit(binop(Iop_CmpLT32U
, getIReg(rs
),
18214 getIReg (rt
)), Ijk_SigTRAP
,
18215 IRConst_U32(guest_PC_curr_instr
+ 4),
18222 case 0x34: { /* TEQ */
18223 DIP("teq r%u, r%u, %u", rs
, rt
, trap_code
);
18226 if (trap_code
== 7)
18227 stmt(IRStmt_Exit(binop(Iop_CmpEQ64
, getIReg(rs
),
18228 getIReg(rt
)), Ijk_SigFPE_IntDiv
,
18229 IRConst_U64(guest_PC_curr_instr
+ 4),
18231 else if (trap_code
== 6)
18232 stmt(IRStmt_Exit(binop(Iop_CmpEQ64
, getIReg(rs
),
18233 getIReg(rt
)), Ijk_SigFPE_IntOvf
,
18234 IRConst_U64(guest_PC_curr_instr
+ 4),
18237 stmt(IRStmt_Exit(binop(Iop_CmpEQ64
, getIReg(rs
),
18238 getIReg(rt
)), Ijk_SigTRAP
,
18239 IRConst_U64(guest_PC_curr_instr
+ 4),
18242 if (trap_code
== 7)
18243 stmt(IRStmt_Exit(binop(Iop_CmpEQ32
, getIReg(rs
),
18244 getIReg(rt
)), Ijk_SigFPE_IntDiv
,
18245 IRConst_U32(guest_PC_curr_instr
+ 4),
18247 else if (trap_code
== 6)
18248 stmt(IRStmt_Exit(binop(Iop_CmpEQ32
, getIReg(rs
),
18249 getIReg(rt
)), Ijk_SigFPE_IntOvf
,
18250 IRConst_U32(guest_PC_curr_instr
+ 4),
18253 stmt(IRStmt_Exit(binop(Iop_CmpEQ32
, getIReg(rs
),
18254 getIReg(rt
)), Ijk_SigTRAP
,
18255 IRConst_U32(guest_PC_curr_instr
+ 4),
18262 case 0x35: { /* SELEQZ */
18263 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
18264 DIP("seleqz r%u, r%u, r%u", rd
, rs
, rt
);
18267 putIReg(rd
, binop(Iop_And64
,
18269 unop(Iop_CmpwNEZ64
, getIReg(rt
))),
18272 putIReg(rd
, binop(Iop_And32
,
18274 unop(Iop_CmpwNEZ32
, getIReg(rt
))),
18278 ILLEGAL_INSTRUCTON
;
18284 case 0x36: { /* TNE */
18285 DIP("tne r%u, r%u %u", rs
, rt
, trap_code
);
18288 if (trap_code
== 7)
18289 stmt(IRStmt_Exit(binop(Iop_CmpNE64
, getIReg(rs
),
18290 getIReg(rt
)), Ijk_SigFPE_IntDiv
,
18291 IRConst_U64(guest_PC_curr_instr
+ 4),
18293 else if (trap_code
== 6)
18294 stmt(IRStmt_Exit(binop(Iop_CmpNE64
, getIReg(rs
),
18295 getIReg(rt
)), Ijk_SigFPE_IntOvf
,
18296 IRConst_U64(guest_PC_curr_instr
+ 4),
18299 stmt(IRStmt_Exit(binop(Iop_CmpNE64
, getIReg(rs
),
18300 getIReg(rt
)), Ijk_SigTRAP
,
18301 IRConst_U64(guest_PC_curr_instr
+ 4),
18304 if (trap_code
== 7)
18305 stmt(IRStmt_Exit(binop(Iop_CmpNE32
, getIReg(rs
),
18306 getIReg(rt
)), Ijk_SigFPE_IntDiv
,
18307 IRConst_U32(guest_PC_curr_instr
+ 4),
18309 else if (trap_code
== 6)
18310 stmt(IRStmt_Exit(binop(Iop_CmpNE32
, getIReg(rs
),
18311 getIReg(rt
)), Ijk_SigFPE_IntOvf
,
18312 IRConst_U32(guest_PC_curr_instr
+ 4),
18315 stmt(IRStmt_Exit(binop(Iop_CmpNE32
, getIReg(rs
),
18316 getIReg(rt
)), Ijk_SigTRAP
,
18317 IRConst_U32(guest_PC_curr_instr
+ 4),
18324 case 0x37: { /* SELNEZ */
18325 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
18326 DIP("selnez r%u, r%u, r%u", rd
, rs
, rt
);
18329 putIReg(rd
, binop(Iop_And64
,
18330 unop(Iop_CmpwNEZ64
, getIReg(rt
)), getIReg(rs
)));
18332 putIReg(rd
, binop(Iop_And32
,
18333 unop(Iop_CmpwNEZ32
, getIReg(rt
)), getIReg(rs
)));
18336 ILLEGAL_INSTRUCTON
;
18344 case 0x17: /* DSLLV, DROTRV:DSRLV, DSRAV */
18347 case 0x3B: /* DSLL, DROTL:DSRL, DSRA */
18350 case 0x3F: /* DSLL32, DROTR32:DSRL32, DSRA32 */
18351 if (dis_instr_shrt(cins
))
18363 static UInt
disInstr_MIPS_WRK_Special2(UInt cins
, const VexArchInfo
* archinfo
,
18364 const VexAbiInfo
* abiinfo
, DisResult
* dres
,
18365 IRStmt
** bstmt
, IRExpr
** lastn
)
18367 IRTemp t0
, t1
= 0, t2
, t3
, t4
, t5
, t6
;
18368 UInt rs
, rt
, rd
, function
;
18369 /* Additional variables for instruction fields in DSP ASE insructions */
18375 function
= get_function(cins
);
18376 IRType ty
= mode64
? Ity_I64
: Ity_I32
;
18378 ac
= get_acNo(cins
);
18380 switch (function
) {
18381 /* Cavium Specific instructions */
18384 case 0x33: /* DMUL, CINS , CINS32 */
18387 case 0x2B: /* EXT, EXT32, SNE */
18389 /* CVM Compare Instructions */
18392 case 0x2F: /* SEQ, SEQI, SNEI */
18394 /* CPU Load, Store, Memory, and Control Instructions */
18396 case 0x19: /* SAA, SAAD */
18397 case 0x1F: /* LAA, LAAD, LAI, LAID */
18400 case 0x2D: /* BADDU, POP, DPOP */
18401 if (VEX_MIPS_COMP_ID(archinfo
->hwcaps
) == VEX_PRID_COMP_CAVIUM
) {
18402 if (dis_instr_CVM(cins
))
18412 case 0x02: { /* MUL */
18413 DIP("mul r%u, r%u, r%u", rd
, rs
, rt
);
18416 IRTemp tmpRs32
= newTemp(Ity_I32
);
18417 IRTemp tmpRt32
= newTemp(Ity_I32
);
18418 IRTemp tmpRes
= newTemp(Ity_I32
);
18420 assign(tmpRs32
, mkNarrowTo32(ty
, getIReg(rs
)));
18421 assign(tmpRt32
, mkNarrowTo32(ty
, getIReg(rt
)));
18422 assign(tmpRes
, binop(Iop_Mul32
,
18423 mkexpr(tmpRs32
), mkexpr(tmpRt32
)));
18424 putIReg(rd
, mkWidenFrom32(ty
, mkexpr(tmpRes
), True
));
18426 putIReg(rd
, binop(Iop_Mul32
, getIReg(rs
), getIReg(rt
)));
18431 case 0x00: { /* MADD */
18433 DIP("madd r%u, r%u", rs
, rt
);
18434 t1
= newTemp(Ity_I32
);
18435 t2
= newTemp(Ity_I32
);
18436 t3
= newTemp(Ity_I64
);
18437 t4
= newTemp(Ity_I64
);
18438 t5
= newTemp(Ity_I64
);
18439 t6
= newTemp(Ity_I32
);
18441 assign(t1
, mkNarrowTo32(ty
, getHI()));
18442 assign(t2
, mkNarrowTo32(ty
, getLO()));
18444 assign(t3
, binop(Iop_MullS32
, mkNarrowTo32(ty
, getIReg(rs
)),
18445 mkNarrowTo32(ty
, getIReg(rt
))));
18447 assign(t4
, binop(Iop_32HLto64
, mkexpr(t1
), mkexpr(t2
)));
18448 assign(t5
, binop(Iop_Add64
, mkexpr(t3
), mkexpr(t4
)));
18450 putHI(mkWidenFrom32(ty
, unop(Iop_64HIto32
, mkexpr(t5
)), True
));
18451 putLO(mkWidenFrom32(ty
, unop(Iop_64to32
, mkexpr(t5
)), True
));
18453 if ( (1 <= ac
) && ( 3 >= ac
) ) {
18454 if (VEX_MIPS_PROC_DSP(archinfo
->hwcaps
)) {
18455 /* If DSP is present -> DSP ASE MADD */
18456 UInt retVal
= disDSPInstr_MIPS_WRK ( cins
);
18458 if (0 != retVal
) {
18467 DIP("madd r%u, r%u", rs
, rt
);
18468 t1
= newTemp(Ity_I32
);
18469 t2
= newTemp(Ity_I32
);
18470 t3
= newTemp(Ity_I64
);
18471 t4
= newTemp(Ity_I32
);
18472 t5
= newTemp(Ity_I32
);
18473 t6
= newTemp(Ity_I32
);
18475 assign(t1
, getHI());
18476 assign(t2
, getLO());
18478 assign(t3
, binop(Iop_MullS32
, getIReg(rs
), getIReg(rt
)));
18480 assign(t4
, binop(Iop_Add32
, mkexpr(t2
), unop(Iop_64to32
,
18483 assign(t5
, unop(Iop_1Uto32
, binop(Iop_CmpLT32U
, mkexpr(t4
),
18484 unop(Iop_64to32
, mkexpr(t3
)))));
18485 assign(t6
, binop(Iop_Add32
, mkexpr(t5
), mkexpr(t1
)));
18487 putHI(binop(Iop_Add32
, mkexpr(t6
), unop(Iop_64HIto32
,
18497 case 0x01: { /* MADDU */
18499 DIP("maddu r%u, r%u", rs
, rt
);
18500 t1
= newTemp(Ity_I32
);
18501 t2
= newTemp(Ity_I32
);
18502 t3
= newTemp(Ity_I64
);
18503 t4
= newTemp(Ity_I64
);
18504 t5
= newTemp(Ity_I64
);
18505 t6
= newTemp(Ity_I32
);
18507 assign(t1
, mkNarrowTo32(ty
, getHI()));
18508 assign(t2
, mkNarrowTo32(ty
, getLO()));
18510 assign(t3
, binop(Iop_MullU32
, mkNarrowTo32(ty
, getIReg(rs
)),
18511 mkNarrowTo32(ty
, getIReg(rt
))));
18513 assign(t4
, binop(Iop_32HLto64
, mkexpr(t1
), mkexpr(t2
)));
18514 assign(t5
, binop(Iop_Add64
, mkexpr(t3
), mkexpr(t4
)));
18516 putHI(mkWidenFrom32(ty
, unop(Iop_64HIto32
, mkexpr(t5
)), True
));
18517 putLO(mkWidenFrom32(ty
, unop(Iop_64to32
, mkexpr(t5
)), True
));
18519 if ( (1 <= ac
) && ( 3 >= ac
) ) {
18520 if (VEX_MIPS_PROC_DSP(archinfo
->hwcaps
)) {
18521 /* If DSP is present -> DSP ASE MADDU */
18522 UInt retVal
= disDSPInstr_MIPS_WRK ( cins
);
18524 if (0 != retVal
) {
18533 DIP("maddu r%u, r%u", rs
, rt
);
18534 t1
= newTemp(Ity_I32
);
18535 t2
= newTemp(Ity_I32
);
18536 t3
= newTemp(Ity_I64
);
18537 t4
= newTemp(Ity_I32
);
18538 t5
= newTemp(Ity_I32
);
18539 t6
= newTemp(Ity_I32
);
18541 assign(t1
, getHI());
18542 assign(t2
, getLO());
18544 assign(t3
, binop(Iop_MullU32
, getIReg(rs
), getIReg(rt
)));
18546 assign(t4
, binop(Iop_Add32
, mkexpr(t2
), unop(Iop_64to32
,
18548 assign(t5
, unop(Iop_1Uto32
, binop(Iop_CmpLT32U
, mkexpr(t4
),
18549 unop(Iop_64to32
, mkexpr(t3
)))));
18550 assign(t6
, binop(Iop_Add32
, mkexpr(t5
), mkexpr(t1
)));
18552 putHI(binop(Iop_Add32
, mkexpr(t6
), unop(Iop_64HIto32
,
18562 case 0x04: { /* MSUB */
18564 DIP("msub r%u, r%u", rs
, rt
);
18565 t1
= newTemp(Ity_I32
);
18566 t2
= newTemp(Ity_I32
);
18567 t3
= newTemp(Ity_I64
);
18568 t4
= newTemp(Ity_I64
);
18569 t5
= newTemp(Ity_I64
);
18570 t6
= newTemp(Ity_I32
);
18572 assign(t1
, mkNarrowTo32(ty
, getHI()));
18573 assign(t2
, mkNarrowTo32(ty
, getLO()));
18575 assign(t3
, binop(Iop_MullS32
, mkNarrowTo32(ty
, getIReg(rs
)),
18576 mkNarrowTo32(ty
, getIReg(rt
))));
18578 assign(t4
, binop(Iop_32HLto64
, mkexpr(t1
), mkexpr(t2
)));
18579 assign(t5
, binop(Iop_Sub64
, mkexpr(t4
), mkexpr(t3
)));
18581 putHI(mkWidenFrom32(ty
, unop(Iop_64HIto32
, mkexpr(t5
)), True
));
18582 putLO(mkWidenFrom32(ty
, unop(Iop_64to32
, mkexpr(t5
)), True
));
18584 if ( (1 <= ac
) && ( 3 >= ac
) ) {
18585 if (VEX_MIPS_PROC_DSP(archinfo
->hwcaps
)) {
18586 /* If DSP is present -> DSP ASE MSUB */
18587 UInt retVal
= disDSPInstr_MIPS_WRK ( cins
);
18589 if (0 != retVal
) {
18598 DIP("msub r%u, r%u", rs
, rt
);
18599 t1
= newTemp(Ity_I32
);
18600 t2
= newTemp(Ity_I32
);
18601 t3
= newTemp(Ity_I64
);
18602 t4
= newTemp(Ity_I32
);
18603 t5
= newTemp(Ity_I1
);
18604 t6
= newTemp(Ity_I32
);
18606 assign(t1
, getHI());
18607 assign(t2
, getLO());
18609 assign(t3
, binop(Iop_MullS32
, getIReg(rs
), getIReg(rt
)));
18610 assign(t4
, unop(Iop_64to32
, mkexpr(t3
))); /* new lo */
18612 /* if lo<lo(mul) hi = hi - 1 */
18613 assign(t5
, binop(Iop_CmpLT32U
,
18617 assign(t6
, IRExpr_ITE(mkexpr(t5
),
18618 binop(Iop_Sub32
, mkexpr(t1
), mkU32(0x1)),
18621 putHI(binop(Iop_Sub32
, mkexpr(t6
), unop(Iop_64HIto32
,
18623 putLO(binop(Iop_Sub32
, mkexpr(t2
), mkexpr(t4
)));
18631 case 0x05: { /* MSUBU */
18633 DIP("msubu r%u, r%u", rs
, rt
);
18634 t1
= newTemp(Ity_I32
);
18635 t2
= newTemp(Ity_I32
);
18636 t3
= newTemp(Ity_I64
);
18637 t4
= newTemp(Ity_I64
);
18638 t5
= newTemp(Ity_I64
);
18639 t6
= newTemp(Ity_I32
);
18641 assign(t1
, mkNarrowTo32(ty
, getHI()));
18642 assign(t2
, mkNarrowTo32(ty
, getLO()));
18644 assign(t3
, binop(Iop_MullU32
, mkNarrowTo32(ty
, getIReg(rs
)),
18645 mkNarrowTo32(ty
, getIReg(rt
))));
18647 assign(t4
, binop(Iop_32HLto64
, mkexpr(t1
), mkexpr(t2
)));
18648 assign(t5
, binop(Iop_Sub64
, mkexpr(t4
), mkexpr(t3
)));
18650 putHI(mkWidenFrom32(ty
, unop(Iop_64HIto32
, mkexpr(t5
)), True
));
18651 putLO(mkWidenFrom32(ty
, unop(Iop_64to32
, mkexpr(t5
)), True
));
18653 if ( (1 <= ac
) && ( 3 >= ac
) ) {
18654 if (VEX_MIPS_PROC_DSP(archinfo
->hwcaps
)) {
18655 /* If DSP is present -> DSP ASE MSUBU */
18656 UInt retVal
= disDSPInstr_MIPS_WRK ( cins
);
18658 if (0 != retVal
) {
18667 DIP("msubu r%u, r%u", rs
, rt
);
18668 t1
= newTemp(Ity_I32
);
18669 t2
= newTemp(Ity_I32
);
18670 t3
= newTemp(Ity_I64
);
18671 t4
= newTemp(Ity_I32
);
18672 t5
= newTemp(Ity_I1
);
18673 t6
= newTemp(Ity_I32
);
18675 assign(t1
, getHI());
18676 assign(t2
, getLO());
18678 assign(t3
, binop(Iop_MullU32
, getIReg(rs
), getIReg(rt
)));
18679 assign(t4
, unop(Iop_64to32
, mkexpr(t3
))); /* new lo */
18681 /* if lo<lo(mul) hi = hi - 1 */
18682 assign(t5
, binop(Iop_CmpLT32U
,
18686 assign(t6
, IRExpr_ITE(mkexpr(t5
),
18692 putHI(binop(Iop_Sub32
, mkexpr(t6
), unop(Iop_64HIto32
,
18694 putLO(binop(Iop_Sub32
, mkexpr(t2
), mkexpr(t4
)));
18702 case 0x6: /* dmul MIPS64 - Netlogic */
18703 DIP("dmul r%u, r%u, r%u", rd
, rs
, rt
);
18704 t0
= newTemp(Ity_I128
);
18706 assign(t0
, binop(Iop_MullU64
, getIReg(rs
), getIReg(rt
)));
18708 putIReg(rd
, unop(Iop_128to64
, mkexpr(t0
)));
18711 case 0x10: /* LDADDW - Swap Word - Netlogic */
18712 DIP("ldaddw r%u, r%u", rt
, rs
);
18713 t0
= newTemp(Ity_I32
);
18714 t1
= newTemp(Ity_I32
);
18715 t2
= newTemp(Ity_I32
);
18716 t3
= newTemp(Ity_I64
);
18717 t4
= newTemp(Ity_I32
);
18718 t5
= newTemp(Ity_I32
);
18719 t6
= newTemp(Ity_I32
);
18722 assign(t0
, mkNarrowTo32(ty
, getIReg(rt
)));
18724 /* GPR[rt] = memory[base]; */
18725 assign(t1
, load(Ity_I32
, getIReg(rs
)));
18726 putIReg(rt
, mkWidenFrom32(ty
, mkexpr(t1
), True
));
18728 /* memory[base] = memory[base] + v; */
18729 store(getIReg(rs
), binop(Iop_Add32
, mkexpr(t0
), mkexpr(t1
)));
18732 case 0x12: /* LDADDD - Swap Word - Netlogic */
18733 DIP("ldaddw r%u, r%u", rt
, rs
);
18734 t0
= newTemp(Ity_I64
);
18735 t1
= newTemp(Ity_I64
);
18738 assign(t0
, getIReg(rt
));
18740 /* GPR[rt] = memory[base]; */
18741 assign(t1
, load(Ity_I64
, getIReg(rs
)));
18742 putIReg(rt
, mkexpr(t1
));
18744 /* memory[base] = memory[base] + v; */
18745 store(getIReg(rs
), binop(Iop_Add64
, mkexpr(t0
), mkexpr(t1
)));
18748 case 0x14: /* SWAPW - Swap Word - Netlogic */
18749 DIP("swapw r%u, r%u", rt
, rs
);
18750 t0
= newTemp(Ity_I32
);
18751 t1
= newTemp(Ity_I32
);
18752 assign(t0
, mkNarrowTo32(ty
, getIReg(rt
)));
18753 assign(t1
, load(Ity_I32
, getIReg(rs
)));
18754 putIReg(rt
, mkWidenFrom32(ty
, mkexpr(t1
), True
));
18755 store(getIReg(rs
), mkexpr(t0
));
18758 case 0x16: /* SWAPD - Swap Double - Netlogic */
18759 DIP("swapw r%u, r%u", rt
, rs
);
18760 t0
= newTemp(Ity_I64
);
18761 t1
= newTemp(Ity_I64
);
18762 assign(t0
, getIReg(rt
));
18763 assign(t1
, load(Ity_I64
, getIReg(rs
)));
18764 putIReg(rt
, mkexpr(t1
));
18765 store(getIReg(rs
), mkexpr(t0
));
18768 case 0x20: { /* CLZ */
18769 DIP("clz r%u, r%u", rd
, rs
);
18772 IRTemp tmpClz32
= newTemp(Ity_I32
);
18773 IRTemp tmpRs32
= newTemp(Ity_I32
);
18775 assign(tmpRs32
, mkNarrowTo32(ty
, getIReg(rs
)));
18776 assign(tmpClz32
, unop(Iop_Clz32
, mkexpr(tmpRs32
)));
18777 putIReg(rd
, mkWidenFrom32(ty
, mkexpr(tmpClz32
), True
));
18779 t1
= newTemp(Ity_I1
);
18780 assign(t1
, binop(Iop_CmpEQ32
, getIReg(rs
), mkU32(0)));
18781 putIReg(rd
, IRExpr_ITE(mkexpr(t1
),
18783 unop(Iop_Clz32
, getIReg(rs
))));
18789 case 0x21: { /* CLO */
18790 DIP("clo r%u, r%u", rd
, rs
);
18793 IRTemp tmpClo32
= newTemp(Ity_I32
);
18794 IRTemp tmpRs32
= newTemp(Ity_I32
);
18795 assign(tmpRs32
, mkNarrowTo32(ty
, getIReg(rs
)));
18797 t1
= newTemp(Ity_I1
);
18798 assign(t1
, binop(Iop_CmpEQ32
, mkexpr(tmpRs32
), mkU32(0xffffffff)));
18799 assign(tmpClo32
, IRExpr_ITE(mkexpr(t1
),
18801 unop(Iop_Clz32
, unop(Iop_Not32
, mkexpr(tmpRs32
)))));
18803 putIReg(rd
, mkWidenFrom32(ty
, mkexpr(tmpClo32
), True
));
18806 t1
= newTemp(Ity_I1
);
18807 assign(t1
, binop(Iop_CmpEQ32
, getIReg(rs
), mkU32(0xffffffff)));
18808 putIReg(rd
, IRExpr_ITE(mkexpr(t1
),
18811 unop(Iop_Not32
, getIReg(rs
)))));
18816 case 0x24: /* Count Leading Zeros in Doubleword - DCLZ; MIPS64 */
18817 DIP("dclz r%u, r%u", rd
, rs
);
18818 t1
= newTemp(Ity_I1
);
18819 assign(t1
, binop(Iop_CmpEQ64
, getIReg(rs
), mkU64(0)));
18820 putIReg(rd
, IRExpr_ITE(mkexpr(t1
),
18822 unop(Iop_Clz64
, getIReg(rs
))));
18825 case 0x25: /* Count Leading Ones in Doubleword - DCLO; MIPS64 */
18826 DIP("dclo r%u, r%u", rd
, rs
);
18827 t1
= newTemp(Ity_I1
);
18828 assign(t1
, binop(Iop_CmpEQ64
, getIReg(rs
),
18829 mkU64(0xffffffffffffffffULL
)));
18830 putIReg(rd
, IRExpr_ITE(mkexpr(t1
),
18832 unop(Iop_Clz64
, unop(Iop_Not64
,
18843 static UInt
disInstr_MIPS_WRK_Special3(UInt cins
, const VexArchInfo
* archinfo
,
18844 const VexAbiInfo
* abiinfo
, DisResult
* dres
,
18845 IRStmt
** bstmt
, IRExpr
** lastn
)
18848 IRTemp t0
, t1
= 0, t2
, t3
, t4
, t5
, t6
;
18849 UInt rs
, rt
, rd
, sa
, function
, imm
, instr_index
, msb
, lsb
, size
;
18850 /* Additional variables for instruction fields in DSP ASE insructions */
18852 imm
= get_imm(cins
);
18857 instr_index
= get_instr_index(cins
);
18858 function
= get_function(cins
);
18859 IRType ty
= mode64
? Ity_I64
: Ity_I32
;
18861 switch (function
) {
18862 case 0x01: { /* Doubleword Extract Bit Field - DEXTM; MIPS64r2 */
18863 msb
= get_msb(cins
);
18864 lsb
= get_lsb(cins
);
18867 UInt dstSz
= msb
+ 33;
18868 t1
= newTemp(Ity_I64
);
18869 DIP("dextm r%u, r%u, %u, %u", rt
, rs
, lsb
, msb
+ 1);
18871 UChar lsAmt
= 64 - (srcPos
+ dstSz
); /* left shift amount; */
18872 UChar rsAmt
= 64 - dstSz
; /* right shift amount; */
18874 assign(t1
, binop(Iop_Shl64
, getIReg(rs
), mkU8(lsAmt
)));
18875 putIReg(rt
, binop(Iop_Shr64
, mkexpr(t1
), mkU8(rsAmt
)));
18880 case 0x02: { /* Doubleword Extract Bit Field Upper - DEXTU; MIPS64r2 */
18881 msb
= get_msb(cins
);
18882 lsb
= get_lsb(cins
);
18884 UInt srcPos
= lsb
+ 32;
18885 UInt dstSz
= msb
+ 1;
18886 DIP("dextu r%u, r%u, %u, %u", rt
, rs
, srcPos
, dstSz
);
18887 t1
= newTemp(Ity_I64
);
18889 vassert(srcPos
>= 32 && srcPos
< 64);
18890 vassert(dstSz
> 0 && dstSz
<= 32);
18891 vassert((srcPos
+ dstSz
) > 32 && (srcPos
+ dstSz
) <= 64);
18893 UChar lsAmt
= 64 - (srcPos
+ dstSz
); /* left shift amount; */
18894 UChar rsAmt
= 64 - dstSz
; /* right shift amount; */
18896 assign(t1
, binop(Iop_Shl64
, getIReg(rs
), mkU8(lsAmt
)));
18897 putIReg(rt
, binop(Iop_Shr64
, mkexpr(t1
), mkU8(rsAmt
)));
18901 case 0x05: { /* Doubleword Insert Bit Field Middle - DINSM; MIPS64r2 */
18902 msb
= get_msb(cins
);
18903 lsb
= get_lsb(cins
);
18906 UInt srcSz
= msb
- lsb
+ 33;
18911 IRTemp tmpT1
= newTemp(ty
);
18912 IRTemp tmpT2
= newTemp(ty
);
18913 IRTemp tmpT3
= newTemp(ty
);
18914 IRTemp tmpT4
= newTemp(ty
);
18915 IRTemp tmpT5
= newTemp(ty
);
18916 IRTemp tmpT6
= newTemp(ty
);
18917 IRTemp tmpT7
= newTemp(ty
);
18918 IRTemp tmpRs
= newTemp(ty
);
18919 IRTemp tmpRt
= newTemp(ty
);
18920 IRTemp tmpRd
= newTemp(ty
);
18922 assign(tmpRs
, getIReg(rs
));
18923 assign(tmpRt
, getIReg(rt
));
18924 DIP("dinsm r%u, r%u, %u, %u", rt
, rs
, lsb
, msb
);
18926 UChar lsAmt
= dstPos
+ srcSz
- 1; /* left shift amount; */
18927 UChar rsAmt
= dstPos
+ srcSz
- 1; /* right shift amount; */
18929 assign(t1
, binop(Iop_Shr64
, mkexpr(tmpRt
), mkU8(rsAmt
)));
18930 assign(tmpT1
, binop(Iop_Shr64
, mkexpr(t1
), mkU8(1)));
18931 assign(t2
, binop(Iop_Shl64
, mkexpr(tmpT1
), mkU8(lsAmt
)));
18932 assign(tmpT2
, binop(Iop_Shl64
, mkexpr(t2
), mkU8(1)));
18934 lsAmt
= 63 - dstPos
; /* left shift amount; */
18935 rsAmt
= 63 - dstPos
; /* right shift amount; */
18937 assign(t3
, binop(Iop_Shl64
, mkexpr(tmpRt
), mkU8(lsAmt
)));
18938 assign(tmpT3
, binop(Iop_Shl64
, mkexpr(t3
), mkU8(1)));
18939 assign(t4
, binop(Iop_Shr64
, mkexpr(tmpT3
), mkU8(rsAmt
)));
18940 assign(tmpT4
, binop(Iop_Shr64
, mkexpr(t4
), mkU8(1)));
18942 /* extract size from src register */
18943 lsAmt
= 64 - srcSz
; /* left shift amount; */
18944 rsAmt
= 64 - (lsb
+ srcSz
); /* right shift amount; */
18946 assign(tmpT5
, binop(Iop_Shl64
, mkexpr(tmpRs
), mkU8(lsAmt
)));
18947 assign(tmpT6
, binop(Iop_Shr64
, mkexpr(tmpT5
), mkU8(rsAmt
)));
18949 assign(tmpT7
, binop(Iop_Or64
, mkexpr(tmpT2
), mkexpr(tmpT4
)));
18950 assign(tmpRd
, binop(Iop_Or64
, mkexpr(tmpT6
), mkexpr(tmpT7
)));
18951 putIReg(rt
, mkexpr(tmpRd
));
18955 case 0x06: { /* Doubleword Insert Bit Field Upper - DINSU; MIPS64r2 */
18956 msb
= get_msb(cins
);
18957 lsb
= get_lsb(cins
);
18959 UInt dstPos
= lsb
+ 32;
18960 UInt srcSz
= msb
- lsb
+ 1;
18961 IRTemp tmpT1
= newTemp(ty
);
18962 IRTemp tmpT2
= newTemp(ty
);
18963 IRTemp tmpT3
= newTemp(ty
);
18964 IRTemp tmpT4
= newTemp(ty
);
18965 IRTemp tmpT5
= newTemp(ty
);
18966 IRTemp tmpT6
= newTemp(ty
);
18967 IRTemp tmpT7
= newTemp(ty
);
18968 IRTemp tmpT8
= newTemp(ty
);
18969 IRTemp tmpT9
= newTemp(ty
);
18970 IRTemp tmpRs
= newTemp(ty
);
18971 IRTemp tmpRt
= newTemp(ty
);
18972 IRTemp tmpRd
= newTemp(ty
);
18974 assign(tmpRs
, getIReg(rs
));
18975 assign(tmpRt
, getIReg(rt
));
18976 DIP("dinsu r%u, r%u, %u, %u", rt
, rs
, lsb
, msb
);
18978 UChar lsAmt
= 64 - srcSz
; /* left shift amount; */
18979 UChar rsAmt
= 64 - (dstPos
+ srcSz
); /* right shift amount; */
18980 assign(tmpT1
, binop(Iop_Shl64
, mkexpr(tmpRs
), mkU8(lsAmt
)));
18981 assign(tmpT2
, binop(Iop_Shr64
, mkexpr(tmpT1
), mkU8(rsAmt
)));
18983 lsAmt
= 64 - dstPos
; /* left shift amount; */
18984 rsAmt
= 64 - dstPos
; /* right shift amount; */
18985 assign(tmpT3
, binop(Iop_Shl64
, mkexpr(tmpRt
), mkU8(lsAmt
)));
18986 assign(tmpT4
, binop(Iop_Shr64
, mkexpr(tmpT3
), mkU8(rsAmt
)));
18988 lsAmt
= dstPos
; /* left shift amount; */
18989 rsAmt
= srcSz
; /* right shift amount; */
18990 assign(tmpT5
, binop(Iop_Shr64
, mkexpr(tmpRt
), mkU8(rsAmt
)));
18991 assign(tmpT6
, binop(Iop_Shr64
, mkexpr(tmpT5
), mkU8(lsAmt
)));
18993 assign(tmpT7
, binop(Iop_Shl64
, mkexpr(tmpT6
), mkU8(rsAmt
)));
18994 assign(tmpT8
, binop(Iop_Shl64
, mkexpr(tmpT7
), mkU8(lsAmt
)));
18996 assign(tmpT9
, binop(Iop_Or64
, mkexpr(tmpT8
), mkexpr(tmpT4
)));
18997 assign(tmpRd
, binop(Iop_Or64
, mkexpr(tmpT2
), mkexpr(tmpT9
)));
18998 putIReg(rt
, mkexpr(tmpRd
));
19002 case 0x07: { /* Doubleword Insert Bit Field - DINS; MIPS64r2 */
19003 IRTemp tmp1
= newTemp(ty
);
19004 IRTemp tmpT1
= newTemp(ty
);
19005 IRTemp tmpT2
= newTemp(ty
);
19006 IRTemp tmpT3
= newTemp(ty
);
19007 IRTemp tmpT4
= newTemp(ty
);
19008 IRTemp tmpT5
= newTemp(ty
);
19009 IRTemp tmpT6
= newTemp(ty
);
19010 IRTemp tmpT7
= newTemp(ty
);
19011 IRTemp tmpT8
= newTemp(ty
);
19012 IRTemp tmpT9
= newTemp(ty
);
19013 IRTemp tmp
= newTemp(ty
);
19014 IRTemp tmpRs
= newTemp(ty
);
19015 IRTemp tmpRt
= newTemp(ty
);
19016 IRTemp tmpRd
= newTemp(ty
);
19018 assign(tmpRs
, getIReg(rs
));
19019 assign(tmpRt
, getIReg(rt
));
19021 msb
= get_msb(cins
);
19022 lsb
= get_lsb(cins
);
19024 DIP("dins r%u, r%u, %u, %u", rt
, rs
, lsb
,
19026 UChar lsAmt
= 63 - lsb
; /* left shift amount; */
19027 UChar rsAmt
= 63 - lsb
; /* right shift amount; */
19028 assign(tmp
, binop(Iop_Shl64
, mkexpr(tmpRt
), mkU8(lsAmt
)));
19029 assign(tmpT1
, binop(Iop_Shl64
, mkexpr(tmp
), mkU8(1)));
19030 assign(tmp1
, binop(Iop_Shr64
, mkexpr(tmpT1
), mkU8(rsAmt
)));
19031 assign(tmpT2
, binop(Iop_Shr64
, mkexpr(tmp1
), mkU8(1)));
19033 lsAmt
= msb
; /* left shift amount; */
19034 rsAmt
= 1; /*right shift amount; */
19035 assign(tmpT3
, binop(Iop_Shr64
, mkexpr(tmpRt
), mkU8(rsAmt
)));
19036 assign(tmpT4
, binop(Iop_Shr64
, mkexpr(tmpT3
), mkU8(lsAmt
)));
19037 assign(tmpT5
, binop(Iop_Shl64
, mkexpr(tmpT4
), mkU8(rsAmt
)));
19038 assign(tmpT6
, binop(Iop_Shl64
, mkexpr(tmpT5
), mkU8(lsAmt
)));
19040 lsAmt
= 64 - (msb
- lsb
+ 1); /* left shift amount; */
19041 rsAmt
= 64 - (msb
+ 1); /* right shift amount; */
19042 assign(tmpT7
, binop(Iop_Shl64
, mkexpr(tmpRs
), mkU8(lsAmt
)));
19043 assign(tmpT8
, binop(Iop_Shr64
, mkexpr(tmpT7
), mkU8(rsAmt
)));
19045 assign(tmpT9
, binop(Iop_Or64
, mkexpr(tmpT2
), mkexpr(tmpT8
)));
19046 assign(tmpRd
, binop(Iop_Or64
, mkexpr(tmpT6
), mkexpr(tmpT9
)));
19047 putIReg(rt
, mkexpr(tmpRd
));
19051 case 0x24: /* DBSHFL */
19052 lsb
= get_lsb(cins
);
19053 IRTemp tmpRs
= newTemp(ty
);
19054 IRTemp tmpRt
= newTemp(ty
);
19055 IRTemp tmpRd
= newTemp(ty
);
19056 assign(tmpRs
, getIReg(rs
));
19057 assign(tmpRt
, getIReg(rt
));
19060 case 0x02: { /* DSBH */
19061 DIP("dsbh r%u, r%u", rd
, rt
);
19062 IRTemp tmpT1
= newTemp(ty
);
19063 IRTemp tmpT2
= newTemp(ty
);
19064 IRTemp tmpT3
= newTemp(ty
);
19065 IRTemp tmpT4
= newTemp(ty
);
19066 IRTemp tmpT5
= newTemp(Ity_I64
);
19067 IRTemp tmpT6
= newTemp(ty
);
19068 assign(tmpT5
, mkU64(0xFF00FF00FF00FF00ULL
));
19069 assign(tmpT6
, mkU64(0x00FF00FF00FF00FFULL
));
19070 assign(tmpT1
, binop(Iop_And64
, mkexpr(tmpRt
), mkexpr(tmpT5
)));
19071 assign(tmpT2
, binop(Iop_Shr64
, mkexpr(tmpT1
), mkU8(8)));
19072 assign(tmpT3
, binop(Iop_And64
, mkexpr(tmpRt
), mkexpr(tmpT6
)));
19073 assign(tmpT4
, binop(Iop_Shl64
, mkexpr(tmpT3
), mkU8(8)));
19074 assign(tmpRd
, binop(Iop_Or64
, mkexpr(tmpT4
), mkexpr(tmpT2
)));
19075 putIReg(rd
, mkexpr(tmpRd
));
19079 case 0x05: { /* DSHD */
19080 DIP("dshd r%u, r%u\n", rd
, rt
);
19081 IRTemp tmpT1
= newTemp(ty
);
19082 IRTemp tmpT2
= newTemp(ty
);
19083 IRTemp tmpT3
= newTemp(ty
);
19084 IRTemp tmpT4
= newTemp(ty
);
19085 IRTemp tmpT5
= newTemp(Ity_I64
);
19086 IRTemp tmpT6
= newTemp(ty
);
19087 IRTemp tmpT7
= newTemp(ty
);
19088 IRTemp tmpT8
= newTemp(ty
);
19089 IRTemp tmpT9
= newTemp(ty
);
19090 assign(tmpT5
, mkU64(0xFFFF0000FFFF0000ULL
));
19091 assign(tmpT6
, mkU64(0x0000FFFF0000FFFFULL
));
19092 assign(tmpT1
, binop(Iop_And64
, mkexpr(tmpRt
), mkexpr(tmpT5
)));
19093 assign(tmpT2
, binop(Iop_Shr64
, mkexpr(tmpT1
), mkU8(16)));
19094 assign(tmpT3
, binop(Iop_And64
, mkexpr(tmpRt
), mkexpr(tmpT6
)));
19095 assign(tmpT4
, binop(Iop_Shl64
, mkexpr(tmpT3
), mkU8(16)));
19096 assign(tmpT7
, binop(Iop_Or64
, mkexpr(tmpT4
), mkexpr(tmpT2
)));
19097 assign(tmpT8
, binop(Iop_Shl64
, mkexpr(tmpT7
), mkU8(32)));
19098 assign(tmpT9
, binop(Iop_Shr64
, mkexpr(tmpT7
), mkU8(32)));
19099 assign(tmpRd
, binop(Iop_Or64
, mkexpr(tmpT8
), mkexpr(tmpT9
)));
19100 putIReg(rd
, mkexpr(tmpRd
));
19104 case 0x08 ... 0x0f: { /* DALIGN */
19105 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
19106 DIP("dalign r%u, r%u, r%u, %u", rd
, rs
, rt
, lsb
& 0x7);
19107 UInt bp
= (lsb
& 0x7) << 3;
19110 putIReg(rd
, binop(Iop_Or64
,
19111 binop(Iop_Shl64
, getIReg(rt
), mkU8(bp
)),
19113 getIReg(rs
), mkU8(64 - bp
))));
19115 putIReg(rd
, getIReg(rt
));
19123 case 0: /* DBITSWAP */
19124 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
19125 DIP("dbitswap r%u, r%u", rd
, rt
);
19126 putIReg(rd
, qop(Iop_Rotx64
, getIReg(rt
), mkU8(7), mkU8(8), mkU8(1)));
19139 case 0x3B: /* RDHWR */
19140 DIP("rdhwr r%u, r%u", rt
, rd
);
19142 if (VEX_MIPS_CPU_HAS_MIPS32R2(archinfo
->hwcaps
) ||
19143 VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
) ||
19144 (VEX_MIPS_COMP_ID(archinfo
->hwcaps
) == VEX_PRID_COMP_BROADCOM
)) {
19146 putIReg(rt
, getULR());
19149 && VEX_MIPS_COMP_ID(archinfo
->hwcaps
)
19150 == VEX_PRID_COMP_CAVIUM
)) {
19151 IRExpr
** arg
= mkIRExprVec_1(mkU32(rd
));
19152 IRTemp val
= newTemp(ty
);
19153 IRDirty
*d
= unsafeIRDirty_1_N(val
,
19155 "mips_dirtyhelper_rdhwr",
19156 &mips_dirtyhelper_rdhwr
,
19158 stmt(IRStmt_Dirty(d
));
19159 putIReg(rt
, mkexpr(val
));
19168 case 0x04: /* INS */
19169 msb
= get_msb(cins
);
19170 lsb
= get_lsb(cins
);
19171 size
= msb
- lsb
+ 1;
19172 DIP("ins size:%u msb:%u lsb:%u", size
, msb
, lsb
);
19174 vassert(lsb
+ size
<= 32);
19175 vassert(lsb
+ size
> 0);
19177 /* put size bits from rs at the pos in temporary */
19178 t0
= newTemp(Ity_I32
);
19179 t3
= newTemp(Ity_I32
);
19180 /* shift left for 32 - size to clear leading bits and get zeros
19182 assign(t0
, binop(Iop_Shl32
, mkNarrowTo32(ty
, getIReg(rs
)),
19184 /* now set it at pos */
19185 t1
= newTemp(Ity_I32
);
19186 assign(t1
, binop(Iop_Shr32
, mkexpr(t0
), mkU8(32 - size
- lsb
)));
19189 t2
= newTemp(Ity_I32
);
19190 /* clear everything but lower pos bits from rt */
19191 assign(t2
, binop(Iop_Shl32
, mkNarrowTo32(ty
, getIReg(rt
)),
19193 assign(t3
, binop(Iop_Shr32
, mkexpr(t2
), mkU8(32 - lsb
)));
19195 assign(t3
, mkU32(0));
19198 t4
= newTemp(Ity_I32
);
19199 /* clear everything but upper msb + 1 bits from rt */
19200 assign(t4
, binop(Iop_Shr32
, mkNarrowTo32(ty
, getIReg(rt
)),
19202 t5
= newTemp(Ity_I32
);
19203 assign(t5
, binop(Iop_Shl32
, mkexpr(t4
), mkU8(msb
+ 1)));
19205 /* now combine these registers */
19207 t6
= newTemp(Ity_I32
);
19208 assign(t6
, binop(Iop_Or32
, mkexpr(t5
), mkexpr(t1
)));
19209 putIReg(rt
, mkWidenFrom32(ty
, binop(Iop_Or32
, mkexpr(t6
),
19210 mkexpr(t3
)), True
));
19212 putIReg(rt
, mkWidenFrom32(ty
, binop(Iop_Or32
, mkexpr(t1
),
19213 mkexpr(t5
)), True
));
19216 putIReg(rt
, mkWidenFrom32(ty
, binop(Iop_Or32
, mkexpr(t1
),
19217 mkexpr(t3
)), True
));
19222 case 0x00: /* EXT */
19223 msb
= get_msb(cins
);
19224 lsb
= get_lsb(cins
);
19226 DIP("ext size:%u msb:%u lsb:%u", size
, msb
, lsb
);
19227 vassert(lsb
+ size
<= 32);
19228 vassert(lsb
+ size
> 0);
19230 /* put size bits from rs at the top of in temporary */
19231 if (lsb
+ size
< 32) {
19232 t0
= newTemp(Ity_I32
);
19233 assign(t0
, binop(Iop_Shl32
, mkNarrowTo32(ty
, getIReg(rs
)),
19234 mkU8(32 - lsb
- size
)));
19236 putIReg(rt
, mkWidenFrom32(ty
, binop(Iop_Shr32
, mkexpr(t0
),
19237 mkU8(32 - size
)), True
));
19239 putIReg(rt
, mkWidenFrom32(ty
, binop(Iop_Shr32
,
19240 mkNarrowTo32(ty
, getIReg(rs
)),
19241 mkU8(32 - size
)), True
));
19246 case 0x03: /* Doubleword Extract Bit Field - DEXT; MIPS64r2 */
19247 msb
= get_msb(cins
);
19248 lsb
= get_lsb(cins
);
19250 DIP("dext r%u, r%u, %u, %u", rt
, rs
, lsb
, msb
+ 1);
19251 t1
= newTemp(Ity_I64
);
19252 vassert(lsb
>= 0 && lsb
< 32);
19253 vassert(size
> 0 && size
<= 32);
19254 vassert((lsb
+ size
) > 0 && (lsb
+ size
) <= 63);
19256 UChar lsAmt
= 63 - (lsb
+ msb
); /* left shift amount; */
19257 UChar rsAmt
= 63 - msb
; /* right shift amount; */
19259 assign(t1
, binop(Iop_Shl64
, getIReg(rs
), mkU8(lsAmt
)));
19260 putIReg(rt
, binop(Iop_Shr64
, mkexpr(t1
), mkU8(rsAmt
)));
19264 case 0x20: /* BSHFL */
19266 case 0x0: /* BITSWAP */
19267 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
19268 DIP("bitswap r%u, r%u", rd
, rt
);
19271 putIReg(rd
, unop(Iop_32Uto64
, qop(Iop_Rotx32
, unop(Iop_64to32
, getIReg(rt
)),
19272 mkU8(7), mkU8(8), mkU8(1))));
19274 putIReg(rd
, qop(Iop_Rotx32
, getIReg(rt
), mkU8(7),
19275 mkU8(8), mkU8(1)));
19278 ILLEGAL_INSTRUCTON
;
19283 case 0x02: /* WSBH */
19284 DIP("wsbh r%u, r%u", rd
, rt
);
19285 t0
= newTemp(Ity_I32
);
19286 t1
= newTemp(Ity_I32
);
19287 t2
= newTemp(Ity_I32
);
19288 t3
= newTemp(Ity_I32
);
19289 assign(t0
, binop(Iop_Shl32
, binop(Iop_And32
, mkNarrowTo32(ty
,
19290 getIReg(rt
)), mkU32(0x00FF0000)),
19292 assign(t1
, binop(Iop_Shr32
, binop(Iop_And32
, mkNarrowTo32(ty
,
19293 getIReg(rt
)), mkU32(0xFF000000)), mkU8(0x8)));
19294 assign(t2
, binop(Iop_Shl32
, binop(Iop_And32
, mkNarrowTo32(ty
,
19295 getIReg(rt
)), mkU32(0x000000FF)), mkU8(0x8)));
19296 assign(t3
, binop(Iop_Shr32
, binop(Iop_And32
, mkNarrowTo32(ty
,
19297 getIReg(rt
)), mkU32(0x0000FF00)), mkU8(0x8)));
19298 putIReg(rd
, mkWidenFrom32(ty
, binop(Iop_Or32
, binop(Iop_Or32
,
19299 mkexpr(t0
), mkexpr(t1
)),
19300 binop(Iop_Or32
, mkexpr(t2
),
19301 mkexpr(t3
))), True
));
19304 case 0x10: /* SEB */
19305 DIP("seb r%u, r%u", rd
, rt
);
19308 putIReg(rd
, unop(Iop_8Sto64
, unop(Iop_64to8
, getIReg(rt
))));
19310 putIReg(rd
, unop(Iop_8Sto32
, unop(Iop_32to8
, getIReg(rt
))));
19314 case 0x18: /* SEH */
19315 DIP("seh r%u, r%u", rd
, rt
);
19318 putIReg(rd
, unop(Iop_16Sto64
, unop(Iop_64to16
, getIReg(rt
))));
19320 putIReg(rd
, unop(Iop_16Sto32
, unop(Iop_32to16
, getIReg(rt
))));
19324 case 0x08 ... 0x0b: /* ALIGN */
19325 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
19327 UInt bp
= (sa
& 0x3) << 3;
19330 putIReg(rd
, unop(Iop_32Sto64
,
19341 putIReg(rd
, getIReg(rt
));
19343 UInt bp
= (sa
& 0x3) << 3;
19346 putIReg(rd
, binop(Iop_Or32
,
19348 getIReg(rt
), mkU8(bp
)),
19350 getIReg(rs
), mkU8(32 - bp
))));
19352 putIReg(rd
, getIReg(rt
));
19355 ILLEGAL_INSTRUCTON
;
19367 /* --- MIPS32(r2) DSP ASE(r2) / Cavium Specfic (LX) instructions --- */
19369 if (VEX_MIPS_COMP_ID(archinfo
->hwcaps
) == VEX_PRID_COMP_CAVIUM
) {
19370 if (dis_instr_CVM(cins
))
19376 case 0xC: /* INSV */
19377 case 0x38: { /* EXTR.W */
19378 if (VEX_MIPS_PROC_DSP(archinfo
->hwcaps
)) {
19379 UInt retVal
= disDSPInstr_MIPS_WRK ( cins
);
19381 if (0 != retVal
) {
19393 case 0x10: { /* ADDU.QB */
19395 case 0xC: /* SUBU_S.PH */
19396 case 0xD: /* ADDU_S.PH */
19397 case 0x1E: { /* MULQ_S.PH */
19398 if (VEX_MIPS_PROC_DSP2(archinfo
->hwcaps
)) {
19399 UInt retVal
= disDSPInstr_MIPS_WRK ( cins
);
19401 if (0 != retVal
) {
19414 if (VEX_MIPS_PROC_DSP(archinfo
->hwcaps
)) {
19415 UInt retVal
= disDSPInstr_MIPS_WRK ( cins
);
19417 if (0 != retVal
) {
19433 case 0x11: { /* CMPU.EQ.QB */
19435 case 0x18: /* CMPGDU.EQ.QB */
19436 case 0x19: /* CMPGDU.LT.QB */
19437 case 0x1A: /* CMPGDU.LE.QB */
19438 case 0x0D: /* PRECR.QB.PH */
19439 case 0x1E: /* PRECR_SRA.PH.W */
19440 case 0x1F: { /* PRECR_SRA_R.PH.W */
19441 if (VEX_MIPS_PROC_DSP2(archinfo
->hwcaps
)) {
19442 UInt retVal
= disDSPInstr_MIPS_WRK ( cins
);
19444 if (0 != retVal
) {
19457 if (VEX_MIPS_PROC_DSP(archinfo
->hwcaps
)) {
19458 UInt retVal
= disDSPInstr_MIPS_WRK ( cins
);
19460 if (0 != retVal
) {
19476 case 0x12: { /* ABSQ_S.PH */
19478 case 0x1: { /* ABSQ_S.QB */
19479 if (VEX_MIPS_PROC_DSP2(archinfo
->hwcaps
)) {
19480 UInt retVal
= disDSPInstr_MIPS_WRK ( cins
);
19482 if (0 != retVal
) {
19495 if (VEX_MIPS_PROC_DSP(archinfo
->hwcaps
)) {
19496 UInt retVal
= disDSPInstr_MIPS_WRK ( cins
);
19498 if (0 != retVal
) {
19514 case 0x13: { /* SHLL.QB */
19516 case 0x04: /* SHRA.QB */
19517 case 0x05: /* SHRA_R.QB */
19518 case 0x06: /* SHRAV.QB */
19519 case 0x07: /* SHRAV_R.QB */
19520 case 0x19: /* SHLR.PH */
19521 case 0x1B: { /* SHLRV.PH */
19522 if (VEX_MIPS_PROC_DSP2(archinfo
->hwcaps
)) {
19523 UInt retVal
= disDSPInstr_MIPS_WRK ( cins
);
19525 if (0 != retVal
) {
19538 if (VEX_MIPS_PROC_DSP(archinfo
->hwcaps
)) {
19539 UInt retVal
= disDSPInstr_MIPS_WRK ( cins
);
19541 if (0 != retVal
) {
19557 case 0x30: { /* DPAQ.W.PH */
19559 case 0x0: /* DPA.W.PH */
19560 case 0x18: /* DPAQX_S.W.PH */
19561 case 0x1A: /* DPAQX_SA.W.PH */
19562 case 0x8: /* DPAX.W.PH */
19563 case 0x1: /* DPS.W.PH */
19564 case 0x19: /* DPSQX_S.W.PH */
19565 case 0x1B: /* DPSQX_SA.W.PH */
19566 case 0x9: /* DPSX.W.PH */
19567 case 0x2: { /* MULSA.W.PH */
19568 if (VEX_MIPS_PROC_DSP2(archinfo
->hwcaps
)) {
19569 UInt retVal
= disDSPInstr_MIPS_WRK ( cins
);
19571 if (0 != retVal
) {
19584 if (VEX_MIPS_PROC_DSP(archinfo
->hwcaps
)) {
19585 UInt retVal
= disDSPInstr_MIPS_WRK ( cins
);
19587 if (0 != retVal
) {
19603 case 0x18: /* ADDUH.QB/MUL.PH */
19604 case 0x31: { /* APPEND */
19605 if (VEX_MIPS_PROC_DSP2(archinfo
->hwcaps
)) {
19606 UInt retVal
= disDSPInstr_MIPS_WRK ( cins
);
19608 if (0 != retVal
) {
19618 case 0x35: { /* PREF r6*/
19623 case 0x36: { /* LL */
19624 imm
= extend_s_9to16((instr_index
>> 7) & 0x1ff);
19625 DIP("ll r%u, %u(r%u)", rt
, imm
, rs
);
19626 LOAD_STORE_PATTERN
;
19628 assign(t2
, mkWidenFrom32(ty
, load(Ity_I32
, mkexpr(t1
)), True
));
19629 putLLaddr(mkexpr(t1
));
19630 putLLdata(mkexpr(t2
));
19631 putIReg(rt
, mkexpr(t2
));
19635 case 0x26: { /* SC */
19636 imm
= extend_s_9to16((instr_index
>> 7) & 0x1ff);
19637 DIP("sc r%u, %u(r%u)", rt
, imm
, rs
);
19638 LOAD_STORE_PATTERN
;
19640 t2
= newTemp(Ity_I1
);
19641 t3
= newTemp(Ity_I32
);
19642 assign(t2
, binop(mode64
? Iop_CmpNE64
: Iop_CmpNE32
,
19643 mkexpr(t1
), getLLaddr()));
19644 assign(t3
, mkNarrowTo32(ty
, getIReg(rt
)));
19645 putLLaddr(LLADDR_INVALID
);
19646 putIReg(rt
, getIReg(0));
19648 mips_next_insn_if(mkexpr(t2
));
19650 t4
= newTemp(Ity_I32
);
19651 t5
= newTemp(Ity_I32
);
19653 assign(t5
, mkNarrowTo32(ty
, getLLdata()));
19655 stmt(IRStmt_CAS(mkIRCAS(IRTemp_INVALID
, t4
, /* old_mem */
19656 MIPS_IEND
, mkexpr(t1
), /* addr */
19657 NULL
, mkexpr(t5
), /* expected value */
19658 NULL
, mkexpr(t3
) /* new value */)));
19660 putIReg(rt
, unop(mode64
? Iop_1Uto64
: Iop_1Uto32
,
19661 binop(Iop_CmpEQ32
, mkexpr(t4
), mkexpr(t5
))));
19665 case 0x37: { /* LLD */
19666 imm
= extend_s_9to16((instr_index
>> 7) & 0x1ff);
19667 DIP("lld r%u, %u(r%u)", rt
, imm
, rs
);
19668 LOAD_STORE_PATTERN
;
19670 t2
= newTemp(Ity_I64
);
19671 assign(t2
, load(Ity_I64
, mkexpr(t1
)));
19672 putLLaddr(mkexpr(t1
));
19673 putLLdata(mkexpr(t2
));
19674 putIReg(rt
, mkexpr(t2
));
19678 case 0x27: { /* SCD */
19679 imm
= extend_s_9to16((instr_index
>> 7) & 0x1ff);
19680 DIP("sdc r%u, %u(r%u)", rt
, imm
, rs
);
19681 LOAD_STORE_PATTERN
;
19683 t2
= newTemp(Ity_I1
);
19684 t3
= newTemp(Ity_I64
);
19685 assign(t2
, binop(Iop_CmpNE64
, mkexpr(t1
), getLLaddr()));
19686 assign(t3
, getIReg(rt
));
19687 putLLaddr(LLADDR_INVALID
);
19688 putIReg(rt
, getIReg(0));
19690 mips_next_insn_if(mkexpr(t2
));
19692 t4
= newTemp(Ity_I64
);
19693 t5
= newTemp(Ity_I64
);
19695 assign(t5
, getLLdata());
19697 stmt(IRStmt_CAS(mkIRCAS(IRTemp_INVALID
, t4
, /* old_mem */
19698 MIPS_IEND
, mkexpr(t1
), /* addr */
19699 NULL
, mkexpr(t5
), /* expected value */
19700 NULL
, mkexpr(t3
) /* new value */)));
19702 putIReg(rt
, unop(Iop_1Uto64
,
19703 binop(Iop_CmpEQ64
, mkexpr(t4
), mkexpr(t5
))));
19714 static UInt
disInstr_MIPS_WRK_00(UInt cins
, const VexArchInfo
* archinfo
,
19715 const VexAbiInfo
* abiinfo
, DisResult
* dres
,
19716 IRStmt
** bstmt
, IRExpr
** lastn
)
19719 UInt opcode
, rs
, rt
, trap_code
, imm
, instr_index
, p
;
19720 /* Additional variables for instruction fields in DSP ASE insructions */
19722 opcode
= get_opcode(cins
);
19723 imm
= get_imm(cins
);
19726 instr_index
= get_instr_index(cins
);
19727 trap_code
= get_code(cins
);
19728 IRType ty
= mode64
? Ity_I64
: Ity_I32
;
19730 switch (opcode
& 0x0F) {
19731 case 0x00: /* Special */
19732 return disInstr_MIPS_WRK_Special(cins
, archinfo
, abiinfo
,
19733 dres
, bstmt
, lastn
);
19735 case 0x01: /* Regimm */
19737 case 0x00: /* BLTZ */
19738 DIP("bltz r%u, %u", rs
, imm
);
19741 if (!dis_instr_branch(cins
, dres
, bstmt
))
19744 dis_branch(False
, binop(Iop_CmpEQ32
, binop(Iop_And32
, getIReg(rs
),
19745 mkU32(0x80000000)), mkU32(0x80000000)), imm
, bstmt
);
19749 case 0x01: /* BGEZ */
19750 DIP("bgez r%u, %u", rs
, imm
);
19753 if (!dis_instr_branch(cins
, dres
, bstmt
))
19756 dis_branch(False
, binop(Iop_CmpEQ32
, binop(Iop_And32
, getIReg(rs
),
19757 mkU32(0x80000000)), mkU32(0x0)), imm
, bstmt
);
19761 case 0x02: /* BLTZL */
19762 DIP("bltzl r%u, %u", rs
, imm
);
19763 *lastn
= dis_branch_likely(binop(mode64
? Iop_CmpNE64
: Iop_CmpNE32
,
19764 binop(mode64
? Iop_And64
: Iop_And32
, getIReg(rs
),
19765 mode64
? mkU64(0x8000000000000000ULL
) : mkU32(0x80000000)),
19766 mode64
? mkU64(0x8000000000000000ULL
) : mkU32(0x80000000)),
19770 case 0x03: /* BGEZL */
19771 DIP("bgezl r%u, %u", rs
, imm
);
19772 *lastn
= dis_branch_likely(binop(mode64
? Iop_CmpNE64
: Iop_CmpNE32
,
19773 binop(mode64
? Iop_And64
: Iop_And32
, getIReg(rs
),
19774 mode64
? mkU64(0x8000000000000000ULL
) : mkU32(0x80000000)),
19775 mode64
? mkU64(0x0) : mkU32(0x0)), imm
);
19778 case 0x06: { /* DAHI */
19779 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
19780 DIP("dahi r%u, %x", rs
, imm
);
19781 putIReg(rs
, binop(Iop_Add64
,
19782 getIReg(rs
), mkU64(extend_s_16to64 (imm
) << 32)));
19790 case 0x08: /* TGEI */
19791 DIP("tgei r%u, %u %u", rs
, imm
, trap_code
);
19794 stmt (IRStmt_Exit (unop (Iop_Not1
,
19795 binop (Iop_CmpLT64S
,
19797 mkU64 (extend_s_16to64 (imm
)))),
19799 IRConst_U64(guest_PC_curr_instr
+ 4),
19802 stmt (IRStmt_Exit (unop (Iop_Not1
,
19803 binop (Iop_CmpLT32S
,
19805 mkU32 (extend_s_16to32 (imm
)))),
19807 IRConst_U32(guest_PC_curr_instr
+ 4),
19813 case 0x09: { /* TGEIU */
19814 DIP("tgeiu r%u, %u %u", rs
, imm
, trap_code
);
19817 stmt (IRStmt_Exit (unop (Iop_Not1
,
19818 binop (Iop_CmpLT64U
,
19820 mkU64 (extend_s_16to64 (imm
)))),
19822 IRConst_U64(guest_PC_curr_instr
+ 4),
19825 stmt (IRStmt_Exit (unop (Iop_Not1
,
19826 binop (Iop_CmpLT32U
,
19828 mkU32 (extend_s_16to32 (imm
)))),
19830 IRConst_U32(guest_PC_curr_instr
+ 4),
19837 case 0x0A: { /* TLTI */
19838 DIP("tlti r%u, %u %u", rs
, imm
, trap_code
);
19841 stmt (IRStmt_Exit (binop (Iop_CmpLT64S
, getIReg (rs
),
19842 mkU64 (extend_s_16to64 (imm
))),
19844 IRConst_U64(guest_PC_curr_instr
+ 4),
19847 stmt (IRStmt_Exit (binop (Iop_CmpLT32S
, getIReg (rs
),
19848 mkU32 (extend_s_16to32 (imm
))),
19850 IRConst_U32(guest_PC_curr_instr
+ 4),
19857 case 0x0B: { /* TLTIU */
19858 DIP("tltiu r%u, %u %u", rs
, imm
, trap_code
);
19861 stmt (IRStmt_Exit (binop (Iop_CmpLT64U
, getIReg (rs
),
19862 mkU64 (extend_s_16to64 (imm
))),
19864 IRConst_U64(guest_PC_curr_instr
+ 4),
19867 stmt (IRStmt_Exit (binop (Iop_CmpLT32U
, getIReg (rs
),
19868 mkU32 (extend_s_16to32 (imm
))),
19870 IRConst_U32(guest_PC_curr_instr
+ 4),
19877 case 0x0C: { /* TEQI */
19878 DIP("teqi r%u, %u %u", rs
, imm
, trap_code
);
19881 stmt (IRStmt_Exit (binop (Iop_CmpEQ64
, getIReg (rs
),
19882 mkU64 (extend_s_16to64 (imm
))),
19884 IRConst_U64(guest_PC_curr_instr
+ 4),
19887 stmt (IRStmt_Exit (binop (Iop_CmpEQ32
, getIReg (rs
),
19888 mkU32 (extend_s_16to32 (imm
))),
19890 IRConst_U32(guest_PC_curr_instr
+ 4),
19897 case 0x0E: { /* TNEI */
19898 DIP("tnei r%u, %u %u", rs
, imm
, trap_code
);
19901 stmt (IRStmt_Exit (binop (Iop_CmpNE64
, getIReg (rs
),
19902 mkU64 (extend_s_16to64 (imm
))),
19904 IRConst_U64(guest_PC_curr_instr
+ 4),
19907 stmt (IRStmt_Exit (binop (Iop_CmpNE32
, getIReg (rs
),
19908 mkU32 (extend_s_16to32 (imm
))),
19910 IRConst_U32(guest_PC_curr_instr
+ 4),
19917 case 0x10: /* BLTZAL */
19918 DIP("bltzal r%u, %u", rs
, imm
);
19921 if (!dis_instr_branch(cins
, dres
, bstmt
))
19924 dis_branch(True
, binop(Iop_CmpEQ32
, binop(Iop_And32
, getIReg(rs
),
19925 mkU32(0x80000000)), mkU32(0x80000000)), imm
, bstmt
);
19929 case 0x11: /* BGEZAL */
19930 DIP("bgezal r%u, %u", rs
, imm
);
19933 if (!dis_instr_branch(cins
, dres
, bstmt
))
19936 dis_branch(True
, binop(Iop_CmpEQ32
, binop(Iop_And32
, getIReg(rs
),
19937 mkU32(0x80000000)), mkU32(0x0)), imm
, bstmt
);
19941 case 0x12: /* BLTZALL */
19942 DIP("bltzall r%u, %u", rs
, imm
);
19943 putIReg(31, mode64
? mkU64(guest_PC_curr_instr
+ 8) :
19944 mkU32(guest_PC_curr_instr
+ 8));
19945 *lastn
= dis_branch_likely(binop(mode64
? Iop_CmpNE64
: Iop_CmpNE32
,
19946 binop(mode64
? Iop_And64
: Iop_And32
, getIReg(rs
),
19947 mode64
? mkU64(0x8000000000000000ULL
) : mkU32(0x80000000)),
19948 mode64
? mkU64(0x8000000000000000ULL
) : mkU32(0x80000000)),
19952 case 0x13: /* BGEZALL */
19953 DIP("bgezall r%u, %u", rs
, imm
);
19956 putIReg(31, mkU64(guest_PC_curr_instr
+ 8));
19957 *lastn
= dis_branch_likely(binop(Iop_CmpNE64
,
19960 mkU64(0x8000000000000000ULL
)),
19964 putIReg(31, mkU32(guest_PC_curr_instr
+ 8));
19965 *lastn
= dis_branch_likely(binop(Iop_CmpNE32
, binop(Iop_And32
,
19966 getIReg(rs
), mkU32(0x80000000)),
19972 case 0x1C: { /* BPOSGE32 */
19973 DIP("bposge32 %u", imm
);
19975 t0
= newTemp(Ity_I32
);
19976 /* Get pos field from DSPControl register. */
19977 assign(t0
, binop(Iop_And32
, getDSPControl(), mkU32(0x3f)));
19978 dis_branch(False
, unop(Iop_Not1
, binop(Iop_CmpLT32U
, mkexpr(t0
),
19979 mkU32(32))), imm
, bstmt
);
19983 case 0x1E: { /* DATI */
19984 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
19985 DIP("dati r%u, %x", rs
, imm
);
19986 putIReg(rs
, binop(Iop_Add64
,
19987 getIReg(rs
), mkU64((long long)imm
<< 48)));
19995 case 0x1F: /* SYNCI */
19996 /* Just ignore it */
20006 DIP("j 0x%x", instr_index
);
20010 assign(t0
, mkU64((guest_PC_curr_instr
& 0xFFFFFFFFF0000000ULL
) |
20011 (instr_index
<< 2)));
20013 assign(t0
, mkU32((guest_PC_curr_instr
& 0xF0000000) |
20014 (instr_index
<< 2)));
20016 *lastn
= mkexpr(t0
);
20019 case 0x03: /* JAL */
20020 DIP("jal 0x%x", instr_index
);
20023 putIReg(31, mkU64(guest_PC_curr_instr
+ 8));
20025 assign(t0
, mkU64((guest_PC_curr_instr
& 0xFFFFFFFFF0000000ULL
) |
20026 (instr_index
<< 2)));
20028 putIReg(31, mkU32(guest_PC_curr_instr
+ 8));
20030 assign(t0
, mkU32((guest_PC_curr_instr
& 0xF0000000) |
20031 (instr_index
<< 2)));
20034 *lastn
= mkexpr(t0
);
20037 case 0x04: /* BEQ */
20038 DIP("beq r%u, r%u, %u", rs
, rt
, imm
);
20041 dis_branch(False
, binop(Iop_CmpEQ64
, getIReg(rs
), getIReg(rt
)),
20044 dis_branch(False
, binop(Iop_CmpEQ32
, getIReg(rs
), getIReg(rt
)),
20049 case 0x05: /* BNE */
20050 DIP("bne r%u, r%u, %u", rs
, rt
, imm
);
20053 dis_branch(False
, binop(Iop_CmpNE64
, getIReg(rs
), getIReg(rt
)),
20056 dis_branch(False
, binop(Iop_CmpNE32
, getIReg(rs
), getIReg(rt
)),
20061 case 0x06: /* BLEZ, BLEZALC, BGEZALC, BGEUC */
20062 if (rt
== 0) { /* BLEZ */
20063 DIP("blez r%u, %u", rs
, imm
);
20066 dis_branch(False
, binop(Iop_CmpLE64S
, getIReg(rs
), mkU64(0x0)),
20069 dis_branch(False
, binop(Iop_CmpLE32S
, getIReg(rs
), mkU32(0x0)), imm
,
20071 } else if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
20072 if (rs
== 0) { /* BLEZALC */
20073 DIP("blezalc r%u, %u", rt
, imm
);
20076 dis_branch_compact(True
,
20077 binop(Iop_CmpLE64S
, getIReg(rt
), mkU64(0x0)),
20080 dis_branch_compact(True
,
20081 binop(Iop_CmpLE32S
, getIReg(rt
), mkU32(0x0)),
20083 } else if (rt
== rs
) { /* BGEZALC */
20084 DIP("bgezalc r%u, %u", rt
, imm
);
20087 dis_branch_compact(True
,
20088 binop(Iop_CmpLE64S
, mkU64(0x0), getIReg(rt
)),
20091 dis_branch_compact(True
,
20092 binop(Iop_CmpLE32S
, mkU32(0x0), getIReg(rt
)),
20094 } else { /* BGEUC */
20095 DIP("bgeuc r%u, r%u, %u", rt
, rs
, imm
);
20098 dis_branch_compact(False
,
20100 binop(Iop_CmpLT64U
,
20101 getIReg(rs
), getIReg(rt
))),
20104 dis_branch_compact(False
,
20106 binop(Iop_CmpLT32U
,
20107 getIReg(rs
), getIReg(rt
))),
20116 case 0x07: /* BGTZ, BGTZALC, BLTZALC, BLTUC */
20117 if (rt
== 0) { /* BGTZ */
20118 DIP("bgtz r%u, %u", rs
, imm
);
20121 dis_branch(False
, unop(Iop_Not1
, binop(Iop_CmpLE64S
, getIReg(rs
),
20122 mkU64(0x00))), imm
, bstmt
);
20124 dis_branch(False
, unop(Iop_Not1
, binop(Iop_CmpLE32S
, getIReg(rs
),
20125 mkU32(0x00))), imm
, bstmt
);
20126 } else if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
20127 if (rs
== 0) { /* BGTZALC */
20128 DIP("bgtzalc r%u, %u", rt
, imm
);
20131 dis_branch_compact(True
,
20133 binop(Iop_CmpLE64S
,
20134 getIReg(rt
), mkU64(0x0))),
20137 dis_branch_compact(True
,
20139 binop(Iop_CmpLE32S
,
20140 getIReg(rt
), mkU32(0x0))),
20143 } else if (rs
== rt
) { /* BLTZALC */
20144 DIP("bltzalc r%u, %u", rt
, imm
);
20147 dis_branch_compact(True
,
20149 binop(Iop_CmpLE64S
,
20150 mkU64(0x0), getIReg(rt
))),
20153 dis_branch_compact(True
,
20155 binop(Iop_CmpLE32S
,
20156 mkU32(0x0), getIReg(rt
))),
20159 } else { /* BLTUC */
20160 DIP("bltuc r%u, r%u, %u", rt
, rs
, imm
);
20163 dis_branch_compact(False
,
20164 binop(Iop_CmpLT64U
, getIReg(rs
), getIReg(rt
)),
20167 dis_branch_compact(False
,
20168 binop(Iop_CmpLT32U
, getIReg(rs
), getIReg(rt
)),
20178 #if defined(__mips__) && ((defined(__mips_isa_rev) && __mips_isa_rev < 6))
20180 case 0x08: { /* ADDI */
20181 DIP("addi r%u, r%u, %u", rt
, rs
, imm
);
20182 IRTemp tmpRs32
, t1
, t2
, t3
, t4
;
20183 tmpRs32
= newTemp(Ity_I32
);
20184 assign(tmpRs32
, mkNarrowTo32(ty
, getIReg(rs
)));
20186 t0
= newTemp(Ity_I32
);
20187 t1
= newTemp(Ity_I32
);
20188 t2
= newTemp(Ity_I32
);
20189 t3
= newTemp(Ity_I32
);
20190 t4
= newTemp(Ity_I32
);
20191 /* dst = src0 + sign(imm)
20192 if(sign(src0 ) != sign(imm ))
20194 if(sign(dst) == sign(src0 ))
20196 we have overflow! */
20198 assign(t0
, binop(Iop_Add32
, mkexpr(tmpRs32
),
20199 mkU32(extend_s_16to32(imm
))));
20200 assign(t1
, binop(Iop_Xor32
, mkexpr(tmpRs32
),
20201 mkU32(extend_s_16to32(imm
))));
20202 assign(t2
, unop(Iop_1Sto32
, binop(Iop_CmpEQ32
, binop(Iop_And32
,
20203 mkexpr(t1
), mkU32(0x80000000)), mkU32(0x80000000))));
20205 assign(t3
, binop(Iop_Xor32
, mkexpr(t0
), mkexpr(tmpRs32
)));
20206 assign(t4
, unop(Iop_1Sto32
, binop(Iop_CmpNE32
, binop(Iop_And32
,
20207 mkexpr(t3
), mkU32(0x80000000)), mkU32(0x80000000))));
20209 stmt(IRStmt_Exit(binop(Iop_CmpEQ32
, binop(Iop_Or32
, mkexpr(t2
),
20210 mkexpr(t4
)), mkU32(0)), Ijk_SigFPE_IntOvf
,
20211 mode64
? IRConst_U64(guest_PC_curr_instr
+ 4) :
20212 IRConst_U32(guest_PC_curr_instr
+ 4),
20215 putIReg(rt
, mkWidenFrom32(ty
, mkexpr(t0
), True
));
20219 #elif defined(__mips__) && ((defined(__mips_isa_rev) && __mips_isa_rev >= 6))
20221 case 0x08: { /* BEQZALC, BEQC, BOVC */
20222 IRTemp t1
, t2
, t3
, t4
;
20223 if (rs
== 0) { /* BEQZALC */
20224 DIP("beqzalc r%u, %u", rt
, imm
);
20227 dis_branch_compact(True
,
20228 binop(Iop_CmpEQ64
, getIReg(rt
), mkU64(0x0)),
20231 dis_branch_compact(True
,
20232 binop(Iop_CmpEQ32
, getIReg(rt
), mkU32(0x0)),
20235 } else if (rs
< rt
) { /* BEQC */
20236 DIP("beqc r%u, r%u, %u", rs
, rt
, imm
);
20239 dis_branch_compact(False
,
20240 binop(Iop_CmpEQ64
, getIReg(rt
), getIReg(rs
)),
20243 dis_branch_compact(False
,
20244 binop(Iop_CmpEQ32
, getIReg(rt
), getIReg(rs
)),
20247 } else { /* BOVC */
20248 DIP("bovc r%u, r%u, %u", rs
, rt
, imm
);
20251 t0
= newTemp(Ity_I32
);
20252 t1
= newTemp(Ity_I32
);
20253 t2
= newTemp(Ity_I32
);
20254 t3
= newTemp(Ity_I32
);
20255 assign(t0
, IRExpr_ITE(binop(Iop_CmpLT64S
,
20257 mkU64(0xffffffff80000000ULL
)),
20259 IRExpr_ITE(binop(Iop_CmpLT64S
,
20261 mkU64(0x7FFFFFFFULL
)),
20262 mkU32(0), mkU32(1))));
20263 assign(t1
, IRExpr_ITE(binop(Iop_CmpLT64S
,
20265 mkU64(0xffffffff80000000ULL
)),
20267 IRExpr_ITE(binop(Iop_CmpLT64S
,
20269 mkU64(0x7FFFFFFFULL
)),
20270 mkU32(0), mkU32(1))));
20271 assign(t2
, IRExpr_ITE(binop(Iop_CmpLT64S
,
20273 getIReg(rt
), getIReg(rs
)),
20274 mkU64(0xffffffff80000000ULL
)),
20276 IRExpr_ITE(binop(Iop_CmpLT64S
,
20280 mkU64(0x7FFFFFFFULL
)),
20281 mkU32(0), mkU32(1))));
20282 assign(t3
, binop(Iop_Add32
,
20284 binop(Iop_Add32
, mkexpr(t1
), mkexpr(t2
))));
20285 dis_branch_compact(False
,
20286 binop(Iop_CmpNE32
, mkexpr(t3
), mkU32(0)),
20289 IRTemp tmpRs32
= newTemp(Ity_I32
);
20290 IRTemp tmpRt32
= newTemp(Ity_I32
);
20291 assign(tmpRs32
, getIReg(rs
));
20292 assign(tmpRt32
, getIReg(rt
));
20294 t0
= newTemp(Ity_I32
);
20295 t1
= newTemp(Ity_I32
);
20296 t2
= newTemp(Ity_I32
);
20297 t3
= newTemp(Ity_I32
);
20298 t4
= newTemp(Ity_I32
);
20299 /* dst = src0 + src1
20300 if (sign(src0 ) != sign(src1 ))
20302 if (sign(dst) == sign(src0 ))
20304 we have overflow! */
20306 assign(t0
, binop(Iop_Add32
, mkexpr(tmpRs32
), mkexpr(tmpRt32
)));
20307 assign(t1
, binop(Iop_Xor32
, mkexpr(tmpRs32
), mkexpr(tmpRt32
)));
20308 assign(t2
, unop(Iop_1Uto32
,
20310 binop(Iop_And32
, mkexpr(t1
), mkU32(0x80000000)),
20311 mkU32(0x80000000))));
20313 assign(t3
, binop(Iop_Xor32
, mkexpr(t0
), mkexpr(tmpRs32
)));
20314 assign(t4
, unop(Iop_1Uto32
,
20316 binop(Iop_And32
, mkexpr(t3
), mkU32(0x80000000)),
20317 mkU32(0x80000000))));
20319 dis_branch_compact(False
, binop(Iop_CmpEQ32
,
20320 binop(Iop_Or32
, mkexpr(t2
), mkexpr(t4
)),
20321 mkU32(0)), imm
, dres
);
20326 /* In documentation for BEQC stands rs > rt and for BOVC stands rs >= rt! */
20331 case 0x09: /* ADDIU */
20332 DIP("addiu r%u, r%u, %u", rt
, rs
, imm
);
20335 putIReg(rt
, mkWidenFrom32(ty
, binop(Iop_Add32
,
20336 mkNarrowTo32(ty
, getIReg(rs
)), mkU32(extend_s_16to32(imm
))),
20339 putIReg(rt
, binop(Iop_Add32
, getIReg(rs
), mkU32(extend_s_16to32(imm
))));
20343 case 0x0A: /* SLTI */
20344 DIP("slti r%u, r%u, %u", rt
, rs
, imm
);
20347 putIReg(rt
, unop(Iop_1Uto64
, binop(Iop_CmpLT64S
, getIReg(rs
),
20348 mkU64(extend_s_16to64(imm
)))));
20350 putIReg(rt
, unop(Iop_1Uto32
, binop(Iop_CmpLT32S
, getIReg(rs
),
20351 mkU32(extend_s_16to32(imm
)))));
20355 case 0x0B: /* SLTIU */
20356 DIP("sltiu r%u, r%u, %u", rt
, rs
, imm
);
20359 putIReg(rt
, unop(Iop_1Uto64
, binop(Iop_CmpLT64U
, getIReg(rs
),
20360 mkU64(extend_s_16to64(imm
)))));
20362 putIReg(rt
, unop(Iop_1Uto32
, binop(Iop_CmpLT32U
, getIReg(rs
),
20363 mkU32(extend_s_16to32(imm
)))));
20367 case 0x0C: /* ANDI */
20368 DIP("andi r%u, r%u, %u", rt
, rs
, imm
);
20371 ALUI_PATTERN64(Iop_And64
);
20373 ALUI_PATTERN(Iop_And32
);
20378 case 0x0D: /* ORI */
20379 DIP("ori r%u, r%u, %u", rt
, rs
, imm
);
20382 ALUI_PATTERN64(Iop_Or64
);
20384 ALUI_PATTERN(Iop_Or32
);
20389 case 0x0E: /* XORI */
20390 DIP("xori r%u, r%u, %u", rt
, rs
, imm
);
20393 ALUI_PATTERN64(Iop_Xor64
);
20395 ALUI_PATTERN(Iop_Xor32
);
20400 case 0x0F: /* LUI */
20403 DIP("lui r%u, imm: 0x%x", rt
, imm
);
20406 putIReg(rt
, mkU64(extend_s_32to64(p
)));
20408 putIReg(rt
, mkU32(p
));
20411 } else if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) { /* AUI */
20412 DIP("aui r%u, imm: 0x%x", rt
, imm
);
20415 putIReg(rt
, unop(Iop_32Sto64
,
20419 mkU64(extend_s_32to64(imm
<< 16))))));
20421 putIReg(rt
, binop(Iop_Add32
, getIReg(rs
), mkU32(imm
<< 16)));
20437 static UInt
disInstr_MIPS_WRK_10(UInt cins
, const VexArchInfo
* archinfo
,
20438 const VexAbiInfo
* abiinfo
, DisResult
* dres
,
20439 IRStmt
** bstmt
, IRExpr
** lastn
)
20441 IRTemp t0
, t1
= 0, t2
, t3
, t4
, t5
, t6
, t7
;
20442 UInt opcode
, rs
, rt
, ft
, fs
, fd
, fmt
, tf
, nd
, function
, imm
;
20443 /* Additional variables for instruction fields in DSP ASE insructions */
20445 opcode
= get_opcode(cins
);
20446 imm
= get_imm(cins
);
20454 fmt
= get_fmt(cins
);
20455 function
= get_function(cins
);
20456 IRType ty
= mode64
? Ity_I64
: Ity_I32
;
20457 IRType tyF
= fp_mode64
? Ity_F64
: Ity_F32
;
20459 switch (opcode
& 0x0F) {
20460 case 0x01: { /* COP1 */
20461 if (fmt
== 0x3 && fd
== 0 && function
== 0) { /* MFHC1 */
20462 DIP("mfhc1 r%u, f%u", rt
, fs
);
20464 if (VEX_MIPS_CPU_HAS_MIPS32R2(archinfo
->hwcaps
) ||
20465 VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
20467 t0
= newTemp(Ity_I64
);
20468 t1
= newTemp(Ity_I32
);
20469 assign(t0
, unop(Iop_ReinterpF64asI64
, getDReg(fs
)));
20470 assign(t1
, unop(Iop_64HIto32
, mkexpr(t0
)));
20471 putIReg(rt
, mkWidenFrom32(ty
, mkexpr(t1
), True
));
20473 putIReg(rt
, mkWidenFrom32(ty
, unop(Iop_ReinterpF32asI32
,
20474 getFReg(fs
| 1)), True
));
20477 ILLEGAL_INSTRUCTON
;
20481 } else if (fmt
== 0x7 && fd
== 0 && function
== 0) { /* MTHC1 */
20482 DIP("mthc1 r%u, f%u", rt
, fs
);
20484 if (VEX_MIPS_CPU_HAS_MIPS32R2(archinfo
->hwcaps
) ||
20485 VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
20487 t0
= newTemp(Ity_I64
);
20488 assign(t0
, binop(Iop_32HLto64
, mkNarrowTo32(ty
, getIReg(rt
)),
20489 unop(Iop_ReinterpF32asI32
,
20490 getLoFromF64(Ity_F64
, getDReg(fs
)))));
20491 putDReg(fs
, unop(Iop_ReinterpI64asF64
, mkexpr(t0
)));
20493 putFReg(fs
| 1, unop(Iop_ReinterpI32asF32
,
20494 mkNarrowTo32(ty
, getIReg(rt
))));
20497 ILLEGAL_INSTRUCTON
;
20501 } else if (fmt
== 0x8) { /* BC */
20502 /* FcConditionalCode(bc1_cc) */
20503 UInt bc1_cc
= get_bc1_cc(cins
);
20504 t1
= newTemp(Ity_I1
);
20505 t2
= newTemp(Ity_I32
);
20506 t3
= newTemp(Ity_I1
);
20508 assign(t1
, binop(Iop_CmpEQ32
, mkU32(0), mkU32(bc1_cc
)));
20509 assign(t2
, IRExpr_ITE(mkexpr(t1
),
20511 binop(Iop_Shr32
, getFCSR(), mkU8(23)),
20514 binop(Iop_Shr32
, getFCSR(),
20515 mkU8(24 + bc1_cc
)),
20518 if (tf
== 1 && nd
== 0) {
20519 /* branch on true */
20520 DIP("bc1t %u, %u", bc1_cc
, imm
);
20521 assign(t3
, binop(Iop_CmpEQ32
, mkU32(1), mkexpr(t2
)));
20522 dis_branch(False
, mkexpr(t3
), imm
, bstmt
);
20524 } else if (tf
== 0 && nd
== 0) {
20525 /* branch on false */
20526 DIP("bc1f %u, %u", bc1_cc
, imm
);
20527 assign(t3
, binop(Iop_CmpEQ32
, mkU32(0), mkexpr(t2
)));
20528 dis_branch(False
, mkexpr(t3
), imm
, bstmt
);
20530 } else if (nd
== 1 && tf
== 0) {
20531 DIP("bc1fl %u, %u", bc1_cc
, imm
);
20532 *lastn
= dis_branch_likely(binop(Iop_CmpNE32
, mkexpr(t2
),
20535 } else if (nd
== 1 && tf
== 1) {
20536 DIP("bc1tl %u, %u", bc1_cc
, imm
);
20537 *lastn
= dis_branch_likely(binop(Iop_CmpEQ32
, mkexpr(t2
),
20542 } else if (fmt
>= 0x1c && has_msa
) { /* BNZ.df */
20544 t0
= newTemp(Ity_I32
);
20545 t1
= newTemp(Ity_V128
);
20546 t2
= newTemp(Ity_V128
);
20547 t3
= newTemp(Ity_V128
);
20548 assign(t1
, getWReg(ft
));
20549 assign(t2
, binop(Iop_64HLtoV128
, mkU64(0), mkU64(0)));
20552 case 0x00: { /* BNZ.B */
20553 DIP("BNZ.B w%u, %u", ft
, imm
);
20554 assign(t3
, binop(Iop_CmpEQ8x16
, mkexpr(t1
), mkexpr(t2
)));
20558 case 0x01: { /* BNZ.H */
20559 DIP("BNZ.H w%u, %u", ft
, imm
);
20560 assign(t3
, binop(Iop_CmpEQ16x8
, mkexpr(t1
), mkexpr(t2
)));
20564 case 0x02: { /* BNZ.W */
20565 DIP("BNZ.W w%u, %u", ft
, imm
);
20566 assign(t3
, binop(Iop_CmpEQ32x4
, mkexpr(t1
), mkexpr(t2
)));
20570 case 0x03: { /* BNZ.D */
20571 DIP("BNZ.D w%u, %u", ft
, imm
);
20572 assign(t3
, binop(Iop_CmpEQ64x2
, mkexpr(t1
), mkexpr(t2
)));
20580 unop(Iop_V128to32
, mkexpr(t3
)),
20581 unop(Iop_64HIto32
, unop(Iop_V128to64
, mkexpr(t3
)))),
20584 unop(Iop_V128HIto64
, mkexpr(t3
))),
20586 unop(Iop_V128HIto64
, mkexpr(t3
))))));
20588 binop(Iop_CmpEQ32
, mkexpr(t0
), mkU32(0)), imm
, bstmt
);
20589 } else if (fmt
== 0x0F && has_msa
) { /* BNZ.V */
20590 t0
= newTemp(Ity_I32
);
20591 t1
= newTemp(Ity_V128
);
20592 assign(t1
, getWReg(ft
));
20596 unop(Iop_V128to32
, mkexpr(t1
)),
20597 unop(Iop_64HIto32
, unop(Iop_V128to64
, mkexpr(t1
)))),
20599 unop(Iop_64to32
, unop(Iop_V128HIto64
, mkexpr(t1
))),
20601 unop(Iop_V128HIto64
, mkexpr(t1
))))));
20603 binop(Iop_CmpNE32
, mkexpr(t0
), mkU32(0)), imm
, bstmt
);
20604 } else if (fmt
>= 0x18 && has_msa
) { /* BZ.df */
20606 t0
= newTemp(Ity_I32
);
20607 t1
= newTemp(Ity_V128
);
20608 t2
= newTemp(Ity_V128
);
20609 t3
= newTemp(Ity_V128
);
20610 assign(t1
, getWReg(ft
));
20611 assign(t2
, binop(Iop_64HLtoV128
, mkU64(0), mkU64(0)));
20614 case 0x00: { /* BZ.B */
20615 DIP("BZ.B w%u, %u", ft
, imm
);
20616 assign(t3
, binop(Iop_CmpEQ8x16
, mkexpr(t1
), mkexpr(t2
)));
20620 case 0x01: { /* BZ.H */
20621 DIP("BZ.H w%u, %u", ft
, imm
);
20622 assign(t3
, binop(Iop_CmpEQ16x8
, mkexpr(t1
), mkexpr(t2
)));
20626 case 0x02: { /* BZ.W */
20627 DIP("BZ.W w%u, %u", ft
, imm
);
20628 assign(t3
, binop(Iop_CmpEQ32x4
, mkexpr(t1
), mkexpr(t2
)));
20632 case 0x03: { /* BZ.D */
20633 DIP("BZ.D w%u, %u", ft
, imm
);
20634 assign(t3
, binop(Iop_CmpEQ64x2
, mkexpr(t1
), mkexpr(t2
)));
20642 unop(Iop_V128to32
, mkexpr(t3
)),
20643 unop(Iop_64HIto32
, unop(Iop_V128to64
, mkexpr(t3
)))),
20645 unop(Iop_64to32
, unop(Iop_V128HIto64
, mkexpr(t3
))),
20647 unop(Iop_V128HIto64
, mkexpr(t3
))))));
20649 binop(Iop_CmpNE32
, mkexpr(t0
), mkU32(0)), imm
, bstmt
);
20650 } else if (fmt
== 0x0B && has_msa
) { /* BZ.V */
20651 t0
= newTemp(Ity_I32
);
20652 t1
= newTemp(Ity_V128
);
20653 assign(t1
, getWReg(ft
));
20657 unop(Iop_V128to32
, mkexpr(t1
)),
20658 unop(Iop_64HIto32
, unop(Iop_V128to64
, mkexpr(t1
)))),
20660 unop(Iop_64to32
, unop(Iop_V128HIto64
, mkexpr(t1
))),
20662 unop(Iop_V128HIto64
, mkexpr(t1
))))));
20664 binop(Iop_CmpEQ32
, mkexpr(t0
), mkU32(0)), imm
, bstmt
);
20665 } else if (fmt
== 0x09) { /* BC1EQZ */
20666 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
20667 DIP("bc1eqz f%u, %u", ft
, imm
);
20668 t1
= newTemp(Ity_I1
);
20671 assign(t1
, binop(Iop_CmpEQ64
,
20673 unop(Iop_ReinterpF64asI64
, getDReg(ft
)),
20677 assign(t1
, binop(Iop_CmpEQ32
,
20680 unop(Iop_ReinterpF64asI64
,
20686 dis_branch(False
, mkexpr(t1
), imm
, bstmt
);
20690 } else if (fmt
== 0x0D) { /* BC1NEZ */
20691 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
20692 DIP("bc1nez f%u, %u", ft
, imm
);
20693 t1
= newTemp(Ity_I1
);
20696 assign(t1
, binop(Iop_CmpNE64
,
20698 unop(Iop_ReinterpF64asI64
, getDReg(ft
)),
20702 assign(t1
, binop(Iop_CmpNE32
,
20705 unop(Iop_ReinterpF64asI64
, getDReg(ft
))),
20710 dis_branch(False
, mkexpr(t1
), imm
, bstmt
);
20712 ILLEGAL_INSTRUCTON
;
20716 if (fmt
== 0x15) { /* CMP.cond.d */
20717 Bool comparison
= True
;
20718 UInt signaling
= CMPAFD
;
20719 DIP("cmp.cond.d f%u, f%u, f%u, cond %u", fd
, fs
, ft
, function
);
20720 t0
= newTemp(Ity_I32
);
20722 /* Conditions starting with S should signal exception on QNaN inputs. */
20723 switch (function
) {
20724 case 0x08: /* SAF */
20725 signaling
= CMPSAFD
; /* fallthrough */
20727 case 0x00: /* AF */
20728 assign(t0
, binop(Iop_CmpF64
, getDReg(fs
), getDReg(ft
)));
20729 calculateFCSR(fs
, ft
, signaling
, False
, 2);
20731 binop(Iop_I64StoF64
,
20732 get_IR_roundingmode(), mkU64(0)));
20735 case 0x09: /* SUN */
20736 signaling
= CMPSAFD
; /* fallthrough */
20738 case 0x01: /* UN */
20739 assign(t0
, binop(Iop_CmpF64
, getDReg(fs
), getDReg(ft
)));
20740 calculateFCSR(fs
, ft
, signaling
, False
, 2);
20742 IRExpr_ITE(binop(Iop_CmpEQ32
, mkexpr(t0
), mkU32(0x45)),
20743 unop(Iop_ReinterpI64asF64
,
20744 mkU64(0xFFFFFFFFFFFFFFFFULL
)),
20745 binop(Iop_I64StoF64
,
20746 get_IR_roundingmode(), mkU64(0))));
20749 case 0x19: /* SOR */
20750 signaling
= CMPSAFD
; /* fallthrough */
20752 case 0x11: /* OR */
20753 assign(t0
, binop(Iop_CmpF64
, getDReg(fs
), getDReg(ft
)));
20754 calculateFCSR(fs
, ft
, signaling
, False
, 2);
20756 IRExpr_ITE(binop(Iop_CmpEQ32
, mkexpr(t0
), mkU32(0x45)),
20757 binop(Iop_I64StoF64
,
20758 get_IR_roundingmode(), mkU64(0)),
20759 unop(Iop_ReinterpI64asF64
,
20760 mkU64(0xFFFFFFFFFFFFFFFFULL
))));
20763 case 0x0A: /* SEQ */
20764 signaling
= CMPSAFD
; /* fallthrough */
20766 case 0x02: /* EQ */
20767 assign(t0
, binop(Iop_CmpF64
, getDReg(fs
), getDReg(ft
)));
20768 calculateFCSR(fs
, ft
, signaling
, False
, 2);
20770 IRExpr_ITE(binop(Iop_CmpEQ32
, mkexpr(t0
), mkU32(0x40)),
20771 unop(Iop_ReinterpI64asF64
,
20772 mkU64(0xFFFFFFFFFFFFFFFFULL
)),
20773 binop(Iop_I64StoF64
,
20774 get_IR_roundingmode(), mkU64(0))));
20777 case 0x1A: /* SNEQ */
20778 signaling
= CMPSAFD
; /* fallthrough */
20780 case 0x12: /* NEQ */
20781 assign(t0
, binop(Iop_CmpF64
, getDReg(fs
), getDReg(ft
)));
20782 calculateFCSR(fs
, ft
, signaling
, False
, 2);
20784 IRExpr_ITE(binop(Iop_CmpEQ32
, mkexpr(t0
), mkU32(0x40)),
20785 binop(Iop_I64StoF64
,
20786 get_IR_roundingmode(), mkU64(0)),
20787 unop(Iop_ReinterpI64asF64
,
20788 mkU64(0xFFFFFFFFFFFFFFFFULL
))));
20791 case 0x0B: /* SUEQ */
20792 signaling
= CMPSAFD
; /* fallthrough */
20794 case 0x03: /* UEQ */
20795 assign(t0
, binop(Iop_CmpF64
, getDReg(fs
), getDReg(ft
)));
20796 calculateFCSR(fs
, ft
, signaling
, False
, 2);
20798 IRExpr_ITE(binop(Iop_CmpEQ32
, mkexpr(t0
), mkU32(0x40)),
20799 unop(Iop_ReinterpI64asF64
,
20800 mkU64(0xFFFFFFFFFFFFFFFFULL
)),
20801 IRExpr_ITE(binop(Iop_CmpEQ32
,
20802 mkexpr(t0
), mkU32(0x45)),
20803 unop(Iop_ReinterpI64asF64
,
20804 mkU64(0xFFFFFFFFFFFFFFFFULL
)),
20805 binop(Iop_I64StoF64
,
20806 get_IR_roundingmode(),
20810 case 0x1B: /* SNEQ */
20811 signaling
= CMPSAFD
; /* fallthrough */
20813 case 0x13: /* NEQ */
20814 assign(t0
, binop(Iop_CmpF64
, getDReg(fs
), getDReg(ft
)));
20815 calculateFCSR(fs
, ft
, signaling
, False
, 2);
20817 IRExpr_ITE(binop(Iop_CmpEQ32
, mkexpr(t0
), mkU32(0x01)),
20818 unop(Iop_ReinterpI64asF64
,
20819 mkU64(0xFFFFFFFFFFFFFFFFULL
)),
20820 IRExpr_ITE(binop(Iop_CmpEQ32
,
20821 mkexpr(t0
), mkU32(0x00)),
20822 unop(Iop_ReinterpI64asF64
,
20823 mkU64(0xFFFFFFFFFFFFFFFFULL
)),
20824 binop(Iop_I64StoF64
,
20825 get_IR_roundingmode(),
20829 case 0x0C: /* SLT */
20830 signaling
= CMPSAFD
; /* fallthrough */
20832 case 0x04: /* LT */
20833 assign(t0
, binop(Iop_CmpF64
, getDReg(fs
), getDReg(ft
)));
20834 calculateFCSR(fs
, ft
, signaling
, False
, 2);
20836 IRExpr_ITE(binop(Iop_CmpEQ32
, mkexpr(t0
), mkU32(0x01)),
20837 unop(Iop_ReinterpI64asF64
,
20838 mkU64(0xFFFFFFFFFFFFFFFFULL
)),
20839 binop(Iop_I64StoF64
,
20840 get_IR_roundingmode(), mkU64(0))));
20843 case 0x0D: /* SULT */
20844 signaling
= CMPSAFD
; /* fallthrough */
20846 case 0x05: /* ULT */
20847 assign(t0
, binop(Iop_CmpF64
, getDReg(fs
), getDReg(ft
)));
20848 calculateFCSR(fs
, ft
, signaling
, False
, 2);
20850 IRExpr_ITE(binop(Iop_CmpEQ32
, mkexpr(t0
), mkU32(0x01)),
20851 unop(Iop_ReinterpI64asF64
,
20852 mkU64(0xFFFFFFFFFFFFFFFFULL
)),
20853 IRExpr_ITE(binop(Iop_CmpEQ32
,
20854 mkexpr(t0
), mkU32(0x45)),
20855 unop(Iop_ReinterpI64asF64
,
20856 mkU64(0xFFFFFFFFFFFFFFFFULL
)),
20857 binop(Iop_I64StoF64
,
20858 get_IR_roundingmode(),
20862 case 0x0E: /* SLE */
20863 signaling
= CMPSAFD
; /* fallthrough */
20865 case 0x06: /* LE */
20866 assign(t0
, binop(Iop_CmpF64
, getDReg(fs
), getDReg(ft
)));
20867 calculateFCSR(fs
, ft
, signaling
, False
, 2);
20869 IRExpr_ITE(binop(Iop_CmpEQ32
, mkexpr(t0
), mkU32(0x01)),
20870 unop(Iop_ReinterpI64asF64
,
20871 mkU64(0xFFFFFFFFFFFFFFFFULL
)),
20872 IRExpr_ITE(binop(Iop_CmpEQ32
,
20873 mkexpr(t0
), mkU32(0x40)),
20874 unop(Iop_ReinterpI64asF64
,
20875 mkU64(0xFFFFFFFFFFFFFFFFULL
)),
20876 binop(Iop_I64StoF64
,
20877 get_IR_roundingmode(),
20881 case 0x0F: /* SULE */
20882 signaling
= CMPSAFD
; /* fallthrough */
20884 case 0x07: /* ULE */
20885 assign(t0
, binop(Iop_CmpF64
, getDReg(fs
), getDReg(ft
)));
20886 calculateFCSR(fs
, ft
, signaling
, False
, 2);
20888 IRExpr_ITE(binop(Iop_CmpEQ32
, mkexpr(t0
), mkU32(0x0)),
20889 binop(Iop_I64StoF64
,
20890 get_IR_roundingmode(), mkU64(0)),
20891 unop(Iop_ReinterpI64asF64
,
20892 mkU64(0xFFFFFFFFFFFFFFFFULL
))));
20896 comparison
= False
;
20900 if (!VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
20907 } else if (fmt
== 0x14) {
20908 Bool comparison
= True
;
20909 UInt signaling
= CMPAFS
;
20910 DIP("cmp.cond.s f%u, f%u, f%u, cond %u", fd
, fs
, ft
, function
);
20911 t0
= newTemp(Ity_I32
);
20913 /* Conditions starting with S should signal exception on QNaN inputs. */
20914 switch (function
) {
20915 case 0x08: /* SAF */
20916 signaling
= CMPSAFS
; /* fallthrough */
20918 case 0x00: /* AF */
20919 assign(t0
, binop(Iop_CmpF32
,
20920 getLoFromF64(Ity_F64
, getFReg(fs
)),
20921 getLoFromF64(Ity_F64
, getFReg(ft
))));
20922 calculateFCSR(fs
, ft
, signaling
, True
, 2);
20924 mkWidenFromF32(tyF
,
20925 binop(Iop_I32StoF32
,
20926 get_IR_roundingmode(), mkU32(0))));
20929 case 0x09: /* SUN */
20930 signaling
= CMPSAFS
; /* fallthrough */
20932 case 0x01: /* UN */
20933 assign(t0
, binop(Iop_CmpF32
,
20934 getLoFromF64(Ity_F64
, getFReg(fs
)),
20935 getLoFromF64(Ity_F64
, getFReg(ft
))));
20936 calculateFCSR(fs
, ft
, signaling
, True
, 2);
20938 IRExpr_ITE(binop(Iop_CmpEQ32
, mkexpr(t0
), mkU32(0x45)),
20939 mkWidenFromF32(tyF
,
20940 unop(Iop_ReinterpI32asF32
,
20941 mkU32(0xFFFFFFFFU
))),
20942 mkWidenFromF32(tyF
,
20943 binop(Iop_I32StoF32
,
20944 get_IR_roundingmode(),
20948 case 0x19: /* SOR */
20949 signaling
= CMPSAFS
; /* fallthrough */
20951 case 0x11: /* OR */
20952 assign(t0
, binop(Iop_CmpF32
,
20953 getLoFromF64(Ity_F64
, getFReg(fs
)),
20954 getLoFromF64(Ity_F64
, getFReg(ft
))));
20955 calculateFCSR(fs
, ft
, signaling
, True
, 2);
20957 IRExpr_ITE(binop(Iop_CmpEQ32
, mkexpr(t0
), mkU32(0x45)),
20958 mkWidenFromF32(tyF
,
20959 binop(Iop_I32StoF32
,
20960 get_IR_roundingmode(),
20962 mkWidenFromF32(tyF
,
20963 unop(Iop_ReinterpI32asF32
,
20964 mkU32(0xFFFFFFFFU
)))));
20967 case 0x0A: /* SEQ */
20968 signaling
= CMPSAFS
; /* fallthrough */
20970 case 0x02: /* EQ */
20971 assign(t0
, binop(Iop_CmpF32
,
20972 getLoFromF64(Ity_F64
, getFReg(fs
)),
20973 getLoFromF64(Ity_F64
, getFReg(ft
))));
20974 calculateFCSR(fs
, ft
, signaling
, True
, 2);
20976 IRExpr_ITE(binop(Iop_CmpEQ32
, mkexpr(t0
), mkU32(0x40)),
20977 mkWidenFromF32(tyF
,
20978 unop(Iop_ReinterpI32asF32
,
20979 mkU32(0xFFFFFFFFU
))),
20980 mkWidenFromF32(tyF
,
20981 binop(Iop_I32StoF32
,
20982 get_IR_roundingmode(),
20986 case 0x1A: /* SNEQ */
20987 signaling
= CMPSAFS
; /* fallthrough */
20989 case 0x12: /* NEQ */
20990 assign(t0
, binop(Iop_CmpF32
,
20991 getLoFromF64(Ity_F64
, getFReg(fs
)),
20992 getLoFromF64(Ity_F64
, getFReg(ft
))));
20993 calculateFCSR(fs
, ft
, signaling
, True
, 2);
20995 IRExpr_ITE(binop(Iop_CmpEQ32
, mkexpr(t0
), mkU32(0x40)),
20996 mkWidenFromF32(tyF
,
20997 binop(Iop_I32StoF32
,
20998 get_IR_roundingmode(),
21000 mkWidenFromF32(tyF
,
21001 unop(Iop_ReinterpI32asF32
,
21002 mkU32(0xFFFFFFFFU
)))));
21005 case 0x0B: /* SUEQ */
21006 signaling
= CMPSAFS
; /* fallthrough */
21008 case 0x03: /* UEQ */
21009 assign(t0
, binop(Iop_CmpF32
,
21010 getLoFromF64(Ity_F64
, getFReg(fs
)),
21011 getLoFromF64(Ity_F64
, getFReg(ft
))));
21012 calculateFCSR(fs
, ft
, signaling
, True
, 2);
21014 IRExpr_ITE(binop(Iop_CmpEQ32
, mkexpr(t0
), mkU32(0x40)),
21015 mkWidenFromF32(tyF
,
21016 unop(Iop_ReinterpI32asF32
,
21017 mkU32(0xFFFFFFFFU
))),
21018 IRExpr_ITE(binop(Iop_CmpEQ32
,
21019 mkexpr(t0
), mkU32(0x45)),
21020 mkWidenFromF32(tyF
,
21021 unop(Iop_ReinterpI32asF32
,
21022 mkU32(0xFFFFFFFFU
))),
21023 mkWidenFromF32(tyF
,
21024 binop(Iop_I32StoF32
,
21025 get_IR_roundingmode(),
21029 case 0x1B: /* SNEQ */
21030 signaling
= CMPSAFS
; /* fallthrough */
21032 case 0x13: /* NEQ */
21033 assign(t0
, binop(Iop_CmpF32
,
21034 getLoFromF64(Ity_F64
, getFReg(fs
)),
21035 getLoFromF64(Ity_F64
, getFReg(ft
))));
21036 calculateFCSR(fs
, ft
, signaling
, True
, 2);
21038 IRExpr_ITE(binop(Iop_CmpEQ32
, mkexpr(t0
), mkU32(0x01)),
21039 mkWidenFromF32(tyF
,
21040 unop(Iop_ReinterpI32asF32
,
21041 mkU32(0xFFFFFFFFU
))),
21042 IRExpr_ITE(binop(Iop_CmpEQ32
,
21043 mkexpr(t0
), mkU32(0x00)),
21044 mkWidenFromF32(tyF
,
21045 unop(Iop_ReinterpI32asF32
,
21046 mkU32(0xFFFFFFFFU
))),
21047 mkWidenFromF32(tyF
,
21048 binop(Iop_I32StoF32
,
21049 get_IR_roundingmode(),
21053 case 0x0C: /* SLT */
21054 signaling
= CMPSAFS
; /* fallthrough */
21056 case 0x04: /* LT */
21057 assign(t0
, binop(Iop_CmpF32
,
21058 getLoFromF64(Ity_F64
, getFReg(fs
)),
21059 getLoFromF64(Ity_F64
, getFReg(ft
))));
21060 calculateFCSR(fs
, ft
, signaling
, True
, 2);
21062 IRExpr_ITE(binop(Iop_CmpEQ32
, mkexpr(t0
), mkU32(0x01)),
21063 mkWidenFromF32(tyF
,
21064 unop(Iop_ReinterpI32asF32
,
21065 mkU32(0xFFFFFFFFU
))),
21066 mkWidenFromF32(tyF
,
21067 binop(Iop_I32StoF32
,
21068 get_IR_roundingmode(),
21072 case 0x0D: /* SULT */
21073 signaling
= CMPSAFS
; /* fallthrough */
21075 case 0x05: /* ULT */
21076 assign(t0
, binop(Iop_CmpF32
,
21077 getLoFromF64(Ity_F64
, getFReg(fs
)),
21078 getLoFromF64(Ity_F64
, getFReg(ft
))));
21079 calculateFCSR(fs
, ft
, signaling
, True
, 2);
21081 IRExpr_ITE(binop(Iop_CmpEQ32
, mkexpr(t0
), mkU32(0x01)),
21082 mkWidenFromF32(tyF
,
21083 unop(Iop_ReinterpI32asF32
,
21084 mkU32(0xFFFFFFFFU
))),
21085 IRExpr_ITE(binop(Iop_CmpEQ32
,
21086 mkexpr(t0
), mkU32(0x45)),
21087 mkWidenFromF32(tyF
,
21088 unop(Iop_ReinterpI32asF32
,
21089 mkU32(0xFFFFFFFFU
))),
21090 mkWidenFromF32(tyF
,
21091 binop(Iop_I32StoF32
,
21092 get_IR_roundingmode(),
21096 case 0x0E: /* SLE */
21097 signaling
= CMPSAFS
; /* fallthrough */
21099 case 0x06: /* LE */
21100 assign(t0
, binop(Iop_CmpF32
,
21101 getLoFromF64(Ity_F64
, getFReg(fs
)),
21102 getLoFromF64(Ity_F64
, getFReg(ft
))));
21103 calculateFCSR(fs
, ft
, signaling
, True
, 2);
21105 IRExpr_ITE(binop(Iop_CmpEQ32
, mkexpr(t0
), mkU32(0x01)),
21106 mkWidenFromF32(tyF
,
21107 unop(Iop_ReinterpI32asF32
,
21108 mkU32(0xFFFFFFFFU
))),
21109 IRExpr_ITE(binop(Iop_CmpEQ32
,
21110 mkexpr(t0
), mkU32(0x40)),
21111 mkWidenFromF32(tyF
,
21112 unop(Iop_ReinterpI32asF32
,
21113 mkU32(0xFFFFFFFFU
))),
21114 mkWidenFromF32(tyF
,
21115 binop(Iop_I32StoF32
,
21116 get_IR_roundingmode(),
21120 case 0x0F: /* SULE */
21121 signaling
= CMPSAFS
; /* fallthrough */
21123 case 0x07: /* ULE */
21124 assign(t0
, binop(Iop_CmpF32
,
21125 getLoFromF64(Ity_F64
, getFReg(fs
)),
21126 getLoFromF64(Ity_F64
, getFReg(ft
))));
21127 calculateFCSR(fs
, ft
, signaling
, True
, 2);
21129 IRExpr_ITE(binop(Iop_CmpEQ32
, mkexpr(t0
), mkU32(0x0)),
21130 mkWidenFromF32(tyF
,
21131 binop(Iop_I32StoF32
,
21132 get_IR_roundingmode(),
21134 mkWidenFromF32(tyF
,
21135 unop(Iop_ReinterpI32asF32
,
21136 mkU32(0xFFFFFFFFU
)))));
21140 comparison
= False
;
21144 if (!VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
21152 switch (function
) {
21153 case 0x04: { /* SQRT.fmt */
21155 case 0x10: { /* S */
21156 IRExpr
*rm
= get_IR_roundingmode();
21157 putFReg(fd
, mkWidenFromF32(tyF
, binop(Iop_SqrtF32
, rm
,
21158 getLoFromF64(tyF
, getFReg(fs
)))));
21162 case 0x11: { /* D */
21163 IRExpr
*rm
= get_IR_roundingmode();
21164 putDReg(fd
, binop(Iop_SqrtF64
, rm
, getDReg(fs
)));
21172 break; /* SQRT.fmt */
21174 case 0x05: /* ABS.fmt */
21177 DIP("abs.s f%u, f%u", fd
, fs
);
21178 putFReg(fd
, mkWidenFromF32(tyF
, unop(Iop_AbsF32
,
21179 getLoFromF64(tyF
, getFReg(fs
)))));
21183 DIP("abs.d f%u, f%u", fd
, fs
);
21184 putDReg(fd
, unop(Iop_AbsF64
, getDReg(fs
)));
21191 break; /* ABS.fmt */
21193 case 0x02: /* MUL.fmt */
21195 case 0x11: { /* D */
21196 DIP("mul.d f%u, f%u, f%u", fd
, fs
, ft
);
21197 IRExpr
*rm
= get_IR_roundingmode();
21198 putDReg(fd
, triop(Iop_MulF64
, rm
, getDReg(fs
),
21203 case 0x10: { /* S */
21204 DIP("mul.s f%u, f%u, f%u", fd
, fs
, ft
);
21205 IRExpr
*rm
= get_IR_roundingmode();
21206 putFReg(fd
, mkWidenFromF32(tyF
, triop(Iop_MulF32
, rm
,
21207 getLoFromF64(tyF
, getFReg(fs
)),
21208 getLoFromF64(tyF
, getFReg(ft
)))));
21216 break; /* MUL.fmt */
21218 case 0x03: /* DIV.fmt */
21220 case 0x11: { /* D */
21221 DIP("div.d f%u, f%u, f%u", fd
, fs
, ft
);
21222 IRExpr
*rm
= get_IR_roundingmode();
21223 putDReg(fd
, triop(Iop_DivF64
, rm
, getDReg(fs
),
21228 case 0x10: { /* S */
21229 DIP("div.s f%u, f%u, f%u", fd
, fs
, ft
);
21230 calculateFCSR(fs
, ft
, DIVS
, False
, 2);
21231 IRExpr
*rm
= get_IR_roundingmode();
21232 putFReg(fd
, mkWidenFromF32(tyF
, triop(Iop_DivF32
, rm
,
21233 getLoFromF64(tyF
, getFReg(fs
)),
21234 getLoFromF64(tyF
, getFReg(ft
)))));
21242 break; /* DIV.fmt */
21244 case 0x01: /* SUB.fmt */
21246 case 0x11: { /* D */
21247 DIP("sub.d f%u, f%u, f%u", fd
, fs
, ft
);
21248 calculateFCSR(fs
, ft
, SUBD
, False
, 2);
21249 IRExpr
*rm
= get_IR_roundingmode();
21250 putDReg(fd
, triop(Iop_SubF64
, rm
, getDReg(fs
),
21255 case 0x10: { /* S */
21256 DIP("sub.s f%u, f%u, f%u", fd
, fs
, ft
);
21257 calculateFCSR(fs
, ft
, SUBS
, True
, 2);
21258 IRExpr
*rm
= get_IR_roundingmode();
21259 putFReg(fd
, mkWidenFromF32(tyF
, triop(Iop_SubF32
, rm
,
21260 getLoFromF64(tyF
, getFReg(fs
)),
21261 getLoFromF64(tyF
, getFReg(ft
)))));
21269 break; /* SUB.fmt */
21271 case 0x06: /* MOV.fmt */
21274 DIP("mov.d f%u, f%u", fd
, fs
);
21277 putDReg(fd
, getDReg(fs
));
21279 putFReg(fd
, getFReg(fs
));
21280 putFReg(fd
+ 1, getFReg(fs
+ 1));
21286 DIP("mov.s f%u, f%u", fd
, fs
);
21287 putFReg(fd
, getFReg(fs
));
21294 break; /* MOV.fmt */
21296 case 0x07: /* NEG.fmt */
21299 DIP("neg.s f%u, f%u", fd
, fs
);
21300 putFReg(fd
, mkWidenFromF32(tyF
, unop(Iop_NegF32
,
21301 getLoFromF64(tyF
, getFReg(fs
)))));
21305 DIP("neg.d f%u, f%u", fd
, fs
);
21306 putDReg(fd
, unop(Iop_NegF64
, getDReg(fs
)));
21313 break; /* NEG.fmt */
21315 case 0x08: /* ROUND.L.fmt */
21318 DIP("round.l.s f%u, f%u", fd
, fs
);
21321 calculateFCSR(fs
, 0, ROUNDLS
, True
, 1);
21322 t0
= newTemp(Ity_I64
);
21324 assign(t0
, binop(Iop_F32toI64S
, mkU32(0x0),
21325 getLoFromF64(Ity_F64
, getFReg(fs
))));
21327 putDReg(fd
, unop(Iop_ReinterpI64asF64
, mkexpr(t0
)));
21335 DIP("round.l.d f%u, f%u", fd
, fs
);
21338 calculateFCSR(fs
, 0, ROUNDLD
, False
, 1);
21339 putDReg(fd
, unop(Iop_ReinterpI64asF64
,
21340 binop(Iop_F64toI64S
,
21354 break; /* ROUND.L.fmt */
21356 case 0x09: /* TRUNC.L.fmt */
21359 DIP("trunc.l.s f%u, f%u", fd
, fs
);
21362 calculateFCSR(fs
, 0, TRUNCLS
, True
, 1);
21363 t0
= newTemp(Ity_I64
);
21364 assign(t0
, binop(Iop_F32toI64S
, mkU32(0x3),
21365 getLoFromF64(Ity_F64
, getFReg(fs
))));
21367 putDReg(fd
, unop(Iop_ReinterpI64asF64
, mkexpr(t0
)));
21375 DIP("trunc.l.d f%u, f%u", fd
, fs
);
21378 calculateFCSR(fs
, 0, TRUNCLD
, False
, 1);
21379 putDReg(fd
, unop(Iop_ReinterpI64asF64
,
21380 binop(Iop_F64toI64S
,
21393 break; /* TRUNC.L.fmt */
21395 case 0x15: /* RECIP.fmt */
21397 case 0x10: { /* S */
21398 DIP("recip.s f%u, f%u", fd
, fs
);
21399 IRExpr
*rm
= get_IR_roundingmode();
21400 putFReg(fd
, mkWidenFromF32(tyF
, triop(Iop_DivF32
,
21401 rm
, unop(Iop_ReinterpI32asF32
,
21402 mkU32(ONE_SINGLE
)), getLoFromF64(tyF
,
21407 case 0x11: { /* D */
21408 DIP("recip.d f%u, f%u", fd
, fs
);
21409 IRExpr
*rm
= get_IR_roundingmode();
21410 /* putDReg(fd, 1.0/getDreg(fs)); */
21411 putDReg(fd
, triop(Iop_DivF64
, rm
,
21412 unop(Iop_ReinterpI64asF64
,
21413 mkU64(ONE_DOUBLE
)), getDReg(fs
)));
21422 break; /* RECIP.fmt */
21424 case 0x13: /* MOVN.fmt */
21427 DIP("movn.s f%u, f%u, r%u", fd
, fs
, rt
);
21428 t1
= newTemp(Ity_I1
);
21431 assign(t1
, binop(Iop_CmpNE64
, mkU64(0), getIReg(rt
)));
21433 assign(t1
, binop(Iop_CmpNE32
, mkU32(0), getIReg(rt
)));
21435 putFReg(fd
, IRExpr_ITE(mkexpr(t1
), getFReg(fs
), getFReg(fd
)));
21439 DIP("movn.d f%u, f%u, r%u", fd
, fs
, rt
);
21440 t1
= newTemp(Ity_I1
);
21443 assign(t1
, binop(Iop_CmpNE64
, mkU64(0), getIReg(rt
)));
21445 assign(t1
, binop(Iop_CmpNE32
, mkU32(0), getIReg(rt
)));
21447 putDReg(fd
, IRExpr_ITE(mkexpr(t1
), getDReg(fs
), getDReg(fd
)));
21454 break; /* MOVN.fmt */
21456 case 0x12: /* MOVZ.fmt */
21459 DIP("movz.s f%u, f%u, r%u", fd
, fs
, rt
);
21460 t1
= newTemp(Ity_I1
);
21463 assign(t1
, binop(Iop_CmpEQ64
, mkU64(0), getIReg(rt
)));
21465 assign(t1
, binop(Iop_CmpEQ32
, mkU32(0), getIReg(rt
)));
21467 putFReg(fd
, IRExpr_ITE(mkexpr(t1
), getFReg(fs
), getFReg(fd
)));
21471 DIP("movz.d f%u, f%u, r%u", fd
, fs
, rt
);
21472 t1
= newTemp(Ity_I1
);
21475 assign(t1
, binop(Iop_CmpEQ64
, mkU64(0), getIReg(rt
)));
21477 assign(t1
, binop(Iop_CmpEQ32
, mkU32(0), getIReg(rt
)));
21479 putDReg(fd
, IRExpr_ITE(mkexpr(t1
), getDReg(fs
), getDReg(fd
)));
21486 break; /* MOVZ.fmt */
21488 case 0x11: /* MOVT.fmt */
21490 UInt mov_cc
= get_mov_cc(cins
);
21492 switch (fmt
) { /* MOVCF = 010001 */
21494 DIP("movt.d f%u, f%u, %u", fd
, fs
, mov_cc
);
21495 t1
= newTemp(Ity_I1
);
21496 t2
= newTemp(Ity_I32
);
21497 t3
= newTemp(Ity_I1
);
21498 t4
= newTemp(Ity_F64
);
21500 assign(t1
, binop(Iop_CmpEQ32
, mkU32(0), mkU32(mov_cc
)));
21501 assign(t2
, IRExpr_ITE(mkexpr(t1
),
21503 binop(Iop_Shr32
, getFCSR(),
21507 binop(Iop_Shr32
, getFCSR(),
21508 mkU8(24 + mov_cc
)),
21512 assign(t3
, binop(Iop_CmpEQ32
, mkU32(1), mkexpr(t2
)));
21513 assign(t4
, IRExpr_ITE(mkexpr(t3
),
21514 getDReg(fs
), getDReg(fd
)));
21515 putDReg(fd
, mkexpr(t4
));
21519 DIP("movt.s f%u, f%u, %u", fd
, fs
, mov_cc
);
21520 t1
= newTemp(Ity_I1
);
21521 t2
= newTemp(Ity_I32
);
21522 t3
= newTemp(Ity_I1
);
21523 t4
= newTemp(Ity_F64
);
21524 t5
= newTemp(Ity_F64
);
21525 t6
= newTemp(Ity_F64
);
21526 t7
= newTemp(Ity_I64
);
21529 assign(t5
, getFReg(fs
));
21530 assign(t6
, getFReg(fd
));
21532 assign(t5
, unop(Iop_F32toF64
, getFReg(fs
)));
21533 assign(t6
, unop(Iop_F32toF64
, getFReg(fd
)));
21536 assign(t1
, binop(Iop_CmpEQ32
, mkU32(0), mkU32(mov_cc
)));
21537 assign(t2
, IRExpr_ITE(mkexpr(t1
),
21539 binop(Iop_Shr32
, getFCSR(),
21543 binop(Iop_Shr32
, getFCSR(),
21544 mkU8(24 + mov_cc
)),
21548 assign(t3
, binop(Iop_CmpEQ32
, mkU32(1), mkexpr(t2
)));
21549 assign(t4
, IRExpr_ITE(mkexpr(t3
),
21550 mkexpr(t5
), mkexpr(t6
)));
21553 IRTemp f
= newTemp(Ity_F64
);
21554 IRTemp fd_hi
= newTemp(Ity_I32
);
21555 assign(f
, getFReg(fd
));
21556 assign(fd_hi
, unop(Iop_64HIto32
,
21557 unop(Iop_ReinterpF64asI64
, mkexpr(f
))));
21558 assign(t7
, mkWidenFrom32(Ity_I64
, unop(Iop_64to32
,
21559 unop(Iop_ReinterpF64asI64
, mkexpr(t4
))),
21562 putFReg(fd
, unop(Iop_ReinterpI64asF64
, mkexpr(t7
)));
21564 putFReg(fd
, binop(Iop_F64toF32
, get_IR_roundingmode(),
21572 } else if (tf
== 0) { /* MOVF.fmt */
21573 UInt mov_cc
= get_mov_cc(cins
);
21575 switch (fmt
) { /* MOVCF = 010001 */
21577 DIP("movf.d f%u, f%u, %u", fd
, fs
, mov_cc
);
21578 t1
= newTemp(Ity_I1
);
21579 t2
= newTemp(Ity_I32
);
21580 t3
= newTemp(Ity_I1
);
21581 t4
= newTemp(Ity_F64
);
21583 assign(t1
, binop(Iop_CmpEQ32
, mkU32(0), mkU32(mov_cc
)));
21584 assign(t2
, IRExpr_ITE(mkexpr(t1
),
21586 binop(Iop_Shr32
, getFCSR(),
21590 binop(Iop_Shr32
, getFCSR(),
21591 mkU8(24 + mov_cc
)),
21595 assign(t3
, binop(Iop_CmpEQ32
, mkU32(0), mkexpr(t2
)));
21596 assign(t4
, IRExpr_ITE(mkexpr(t3
),
21597 getDReg(fs
), getDReg(fd
)));
21598 putDReg(fd
, mkexpr(t4
));
21602 DIP("movf.s f%u, f%u, %u", fd
, fs
, mov_cc
);
21603 t1
= newTemp(Ity_I1
);
21604 t2
= newTemp(Ity_I32
);
21605 t3
= newTemp(Ity_I1
);
21606 t4
= newTemp(Ity_F64
);
21607 t5
= newTemp(Ity_F64
);
21608 t6
= newTemp(Ity_F64
);
21611 assign(t5
, getFReg(fs
));
21612 assign(t6
, getFReg(fd
));
21614 assign(t5
, unop(Iop_F32toF64
, getFReg(fs
)));
21615 assign(t6
, unop(Iop_F32toF64
, getFReg(fd
)));
21618 assign(t1
, binop(Iop_CmpEQ32
, mkU32(0), mkU32(mov_cc
)));
21619 assign(t2
, IRExpr_ITE(mkexpr(t1
),
21621 binop(Iop_Shr32
, getFCSR(),
21625 binop(Iop_Shr32
, getFCSR(),
21626 mkU8(24 + mov_cc
)),
21630 assign(t3
, binop(Iop_CmpEQ32
, mkU32(0), mkexpr(t2
)));
21631 assign(t4
, IRExpr_ITE(mkexpr(t3
),
21632 mkexpr(t5
), mkexpr(t6
)));
21635 IRTemp f
= newTemp(Ity_F64
);
21636 IRTemp fd_hi
= newTemp(Ity_I32
);
21637 t7
= newTemp(Ity_I64
);
21638 assign(f
, getFReg(fd
));
21639 assign(fd_hi
, unop(Iop_64HIto32
,
21640 unop(Iop_ReinterpF64asI64
, mkexpr(f
))));
21641 assign(t7
, mkWidenFrom32(Ity_I64
, unop(Iop_64to32
,
21642 unop(Iop_ReinterpF64asI64
, mkexpr(t4
))),
21645 putFReg(fd
, unop(Iop_ReinterpI64asF64
, mkexpr(t7
)));
21647 putFReg(fd
, binop(Iop_F64toF32
, get_IR_roundingmode(),
21657 break; /* MOVT.fmt */
21659 case 0x00: /* ADD.fmt */
21661 case 0x10: { /* S */
21662 DIP("add.s f%u, f%u, f%u", fd
, fs
, ft
);
21663 calculateFCSR(fs
, ft
, ADDS
, True
, 2);
21664 IRExpr
*rm
= get_IR_roundingmode();
21665 putFReg(fd
, mkWidenFromF32(tyF
, triop(Iop_AddF32
, rm
,
21666 getLoFromF64(tyF
, getFReg(fs
)),
21667 getLoFromF64(tyF
, getFReg(ft
)))));
21671 case 0x11: { /* D */
21672 DIP("add.d f%u, f%u, f%u", fd
, fs
, ft
);
21673 calculateFCSR(fs
, ft
, ADDD
, False
, 2);
21674 IRExpr
*rm
= get_IR_roundingmode();
21675 putDReg(fd
, triop(Iop_AddF64
, rm
, getDReg(fs
), getDReg(ft
)));
21679 case 0x04: /* MTC1 (Move Word to Floating Point) */
21680 DIP("mtc1 r%u, f%u", rt
, fs
);
21683 t0
= newTemp(Ity_I32
);
21684 t1
= newTemp(Ity_F32
);
21685 assign(t0
, mkNarrowTo32(ty
, getIReg(rt
)));
21686 assign(t1
, unop(Iop_ReinterpI32asF32
, mkexpr(t0
)));
21688 putFReg(fs
, mkWidenFromF32(tyF
, mkexpr(t1
)));
21690 putFReg(fs
, unop(Iop_ReinterpI32asF32
,
21691 mkNarrowTo32(ty
, getIReg(rt
))));
21695 case 0x05: /* Doubleword Move to Floating Point DMTC1; MIPS64 */
21696 DIP("dmtc1 r%u, f%u", rt
, fs
);
21698 putDReg(fs
, unop(Iop_ReinterpI64asF64
, getIReg(rt
)));
21701 case 0x00: /* MFC1 */
21702 DIP("mfc1 r%u, f%u", rt
, fs
);
21705 t0
= newTemp(Ity_I64
);
21706 t1
= newTemp(Ity_I32
);
21707 assign(t0
, unop(Iop_ReinterpF64asI64
, getFReg(fs
)));
21708 assign(t1
, unop(Iop_64to32
, mkexpr(t0
)));
21709 putIReg(rt
, mkWidenFrom32(ty
, mkexpr(t1
), True
));
21711 putIReg(rt
, mkWidenFrom32(ty
,
21712 unop(Iop_ReinterpF32asI32
, getFReg(fs
)),
21717 case 0x01: /* Doubleword Move from Floating Point DMFC1;
21719 DIP("dmfc1 r%u, f%u", rt
, fs
);
21720 putIReg(rt
, unop(Iop_ReinterpF64asI64
, getDReg(fs
)));
21723 case 0x06: /* CTC1 */
21724 DIP("ctc1 r%u, f%u", rt
, fs
);
21725 t0
= newTemp(Ity_I32
);
21726 t1
= newTemp(Ity_I32
);
21727 t2
= newTemp(Ity_I32
);
21728 t3
= newTemp(Ity_I32
);
21729 t4
= newTemp(Ity_I32
);
21730 t5
= newTemp(Ity_I32
);
21731 t6
= newTemp(Ity_I32
);
21732 assign(t0
, mkNarrowTo32(ty
, getIReg(rt
)));
21734 if (fs
== 25) { /* FCCR */
21735 assign(t1
, binop(Iop_Shl32
, binop(Iop_And32
, mkexpr(t0
),
21736 mkU32(0x000000FE)), mkU8(24)));
21737 assign(t2
, binop(Iop_And32
, mkexpr(t0
),
21738 mkU32(0x01000000)));
21739 assign(t3
, binop(Iop_Shl32
, binop(Iop_And32
, mkexpr(t0
),
21740 mkU32(0x00000001)), mkU8(23)));
21741 assign(t4
, binop(Iop_And32
, mkexpr(t0
),
21742 mkU32(0x007FFFFF)));
21743 putFCSR(binop(Iop_Or32
, binop(Iop_Or32
, mkexpr(t1
),
21744 mkexpr(t2
)), binop(Iop_Or32
, mkexpr(t3
),
21746 } else if (fs
== 26) { /* FEXR */
21747 assign(t1
, binop(Iop_And32
, getFCSR(), mkU32(0xFFFC0000)));
21748 assign(t2
, binop(Iop_And32
, mkexpr(t0
),
21749 mkU32(0x0003F000)));
21750 assign(t3
, binop(Iop_And32
, getFCSR(), mkU32(0x00000F80)));
21751 assign(t4
, binop(Iop_And32
, mkexpr(t0
),
21752 mkU32(0x0000007C)));
21753 assign(t5
, binop(Iop_And32
, getFCSR(), mkU32(0x00000003)));
21754 putFCSR(binop(Iop_Or32
, binop(Iop_Or32
, binop(Iop_Or32
,
21755 mkexpr(t1
), mkexpr(t2
)), binop(Iop_Or32
,
21756 mkexpr(t3
), mkexpr(t4
))), mkexpr(t5
)));
21757 } else if (fs
== 28) {
21758 assign(t1
, binop(Iop_And32
, getFCSR(), mkU32(0xFE000000)));
21759 assign(t2
, binop(Iop_Shl32
, binop(Iop_And32
, mkexpr(t0
),
21760 mkU32(0x00000002)), mkU8(22)));
21761 assign(t3
, binop(Iop_And32
, getFCSR(), mkU32(0x00FFF000)));
21762 assign(t4
, binop(Iop_And32
, mkexpr(t0
),
21763 mkU32(0x00000F80)));
21764 assign(t5
, binop(Iop_And32
, getFCSR(), mkU32(0x0000007C)));
21765 assign(t6
, binop(Iop_And32
, mkexpr(t0
),
21766 mkU32(0x00000003)));
21767 putFCSR(binop(Iop_Or32
, binop(Iop_Or32
, binop(Iop_Or32
,
21768 mkexpr(t1
), mkexpr(t2
)), binop(Iop_Or32
,
21769 mkexpr(t3
), mkexpr(t4
))), binop(Iop_Or32
,
21770 mkexpr(t5
), mkexpr(t6
))));
21771 } else if (fs
== 31) {
21772 putFCSR(mkexpr(t0
));
21777 case 0x02: /* CFC1 */
21778 DIP("cfc1 r%u, f%u", rt
, fs
);
21779 t0
= newTemp(Ity_I32
);
21780 t1
= newTemp(Ity_I32
);
21781 t2
= newTemp(Ity_I32
);
21782 t3
= newTemp(Ity_I32
);
21783 t4
= newTemp(Ity_I32
);
21784 t5
= newTemp(Ity_I32
);
21785 t6
= newTemp(Ity_I32
);
21786 assign(t0
, getFCSR());
21789 putIReg(rt
, mkWidenFrom32(ty
,
21790 IRExpr_Get(offsetof(VexGuestMIPS32State
,
21794 } else if (fs
== 25) {
21795 assign(t1
, mkU32(0x000000FF));
21796 assign(t2
, binop(Iop_Shr32
, binop(Iop_And32
, mkexpr(t0
),
21797 mkU32(0xFE000000)), mkU8(25)));
21798 assign(t3
, binop(Iop_Shr32
, binop(Iop_And32
, mkexpr(t0
),
21799 mkU32(0x00800000)), mkU8(23)));
21800 putIReg(rt
, mkWidenFrom32(ty
, binop(Iop_Or32
,
21801 binop(Iop_Or32
, mkexpr(t1
), mkexpr(t2
)),
21802 mkexpr(t3
)), False
));
21803 } else if (fs
== 26) {
21804 assign(t1
, mkU32(0xFFFFF07C));
21805 assign(t2
, binop(Iop_And32
, mkexpr(t0
),
21806 mkU32(0x0003F000)));
21807 assign(t3
, binop(Iop_And32
, mkexpr(t0
),
21808 mkU32(0x0000007C)));
21809 putIReg(rt
, mkWidenFrom32(ty
, binop(Iop_Or32
,
21810 binop(Iop_Or32
, mkexpr(t1
), mkexpr(t2
)),
21811 mkexpr(t3
)), False
));
21812 } else if (fs
== 28) {
21813 assign(t1
, mkU32(0x00000F87));
21814 assign(t2
, binop(Iop_And32
, mkexpr(t0
),
21815 mkU32(0x00000F83)));
21816 assign(t3
, binop(Iop_Shr32
, binop(Iop_And32
, mkexpr(t0
),
21817 mkU32(0x01000000)), mkU8(22)));
21818 putIReg(rt
, mkWidenFrom32(ty
, binop(Iop_Or32
,
21819 binop(Iop_Or32
, mkexpr(t1
), mkexpr(t2
)),
21820 mkexpr(t3
)), False
));
21821 } else if (fs
== 31) {
21822 putIReg(rt
, mkWidenFrom32(ty
, getFCSR(), False
));
21833 case 0x21: /* CVT.D */
21836 DIP("cvt.d.s f%u, f%u", fd
, fs
);
21837 calculateFCSR(fs
, 0, CVTDS
, True
, 1);
21840 t0
= newTemp(Ity_I64
);
21841 t1
= newTemp(Ity_I32
);
21842 t3
= newTemp(Ity_F32
);
21843 t4
= newTemp(Ity_F32
);
21844 /* get lo half of FPR */
21845 assign(t0
, unop(Iop_ReinterpF64asI64
, getFReg(fs
)));
21847 assign(t1
, unop(Iop_64to32
, mkexpr(t0
)));
21849 assign(t3
, unop(Iop_ReinterpI32asF32
, mkexpr(t1
)));
21851 putFReg(fd
, unop(Iop_F32toF64
, mkexpr(t3
)));
21853 putDReg(fd
, unop(Iop_F32toF64
, getFReg(fs
)));
21858 DIP("cvt.d.w %u, %u", fd
, fs
);
21859 calculateFCSR(fs
, 0, CVTDW
, True
, 1);
21862 t0
= newTemp(Ity_I64
);
21863 t1
= newTemp(Ity_I32
);
21864 t3
= newTemp(Ity_F32
);
21865 t4
= newTemp(Ity_F32
);
21866 /* get lo half of FPR */
21867 assign(t0
, unop(Iop_ReinterpF64asI64
, getFReg(fs
)));
21869 assign(t1
, unop(Iop_64to32
, mkexpr(t0
)));
21870 putDReg(fd
, unop(Iop_I32StoF64
, mkexpr(t1
)));
21873 t0
= newTemp(Ity_I32
);
21874 assign(t0
, unop(Iop_ReinterpF32asI32
, getFReg(fs
)));
21875 putDReg(fd
, unop(Iop_I32StoF64
, mkexpr(t0
)));
21879 case 0x15: { /* L */
21881 DIP("cvt.d.l %u, %u", fd
, fs
);
21882 calculateFCSR(fs
, 0, CVTDL
, False
, 1);
21883 t0
= newTemp(Ity_I64
);
21884 assign(t0
, unop(Iop_ReinterpF64asI64
, getFReg(fs
)));
21886 putFReg(fd
, binop(Iop_I64StoF64
,
21887 get_IR_roundingmode(), mkexpr(t0
)));
21899 case 0x20: /* CVT.s */
21902 DIP("cvt.s.w %u, %u", fd
, fs
);
21903 calculateFCSR(fs
, 0, CVTSW
, True
, 1);
21906 t0
= newTemp(Ity_I64
);
21907 t1
= newTemp(Ity_I32
);
21908 t3
= newTemp(Ity_F32
);
21909 t4
= newTemp(Ity_F32
);
21910 /* get lo half of FPR */
21911 assign(t0
, unop(Iop_ReinterpF64asI64
, getFReg(fs
)));
21913 assign(t1
, unop(Iop_64to32
, mkexpr(t0
)));
21914 putFReg(fd
, mkWidenFromF32(tyF
, binop(Iop_I32StoF32
,
21915 get_IR_roundingmode(), mkexpr(t1
))));
21917 t0
= newTemp(Ity_I32
);
21918 assign(t0
, unop(Iop_ReinterpF32asI32
, getFReg(fs
)));
21919 putFReg(fd
, binop(Iop_I32StoF32
, get_IR_roundingmode(),
21926 DIP("cvt.s.d %u, %u", fd
, fs
);
21927 calculateFCSR(fs
, 0, CVTSD
, False
, 1);
21928 t0
= newTemp(Ity_F32
);
21929 assign(t0
, binop(Iop_F64toF32
, get_IR_roundingmode(),
21931 putFReg(fd
, mkWidenFromF32(tyF
, mkexpr(t0
)));
21935 DIP("cvt.s.l %u, %u", fd
, fs
);
21938 calculateFCSR(fs
, 0, CVTSL
, False
, 1);
21939 t0
= newTemp(Ity_I64
);
21940 assign(t0
, unop(Iop_ReinterpF64asI64
, getFReg(fs
)));
21942 putFReg(fd
, mkWidenFromF32(tyF
, binop(Iop_I64StoF32
,
21943 get_IR_roundingmode(), mkexpr(t0
))));
21956 case 0x24: /* CVT.w */
21959 DIP("cvt.w.s %u, %u", fd
, fs
);
21960 calculateFCSR(fs
, 0, CVTWS
, True
, 1);
21962 mkWidenFromF32(tyF
,
21963 unop(Iop_ReinterpI32asF32
,
21964 binop(Iop_F32toI32S
,
21965 get_IR_roundingmode(),
21971 DIP("cvt.w.d %u, %u", fd
, fs
);
21972 calculateFCSR(fs
, 0, CVTWD
, False
, 1);
21973 t0
= newTemp(Ity_I32
);
21974 t1
= newTemp(Ity_F32
);
21975 assign(t0
, binop(Iop_F64toI32S
, get_IR_roundingmode(),
21977 assign(t1
, unop(Iop_ReinterpI32asF32
, mkexpr(t0
)));
21978 putFReg(fd
, mkWidenFromF32(tyF
, mkexpr(t1
)));
21988 case 0x25: /* CVT.l */
21991 DIP("cvt.l.s %u, %u", fd
, fs
);
21994 calculateFCSR(fs
, 0, CVTLS
, True
, 1);
21995 t0
= newTemp(Ity_I64
);
21997 assign(t0
, binop(Iop_F32toI64S
, get_IR_roundingmode(),
21998 getLoFromF64(tyF
, getFReg(fs
))));
22000 putDReg(fd
, unop(Iop_ReinterpI64asF64
, mkexpr(t0
)));
22007 case 0x11: { /* D */
22008 DIP("cvt.l.d %u, %u", fd
, fs
);
22011 calculateFCSR(fs
, 0, CVTLD
, False
, 1);
22012 putDReg(fd
, unop(Iop_ReinterpI64asF64
,
22013 binop(Iop_F64toI64S
,
22014 get_IR_roundingmode(),
22029 case 0x0B: /* FLOOR.L.fmt */
22032 DIP("floor.l.s %u, %u", fd
, fs
);
22035 calculateFCSR(fs
, 0, FLOORLS
, True
, 1);
22036 t0
= newTemp(Ity_I64
);
22038 assign(t0
, binop(Iop_F32toI64S
, mkU32(0x1),
22039 getLoFromF64(tyF
, getFReg(fs
))));
22041 putDReg(fd
, unop(Iop_ReinterpI64asF64
, mkexpr(t0
)));
22049 DIP("floor.l.d %u, %u", fd
, fs
);
22052 calculateFCSR(fs
, 0, FLOORLD
, False
, 1);
22053 putDReg(fd
, unop(Iop_ReinterpI64asF64
,
22054 binop(Iop_F64toI64S
,
22069 case 0x0C: /* ROUND.W.fmt */
22072 DIP("round.w.s f%u, f%u", fd
, fs
);
22073 calculateFCSR(fs
, 0, ROUNDWS
, True
, 1);
22075 mkWidenFromF32(tyF
,
22076 unop(Iop_ReinterpI32asF32
,
22077 binop(Iop_F32toI32S
,
22084 DIP("round.w.d f%u, f%u", fd
, fs
);
22085 calculateFCSR(fs
, 0, ROUNDWD
, False
, 1);
22088 t0
= newTemp(Ity_I32
);
22089 assign(t0
, binop(Iop_F64toI32S
, mkU32(0x0),
22091 putFReg(fd
, mkWidenFromF32(tyF
,
22092 unop(Iop_ReinterpI32asF32
, mkexpr(t0
))));
22094 t0
= newTemp(Ity_I32
);
22096 assign(t0
, binop(Iop_F64toI32S
, mkU32(0x0),
22099 putFReg(fd
, unop(Iop_ReinterpI32asF32
, mkexpr(t0
)));
22109 break; /* ROUND.W.fmt */
22111 case 0x0F: /* FLOOR.W.fmt */
22114 DIP("floor.w.s f%u, f%u", fd
, fs
);
22115 calculateFCSR(fs
, 0, FLOORWS
, True
, 1);
22117 mkWidenFromF32(tyF
,
22118 unop(Iop_ReinterpI32asF32
,
22119 binop(Iop_F32toI32S
,
22126 DIP("floor.w.d f%u, f%u", fd
, fs
);
22127 calculateFCSR(fs
, 0, FLOORWD
, False
, 1);
22130 t0
= newTemp(Ity_I32
);
22131 assign(t0
, binop(Iop_F64toI32S
, mkU32(0x1),
22133 putFReg(fd
, mkWidenFromF32(tyF
,
22134 unop(Iop_ReinterpI32asF32
, mkexpr(t0
))));
22137 t0
= newTemp(Ity_I32
);
22139 assign(t0
, binop(Iop_F64toI32S
, mkU32(0x1),
22142 putFReg(fd
, unop(Iop_ReinterpI32asF32
, mkexpr(t0
)));
22151 break; /* FLOOR.W.fmt */
22153 case 0x0D: /* TRUNC.W */
22156 DIP("trunc.w.s %u, %u", fd
, fs
);
22157 calculateFCSR(fs
, 0, TRUNCWS
, True
, 1);
22159 mkWidenFromF32(tyF
,
22160 unop(Iop_ReinterpI32asF32
,
22161 binop(Iop_F32toI32S
,
22168 DIP("trunc.w.d %u, %u", fd
, fs
);
22169 calculateFCSR(fs
, 0, TRUNCWD
, False
, 1);
22172 t0
= newTemp(Ity_I32
);
22174 assign(t0
, binop(Iop_F64toI32S
, mkU32(0x3),
22177 putFReg(fd
, mkWidenFromF32(tyF
,
22178 unop(Iop_ReinterpI32asF32
, mkexpr(t0
))));
22180 t0
= newTemp(Ity_I32
);
22182 assign(t0
, binop(Iop_F64toI32S
, mkU32(0x3),
22185 putFReg(fd
, unop(Iop_ReinterpI32asF32
, mkexpr(t0
)));
22197 case 0x0E: /* CEIL.W.fmt */
22200 DIP("ceil.w.s %u, %u", fd
, fs
);
22201 calculateFCSR(fs
, 0, CEILWS
, True
, 1);
22203 mkWidenFromF32(tyF
,
22204 unop(Iop_ReinterpI32asF32
,
22205 binop(Iop_F32toI32S
,
22212 DIP("ceil.w.d %u, %u", fd
, fs
);
22213 calculateFCSR(fs
, 0, CEILWD
, False
, 1);
22216 t0
= newTemp(Ity_I32
);
22217 assign(t0
, binop(Iop_F64toI32S
, mkU32(0x2),
22219 putFReg(fd
, unop(Iop_ReinterpI32asF32
, mkexpr(t0
)));
22221 t0
= newTemp(Ity_I32
);
22222 assign(t0
, binop(Iop_F64toI32S
, mkU32(0x2),
22224 putFReg(fd
, mkWidenFromF32(tyF
,
22225 unop(Iop_ReinterpI32asF32
, mkexpr(t0
))));
22237 case 0x0A: /* CEIL.L.fmt */
22240 DIP("ceil.l.s %u, %u", fd
, fs
);
22243 calculateFCSR(fs
, 0, CEILLS
, True
, 1);
22244 t0
= newTemp(Ity_I64
);
22246 assign(t0
, binop(Iop_F32toI64S
, mkU32(0x2),
22247 getLoFromF64(tyF
, getFReg(fs
))));
22249 putFReg(fd
, unop(Iop_ReinterpI64asF64
, mkexpr(t0
)));
22257 DIP("ceil.l.d %u, %u", fd
, fs
);
22260 calculateFCSR(fs
, 0, CEILLD
, False
, 1);
22261 putDReg(fd
, unop(Iop_ReinterpI64asF64
,
22262 binop(Iop_F64toI64S
,
22278 case 0x16: /* RSQRT.fmt */
22280 case 0x10: { /* S */
22281 DIP("rsqrt.s %u, %u", fd
, fs
);
22282 IRExpr
*rm
= get_IR_roundingmode();
22283 putFReg(fd
, mkWidenFromF32(tyF
, triop(Iop_DivF32
, rm
,
22284 unop(Iop_ReinterpI32asF32
, mkU32(ONE_SINGLE
)),
22285 binop(Iop_SqrtF32
, rm
, getLoFromF64(tyF
,
22290 case 0x11: { /* D */
22291 DIP("rsqrt.d %u, %u", fd
, fs
);
22292 IRExpr
*rm
= get_IR_roundingmode();
22293 putDReg(fd
, triop(Iop_DivF64
, rm
,
22294 unop(Iop_ReinterpI64asF64
,
22295 mkU64(ONE_DOUBLE
)),
22296 binop(Iop_SqrtF64
, rm
, getDReg(fs
))));
22307 case 0x18: /* MADDF.fmt */
22308 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
22310 case 0x11: { /* D */
22311 DIP("maddf.d f%u, f%u, f%u", fd
, fs
, ft
);
22312 IRExpr
*rm
= get_IR_roundingmode();
22313 putDReg(fd
, qop(Iop_MAddF64
, rm
, getDReg(fs
), getDReg(ft
),
22318 case 0x10: { /* S */
22319 DIP("maddf.s f%u, f%u, f%u", fd
, fs
, ft
);
22320 IRExpr
*rm
= get_IR_roundingmode();
22321 t1
= newTemp(Ity_F32
);
22322 assign(t1
, qop(Iop_MAddF32
, rm
,
22323 getLoFromF64(tyF
, getFReg(fs
)),
22324 getLoFromF64(tyF
, getFReg(ft
)),
22325 getLoFromF64(tyF
, getFReg(fd
))));
22326 putFReg(fd
, mkWidenFromF32(tyF
, mkexpr(t1
)));
22334 ILLEGAL_INSTRUCTON
;
22339 case 0x19: /* MSUBF.fmt */
22340 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
22342 case 0x11: { /* D */
22343 DIP("msubf.d f%u, f%u, f%u", fd
, fs
, ft
);
22344 IRExpr
*rm
= get_IR_roundingmode();
22345 putDReg(fd
, qop(Iop_MSubF64
, rm
, getDReg(fs
),
22346 getDReg(ft
), getDReg(fd
)));
22350 case 0x10: { /* S */
22351 DIP("msubf.s f%u, f%u, f%u", fd
, fs
, ft
);
22352 IRExpr
*rm
= get_IR_roundingmode();
22353 t1
= newTemp(Ity_F32
);
22354 assign(t1
, qop(Iop_MSubF32
, rm
,
22355 getLoFromF64(tyF
, getFReg(fs
)),
22356 getLoFromF64(tyF
, getFReg(ft
)),
22357 getLoFromF64(tyF
, getFReg(fd
))));
22358 putFReg(fd
, mkWidenFromF32(tyF
, mkexpr(t1
)));
22366 ILLEGAL_INSTRUCTON
;
22371 case 0x1E: /* MAX.fmt */
22372 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
22374 case 0x11: { /* D */
22375 DIP("max.d f%u, f%u, f%u", fd
, fs
, ft
);
22376 calculateFCSR(fs
, ft
, MAXD
, False
, 2);
22377 putDReg(fd
, binop(Iop_MaxNumF64
, getDReg(fs
), getDReg(ft
)));
22381 case 0x10: { /* S */
22382 DIP("max.s f%u, f%u, f%u", fd
, fs
, ft
);
22383 calculateFCSR(fs
, ft
, MAXS
, True
, 2);
22384 putFReg(fd
, mkWidenFromF32(tyF
, binop(Iop_MaxNumF32
,
22385 getLoFromF64(Ity_F64
,
22387 getLoFromF64(Ity_F64
,
22396 ILLEGAL_INSTRUCTON
;
22401 case 0x1C: /* MIN.fmt */
22402 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
22404 case 0x11: { /* D */
22405 DIP("min.d f%u, f%u, f%u", fd
, fs
, ft
);
22406 calculateFCSR(fs
, ft
, MIND
, False
, 2);
22407 putDReg(fd
, binop(Iop_MinNumF64
, getDReg(fs
), getDReg(ft
)));
22411 case 0x10: { /* S */
22412 DIP("min.s f%u, f%u, f%u", fd
, fs
, ft
);
22413 calculateFCSR(fs
, ft
, MINS
, True
, 2);
22414 putFReg(fd
, mkWidenFromF32(tyF
, binop(Iop_MinNumF32
,
22415 getLoFromF64(Ity_F64
,
22417 getLoFromF64(Ity_F64
,
22426 ILLEGAL_INSTRUCTON
;
22431 case 0x1F: /* MAXA.fmt */
22432 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
22434 case 0x11: { /* D */
22435 DIP("maxa.d f%u, f%u, f%u", fd
, fs
, ft
);
22436 calculateFCSR(fs
, ft
, MAXAD
, False
, 2);
22437 t1
= newTemp(Ity_F64
);
22438 t2
= newTemp(Ity_F64
);
22439 t3
= newTemp(Ity_F64
);
22440 t4
= newTemp(Ity_I1
);
22441 assign(t1
, unop(Iop_AbsF64
, getFReg(fs
)));
22442 assign(t2
, unop(Iop_AbsF64
, getFReg(ft
)));
22443 assign(t3
, binop(Iop_MaxNumF64
, mkexpr(t1
), mkexpr(t2
)));
22444 assign(t4
, binop(Iop_CmpEQ32
,
22445 binop(Iop_CmpF64
, mkexpr(t3
), mkexpr(t1
)),
22447 putFReg(fd
, IRExpr_ITE(mkexpr(t4
),
22448 getFReg(fs
), getFReg(ft
)));
22452 case 0x10: { /* S */
22453 DIP("maxa.s f%u, f%u, f%u", fd
, fs
, ft
);
22454 calculateFCSR(fs
, ft
, MAXAS
, True
, 2);
22455 t1
= newTemp(Ity_F32
);
22456 t2
= newTemp(Ity_F32
);
22457 t3
= newTemp(Ity_F32
);
22458 t4
= newTemp(Ity_I1
);
22459 assign(t1
, unop(Iop_AbsF32
, getLoFromF64(Ity_F64
,
22461 assign(t2
, unop(Iop_AbsF32
, getLoFromF64(Ity_F64
,
22463 assign(t3
, binop(Iop_MaxNumF32
, mkexpr(t1
), mkexpr(t2
)));
22464 assign(t4
, binop(Iop_CmpEQ32
,
22465 binop(Iop_CmpF32
, mkexpr(t3
), mkexpr(t1
)),
22467 putFReg(fd
, IRExpr_ITE(mkexpr(t4
),
22468 getFReg(fs
), getFReg(ft
)));
22477 ILLEGAL_INSTRUCTON
;
22482 case 0x1D: /* MINA.fmt */
22483 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
22485 case 0x11: { /* D */
22486 DIP("mina.d f%u, f%u, f%u", fd
, fs
, ft
);
22487 calculateFCSR(fs
, ft
, MINAD
, False
, 2);
22488 t1
= newTemp(Ity_F64
);
22489 t2
= newTemp(Ity_F64
);
22490 t3
= newTemp(Ity_F64
);
22491 t4
= newTemp(Ity_I1
);
22492 assign(t1
, unop(Iop_AbsF64
, getFReg(fs
)));
22493 assign(t2
, unop(Iop_AbsF64
, getFReg(ft
)));
22494 assign(t3
, binop(Iop_MinNumF64
, mkexpr(t1
), mkexpr(t2
)));
22495 assign(t4
, binop(Iop_CmpEQ32
,
22496 binop(Iop_CmpF64
, mkexpr(t3
), mkexpr(t1
)),
22498 putFReg(fd
, IRExpr_ITE(mkexpr(t4
),
22499 getFReg(fs
), getFReg(ft
)));
22503 case 0x10: { /* S */
22504 DIP("mina.s f%u, f%u, f%u", fd
, fs
, ft
);
22505 calculateFCSR(fs
, ft
, MINAS
, True
, 2);
22506 t1
= newTemp(Ity_F32
);
22507 t2
= newTemp(Ity_F32
);
22508 t3
= newTemp(Ity_F32
);
22509 t4
= newTemp(Ity_I1
);
22510 assign(t1
, unop(Iop_AbsF32
, getLoFromF64(Ity_F64
,
22512 assign(t2
, unop(Iop_AbsF32
, getLoFromF64(Ity_F64
,
22514 assign(t3
, binop(Iop_MinNumF32
, mkexpr(t1
), mkexpr(t2
)));
22515 assign(t4
, binop(Iop_CmpEQ32
,
22516 binop(Iop_CmpF32
, mkexpr(t3
), mkexpr(t1
)),
22518 putFReg(fd
, IRExpr_ITE(mkexpr(t4
),
22519 getFReg(fs
), getFReg(ft
)));
22530 case 0x1A: /* RINT.fmt */
22533 case 0x11: { /* D */
22534 DIP("rint.d f%u, f%u", fd
, fs
);
22535 calculateFCSR(fs
, 0, RINTS
, True
, 1);
22536 IRExpr
*rm
= get_IR_roundingmode();
22537 putDReg(fd
, binop(Iop_RoundF64toInt
, rm
, getDReg(fs
)));
22541 case 0x10: { /* S */
22542 DIP("rint.s f%u, f%u", fd
, fs
);
22543 calculateFCSR(fs
, 0, RINTD
, True
, 1);
22544 IRExpr
*rm
= get_IR_roundingmode();
22546 mkWidenFromF32(tyF
,
22547 binop(Iop_RoundF32toInt
, rm
,
22561 case 0x10: /* SEL.fmt */
22563 case 0x11: { /* D */
22564 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
22565 DIP("sel.d f%u, f%u, f%u", fd
, fs
, ft
);
22566 t1
= newTemp(Ity_I1
);
22569 assign(t1
, binop(Iop_CmpNE64
,
22571 unop(Iop_ReinterpF64asI64
,
22576 assign(t1
, binop(Iop_CmpNE32
,
22579 unop(Iop_ReinterpF64asI64
,
22585 putDReg(fd
, IRExpr_ITE(mkexpr(t1
),
22586 getDReg(ft
), getDReg(fs
)));
22589 ILLEGAL_INSTRUCTON
;
22595 case 0x10: { /* S */
22596 DIP("sel.s f%u, f%u, f%u", fd
, fs
, ft
);
22597 t1
= newTemp(Ity_I1
);
22598 assign(t1
, binop(Iop_CmpNE32
,
22600 unop(Iop_ReinterpF32asI32
,
22601 getLoFromF64(tyF
, getFReg(fd
))),
22604 putFReg(fd
, IRExpr_ITE( mkexpr(t1
),
22605 getFReg(ft
), getFReg(fs
)));
22615 case 0x14: /* SELEQZ.fmt */
22616 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
22617 switch (fmt
) { /* SELEQZ.df */
22618 case 0x11: { /* D */
22619 DIP("seleqz.d f%u, f%u, f%u", fd
, fs
, ft
);
22620 t1
= newTemp(Ity_I1
);
22623 assign(t1
, binop(Iop_CmpNE64
,
22625 unop(Iop_ReinterpF64asI64
,
22630 assign(t1
, binop(Iop_CmpNE32
,
22633 unop(Iop_ReinterpF64asI64
,
22639 putDReg(fd
, IRExpr_ITE( mkexpr(t1
),
22640 binop(Iop_I64StoF64
,
22641 get_IR_roundingmode(), mkU64(0)),
22646 case 0x10: { /* S */
22647 DIP("seleqz.s f%u, f%u, f%u", fd
, fs
, ft
);
22648 t1
= newTemp(Ity_I1
);
22649 assign(t1
, binop(Iop_CmpNE32
,
22651 unop(Iop_ReinterpF32asI32
,
22652 getLoFromF64(tyF
, getFReg(ft
))),
22655 putFReg(fd
, IRExpr_ITE(mkexpr(t1
),
22656 mkWidenFromF32(tyF
,
22657 binop(Iop_I32StoF32
,
22658 get_IR_roundingmode(),
22668 ILLEGAL_INSTRUCTON
;
22673 case 0x17: /* SELNEZ.fmt */
22674 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
22676 case 0x11: { /* D */
22677 DIP("selnez.d f%u, f%u, f%u", fd
, fs
, ft
);
22678 t1
= newTemp(Ity_I1
);
22681 assign(t1
, binop(Iop_CmpNE64
,
22683 unop(Iop_ReinterpF64asI64
,
22688 assign(t1
, binop(Iop_CmpNE32
,
22691 unop(Iop_ReinterpF64asI64
,
22697 putDReg(fd
, IRExpr_ITE( mkexpr(t1
),
22699 binop(Iop_I64StoF64
,
22700 get_IR_roundingmode(),
22705 case 0x10: { /* S */
22706 DIP("selnez.s f%u, f%u, f%u", fd
, fs
, ft
);
22707 t1
= newTemp(Ity_I1
);
22708 assign(t1
, binop(Iop_CmpNE32
,
22710 unop(Iop_ReinterpF32asI32
,
22711 getLoFromF64(tyF
, getFReg(ft
))),
22714 putFReg(fd
, IRExpr_ITE(mkexpr(t1
),
22716 mkWidenFromF32(tyF
,
22717 binop(Iop_I32StoF32
,
22718 get_IR_roundingmode(),
22729 ILLEGAL_INSTRUCTON
;
22734 case 0x1B: /* CLASS.fmt */
22735 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
22736 t0
= newTemp(Ity_I1
); // exp zero
22737 t1
= newTemp(Ity_I1
); // exp max
22738 t2
= newTemp(Ity_I1
); // sign
22739 t3
= newTemp(Ity_I1
); // first
22740 t4
= newTemp(Ity_I1
); // val not zero
22741 t5
= newTemp(Ity_I32
);
22744 case 0x11: { /* D */
22745 DIP("class.d f%u, f%u", fd
, fs
);
22746 assign(t0
, binop(Iop_CmpEQ32
,
22749 unop(Iop_ReinterpF64asI64
,
22751 mkU32(0x7ff00000)),
22753 assign(t1
, binop(Iop_CmpEQ32
,
22756 unop(Iop_ReinterpF64asI64
,
22758 mkU32(0x7ff00000)),
22759 mkU32(0x7ff00000)));
22760 assign(t2
, binop(Iop_CmpEQ32
,
22763 unop(Iop_ReinterpF64asI64
,
22765 mkU32(0x80000000)),
22766 mkU32(0x80000000)));
22767 assign(t3
, binop(Iop_CmpEQ32
,
22770 unop(Iop_ReinterpF64asI64
,
22772 mkU32(0x00080000)),
22773 mkU32(0x00080000)));
22775 if (mode64
) assign(t4
, binop(Iop_CmpNE64
,
22777 unop(Iop_ReinterpF64asI64
,
22779 mkU64(0x000fffffffffffffULL
)),
22781 else assign(t4
, binop(Iop_CmpNE32
,
22785 unop(Iop_ReinterpF64asI64
,
22787 mkU32(0x000fffff)),
22789 unop(Iop_ReinterpF64asI64
,
22793 assign(t5
, binop(Iop_Shl32
,
22794 IRExpr_ITE(mkexpr(t1
),
22795 IRExpr_ITE(mkexpr(t4
),
22796 mkU32(0), mkU32(1)),
22797 IRExpr_ITE(mkexpr(t0
),
22798 IRExpr_ITE(mkexpr(t4
),
22802 IRExpr_ITE(mkexpr(t2
), mkU8(2), mkU8(6))));
22803 putDReg(fd
, unop(Iop_ReinterpI64asF64
,
22805 IRExpr_ITE(binop(Iop_CmpNE32
,
22806 mkexpr(t5
), mkU32(0)),
22808 IRExpr_ITE(mkexpr(t3
),
22814 case 0x10: { /* S */
22815 DIP("class.s f%u, f%u", fd
, fs
);
22816 assign(t0
, binop(Iop_CmpEQ32
,
22818 unop(Iop_ReinterpF32asI32
,
22819 getLoFromF64(tyF
, getFReg(fs
))),
22820 mkU32(0x7f800000)),
22822 assign(t1
, binop(Iop_CmpEQ32
,
22824 unop(Iop_ReinterpF32asI32
,
22825 getLoFromF64(tyF
, getFReg(fs
))),
22826 mkU32(0x7f800000)),
22827 mkU32(0x7f800000)));
22828 assign(t2
, binop(Iop_CmpEQ32
,
22830 unop(Iop_ReinterpF32asI32
,
22831 getLoFromF64(tyF
, getFReg(fs
))),
22832 mkU32(0x80000000)),
22833 mkU32(0x80000000)));
22834 assign(t3
, binop(Iop_CmpEQ32
,
22836 unop(Iop_ReinterpF32asI32
,
22837 getLoFromF64(tyF
, getFReg(fs
))),
22838 mkU32(0x00400000)),
22839 mkU32(0x00400000)));
22840 assign(t4
, binop(Iop_CmpNE32
,
22842 unop(Iop_ReinterpF32asI32
,
22843 getLoFromF64(tyF
, getFReg(fs
))),
22844 mkU32(0x007fffff)),
22846 assign(t5
, binop(Iop_Shl32
,
22847 IRExpr_ITE(mkexpr(t1
),
22848 IRExpr_ITE(mkexpr(t4
),
22849 mkU32(0), mkU32(1)),
22850 IRExpr_ITE(mkexpr(t0
),
22851 IRExpr_ITE(mkexpr(t4
),
22853 mkU32(0x8)), //zero or subnorm
22855 IRExpr_ITE(mkexpr(t2
), mkU8(2), mkU8(6))));
22856 putDReg(fd
, unop(Iop_ReinterpI64asF64
,
22858 IRExpr_ITE(binop(Iop_CmpNE32
,
22859 mkexpr(t5
), mkU32(0)),
22861 IRExpr_ITE(mkexpr(t3
),
22871 ILLEGAL_INSTRUCTON
;
22877 if (dis_instr_CCondFmt(cins
))
22888 case 0x03: /* COP1X */
22889 switch (function
) {
22890 case 0x0: { /* LWXC1 */
22891 /* Load Word Indexed to Floating Point - LWXC1 (MIPS32r2) */
22892 DIP("lwxc1 f%u, r%u(r%u)", fd
, rt
, rs
);
22894 assign(t2
, binop(mode64
? Iop_Add64
: Iop_Add32
, getIReg(rs
),
22898 t0
= newTemp(Ity_I64
);
22899 t1
= newTemp(Ity_I32
);
22900 t3
= newTemp(Ity_F32
);
22901 t4
= newTemp(Ity_I64
);
22902 assign(t3
, load(Ity_F32
, mkexpr(t2
)));
22904 assign(t4
, mkWidenFrom32(Ity_I64
, unop(Iop_ReinterpF32asI32
,
22905 mkexpr(t3
)), True
));
22907 putFReg(fd
, unop(Iop_ReinterpI64asF64
, mkexpr(t4
)));
22909 putFReg(fd
, load(Ity_F32
, mkexpr(t2
)));
22915 case 0x1: { /* LDXC1 */
22916 /* Load Doubleword Indexed to Floating Point
22917 LDXC1 (MIPS32r2 and MIPS64) */
22918 DIP("ldxc1 f%u, r%u(r%u)", fd
, rt
, rs
);
22920 assign(t0
, binop(mode64
? Iop_Add64
: Iop_Add32
, getIReg(rs
),
22922 putDReg(fd
, load(Ity_F64
, mkexpr(t0
)));
22926 case 0x5: /* Load Doubleword Indexed Unaligned to Floating Point - LUXC1;
22927 MIPS32r2 and MIPS64 */
22928 DIP("luxc1 f%u, r%u(r%u)", fd
, rt
, rs
);
22930 if ((mode64
|| VEX_MIPS_CPU_HAS_MIPS32R2(archinfo
->hwcaps
))
22934 assign(t0
, binop(mode64
? Iop_Add64
: Iop_Add32
,
22935 getIReg(rs
), getIReg(rt
)));
22936 assign(t1
, binop(mode64
? Iop_And64
: Iop_And32
,
22938 mode64
? mkU64(0xfffffffffffffff8ULL
)
22939 : mkU32(0xfffffff8ULL
)));
22940 putFReg(fd
, load(Ity_F64
, mkexpr(t1
)));
22947 case 0x8: { /* Store Word Indexed from Floating Point - SWXC1 */
22948 DIP("swxc1 f%u, r%u(r%u)", ft
, rt
, rs
);
22950 assign(t0
, binop(mode64
? Iop_Add64
: Iop_Add32
, getIReg(rs
),
22954 store(mkexpr(t0
), getLoFromF64(tyF
, getFReg(fs
)));
22956 store(mkexpr(t0
), getFReg(fs
));
22962 case 0x9: { /* Store Doubleword Indexed from Floating Point - SDXC1 */
22963 DIP("sdxc1 f%u, r%u(r%u)", fs
, rt
, rs
);
22965 assign(t0
, binop(mode64
? Iop_Add64
: Iop_Add32
, getIReg(rs
),
22967 store(mkexpr(t0
), getDReg(fs
));
22971 case 0xD: /* Store Doubleword Indexed Unaligned from Floating Point -
22972 SUXC1; MIPS64 MIPS32r2 */
22973 DIP("suxc1 f%u, r%u(r%u)", fd
, rt
, rs
);
22975 if ((mode64
|| VEX_MIPS_CPU_HAS_MIPS32R2(archinfo
->hwcaps
))
22979 assign(t0
, binop(mode64
? Iop_Add64
: Iop_Add32
,
22980 getIReg(rs
), getIReg(rt
)));
22981 assign(t1
, binop(mode64
? Iop_And64
: Iop_And32
,
22983 mode64
? mkU64(0xfffffffffffffff8ULL
)
22984 : mkU32(0xfffffff8ULL
)));
22985 store(mkexpr(t1
), getFReg(fs
));
22997 case 0x20: { /* MADD.S */
22998 DIP("madd.s f%u, f%u, f%u, f%u", fd
, fmt
, fs
, ft
);
22999 IRExpr
*rm
= get_IR_roundingmode();
23000 t1
= newTemp(Ity_F32
);
23001 assign(t1
, triop(Iop_AddF32
, rm
, getLoFromF64(tyF
, getFReg(fmt
)),
23002 triop(Iop_MulF32
, rm
, getLoFromF64(tyF
, getFReg(fs
)),
23003 getLoFromF64(tyF
, getFReg(ft
)))));
23004 putFReg(fd
, mkWidenFromF32(tyF
, mkexpr(t1
)));
23005 break; /* MADD.S */
23008 case 0x21: { /* MADD.D */
23009 DIP("madd.d f%u, f%u, f%u, f%u", fd
, fmt
, fs
, ft
);
23010 IRExpr
*rm
= get_IR_roundingmode();
23011 putDReg(fd
, triop(Iop_AddF64
, rm
, getDReg(fmt
),
23012 triop(Iop_MulF64
, rm
, getDReg(fs
),
23014 break; /* MADD.D */
23017 case 0x28: { /* MSUB.S */
23018 DIP("msub.s f%u, f%u, f%u, f%u", fd
, fmt
, fs
, ft
);
23019 IRExpr
*rm
= get_IR_roundingmode();
23020 t1
= newTemp(Ity_F32
);
23021 assign(t1
, triop(Iop_SubF32
, rm
,
23022 triop(Iop_MulF32
, rm
, getLoFromF64(tyF
, getFReg(fs
)),
23023 getLoFromF64(tyF
, getFReg(ft
))),
23024 getLoFromF64(tyF
, getFReg(fmt
))));
23025 putFReg(fd
, mkWidenFromF32(tyF
, mkexpr(t1
)));
23026 break; /* MSUB.S */
23029 case 0x29: { /* MSUB.D */
23030 DIP("msub.d f%u, f%u, f%u, f%u", fd
, fmt
, fs
, ft
);
23031 IRExpr
*rm
= get_IR_roundingmode();
23032 putDReg(fd
, triop(Iop_SubF64
, rm
, triop(Iop_MulF64
, rm
, getDReg(fs
),
23033 getDReg(ft
)), getDReg(fmt
)));
23034 break; /* MSUB.D */
23037 case 0x30: { /* NMADD.S */
23038 DIP("nmadd.s f%u, f%u, f%u, f%u", fd
, fmt
, fs
, ft
);
23039 IRExpr
*rm
= get_IR_roundingmode();
23040 t1
= newTemp(Ity_F32
);
23041 assign(t1
, triop(Iop_AddF32
, rm
, getLoFromF64(tyF
, getFReg(fmt
)),
23042 triop(Iop_MulF32
, rm
, getLoFromF64(tyF
, getFReg(fs
)),
23043 getLoFromF64(tyF
, getFReg(ft
)))));
23044 putFReg(fd
, mkWidenFromF32(tyF
, unop(Iop_NegF32
, mkexpr(t1
))));
23045 break; /* NMADD.S */
23048 case 0x31: { /* NMADD.D */
23049 DIP("nmadd.d f%u, f%u, f%u, f%u", fd
, fmt
, fs
, ft
);
23050 IRExpr
*rm
= get_IR_roundingmode();
23051 t1
= newTemp(Ity_F64
);
23052 assign(t1
, triop(Iop_AddF64
, rm
, getDReg(fmt
),
23053 triop(Iop_MulF64
, rm
, getDReg(fs
),
23055 putDReg(fd
, unop(Iop_NegF64
, mkexpr(t1
)));
23056 break; /* NMADD.D */
23059 case 0x38: { /* NMSUBB.S */
23060 DIP("nmsub.s f%u, f%u, f%u, f%u", fd
, fmt
, fs
, ft
);
23061 IRExpr
*rm
= get_IR_roundingmode();
23062 t1
= newTemp(Ity_F32
);
23063 assign(t1
, triop(Iop_SubF32
, rm
,
23064 triop(Iop_MulF32
, rm
, getLoFromF64(tyF
, getFReg(fs
)),
23065 getLoFromF64(tyF
, getFReg(ft
))),
23066 getLoFromF64(tyF
, getFReg(fmt
))));
23067 putFReg(fd
, mkWidenFromF32(tyF
, unop(Iop_NegF32
, mkexpr(t1
))));
23068 break; /* NMSUBB.S */
23071 case 0x39: { /* NMSUBB.D */
23072 DIP("nmsub.d f%u, f%u, f%u, f%u", fd
, fmt
, fs
, ft
);
23073 IRExpr
*rm
= get_IR_roundingmode();
23074 t1
= newTemp(Ity_F64
);
23075 assign(t1
, triop(Iop_SubF64
, rm
, triop(Iop_MulF64
, rm
, getDReg(fs
),
23076 getDReg(ft
)), getDReg(fmt
)));
23077 putDReg(fd
, unop(Iop_NegF64
, mkexpr(t1
)));
23078 break; /* NMSUBB.D */
23087 case 0x04: /* BEQL */
23088 DIP("beql r%u, r%u, %u", rs
, rt
, imm
);
23089 *lastn
= dis_branch_likely(binop(mode64
? Iop_CmpNE64
: Iop_CmpNE32
,
23090 getIReg(rs
), getIReg(rt
)), imm
);
23093 case 0x05: /* BNEL */
23094 DIP("bnel r%u, r%u, %u", rs
, rt
, imm
);
23095 *lastn
= dis_branch_likely(binop(mode64
? Iop_CmpEQ64
: Iop_CmpEQ32
,
23096 getIReg(rs
), getIReg(rt
)), imm
);
23099 case 0x06: /* 0x16 ??? BLEZL, BLEZC, BGEZC, BGEC */
23100 if (rt
== 0) { /* BLEZL */
23101 DIP("blezl r%u, %u", rs
, imm
);
23102 *lastn
= dis_branch_likely(unop(Iop_Not1
, (binop(mode64
? Iop_CmpLE64S
:
23103 Iop_CmpLE32S
, getIReg(rs
), mode64
?
23104 mkU64(0x0) : mkU32(0x0)))), imm
);
23105 } else if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
23106 if (rs
== 0) { /* BLEZC */
23107 DIP("blezc r%u, %u", rt
, imm
);
23110 dis_branch_compact(False
,
23111 binop(Iop_CmpLE64S
, getIReg(rt
), mkU64(0x0)),
23114 dis_branch_compact(False
,
23115 binop(Iop_CmpLE32S
, getIReg(rt
), mkU32(0x0)),
23118 } else if (rt
== rs
) { /* BGEZC */
23119 DIP("bgezc r%u, %u", rt
, imm
);
23122 dis_branch_compact(False
,
23123 binop(Iop_CmpLE64S
, mkU64(0x0), getIReg(rt
)),
23126 dis_branch_compact(False
,
23127 binop(Iop_CmpLE32S
, mkU32(0x0), getIReg(rt
)),
23130 } else { /* BGEC */
23131 DIP("bgec r%u, r%u, %u", rs
, rt
, imm
);
23134 dis_branch_compact(False
,
23135 binop(Iop_CmpLE64S
, getIReg(rt
), getIReg(rs
)),
23138 dis_branch_compact(False
,
23139 binop(Iop_CmpLE32S
, getIReg(rt
), getIReg(rs
)),
23149 case 0x07: /* BGTZL, BGTZC, BLTZC, BLTC */
23150 if (rt
== 0) { /* BGTZL */
23151 DIP("bgtzl r%u, %u", rs
, imm
);
23154 *lastn
= dis_branch_likely(binop(Iop_CmpLE64S
, getIReg(rs
),
23155 mkU64(0x00)), imm
);
23157 *lastn
= dis_branch_likely(binop(Iop_CmpLE32S
, getIReg(rs
),
23158 mkU32(0x00)), imm
);
23159 } else if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
23160 if (rs
== 0) { /* BGTZC */
23161 DIP("bgtzc r%u, %u", rt
, imm
);
23164 dis_branch_compact(False
,
23166 binop(Iop_CmpLE64S
,
23167 getIReg(rt
), mkU64(0x0))),
23170 dis_branch_compact(False
,
23172 binop(Iop_CmpLE32S
,
23173 getIReg(rt
), mkU32(0x0))),
23176 } else if (rs
== rt
) { /* BLTZC */
23177 DIP("bltzc r%u, %u", rt
, imm
);
23180 dis_branch_compact(False
,
23182 binop(Iop_CmpLE64S
,
23183 mkU64(0x0), getIReg(rt
))),
23186 dis_branch_compact(False
,
23188 binop(Iop_CmpLE32S
,
23189 mkU32(0x0), getIReg(rt
))),
23192 } else { /* BLTC */
23193 DIP("bltc r%u, r%u, %u", rs
, rt
, imm
);
23196 dis_branch_compact(False
,
23198 binop(Iop_CmpLE64S
,
23199 getIReg(rt
), getIReg(rs
))),
23202 dis_branch_compact(False
,
23204 binop(Iop_CmpLE32S
,
23205 getIReg(rt
), getIReg(rs
))),
23215 #if defined(__mips__) && ((defined(__mips_isa_rev) && __mips_isa_rev < 6))
23217 case 0x08: { /* Doubleword Add Immidiate - DADDI; MIPS64 */
23218 DIP("daddi r%u, r%u, %u", rt
, rs
, imm
);
23219 IRTemp tmpRs64
= newTemp(Ity_I64
);
23220 assign(tmpRs64
, getIReg(rs
));
23222 t0
= newTemp(Ity_I64
);
23223 t1
= newTemp(Ity_I64
);
23224 t2
= newTemp(Ity_I64
);
23225 t3
= newTemp(Ity_I64
);
23226 t4
= newTemp(Ity_I64
);
23227 /* dst = src0 + sign(imm)
23228 if(sign(src0 ) != sign(imm ))
23230 if(sign(dst) == sign(src0 ))
23232 we have overflow! */
23234 assign(t0
, binop(Iop_Add64
, mkexpr(tmpRs64
),
23235 mkU64(extend_s_16to64(imm
))));
23236 assign(t1
, binop(Iop_Xor64
, mkexpr(tmpRs64
),
23237 mkU64(extend_s_16to64(imm
))));
23238 assign(t2
, unop(Iop_1Sto64
, binop(Iop_CmpEQ64
, binop(Iop_And64
,
23239 mkexpr(t1
), mkU64(0x8000000000000000ULL
)),
23240 mkU64(0x8000000000000000ULL
))));
23242 assign(t3
, binop(Iop_Xor64
, mkexpr(t0
), mkexpr(tmpRs64
)));
23243 assign(t4
, unop(Iop_1Sto64
, binop(Iop_CmpNE64
, binop(Iop_And64
,
23244 mkexpr(t3
), mkU64(0x8000000000000000ULL
)),
23245 mkU64(0x8000000000000000ULL
))));
23247 stmt(IRStmt_Exit(binop(Iop_CmpEQ64
, binop(Iop_Or64
, mkexpr(t2
),
23248 mkexpr(t4
)), mkU64(0)), Ijk_SigFPE_IntOvf
,
23249 IRConst_U64(guest_PC_curr_instr
+ 4),
23252 putIReg(rt
, mkexpr(t0
));
23256 #elif defined(__mips__) && ((defined(__mips_isa_rev) && __mips_isa_rev >= 6))
23258 case 0x08: { /* BNEZALC, BNEC, BNVC */
23259 if (rs
== 0) { /* BNEZALC */
23260 DIP("bnezalc r%u, %u", rt
, imm
);
23263 dis_branch_compact(True
,
23265 binop(Iop_CmpEQ64
, getIReg(rt
), mkU64(0x0))),
23268 dis_branch_compact(True
,
23270 binop(Iop_CmpEQ32
, getIReg(rt
), mkU32(0x0))),
23273 } else if (rs
< rt
) { /* BNEC */
23274 DIP("bnec r%u, %u", rt
, imm
);
23277 dis_branch_compact(False
,
23280 getIReg(rt
), getIReg(rs
))),
23283 dis_branch_compact(False
,
23286 getIReg(rt
), getIReg(rs
))),
23289 } else { /* BNVC */
23290 DIP("bnvc r%u, r%u, %u", rs
, rt
, imm
);
23293 t0
= newTemp(Ity_I32
);
23294 t1
= newTemp(Ity_I32
);
23295 t2
= newTemp(Ity_I32
);
23296 t3
= newTemp(Ity_I32
);
23297 assign(t0
, IRExpr_ITE(binop(Iop_CmpLT64S
,
23299 mkU64(0xffffffff80000000ULL
)),
23301 IRExpr_ITE(binop(Iop_CmpLT64S
,
23303 mkU64(0x7FFFFFFFULL
)),
23304 mkU32(0), mkU32(1))));
23305 assign(t1
, IRExpr_ITE(binop(Iop_CmpLT64S
,
23307 mkU64(0xffffffff80000000ULL
)),
23309 IRExpr_ITE(binop(Iop_CmpLT64S
,
23311 mkU64(0x7FFFFFFFULL
)),
23312 mkU32(0), mkU32(1))));
23313 assign(t2
, IRExpr_ITE(binop(Iop_CmpLT64S
,
23315 getIReg(rt
), getIReg(rs
)),
23316 mkU64(0xffffffff80000000ULL
)),
23318 IRExpr_ITE(binop(Iop_CmpLT64S
,
23322 mkU64(0x7FFFFFFFULL
)),
23323 mkU32(0), mkU32(1))));
23324 assign(t3
, binop(Iop_Add32
,
23326 binop(Iop_Add32
, mkexpr(t1
), mkexpr(t2
))));
23327 dis_branch_compact(False
,
23328 binop(Iop_CmpEQ32
, mkexpr(t3
), mkU32(0)),
23331 IRTemp tmpRs32
= newTemp(Ity_I32
);
23332 IRTemp tmpRt32
= newTemp(Ity_I32
);
23334 assign(tmpRs32
, getIReg(rs
));
23335 assign(tmpRt32
, getIReg(rt
));
23336 t0
= newTemp(Ity_I32
);
23337 t1
= newTemp(Ity_I32
);
23338 t2
= newTemp(Ity_I32
);
23339 t3
= newTemp(Ity_I32
);
23340 t4
= newTemp(Ity_I32
);
23341 /* dst = src0 + src1
23342 if (sign(src0 ) != sign(src1 ))
23344 if (sign(dst) == sign(src0 ))
23346 we have overflow! */
23348 assign(t0
, binop(Iop_Add32
, mkexpr(tmpRs32
), mkexpr(tmpRt32
)));
23349 assign(t1
, binop(Iop_Xor32
, mkexpr(tmpRs32
), mkexpr(tmpRt32
)));
23350 assign(t2
, unop(Iop_1Uto32
,
23352 binop(Iop_And32
, mkexpr(t1
), mkU32(0x80000000)),
23353 mkU32(0x80000000))));
23355 assign(t3
, binop(Iop_Xor32
, mkexpr(t0
), mkexpr(tmpRs32
)));
23356 assign(t4
, unop(Iop_1Uto32
,
23358 binop(Iop_And32
, mkexpr(t3
), mkU32(0x80000000)),
23359 mkU32(0x80000000))));
23361 dis_branch_compact(False
, binop(Iop_CmpNE32
,
23362 binop(Iop_Or32
, mkexpr(t2
), mkexpr(t4
)),
23363 mkU32(0)), imm
, dres
);
23372 case 0x09: /* Doubleword Add Immidiate Unsigned - DADDIU; MIPS64 */
23373 DIP("daddiu r%u, r%u, %u", rt
, rs
, imm
);
23374 putIReg(rt
, binop(Iop_Add64
, getIReg(rs
), mkU64(extend_s_16to64(imm
))));
23377 case 0x0A: { /* LDL */
23378 /* Load Doubleword Left - LDL; MIPS64 */
23380 DIP("ldl r%u, %u(r%u)", rt
, imm
, rs
);
23382 #if defined (_MIPSEL)
23383 t1
= newTemp(Ity_I64
);
23384 assign(t1
, binop(Iop_Add64
, getIReg(rs
), mkU64(extend_s_16to64(imm
))));
23385 #elif defined (_MIPSEB)
23386 t1
= newTemp(Ity_I64
);
23387 assign(t1
, binop(Iop_Xor64
, mkU64(0x7), binop(Iop_Add64
, getIReg(rs
),
23388 mkU64(extend_s_16to64(imm
)))));
23390 /* t2 = word addr */
23391 /* t4 = addr mod 8 */
23392 LWX_SWX_PATTERN64_1
;
23394 /* t3 = word content - shifted */
23395 t3
= newTemp(Ity_I64
);
23396 assign(t3
, binop(Iop_Shl64
, load(Ity_I64
, mkexpr(t2
)),
23397 narrowTo(Ity_I8
, binop(Iop_Shl64
, binop(Iop_Sub64
, mkU64(0x07),
23398 mkexpr(t4
)), mkU8(3)))));
23400 /* rt content - adjusted */
23401 t5
= newTemp(Ity_I64
);
23402 t6
= newTemp(Ity_I64
);
23403 t7
= newTemp(Ity_I64
);
23405 assign(t5
, binop(Iop_Mul64
, mkexpr(t4
), mkU64(0x8)));
23407 assign(t6
, binop(Iop_Shr64
, mkU64(0x00FFFFFFFFFFFFFFULL
),
23408 narrowTo(Ity_I8
, mkexpr(t5
))));
23410 assign(t7
, binop(Iop_And64
, getIReg(rt
), mkexpr(t6
)));
23412 putIReg(rt
, binop(Iop_Or64
, mkexpr(t7
), mkexpr(t3
)));
23416 case 0x0B: { /* LDR */
23417 /* Load Doubleword Right - LDR; MIPS64 */
23419 DIP("ldr r%u,%u(r%u)", rt
, imm
, rs
);
23421 #if defined (_MIPSEL)
23422 t1
= newTemp(Ity_I64
);
23423 assign(t1
, binop(Iop_Add64
, getIReg(rs
), mkU64(extend_s_16to64(imm
))));
23424 #elif defined (_MIPSEB)
23425 t1
= newTemp(Ity_I64
);
23426 assign(t1
, binop(Iop_Xor64
, mkU64(0x7), binop(Iop_Add64
, getIReg(rs
),
23427 mkU64(extend_s_16to64(imm
)))));
23429 /* t2 = word addr */
23430 /* t4 = addr mod 8 */
23431 LWX_SWX_PATTERN64_1
;
23433 /* t3 = word content - shifted */
23434 t3
= newTemp(Ity_I64
);
23435 assign(t3
, binop(Iop_Shr64
, load(Ity_I64
, mkexpr(t2
)),
23436 narrowTo(Ity_I8
, binop(Iop_Shl64
, mkexpr(t4
), mkU8(3)))));
23438 /* rt content - adjusted */
23439 t5
= newTemp(Ity_I64
);
23440 assign(t5
, binop(Iop_And64
, getIReg(rt
), unop(Iop_Not64
,
23441 binop(Iop_Shr64
, mkU64(0xFFFFFFFFFFFFFFFFULL
),
23442 narrowTo(Ity_I8
, binop(Iop_Shl64
, mkexpr(t4
), mkU8(0x3)))))));
23444 putIReg(rt
, binop(Iop_Or64
, mkexpr(t5
), mkexpr(t3
)));
23448 case 0x0C: /* Special2 */
23449 return disInstr_MIPS_WRK_Special2(cins
, archinfo
, abiinfo
,
23450 dres
, bstmt
, lastn
);
23452 case 0x0D: /* DAUI */
23453 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
23454 DIP("daui r%u, r%u, %x", rt
, rs
, imm
);
23455 putIReg(rt
, binop(Iop_Add64
, getIReg(rs
), mkU64(extend_s_32to64(imm
<< 16))));
23462 case 0x0E: /* MIPS MSA (SIMD) */
23464 Int retVal
= disMSAInstr_MIPS_WRK(cins
);
23468 } else if (retVal
== -2) {
23474 vex_printf("Error occured while trying to decode MIPS MSA "
23475 "instruction.\nYour platform probably doesn't support "
23476 "MIPS MSA (SIMD) ASE.\n");
23479 case 0x0F: /* Special3 */
23480 return disInstr_MIPS_WRK_Special3(cins
, archinfo
, abiinfo
,
23481 dres
, bstmt
, lastn
);
23490 static UInt
disInstr_MIPS_WRK_20(UInt cins
)
23492 IRTemp t1
= 0, t2
, t3
, t4
, t5
;
23493 UInt opcode
, rs
, rt
, imm
;
23495 opcode
= get_opcode(cins
);
23496 imm
= get_imm(cins
);
23499 IRType ty
= mode64
? Ity_I64
: Ity_I32
;
23501 switch (opcode
& 0x0F) {
23502 case 0x00: /* LB */
23503 DIP("lb r%u, %u(r%u)", rt
, imm
, rs
);
23504 LOAD_STORE_PATTERN
;
23507 putIReg(rt
, unop(Iop_8Sto64
, load(Ity_I8
, mkexpr(t1
))));
23509 putIReg(rt
, unop(Iop_8Sto32
, load(Ity_I8
, mkexpr(t1
))));
23513 case 0x01: /* LH */
23514 DIP("lh r%u, %u(r%u)", rt
, imm
, rs
);
23515 LOAD_STORE_PATTERN
;
23518 putIReg(rt
, unop(Iop_16Sto64
, load(Ity_I16
, mkexpr(t1
))));
23520 putIReg(rt
, unop(Iop_16Sto32
, load(Ity_I16
, mkexpr(t1
))));
23524 case 0x02: /* LWL */
23525 DIP("lwl r%u, %u(r%u)", rt
, imm
, rs
);
23529 t1
= newTemp(Ity_I64
);
23530 #if defined (_MIPSEL)
23531 assign(t1
, binop(Iop_Add64
, getIReg(rs
), mkU64(extend_s_16to64(imm
))));
23532 #elif defined (_MIPSEB)
23533 assign(t1
, binop(Iop_Xor64
,
23537 mkU64(extend_s_16to64(imm
)))));
23539 /* t2 = word addr */
23540 /* t4 = addr mod 4 */
23543 /* t3 = word content - shifted */
23544 t3
= newTemp(Ity_I32
);
23545 assign(t3
, binop(Iop_Shl32
,
23546 load(Ity_I32
, mkexpr(t2
)),
23554 /* rt content - adjusted */
23555 t5
= newTemp(Ity_I32
);
23556 assign(t5
, binop(Iop_And32
,
23557 mkNarrowTo32(ty
, getIReg(rt
)),
23560 narrowTo(Ity_I8
, binop(Iop_Mul32
,
23564 putIReg(rt
, mkWidenFrom32(ty
, binop(Iop_Or32
, mkexpr(t5
),
23565 mkexpr(t3
)), True
));
23568 t1
= newTemp(Ity_I32
);
23569 #if defined (_MIPSEL)
23570 assign(t1
, binop(Iop_Add32
, getIReg(rs
), mkU32(extend_s_16to32(imm
))));
23571 #elif defined (_MIPSEB)
23572 assign(t1
, binop(Iop_Xor32
, mkU32(0x3), binop(Iop_Add32
, getIReg(rs
),
23573 mkU32(extend_s_16to32(imm
)))));
23576 /* t2 = word addr */
23577 /* t4 = addr mod 4 */
23580 /* t3 = word content - shifted */
23581 t3
= newTemp(Ity_I32
);
23582 assign(t3
, binop(Iop_Shl32
, load(Ity_I32
, mkexpr(t2
)), narrowTo(Ity_I8
,
23583 binop(Iop_Shl32
, binop(Iop_Sub32
, mkU32(0x03), mkexpr(t4
)),
23586 /* rt content - adjusted */
23587 t5
= newTemp(Ity_I32
);
23588 assign(t5
, binop(Iop_And32
,
23592 narrowTo(Ity_I8
, binop(Iop_Mul32
,
23596 putIReg(rt
, binop(Iop_Or32
, mkexpr(t5
), mkexpr(t3
)));
23601 case 0x03: /* LW */
23602 DIP("lw r%u, %u(r%u)", rt
, imm
, rs
);
23603 LOAD_STORE_PATTERN
;
23604 putIReg(rt
, mkWidenFrom32(ty
, load(Ity_I32
, mkexpr(t1
)), True
));
23607 case 0x04: /* LBU */
23608 DIP("lbu r%u, %u(r%u)", rt
, imm
, rs
);
23609 LOAD_STORE_PATTERN
;
23612 putIReg(rt
, unop(Iop_8Uto64
, load(Ity_I8
, mkexpr(t1
))));
23614 putIReg(rt
, unop(Iop_8Uto32
, load(Ity_I8
, mkexpr(t1
))));
23618 case 0x05: /* LHU */
23619 DIP("lhu r%u, %u(r%u)", rt
, imm
, rs
);
23620 LOAD_STORE_PATTERN
;
23623 putIReg(rt
, unop(Iop_16Uto64
, load(Ity_I16
, mkexpr(t1
))));
23625 putIReg(rt
, unop(Iop_16Uto32
, load(Ity_I16
, mkexpr(t1
))));
23629 case 0x06: /* LWR */
23630 DIP("lwr r%u, %u(r%u)", rt
, imm
, rs
);
23634 t1
= newTemp(Ity_I64
);
23636 #if defined (_MIPSEL)
23637 assign(t1
, binop(Iop_Add64
, getIReg(rs
), mkU64(extend_s_16to64(imm
))));
23638 #elif defined (_MIPSEB)
23639 assign(t1
, binop(Iop_Xor64
,
23643 mkU64(extend_s_16to64(imm
)))));
23645 /* t2 = word addr */
23646 /* t4 = addr mod 4 */
23649 /* t3 = word content - shifted */
23650 t3
= newTemp(Ity_I32
);
23651 assign(t3
, binop(Iop_Shr32
,
23652 load(Ity_I32
, mkexpr(t2
)),
23654 binop(Iop_Shl32
, mkexpr(t4
), mkU8(0x03)))));
23656 /* rt content - adjusted */
23657 t5
= newTemp(Ity_I32
);
23658 assign(t5
, binop(Iop_And32
, mkNarrowTo32(ty
, getIReg(rt
)),
23659 unop(Iop_Not32
, binop(Iop_Shr32
, mkU32(0xFFFFFFFF),
23660 narrowTo(Ity_I8
, binop(Iop_Shl32
, mkexpr(t4
), mkU8(0x3)))))));
23662 putIReg(rt
, mkWidenFrom32(ty
, binop(Iop_Or32
, mkexpr(t5
),
23663 mkexpr(t3
)), True
));
23667 t1
= newTemp(Ity_I32
);
23668 #if defined (_MIPSEL)
23669 assign(t1
, binop(Iop_Add32
, getIReg(rs
), mkU32(extend_s_16to32(imm
))));
23670 #elif defined (_MIPSEB)
23671 assign(t1
, binop(Iop_Xor32
, mkU32(0x3), binop(Iop_Add32
, getIReg(rs
),
23672 mkU32(extend_s_16to32(imm
)))));
23675 /* t2 = word addr */
23676 /* t4 = addr mod 4 */
23679 /* t3 = word content - shifted */
23680 t3
= newTemp(Ity_I32
);
23681 assign(t3
, binop(Iop_Shr32
, load(Ity_I32
, mkexpr(t2
)),
23682 narrowTo(Ity_I8
, binop(Iop_Shl32
, mkexpr(t4
),
23685 /* rt content - adjusted */
23686 t5
= newTemp(Ity_I32
);
23687 assign(t5
, binop(Iop_And32
, getIReg(rt
), unop(Iop_Not32
,
23688 binop(Iop_Shr32
, mkU32(0xFFFFFFFF), narrowTo(Ity_I8
,
23689 binop(Iop_Shl32
, mkexpr(t4
), mkU8(0x3)))))));
23691 putIReg(rt
, binop(Iop_Or32
, mkexpr(t5
), mkexpr(t3
)));
23696 case 0x07: /* Load Word unsigned - LWU; MIPS64 */
23697 DIP("lwu r%u,%u(r%u)", rt
, imm
, rs
);
23698 LOAD_STORE_PATTERN
;
23700 putIReg(rt
, mkWidenFrom32(ty
, load(Ity_I32
, mkexpr(t1
)), False
));
23703 case 0x08: /* SB */
23704 DIP("sb r%u, %u(r%u)", rt
, imm
, rs
);
23705 LOAD_STORE_PATTERN
;
23706 store(mkexpr(t1
), narrowTo(Ity_I8
, getIReg(rt
)));
23709 case 0x09: /* SH */
23710 DIP("sh r%u, %u(r%u)", rt
, imm
, rs
);
23711 LOAD_STORE_PATTERN
;
23712 store(mkexpr(t1
), narrowTo(Ity_I16
, getIReg(rt
)));
23715 case 0x0A: /* SWL */
23716 DIP("swl r%u, %u(r%u)", rt
, imm
, rs
);
23719 IRTemp E_byte
= newTemp(Ity_I8
);
23720 IRTemp F_byte
= newTemp(Ity_I8
);
23721 IRTemp G_byte
= newTemp(Ity_I8
);
23722 IRTemp H_byte
= newTemp(Ity_I8
);
23723 IRTemp F_pos
= newTemp(Ity_I64
);
23724 IRTemp G_pos
= newTemp(Ity_I64
);
23727 assign(H_byte
, getByteFromReg(rt
, 0));
23729 assign(G_byte
, getByteFromReg(rt
, 1));
23731 assign(F_byte
, getByteFromReg(rt
, 2));
23733 assign(E_byte
, getByteFromReg(rt
, 3));
23736 t1
= newTemp(Ity_I64
);
23737 assign(t1
, binop(Iop_Add64
, getIReg(rs
), mkU64(extend_s_16to64(imm
))));
23739 /* t2 = word addr */
23740 t2
= newTemp(Ity_I64
);
23741 assign(t2
, binop(Iop_And64
, mkexpr(t1
), mkU64(0xFFFFFFFFFFFFFFFCULL
)));
23743 /* t3 = addr mod 4 */
23744 t3
= newTemp(Ity_I64
);
23745 assign(t3
, binop(Iop_And64
, mkexpr(t1
), mkU64(0x3)));
23747 #if defined (_MIPSEL)
23748 /* Calculate X_byte position. */
23749 assign(F_pos
, IRExpr_ITE(binop(Iop_CmpEQ64
, mkexpr(t3
), mkU64(0x0)),
23753 assign(G_pos
, IRExpr_ITE(binop(Iop_CmpEQ64
, mkexpr(t3
), mkU64(0x3)),
23757 /* Store X_byte on the right place. */
23758 store(mkexpr(t2
), mkexpr(H_byte
));
23759 store(binop(Iop_Add64
, mkexpr(t2
), mkexpr(G_pos
)), mkexpr(G_byte
));
23760 store(binop(Iop_Sub64
, mkexpr(t1
), mkexpr(F_pos
)), mkexpr(F_byte
));
23761 store(mkexpr(t1
), mkexpr(E_byte
));
23763 #else /* _MIPSEB */
23764 /* Calculate X_byte position. */
23765 assign(F_pos
, IRExpr_ITE(binop(Iop_CmpEQ64
, mkexpr(t3
), mkU64(0x3)),
23769 assign(G_pos
, IRExpr_ITE(binop(Iop_CmpEQ64
, mkexpr(t3
), mkU64(0x0)),
23773 store(binop(Iop_Add64
, mkexpr(t2
), mkU64(3)), mkexpr(H_byte
));
23774 store(binop(Iop_Add64
, mkexpr(t2
), mkexpr(G_pos
)), mkexpr(G_byte
));
23775 store(binop(Iop_Add64
, mkexpr(t1
), mkexpr(F_pos
)), mkexpr(F_byte
));
23776 store(mkexpr(t1
), mkexpr(E_byte
));
23780 IRTemp E_byte
= newTemp(Ity_I8
);
23781 IRTemp F_byte
= newTemp(Ity_I8
);
23782 IRTemp G_byte
= newTemp(Ity_I8
);
23783 IRTemp H_byte
= newTemp(Ity_I8
);
23784 IRTemp F_pos
= newTemp(Ity_I32
);
23785 IRTemp G_pos
= newTemp(Ity_I32
);
23788 assign(H_byte
, getByteFromReg(rt
, 0));
23790 assign(G_byte
, getByteFromReg(rt
, 1));
23792 assign(F_byte
, getByteFromReg(rt
, 2));
23794 assign(E_byte
, getByteFromReg(rt
, 3));
23797 t1
= newTemp(Ity_I32
);
23798 assign(t1
, binop(Iop_Add32
, getIReg(rs
), mkU32(extend_s_16to32(imm
))));
23800 /* t2 = word addr */
23801 t2
= newTemp(Ity_I32
);
23802 assign(t2
, binop(Iop_And32
, mkexpr(t1
), mkU32(0xFFFFFFFCULL
)));
23804 /* t3 = addr mod 4 */
23805 t3
= newTemp(Ity_I32
);
23806 assign(t3
, binop(Iop_And32
, mkexpr(t1
), mkU32(0x3)));
23808 #if defined (_MIPSEL)
23809 /* Calculate X_byte position. */
23810 assign(F_pos
, IRExpr_ITE(binop(Iop_CmpEQ32
, mkexpr(t3
), mkU32(0x0)),
23814 assign(G_pos
, IRExpr_ITE(binop(Iop_CmpEQ32
, mkexpr(t3
), mkU32(0x3)),
23818 /* Store X_byte on the right place. */
23819 store(mkexpr(t2
), mkexpr(H_byte
));
23820 store(binop(Iop_Add32
, mkexpr(t2
), mkexpr(G_pos
)), mkexpr(G_byte
));
23821 store(binop(Iop_Sub32
, mkexpr(t1
), mkexpr(F_pos
)), mkexpr(F_byte
));
23822 store(mkexpr(t1
), mkexpr(E_byte
));
23824 #else /* _MIPSEB */
23825 /* Calculate X_byte position. */
23826 assign(F_pos
, IRExpr_ITE(binop(Iop_CmpEQ32
, mkexpr(t3
), mkU32(0x3)),
23830 assign(G_pos
, IRExpr_ITE(binop(Iop_CmpEQ32
, mkexpr(t3
), mkU32(0x0)),
23834 store(binop(Iop_Add32
, mkexpr(t2
), mkU32(3)), mkexpr(H_byte
));
23835 store(binop(Iop_Add32
, mkexpr(t2
), mkexpr(G_pos
)), mkexpr(G_byte
));
23836 store(binop(Iop_Add32
, mkexpr(t1
), mkexpr(F_pos
)), mkexpr(F_byte
));
23837 store(mkexpr(t1
), mkexpr(E_byte
));
23844 case 0x0B: /* SW */
23845 DIP("sw r%u, %u(r%u)", rt
, imm
, rs
);
23846 LOAD_STORE_PATTERN
;
23847 store(mkexpr(t1
), mkNarrowTo32(ty
, getIReg(rt
)));
23850 case 0x0C: { /* SDL rt, offset(base) MIPS64 */
23851 DIP("sdl r%u, %u(r%u)", rt
, imm
, rs
);
23853 IRTemp A_byte
= newTemp(Ity_I8
);
23854 IRTemp B_byte
= newTemp(Ity_I8
);
23855 IRTemp C_byte
= newTemp(Ity_I8
);
23856 IRTemp D_byte
= newTemp(Ity_I8
);
23857 IRTemp E_byte
= newTemp(Ity_I8
);
23858 IRTemp F_byte
= newTemp(Ity_I8
);
23859 IRTemp G_byte
= newTemp(Ity_I8
);
23860 IRTemp H_byte
= newTemp(Ity_I8
);
23861 IRTemp B_pos
= newTemp(Ity_I64
);
23862 IRTemp C_pos
= newTemp(Ity_I64
);
23863 IRTemp D_pos
= newTemp(Ity_I64
);
23864 IRTemp E_pos
= newTemp(Ity_I64
);
23865 IRTemp F_pos
= newTemp(Ity_I64
);
23866 IRTemp G_pos
= newTemp(Ity_I64
);
23869 assign(H_byte
, getByteFromReg(rt
, 0));
23871 assign(G_byte
, getByteFromReg(rt
, 1));
23873 assign(F_byte
, getByteFromReg(rt
, 2));
23875 assign(E_byte
, getByteFromReg(rt
, 3));
23877 assign(D_byte
, getByteFromReg(rt
, 4));
23879 assign(C_byte
, getByteFromReg(rt
, 5));
23881 assign(B_byte
, getByteFromReg(rt
, 6));
23883 assign(A_byte
, getByteFromReg(rt
, 7));
23886 t1
= newTemp(Ity_I64
);
23887 assign(t1
, binop(Iop_Add64
, getIReg(rs
), mkU64(extend_s_16to64(imm
))));
23889 /* t2 = word addr */
23890 t2
= newTemp(Ity_I64
);
23891 assign(t2
, binop(Iop_And64
, mkexpr(t1
), mkU64(0xFFFFFFFFFFFFFFF8ULL
)));
23893 /* t3 = addr mod 7 */
23894 t3
= newTemp(Ity_I64
);
23895 assign(t3
, binop(Iop_And64
, mkexpr(t1
), mkU64(0x7)));
23897 #if defined (_MIPSEL)
23898 /* Calculate X_byte position. */
23899 assign(B_pos
, IRExpr_ITE(binop(Iop_CmpLT64U
, mkexpr(t3
), mkU64(0x1)),
23903 assign(C_pos
, IRExpr_ITE(binop(Iop_CmpLT64U
, mkexpr(t3
), mkU64(0x2)),
23907 assign(D_pos
, IRExpr_ITE(binop(Iop_CmpLT64U
, mkexpr(t3
), mkU64(0x3)),
23911 assign(E_pos
, IRExpr_ITE(binop(Iop_CmpLT64U
, mkexpr(t3
), mkU64(0x4)),
23915 assign(F_pos
, IRExpr_ITE(binop(Iop_CmpLT64U
, mkexpr(t3
), mkU64(0x5)),
23919 assign(G_pos
, IRExpr_ITE(binop(Iop_CmpEQ64
, mkexpr(t3
), mkU64(0x7)),
23923 /* Store X_byte on the right place. */
23924 store(mkexpr(t2
), mkexpr(H_byte
));
23925 store(binop(Iop_Add64
, mkexpr(t2
), mkexpr(G_pos
)), mkexpr(G_byte
));
23926 store(binop(Iop_Sub64
, mkexpr(t1
), mkexpr(F_pos
)), mkexpr(F_byte
));
23927 store(binop(Iop_Sub64
, mkexpr(t1
), mkexpr(E_pos
)), mkexpr(E_byte
));
23928 store(binop(Iop_Sub64
, mkexpr(t1
), mkexpr(D_pos
)), mkexpr(D_byte
));
23929 store(binop(Iop_Sub64
, mkexpr(t1
), mkexpr(C_pos
)), mkexpr(C_byte
));
23930 store(binop(Iop_Sub64
, mkexpr(t1
), mkexpr(B_pos
)), mkexpr(B_byte
));
23931 store(mkexpr(t1
), mkexpr(A_byte
));
23933 #else /* _MIPSEB */
23934 /* Calculate X_byte position. */
23935 assign(B_pos
, IRExpr_ITE(binop(Iop_CmpEQ64
, mkexpr(t3
), mkU64(0x7)),
23939 assign(C_pos
, IRExpr_ITE(binop(Iop_CmpLT64U
, mkexpr(t3
), mkU64(0x6)),
23943 assign(D_pos
, IRExpr_ITE(binop(Iop_CmpLT64U
, mkexpr(t3
), mkU64(0x5)),
23947 assign(E_pos
, IRExpr_ITE(binop(Iop_CmpLT64U
, mkexpr(t3
), mkU64(0x4)),
23951 assign(F_pos
, IRExpr_ITE(binop(Iop_CmpLT64U
, mkexpr(t3
), mkU64(0x3)),
23955 assign(G_pos
, IRExpr_ITE(binop(Iop_CmpEQ64
, mkexpr(t3
), mkU64(0x0)),
23959 /* Store X_byte on the right place. */
23960 store(binop(Iop_Add64
, mkexpr(t2
), mkU64(0x7)), mkexpr(H_byte
));
23961 store(binop(Iop_Add64
, mkexpr(t2
), mkexpr(G_pos
)), mkexpr(G_byte
));
23962 store(binop(Iop_Add64
, mkexpr(t1
), mkexpr(F_pos
)), mkexpr(F_byte
));
23963 store(binop(Iop_Add64
, mkexpr(t1
), mkexpr(E_pos
)), mkexpr(E_byte
));
23964 store(binop(Iop_Add64
, mkexpr(t1
), mkexpr(D_pos
)), mkexpr(D_byte
));
23965 store(binop(Iop_Add64
, mkexpr(t1
), mkexpr(C_pos
)), mkexpr(C_byte
));
23966 store(binop(Iop_Add64
, mkexpr(t1
), mkexpr(B_pos
)), mkexpr(B_byte
));
23967 store(mkexpr(t1
), mkexpr(A_byte
));
23974 /* SDR rt, offset(base) - MIPS64 */
23976 DIP("sdr r%u, %u(r%u)", rt
, imm
, rs
);
23977 IRTemp A_byte
= newTemp(Ity_I8
);
23978 IRTemp B_byte
= newTemp(Ity_I8
);
23979 IRTemp C_byte
= newTemp(Ity_I8
);
23980 IRTemp D_byte
= newTemp(Ity_I8
);
23981 IRTemp E_byte
= newTemp(Ity_I8
);
23982 IRTemp F_byte
= newTemp(Ity_I8
);
23983 IRTemp G_byte
= newTemp(Ity_I8
);
23984 IRTemp H_byte
= newTemp(Ity_I8
);
23985 IRTemp B_pos
= newTemp(Ity_I64
);
23986 IRTemp C_pos
= newTemp(Ity_I64
);
23987 IRTemp D_pos
= newTemp(Ity_I64
);
23988 IRTemp E_pos
= newTemp(Ity_I64
);
23989 IRTemp F_pos
= newTemp(Ity_I64
);
23990 IRTemp G_pos
= newTemp(Ity_I64
);
23993 assign(H_byte
, getByteFromReg(rt
, 0));
23995 assign(G_byte
, getByteFromReg(rt
, 1));
23997 assign(F_byte
, getByteFromReg(rt
, 2));
23999 assign(E_byte
, getByteFromReg(rt
, 3));
24001 assign(D_byte
, getByteFromReg(rt
, 4));
24003 assign(C_byte
, getByteFromReg(rt
, 5));
24005 assign(B_byte
, getByteFromReg(rt
, 6));
24007 assign(A_byte
, getByteFromReg(rt
, 7));
24010 t1
= newTemp(Ity_I64
);
24011 assign(t1
, binop(Iop_Add64
, getIReg(rs
), mkU64(extend_s_16to64(imm
))));
24013 /* t2 = word addr */
24014 t2
= newTemp(Ity_I64
);
24015 assign(t2
, binop(Iop_And64
, mkexpr(t1
), mkU64(0xFFFFFFFFFFFFFFF8ULL
)));
24017 /* t3 = addr mod 7 */
24018 t3
= newTemp(Ity_I64
);
24019 assign(t3
, binop(Iop_And64
, mkexpr(t1
), mkU64(0x7)));
24021 #if defined (_MIPSEL)
24022 /* Calculate X_byte position. */
24023 assign(B_pos
, IRExpr_ITE(binop(Iop_CmpLT64U
, mkU64(0x1), mkexpr(t3
)),
24027 assign(C_pos
, IRExpr_ITE(binop(Iop_CmpLT64U
, mkU64(0x2), mkexpr(t3
)),
24031 assign(D_pos
, IRExpr_ITE(binop(Iop_CmpLT64U
, mkU64(0x3), mkexpr(t3
)),
24035 assign(E_pos
, IRExpr_ITE(binop(Iop_CmpLT64U
, mkU64(0x4), mkexpr(t3
)),
24039 assign(F_pos
, IRExpr_ITE(binop(Iop_CmpLT64U
, mkU64(0x5), mkexpr(t3
)),
24043 assign(G_pos
, IRExpr_ITE(binop(Iop_CmpEQ64
, mkexpr(t3
), mkU64(0x7)),
24047 /* Store X_byte on the right place. */
24048 store(binop(Iop_Add64
, mkexpr(t2
), mkU64(0x7)), mkexpr(A_byte
));
24049 store(binop(Iop_Add64
, mkexpr(t1
), mkexpr(B_pos
)), mkexpr(B_byte
));
24050 store(binop(Iop_Add64
, mkexpr(t1
), mkexpr(C_pos
)), mkexpr(C_byte
));
24051 store(binop(Iop_Add64
, mkexpr(t1
), mkexpr(D_pos
)), mkexpr(D_byte
));
24052 store(binop(Iop_Add64
, mkexpr(t1
), mkexpr(E_pos
)), mkexpr(E_byte
));
24053 store(binop(Iop_Add64
, mkexpr(t1
), mkexpr(F_pos
)), mkexpr(F_byte
));
24054 store(binop(Iop_Add64
, mkexpr(t1
), mkexpr(G_pos
)), mkexpr(G_byte
));
24055 store(mkexpr(t1
), mkexpr(H_byte
));
24057 #else /* _MIPSEB */
24058 /* Calculate X_byte position. */
24059 assign(B_pos
, IRExpr_ITE(binop(Iop_CmpLT64U
, mkU64(0x5), mkexpr(t3
)),
24063 assign(C_pos
, IRExpr_ITE(binop(Iop_CmpLT64U
, mkU64(0x4), mkexpr(t3
)),
24067 assign(D_pos
, IRExpr_ITE(binop(Iop_CmpLT64U
, mkU64(0x3), mkexpr(t3
)),
24071 assign(E_pos
, IRExpr_ITE(binop(Iop_CmpLT64U
, mkU64(0x2), mkexpr(t3
)),
24075 assign(F_pos
, IRExpr_ITE(binop(Iop_CmpLT64U
, mkU64(0x1), mkexpr(t3
)),
24079 assign(G_pos
, IRExpr_ITE(binop(Iop_CmpEQ64
, mkexpr(t3
), mkU64(0x0)),
24083 /* Store X_byte on the right place. */
24084 store(mkexpr(t2
), mkexpr(A_byte
));
24085 store(binop(Iop_Sub64
, mkexpr(t1
), mkexpr(B_pos
)), mkexpr(B_byte
));
24086 store(binop(Iop_Sub64
, mkexpr(t1
), mkexpr(C_pos
)), mkexpr(C_byte
));
24087 store(binop(Iop_Sub64
, mkexpr(t1
), mkexpr(D_pos
)), mkexpr(D_byte
));
24088 store(binop(Iop_Sub64
, mkexpr(t1
), mkexpr(E_pos
)), mkexpr(E_byte
));
24089 store(binop(Iop_Sub64
, mkexpr(t1
), mkexpr(F_pos
)), mkexpr(F_byte
));
24090 store(binop(Iop_Sub64
, mkexpr(t1
), mkexpr(G_pos
)), mkexpr(G_byte
));
24091 store(mkexpr(t1
), mkexpr(H_byte
));
24096 case 0x0E: /* SWR */
24097 DIP("swr r%u, %u(r%u)", rt
, imm
, rs
);
24100 IRTemp E_byte
= newTemp(Ity_I8
);
24101 IRTemp F_byte
= newTemp(Ity_I8
);
24102 IRTemp G_byte
= newTemp(Ity_I8
);
24103 IRTemp H_byte
= newTemp(Ity_I8
);
24104 IRTemp F_pos
= newTemp(Ity_I64
);
24105 IRTemp G_pos
= newTemp(Ity_I64
);
24108 assign(H_byte
, getByteFromReg(rt
, 0));
24110 assign(G_byte
, getByteFromReg(rt
, 1));
24112 assign(F_byte
, getByteFromReg(rt
, 2));
24114 assign(E_byte
, getByteFromReg(rt
, 3));
24117 t1
= newTemp(Ity_I64
);
24118 assign(t1
, binop(Iop_Add64
, getIReg(rs
), mkU64(extend_s_16to64(imm
))));
24120 /* t2 = word addr */
24121 t2
= newTemp(Ity_I64
);
24122 assign(t2
, binop(Iop_And64
, mkexpr(t1
), mkU64(0xFFFFFFFFFFFFFFFCULL
)));
24124 /* t3 = addr mod 4 */
24125 t3
= newTemp(Ity_I64
);
24126 assign(t3
, binop(Iop_And64
, mkexpr(t1
), mkU64(0x3)));
24128 #if defined (_MIPSEL)
24129 /* Calculate X_byte position. */
24130 assign(F_pos
, IRExpr_ITE(binop(Iop_CmpEQ64
, mkexpr(t3
), mkU64(0x0)),
24134 assign(G_pos
, IRExpr_ITE(binop(Iop_CmpEQ64
, mkexpr(t3
), mkU64(0x3)),
24138 /* Store X_byte on the right place. */
24139 store(binop(Iop_Add64
, mkexpr(t2
), mkU64(0x3)), mkexpr(E_byte
));
24140 store(binop(Iop_Add64
, mkexpr(t2
), mkexpr(F_pos
)), mkexpr(F_byte
));
24141 store(binop(Iop_Add64
, mkexpr(t1
), mkexpr(G_pos
)), mkexpr(G_byte
));
24142 store(mkexpr(t1
), mkexpr(H_byte
));
24144 #else /* _MIPSEB */
24145 /* Calculate X_byte position. */
24146 assign(F_pos
, IRExpr_ITE(binop(Iop_CmpEQ64
, mkexpr(t3
), mkU64(0x3)),
24150 assign(G_pos
, IRExpr_ITE(binop(Iop_CmpEQ64
, mkexpr(t3
), mkU64(0x0)),
24154 /* Store X_byte on the right place. */
24155 store(mkexpr(t2
), mkexpr(E_byte
));
24156 store(binop(Iop_Add64
, mkexpr(t2
), mkexpr(F_pos
)), mkexpr(F_byte
));
24157 store(binop(Iop_Sub64
, mkexpr(t1
), mkexpr(G_pos
)), mkexpr(G_byte
));
24158 store(mkexpr(t1
), mkexpr(H_byte
));
24161 IRTemp E_byte
= newTemp(Ity_I8
);
24162 IRTemp F_byte
= newTemp(Ity_I8
);
24163 IRTemp G_byte
= newTemp(Ity_I8
);
24164 IRTemp H_byte
= newTemp(Ity_I8
);
24165 IRTemp F_pos
= newTemp(Ity_I32
);
24166 IRTemp G_pos
= newTemp(Ity_I32
);
24169 assign(H_byte
, getByteFromReg(rt
, 0));
24171 assign(G_byte
, getByteFromReg(rt
, 1));
24173 assign(F_byte
, getByteFromReg(rt
, 2));
24175 assign(E_byte
, getByteFromReg(rt
, 3));
24178 t1
= newTemp(Ity_I32
);
24179 assign(t1
, binop(Iop_Add32
, getIReg(rs
), mkU32(extend_s_16to32(imm
))));
24181 /* t2 = word addr */
24182 t2
= newTemp(Ity_I32
);
24183 assign(t2
, binop(Iop_And32
, mkexpr(t1
), mkU32(0xFFFFFFFCULL
)));
24185 /* t3 = addr mod 4 */
24186 t3
= newTemp(Ity_I32
);
24187 assign(t3
, binop(Iop_And32
, mkexpr(t1
), mkU32(0x3)));
24189 #if defined (_MIPSEL)
24190 /* Calculate X_byte position. */
24191 assign(F_pos
, IRExpr_ITE(binop(Iop_CmpEQ32
, mkexpr(t3
), mkU32(0x0)),
24195 assign(G_pos
, IRExpr_ITE(binop(Iop_CmpEQ32
, mkexpr(t3
), mkU32(0x3)),
24199 /* Store X_byte on the right place. */
24200 store(binop(Iop_Add32
, mkexpr(t2
), mkU32(0x3)), mkexpr(E_byte
));
24201 store(binop(Iop_Add32
, mkexpr(t2
), mkexpr(F_pos
)), mkexpr(F_byte
));
24202 store(binop(Iop_Add32
, mkexpr(t1
), mkexpr(G_pos
)), mkexpr(G_byte
));
24203 store(mkexpr(t1
), mkexpr(H_byte
));
24205 #else /* _MIPSEB */
24206 /* Calculate X_byte position. */
24207 assign(F_pos
, IRExpr_ITE(binop(Iop_CmpEQ32
, mkexpr(t3
), mkU32(0x3)),
24211 assign(G_pos
, IRExpr_ITE(binop(Iop_CmpEQ32
, mkexpr(t3
), mkU32(0x0)),
24215 /* Store X_byte on the right place. */
24216 store(mkexpr(t2
), mkexpr(E_byte
));
24217 store(binop(Iop_Add32
, mkexpr(t2
), mkexpr(F_pos
)), mkexpr(F_byte
));
24218 store(binop(Iop_Sub32
, mkexpr(t1
), mkexpr(G_pos
)), mkexpr(G_byte
));
24219 store(mkexpr(t1
), mkexpr(H_byte
));
24229 static UInt
disInstr_MIPS_WRK_30(UInt cins
, const VexArchInfo
* archinfo
,
24230 const VexAbiInfo
* abiinfo
, DisResult
* dres
,
24233 IRTemp t0
, t1
= 0, t2
, t3
, t4
, t5
;
24234 UInt opcode
, rs
, rt
, rd
, ft
, function
, imm
, instr_index
;
24236 opcode
= get_opcode(cins
);
24237 imm
= get_imm(cins
);
24243 instr_index
= get_instr_index(cins
);
24244 function
= get_function(cins
);
24245 IRType ty
= mode64
? Ity_I64
: Ity_I32
;
24247 switch (opcode
& 0x0F) {
24248 case 0x00: /* LL */
24249 DIP("ll r%u, %u(r%u)", rt
, imm
, rs
);
24250 LOAD_STORE_PATTERN
;
24252 if (abiinfo
->guest__use_fallback_LLSC
) {
24254 assign(t2
, mkWidenFrom32(ty
, load(Ity_I32
, mkexpr(t1
)), True
));
24255 putLLaddr(mkexpr(t1
));
24256 putLLdata(mkexpr(t2
));
24257 putIReg(rt
, mkexpr(t2
));
24259 t2
= newTemp(Ity_I32
);
24260 stmt(IRStmt_LLSC(MIPS_IEND
, t2
, mkexpr(t1
), NULL
));
24261 putIReg(rt
, mkWidenFrom32(ty
, mkexpr(t2
), True
));
24266 case 0x01: /* LWC1 */
24267 /* Load Word to Floating Point - LWC1 (MIPS32) */
24268 DIP("lwc1 f%u, %u(r%u)", ft
, imm
, rs
);
24269 LOAD_STORE_PATTERN
;
24272 t0
= newTemp(Ity_F32
);
24273 t2
= newTemp(Ity_I64
);
24274 assign(t0
, load(Ity_F32
, mkexpr(t1
)));
24275 assign(t2
, mkWidenFrom32(Ity_I64
, unop(Iop_ReinterpF32asI32
,
24276 mkexpr(t0
)), True
));
24277 putDReg(ft
, unop(Iop_ReinterpI64asF64
, mkexpr(t2
)));
24279 putFReg(ft
, load(Ity_F32
, mkexpr(t1
)));
24284 case 0x02: /* Branch on Bit Clear - BBIT0; Cavium OCTEON */
24286 /* Cavium Specific instructions. */
24287 if (VEX_MIPS_COMP_ID(archinfo
->hwcaps
) == VEX_PRID_COMP_CAVIUM
) {
24288 DIP("bbit0 r%u, 0x%x, %x", rs
, rt
, imm
);
24289 t0
= newTemp(Ity_I32
);
24290 t1
= newTemp(Ity_I32
);
24291 assign(t0
, mkU32(0x1));
24292 assign(t1
, binop(Iop_Shl32
, mkexpr(t0
), mkU8(rt
)));
24293 dis_branch(False
, binop(Iop_CmpEQ32
,
24296 mkNarrowTo32(ty
, getIReg(rs
))),
24299 } else if (archinfo
->hwcaps
& VEX_MIPS_CPU_ISA_M32R6
) { /* BC */
24300 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
24301 DIP("bc %x", instr_index
& 0x3FFFFFF);
24304 t0
= newTemp(Ity_I64
);
24305 assign(t0
, mkU64(guest_PC_curr_instr
+
24306 ((extend_s_26to64(instr_index
& 0x3FFFFFF) + 1 ) << 2)));
24308 t0
= newTemp(Ity_I32
);
24309 assign(t0
, mkU32(guest_PC_curr_instr
+
24310 ((extend_s_26to32(instr_index
& 0x3FFFFFF) + 1) << 2)));
24314 dres
->whatNext
= Dis_StopHere
;
24315 dres
->jk_StopHere
= Ijk_Boring
;
24317 ILLEGAL_INSTRUCTON
;
24326 case 0x03: /* PREF */
24330 case 0x04: /* Load Linked Doubleword - LLD; MIPS64 */
24331 DIP("lld r%u, %u(r%u)", rt
, imm
, rs
);
24334 LOAD_STORE_PATTERN
;
24335 t2
= newTemp(Ity_I64
);
24337 if (abiinfo
->guest__use_fallback_LLSC
) {
24338 assign(t2
, load(Ity_I64
, mkexpr(t1
)));
24339 putLLaddr(mkexpr(t1
));
24340 putLLdata(mkexpr(t2
));
24342 stmt(IRStmt_LLSC(MIPS_IEND
, t2
, mkexpr(t1
), NULL
));
24345 putIReg(rt
, mkexpr(t2
));
24352 case 0x05: /* Load Doubleword to Floating Point - LDC1 (MIPS32) */
24353 DIP("ldc1 f%u, %u(%u)", rt
, imm
, rs
);
24354 LOAD_STORE_PATTERN
;
24355 putDReg(ft
, load(Ity_F64
, mkexpr(t1
)));
24358 case 0x06: /* Branch on Bit Clear Plus 32 - BBIT032; Cavium OCTEON */
24360 /* Cavium Specific instructions. */
24361 if (VEX_MIPS_COMP_ID(archinfo
->hwcaps
) == VEX_PRID_COMP_CAVIUM
) {
24362 DIP("bbit032 r%u, 0x%x, %x", rs
, rt
, imm
);
24363 t0
= newTemp(Ity_I64
);
24364 t1
= newTemp(Ity_I8
); /* Shift. */
24365 t2
= newTemp(Ity_I64
);
24366 assign(t0
, mkU64(0x1));
24367 assign(t1
, binop(Iop_Add8
, mkU8(rt
), mkU8(32)));
24368 assign(t2
, binop(Iop_Shl64
, mkexpr(t0
), mkexpr(t1
)));
24369 dis_branch(False
, binop(Iop_CmpEQ64
,
24375 } else if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
24376 if (rs
== 0) { /* JIC */
24377 DIP("jic r%u, %u", rt
, instr_index
& 0xFFFF);
24380 t0
= newTemp(Ity_I64
);
24381 assign(t0
, binop(Iop_Add64
, getIReg(rt
),
24382 mkU64(extend_s_16to64((instr_index
& 0xFFFF)))));
24384 t0
= newTemp(Ity_I32
);
24385 assign(t0
, binop(Iop_Add32
, getIReg(rt
),
24386 mkU32(extend_s_16to32((instr_index
& 0xFFFF)))));
24390 dres
->whatNext
= Dis_StopHere
;
24391 dres
->jk_StopHere
= Ijk_Boring
;
24392 } else { /* BEQZC */
24393 DIP("beqzc r%u, %u", rs
, imm
);
24394 dres
->jk_StopHere
= Ijk_Boring
;
24395 dres
->whatNext
= Dis_StopHere
;
24396 ULong branch_offset
;
24397 t0
= newTemp(Ity_I1
);
24400 branch_offset
= extend_s_23to64((instr_index
& 0x1fffff) << 2);
24401 assign(t0
, binop(Iop_CmpEQ64
, getIReg(rs
), mkU64(0x0)));
24402 stmt(IRStmt_Exit(mkexpr(t0
), Ijk_Boring
,
24403 IRConst_U64(guest_PC_curr_instr
+ 4 + branch_offset
),
24405 putPC(mkU64(guest_PC_curr_instr
+ 4));
24407 branch_offset
= extend_s_23to32((instr_index
& 0x1fffff) << 2);
24408 assign(t0
, binop(Iop_CmpEQ32
, getIReg(rs
), mkU32(0x0)));
24409 stmt(IRStmt_Exit(mkexpr(t0
), Ijk_Boring
,
24410 IRConst_U32(guest_PC_curr_instr
+ 4 +
24411 (UInt
) branch_offset
), OFFB_PC
));
24412 putPC(mkU32(guest_PC_curr_instr
+ 4));
24421 case 0x07: /* Load Doubleword - LD; MIPS64 */
24422 DIP("ld r%u, %u(r%u)", rt
, imm
, rs
);
24423 LOAD_STORE_PATTERN
;
24424 putIReg(rt
, load(Ity_I64
, mkexpr(t1
)));
24427 case 0x08: /* SC */
24428 DIP("sc r%u, %u(r%u)", rt
, imm
, rs
);
24429 t2
= newTemp(Ity_I1
);
24430 LOAD_STORE_PATTERN
;
24432 if (abiinfo
->guest__use_fallback_LLSC
) {
24433 t3
= newTemp(Ity_I32
);
24434 assign(t2
, binop(mode64
? Iop_CmpNE64
: Iop_CmpNE32
,
24435 mkexpr(t1
), getLLaddr()));
24436 assign(t3
, mkNarrowTo32(ty
, getIReg(rt
)));
24437 putLLaddr(LLADDR_INVALID
);
24438 putIReg(rt
, getIReg(0));
24440 mips_next_insn_if(mkexpr(t2
));
24442 t4
= newTemp(Ity_I32
);
24443 t5
= newTemp(Ity_I32
);
24445 assign(t5
, mkNarrowTo32(ty
, getLLdata()));
24447 stmt(IRStmt_CAS(mkIRCAS(IRTemp_INVALID
, t4
, /* old_mem */
24448 MIPS_IEND
, mkexpr(t1
), /* addr */
24449 NULL
, mkexpr(t5
), /* expected value */
24450 NULL
, mkexpr(t3
) /* new value */)));
24452 putIReg(rt
, unop(mode64
? Iop_1Uto64
: Iop_1Uto32
,
24453 binop(Iop_CmpEQ32
, mkexpr(t4
), mkexpr(t5
))));
24455 stmt(IRStmt_LLSC(MIPS_IEND
, t2
, mkexpr(t1
),
24456 mkNarrowTo32(ty
, getIReg(rt
))));
24457 putIReg(rt
, unop(mode64
? Iop_1Uto64
: Iop_1Uto32
, mkexpr(t2
)));
24462 case 0x09: /* SWC1 */
24463 DIP("swc1 f%u, %u(r%u)", ft
, imm
, rs
);
24466 t0
= newTemp(Ity_I64
);
24467 t2
= newTemp(Ity_I32
);
24468 LOAD_STORE_PATTERN
;
24469 assign(t0
, unop(Iop_ReinterpF64asI64
, getFReg(ft
)));
24470 assign(t2
, unop(Iop_64to32
, mkexpr(t0
)));
24471 store(mkexpr(t1
), unop(Iop_ReinterpI32asF32
, mkexpr(t2
)));
24473 LOAD_STORE_PATTERN
;
24474 store(mkexpr(t1
), getFReg(ft
));
24479 case 0x0A: /* Branch on Bit Set - BBIT1; Cavium OCTEON */
24481 /* Cavium Specific instructions. */
24482 if (VEX_MIPS_COMP_ID(archinfo
->hwcaps
) == VEX_PRID_COMP_CAVIUM
) {
24483 DIP("bbit1 r%u, 0x%x, %x", rs
, rt
, imm
);
24484 t0
= newTemp(Ity_I32
);
24485 t1
= newTemp(Ity_I32
);
24486 assign(t0
, mkU32(0x1));
24487 assign(t1
, binop(Iop_Shl32
, mkexpr(t0
), mkU8(rt
)));
24488 dis_branch(False
, binop(Iop_CmpNE32
,
24491 mkNarrowTo32(ty
, getIReg(rs
))),
24494 } else if (archinfo
->hwcaps
& VEX_MIPS_CPU_ISA_M32R6
) {/* BALC */
24495 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
24496 DIP("balc %x", instr_index
& 0x3FFFFFF);
24499 t0
= newTemp(Ity_I64
);
24500 assign(t0
, mkU64(guest_PC_curr_instr
+ ((extend_s_26to64(
24501 instr_index
& 0x3FFFFFF) + 1) << 2)));
24502 putIReg(31, mkU64(guest_PC_curr_instr
+ 4));
24504 t0
= newTemp(Ity_I32
);
24505 assign(t0
, mkU32(guest_PC_curr_instr
+ ((extend_s_26to32(
24506 instr_index
& 0x3FFFFFF) + 1) << 2)));
24507 putIReg(31, mkU32(guest_PC_curr_instr
+ 4));
24511 dres
->whatNext
= Dis_StopHere
;
24512 dres
->jk_StopHere
= Ijk_Call
;
24514 ILLEGAL_INSTRUCTON
;
24523 case 0x0B: /* PCREL */
24524 if (rt
== 0x1E) { /* AUIPC */
24525 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
24526 DIP("auipc r%u, %u", rs
, imm
);
24529 putIReg(rs
, mkU64(guest_PC_curr_instr
+ (imm
<< 16)));
24531 putIReg(rs
, mkU32(guest_PC_curr_instr
+ (imm
<< 16)));
24534 ILLEGAL_INSTRUCTON
;
24538 } else if (rt
== 0x1F) { /* ALUIPC */
24539 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
24540 DIP("aluipc r%u, %u", rs
, imm
);
24543 putIReg(rs
, mkU64((~0x0FFFFULL
) &
24544 (guest_PC_curr_instr
+ extend_s_32to64(imm
<< 16))));
24546 putIReg(rs
, mkU32((~0x0FFFFULL
) &
24547 (guest_PC_curr_instr
+ (imm
<< 16))));
24550 ILLEGAL_INSTRUCTON
;
24554 } else if ((rt
& 0x18) == 0) { /* ADDIUPC */
24555 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
24556 DIP("addiupc r%u, %u", rs
, instr_index
& 0x7FFFF);
24559 putIReg(rs
, mkU64(guest_PC_curr_instr
+
24560 (extend_s_19to64(instr_index
& 0x7FFFF) << 2)));
24562 putIReg(rs
, mkU32(guest_PC_curr_instr
+
24563 (extend_s_19to32(instr_index
& 0x7FFFF) << 2)));
24566 ILLEGAL_INSTRUCTON
;
24570 } else if ((rt
& 0x18) == 8) { /* LWPC */
24571 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
24572 DIP("lwpc r%u, %x", rs
, instr_index
& 0x7FFFF);
24575 t1
= newTemp(Ity_I64
);
24576 assign(t1
, mkU64(guest_PC_curr_instr
+
24577 (extend_s_19to64(instr_index
& 0x7FFFF) << 2)));
24578 putIReg(rs
, unop(Iop_32Sto64
, load(Ity_I32
, mkexpr(t1
))));
24580 t1
= newTemp(Ity_I32
);
24581 assign(t1
, mkU32(guest_PC_curr_instr
+
24582 (extend_s_19to32(instr_index
& 0x7FFFF) << 2)));
24583 putIReg(rs
, load(Ity_I32
, mkexpr(t1
)));
24586 ILLEGAL_INSTRUCTON
;
24590 } else if ((rt
& 0x18) == 16) { /* LWUPC */
24591 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
24592 DIP("lwupc r%u, %x", rs
, instr_index
& 0x7FFFF);
24595 t1
= newTemp(Ity_I64
);
24596 assign(t1
, mkU64(guest_PC_curr_instr
+
24597 (extend_s_19to64(instr_index
& 0x7FFFF) << 2)));
24598 putIReg(rs
, unop(Iop_32Uto64
, load(Ity_I32
, mkexpr(t1
))));
24600 t1
= newTemp(Ity_I32
);
24601 assign(t1
, mkU32(guest_PC_curr_instr
+
24602 (extend_s_19to32(instr_index
& 0x7FFFF) << 2)));
24603 putIReg(rs
, load(Ity_I32
, mkexpr(t1
)));
24610 } else if ((rt
& 0x1C) == 0x18) { /* LDPC */
24611 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
24612 DIP("ldpc r%u, %x", rs
, instr_index
& 0x3FFFF);
24613 t1
= newTemp(Ity_I64
);
24614 assign(t1
, mkU64(guest_PC_curr_instr
+
24615 (extend_s_18to64(instr_index
& 0x3FFFF) << 3)));
24616 putIReg(rs
, load(Ity_I64
, mkexpr(t1
)));
24626 if (0x3B == function
&&
24627 (VEX_MIPS_COMP_ID(archinfo
->hwcaps
) == VEX_PRID_COMP_BROADCOM
)) {
24629 DIP("rdhwr r%u, r%u", rt
, rd
);
24632 putIReg(rt
, getULR());
24641 case 0x0C: /* Store Conditional Doubleword - SCD; MIPS64 */
24642 DIP("scd r%u, %u(r%u)", rt
, imm
, rs
);
24645 t2
= newTemp(Ity_I1
);
24646 LOAD_STORE_PATTERN
;
24648 if (abiinfo
->guest__use_fallback_LLSC
) {
24649 t3
= newTemp(Ity_I64
);
24650 assign(t2
, binop(Iop_CmpNE64
, mkexpr(t1
), getLLaddr()));
24651 assign(t3
, getIReg(rt
));
24652 putLLaddr(LLADDR_INVALID
);
24653 putIReg(rt
, getIReg(0));
24655 mips_next_insn_if(mkexpr(t2
));
24657 t4
= newTemp(Ity_I64
);
24658 t5
= newTemp(Ity_I64
);
24660 assign(t5
, getLLdata());
24662 stmt(IRStmt_CAS(mkIRCAS(IRTemp_INVALID
, t4
, /* old_mem */
24663 MIPS_IEND
, mkexpr(t1
), /* addr */
24664 NULL
, mkexpr(t5
), /* expected value */
24665 NULL
, mkexpr(t3
) /* new value */)));
24667 putIReg(rt
, unop(Iop_1Uto64
,
24668 binop(Iop_CmpEQ64
, mkexpr(t4
), mkexpr(t5
))));
24670 stmt(IRStmt_LLSC(MIPS_IEND
, t2
, mkexpr(t1
), getIReg(rt
)));
24671 putIReg(rt
, unop(Iop_1Uto64
, mkexpr(t2
)));
24679 case 0x0D: /* Store Doubleword from Floating Point - SDC1 */
24680 DIP("sdc1 f%u, %u(%u)", ft
, imm
, rs
);
24681 LOAD_STORE_PATTERN
;
24682 store(mkexpr(t1
), getDReg(ft
));
24685 case 0x0E: /* Branch on Bit Set Plus 32 - BBIT132; Cavium OCTEON */
24687 /* Cavium Specific instructions. */
24688 if (VEX_MIPS_COMP_ID(archinfo
->hwcaps
) == VEX_PRID_COMP_CAVIUM
) {
24689 DIP("bbit132 r%u, 0x%x, %x", rs
, rt
, imm
);
24690 t0
= newTemp(Ity_I64
);
24691 t1
= newTemp(Ity_I8
); /* Shift. */
24692 t2
= newTemp(Ity_I64
);
24693 assign(t0
, mkU64(0x1));
24694 assign(t1
, binop(Iop_Add8
, mkU8(rt
), mkU8(32)));
24695 assign(t2
, binop(Iop_Shl64
, mkexpr(t0
), mkexpr(t1
)));
24696 dis_branch(False
, binop(Iop_CmpNE64
,
24702 } else if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
24703 if (rs
== 0) {/* JIALC */
24704 DIP("jialc r%u, %u", rt
, instr_index
& 0xFFFF);
24709 t0
= newTemp(Ity_I64
);
24710 assign(t0
, binop(Iop_Add64
, getIReg(rt
),
24711 mkU64(extend_s_16to64((instr_index
& 0xFFFF)))));
24712 putIReg(31, mkU64(guest_PC_curr_instr
+ 4));
24714 t0
= newTemp(Ity_I32
);
24715 assign(t0
, binop(Iop_Add32
, getIReg(rt
),
24716 mkU32(extend_s_16to32((instr_index
& 0xFFFF)))));
24717 putIReg(31, mkU32(guest_PC_curr_instr
+ 4));
24721 dres
->whatNext
= Dis_StopHere
;
24722 dres
->jk_StopHere
= Ijk_Call
;
24723 } else { /* BNEZC */
24724 DIP("bnezc r%u, %u", rs
, imm
);
24725 dres
->jk_StopHere
= Ijk_Boring
;
24726 dres
->whatNext
= Dis_StopHere
;
24727 ULong branch_offset
;
24728 t0
= newTemp(Ity_I1
);
24731 branch_offset
= extend_s_23to64((instr_index
& 0x1fffff) << 2);
24732 assign(t0
, unop(Iop_Not1
, binop(Iop_CmpEQ64
, getIReg(rs
), mkU64(0x0))));
24733 stmt(IRStmt_Exit(mkexpr(t0
), Ijk_Boring
,
24734 IRConst_U64(guest_PC_curr_instr
+ 4 + branch_offset
),
24736 putPC(mkU64(guest_PC_curr_instr
+ 4));
24738 branch_offset
= extend_s_23to32((instr_index
& 0x1fffff) << 2);
24739 assign(t0
, unop(Iop_Not1
, binop(Iop_CmpEQ32
, getIReg(rs
), mkU32(0x0))));
24740 stmt(IRStmt_Exit(mkexpr(t0
), Ijk_Boring
,
24741 IRConst_U32(guest_PC_curr_instr
+ 4 +
24742 (UInt
) branch_offset
), OFFB_PC
));
24743 putPC(mkU32(guest_PC_curr_instr
+ 4));
24752 case 0x0F: /* Store Doubleword - SD; MIPS64 */
24753 DIP("sd r%u, %u(r%u)", rt
, imm
, rs
);
24754 LOAD_STORE_PATTERN
;
24755 store(mkexpr(t1
), getIReg(rt
));
24765 static DisResult
disInstr_MIPS_WRK ( Long delta64
,
24766 const VexArchInfo
* archinfo
,
24767 const VexAbiInfo
* abiinfo
,
24771 UInt opcode
, cins
, result
;
24775 static IRExpr
*lastn
= NULL
; /* last jump addr */
24776 static IRStmt
*bstmt
= NULL
; /* branch (Exit) stmt */
24778 /* The running delta */
24779 Int delta
= (Int
) delta64
;
24781 /* Holds eip at the start of the insn, so that we can print
24782 consistent error messages for unimplemented insns. */
24783 Int delta_start
= delta
;
24785 /* Are we in a delay slot ? */
24786 Bool delay_slot_branch
, likely_delay_slot
, delay_slot_jump
;
24788 /* Set result defaults. */
24789 dres
.whatNext
= Dis_Continue
;
24791 dres
.jk_StopHere
= Ijk_INVALID
;
24792 dres
.hint
= Dis_HintNone
;
24794 delay_slot_branch
= likely_delay_slot
= delay_slot_jump
= False
;
24796 const UChar
*code
= guest_code
+ delta
;
24797 cins
= getUInt(code
);
24798 opcode
= get_opcode(cins
);
24799 DIP("\t0x%llx:\t0x%08x\t", (Addr64
)guest_PC_curr_instr
, cins
);
24802 if (branch_or_jump(guest_code
+ delta
- 4)) {
24803 if (lastn
== NULL
&& bstmt
== NULL
) {
24806 dres
.whatNext
= Dis_StopHere
;
24808 if (lastn
!= NULL
) {
24809 delay_slot_jump
= True
;
24810 } else if (bstmt
!= NULL
) {
24811 delay_slot_branch
= True
;
24816 if (branch_or_link_likely(guest_code
+ delta
- 4)) {
24817 likely_delay_slot
= True
;
24821 /* Spot "Special" instructions (see comment at top of file). */
24823 /* Spot the 16-byte preamble:
24836 UInt word1
= mode64
? 0xF8 : 0x342;
24837 UInt word2
= mode64
? 0x378 : 0x742;
24838 UInt word3
= mode64
? 0x778 : 0xC2;
24839 UInt word4
= mode64
? 0x4F8 : 0x4C2;
24841 if (getUInt(code
+ 0) == word1
&& getUInt(code
+ 4) == word2
&&
24842 getUInt(code
+ 8) == word3
&& getUInt(code
+ 12) == word4
) {
24843 /* Got a "Special" instruction preamble. Which one is it? */
24844 if (getUInt(code
+ 16) == 0x01ad6825 /* or $13, $13, $13 */ ) {
24845 /* $11 = client_request ( $12 ) */
24846 DIP("$11 = client_request ( $12 )");
24849 putPC(mkU64(guest_PC_curr_instr
+ 20));
24851 putPC(mkU32(guest_PC_curr_instr
+ 20));
24853 dres
.jk_StopHere
= Ijk_ClientReq
;
24854 dres
.whatNext
= Dis_StopHere
;
24856 goto decode_success
;
24857 } else if (getUInt(code
+ 16) == 0x01ce7025 /* or $14, $14, $14 */ ) {
24858 /* $11 = guest_NRADDR */
24859 DIP("$11 = guest_NRADDR");
24864 putIReg(11, IRExpr_Get(offsetof(VexGuestMIPS64State
,
24865 guest_NRADDR
), Ity_I64
));
24867 putIReg(11, IRExpr_Get(offsetof(VexGuestMIPS32State
,
24868 guest_NRADDR
), Ity_I32
));
24870 goto decode_success
;
24871 } else if (getUInt(code
+ 16) == 0x01ef7825 /* or $15, $15, $15 */ ) {
24872 /* branch-and-link-to-noredir $25 */
24873 DIP("branch-and-link-to-noredir $25");
24876 putIReg(31, mkU64(guest_PC_curr_instr
+ 20));
24878 putIReg(31, mkU32(guest_PC_curr_instr
+ 20));
24880 putPC(getIReg(25));
24881 dres
.jk_StopHere
= Ijk_NoRedir
;
24882 dres
.whatNext
= Dis_StopHere
;
24883 goto decode_success
;
24884 } else if (getUInt(code
+ 16) == 0x016b5825 /* or $11,$11,$11 */ ) {
24886 DIP("IR injection");
24887 #if defined (_MIPSEL)
24888 vex_inject_ir(irsb
, Iend_LE
);
24889 #elif defined (_MIPSEB)
24890 vex_inject_ir(irsb
, Iend_BE
);
24894 stmt(IRStmt_Put(offsetof(VexGuestMIPS64State
, guest_CMSTART
),
24895 mkU64(guest_PC_curr_instr
)));
24896 stmt(IRStmt_Put(offsetof(VexGuestMIPS64State
, guest_CMLEN
),
24899 putPC(mkU64(guest_PC_curr_instr
+ 20));
24901 stmt(IRStmt_Put(offsetof(VexGuestMIPS32State
, guest_CMSTART
),
24902 mkU32(guest_PC_curr_instr
)));
24903 stmt(IRStmt_Put(offsetof(VexGuestMIPS32State
, guest_CMLEN
),
24906 putPC(mkU32(guest_PC_curr_instr
+ 20));
24909 dres
.whatNext
= Dis_StopHere
;
24910 dres
.jk_StopHere
= Ijk_InvalICache
;
24913 goto decode_success
;
24916 /* We don't know what it is. Set opc1/opc2 so decode_failure
24917 can print the insn following the Special-insn preamble. */
24919 goto decode_failure
;
24924 switch (opcode
& 0x30) {
24926 result
= disInstr_MIPS_WRK_00(cins
, archinfo
, abiinfo
,
24927 &dres
, &bstmt
, &lastn
);
24929 if (result
== -1) goto decode_failure
;
24931 if (result
== -2) goto decode_failure_dsp
;
24937 result
= disInstr_MIPS_WRK_10(cins
, archinfo
, abiinfo
,
24938 &dres
, &bstmt
, &lastn
);
24940 if (result
== -1) goto decode_failure
;
24942 if (result
== -2) goto decode_failure_dsp
;
24947 result
= disInstr_MIPS_WRK_20(cins
);
24949 if (result
== -1) goto decode_failure
;
24951 if (result
== -2) goto decode_failure_dsp
;
24956 result
= disInstr_MIPS_WRK_30(cins
, archinfo
, abiinfo
, &dres
, &bstmt
);
24958 if (result
== -1) goto decode_failure
;
24960 if (result
== -2) goto decode_failure_dsp
;
24964 decode_failure_dsp
:
24965 vex_printf("Error occured while trying to decode MIPS32 DSP "
24966 "instruction.\nYour platform probably doesn't support "
24967 "MIPS32 DSP ASE.\n");
24970 /* All decode failures end up here. */
24972 vex_printf("vex mips->IR: unhandled instruction bytes: "
24973 "0x%x 0x%x 0x%x 0x%x\n",
24974 (UInt
) getIByte(delta_start
+ 0),
24975 (UInt
) getIByte(delta_start
+ 1),
24976 (UInt
) getIByte(delta_start
+ 2),
24977 (UInt
) getIByte(delta_start
+ 3));
24979 /* Tell the dispatcher that this insn cannot be decoded, and so has
24980 not been executed, and (is currently) the next to be executed.
24981 EIP should be up-to-date since it made so at the start bnezof each
24982 insn, but nevertheless be paranoid and update it again right
24985 stmt(IRStmt_Put(offsetof(VexGuestMIPS64State
, guest_PC
),
24986 mkU64(guest_PC_curr_instr
)));
24987 jmp_lit64(&dres
, Ijk_NoDecode
, guest_PC_curr_instr
);
24989 stmt(IRStmt_Put(offsetof(VexGuestMIPS32State
, guest_PC
),
24990 mkU32(guest_PC_curr_instr
)));
24991 jmp_lit32(&dres
, Ijk_NoDecode
, guest_PC_curr_instr
);
24994 dres
.whatNext
= Dis_StopHere
;
24997 } /* switch (opc) for the main (primary) opcode switch. */
24999 /* All MIPS insn have 4 bytes */
25001 if (delay_slot_branch
) {
25002 delay_slot_branch
= False
;
25007 putPC(mkU64(guest_PC_curr_instr
+ 4));
25009 putPC(mkU32(guest_PC_curr_instr
+ 4));
25011 dres
.jk_StopHere
= is_Branch_or_Jump_and_Link(guest_code
+ delta
- 4) ?
25012 Ijk_Call
: Ijk_Boring
;
25015 if (likely_delay_slot
) {
25016 dres
.jk_StopHere
= Ijk_Boring
;
25017 dres
.whatNext
= Dis_StopHere
;
25022 if (delay_slot_jump
) {
25025 dres
.jk_StopHere
= is_Branch_or_Jump_and_Link(guest_code
+ delta
- 4) ?
25026 Ijk_Call
: Ijk_Boring
;
25031 /* All decode successes end up here. */
25032 switch (dres
.whatNext
) {
25035 putPC(mkU64(guest_PC_curr_instr
+ 4));
25037 putPC(mkU32(guest_PC_curr_instr
+ 4));
25049 /* On MIPS we need to check if the last instruction in block is branch or
25051 if (((vex_control
.guest_max_insns
- 1) == (delta
+ 4) / 4)
25052 && (dres
.whatNext
!= Dis_StopHere
))
25053 if (branch_or_jump(guest_code
+ delta
+ 4)) {
25054 dres
.whatNext
= Dis_StopHere
;
25055 dres
.jk_StopHere
= Ijk_Boring
;
25057 putPC(mkU64(guest_PC_curr_instr
+ 4));
25059 putPC(mkU32(guest_PC_curr_instr
+ 4));
25069 /*------------------------------------------------------------*/
25070 /*--- Top-level fn ---*/
25071 /*------------------------------------------------------------*/
25073 /* Disassemble a single instruction into IR. The instruction
25074 is located in host memory at &guest_code[delta]. */
25075 DisResult
disInstr_MIPS( IRSB
* irsb_IN
,
25076 const UChar
* guest_code_IN
,
25079 VexArch guest_arch
,
25080 const VexArchInfo
* archinfo
,
25081 const VexAbiInfo
* abiinfo
,
25082 VexEndness host_endness_IN
,
25083 Bool sigill_diag_IN
)
25086 /* Set globals (see top of this file) */
25087 vassert(guest_arch
== VexArchMIPS32
|| guest_arch
== VexArchMIPS64
);
25089 mode64
= guest_arch
!= VexArchMIPS32
;
25090 fp_mode64
= abiinfo
->guest_mips_fp_mode
& 1;
25091 fp_mode64_fre
= abiinfo
->guest_mips_fp_mode
& 2;
25092 has_msa
= VEX_MIPS_PROC_MSA(archinfo
->hwcaps
);
25094 vassert(VEX_MIPS_HOST_FP_MODE(archinfo
->hwcaps
) >= fp_mode64
);
25096 guest_code
= guest_code_IN
;
25098 host_endness
= host_endness_IN
;
25099 #if defined(VGP_mips32_linux)
25100 guest_PC_curr_instr
= (Addr32
)guest_IP
;
25101 #elif defined(VGP_mips64_linux)
25102 guest_PC_curr_instr
= (Addr64
)guest_IP
;
25105 dres
= disInstr_MIPS_WRK(delta
, archinfo
, abiinfo
, sigill_diag_IN
);
25110 /*--------------------------------------------------------------------*/
25111 /*--- end guest_mips_toIR.c ---*/
25112 /*--------------------------------------------------------------------*/