arm64 isel: in a couple places, use `xzr` as a source rather than loading zero into...
[valgrind.git] / VEX / priv / host_nanomips_isel.c
blobc4a8f4fe3c45afe6474853b6cf89f096f092a6c3
2 /*---------------------------------------------------------------*/
3 /*--- begin host_nanomips_isel.c ---*/
4 /*---------------------------------------------------------------*/
6 /*
7 This file is part of Valgrind, a dynamic binary instrumentation
8 framework.
10 Copyright (C) 2017-2018 RT-RK
12 This program is free software; you can redistribute it and/or
13 modify it under the terms of the GNU General Public License as
14 published by the Free Software Foundation; either version 2 of the
15 License, or (at your option) any later version.
17 This program is distributed in the hope that it will be useful, but
18 WITHOUT ANY WARRANTY; without even the implied warranty of
19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 General Public License for more details.
22 You should have received a copy of the GNU General Public License
23 along with this program; if not, write to the Free Software
24 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
25 02110-1301, USA.
27 The GNU General Public License is contained in the file COPYING.
30 #include "libvex_basictypes.h"
31 #include "libvex_ir.h"
32 #include "libvex.h"
34 #include "main_util.h"
35 #include "main_globals.h"
36 #include "host_generic_regs.h"
37 #include "host_nanomips_defs.h"
39 /*---------------------------------------------------------*/
40 /*--- Register Usage Conventions ---*/
41 /*---------------------------------------------------------*/
43 /* Integer Regs
44 ------------
45 ZERO0 Reserved
46 GPR12:22 Allocatable
47 23 GuestStatePointer
48 SP StackFramePointer
49 RA LinkRegister */
/* Host hwcaps: set once at the start of insn selection and read-only
   thereafter. */
static UInt hwcaps_host = 0;

/* GPR register class for NANOMIPS: all integer vregs are 32-bit. */
#define HRcGPR HRcInt32
57 /*---------------------------------------------------------*/
58 /*--- ISelEnv ---*/
59 /*---------------------------------------------------------*/
61 /* This carries around:
63 - A mapping from IRTemp to IRType, giving the type of any IRTemp we
64 might encounter. This is computed before insn selection starts,
65 and does not change.
67 - A mapping from IRTemp to HReg. This tells the insn selector
68 which virtual register(s) are associated with each IRTemp
69 temporary. This is computed before insn selection starts, and
70 does not change. We expect this mapping to map precisely the
71 same set of IRTemps as the type mapping does.
73 - vregmap holds the primary register for the IRTemp.
74 - vregmapHI is only used for 64-bit integer-typed
75 IRTemps. It holds the identity of a second
76 32-bit virtual HReg, which holds the high half
77 of the value.
79 - The code array, that is, the insns selected so far.
81 - A counter, for generating new virtual registers.
83 - The host subarchitecture we are selecting insns for.
84 This is set at the start and does not change.
86 - A Bool for indicating whether we may generate chain-me
87 instructions for control flow transfers, or whether we must use
88 XAssisted.
90 - The maximum guest address of any guest insn in this block.
91 Actually, the address of the highest-addressed byte from any insn
92 in this block. Is set at the start and does not change. This is
93 used for detecting jumps which are definitely forward-edges from
94 this block, and therefore can be made (chained) to the fast entry
95 point of the destination, thereby avoiding the destination's
96 event check.
98 Note, this is all (well, mostly) host-independent.
typedef
   struct {
      /* Constant -- are set at the start and do not change. */
      IRTypeEnv* type_env;    /* IRTemp -> IRType map for this superblock */

      HReg* vregmap;          /* IRTemp -> primary (low-half) vreg */
      HReg* vregmapHI;        /* IRTemp -> high-half vreg; I64 temps only */
      Int n_vregmap;          /* number of entries in the two maps above */

      UInt hwcaps;            /* host subarchitecture capabilities */

      Bool chainingAllowed;   /* may we emit chain-me control transfers? */
      Addr64 max_ga;          /* highest guest address in this SB */

      /* These are modified as we go along. */
      HInstrArray* code;      /* the insns selected so far */
      Int vreg_ctr;           /* counter for generating new virtual regs */
   } ISelEnv;
120 static HReg lookupIRTemp(ISelEnv* env, IRTemp tmp)
122 vassert(tmp < env->n_vregmap);
123 return env->vregmap[tmp];
126 static void lookupIRTemp64(HReg* vrHI, HReg* vrLO, ISelEnv* env, IRTemp tmp)
128 vassert(tmp < env->n_vregmap);
129 vassert(!hregIsInvalid(env->vregmapHI[tmp]));
130 *vrLO = env->vregmap[tmp];
131 *vrHI = env->vregmapHI[tmp];
134 static void addInstr(ISelEnv* env, NANOMIPSInstr* instr)
136 addHInstr(env->code, instr);
138 if (vex_traceflags & VEX_TRACE_VCODE) {
139 ppNANOMIPSInstr(instr);
140 vex_printf("\n");
144 static HReg newVRegI(ISelEnv* env)
146 HReg reg = mkHReg(True /* virtual reg */,
147 HRcGPR, 0 /* enc */, env->vreg_ctr);
148 env->vreg_ctr++;
149 return reg;
152 /*---------------------------------------------------------*/
153 /*--- ISEL: Forward declarations ---*/
154 /*---------------------------------------------------------*/
156 /* These are organised as iselXXX and iselXXX_wrk pairs. The
157 iselXXX_wrk do the real work, but are not to be called directly.
158 For each XXX, iselXXX calls its iselXXX_wrk counterpart, then
159 checks that all returned registers are virtual. You should not
160 call the _wrk version directly.
/* Compute an I1/I8/I16/I32 into a GPR (always a new virtual register). */
static HReg iselWordExpr_R_wrk(ISelEnv* env, IRExpr* e);
static HReg iselWordExpr_R(ISelEnv* env, IRExpr* e);

/* Compute an I64 into a pair of GPRs: high word in *rHi, low in *rLo. */
static void iselInt64Expr_wrk(HReg* rHi, HReg* rLo, ISelEnv* env, IRExpr* e);
static void iselInt64Expr(HReg* rHi, HReg* rLo, ISelEnv* env, IRExpr* e);
171 /*---------------------------------------------------------*/
172 /*--- ISEL: Misc helpers ---*/
173 /*---------------------------------------------------------*/
175 /* Make an int reg-reg move. */
176 static inline NANOMIPSInstr *mk_iMOVds_RR(HReg r_dst, HReg r_src)
178 vassert(hregClass(r_dst) == hregClass(r_src));
179 vassert(hregClass(r_src) == HRcInt32);
180 return NANOMIPSInstr_Alu(NMalu_OR, r_dst, r_src, r_src);
183 /* Extract sign-extended value from IRConst */
184 static inline Int extractConst(IRConst *c)
186 switch (c->tag) {
187 case Ico_U32:
188 return c->Ico.U32;
190 case Ico_U16:
191 return (Int)(Short)c->Ico.U16;
193 case Ico_U8:
194 return (Int)(Char)c->Ico.U8;
196 case Ico_U1:
197 return !!c->Ico.U1;
199 default:
200 vpanic("NANOMIPSisel_extractConst() fails");
204 /*---------------------------------------------------------*/
205 /*--- ISEL: Function call helpers ---*/
206 /*---------------------------------------------------------*/
208 /* Used only in doHelperCall. See big comment in doHelperCall re
209 handling of register-parameter args. This function figures out
210 whether evaluation of an expression might require use of a fixed
211 register. If in doubt return True (safe but suboptimal).
213 static Bool mightRequireFixedRegs(IRExpr* e)
215 switch (e->tag) {
216 case Iex_RdTmp:
217 case Iex_Const:
218 case Iex_Get:
219 return False;
221 default:
222 return True;
226 /* Do a complete function call. |guard| is a Ity_Bit expression
227 indicating whether or not the call happens. If guard==NULL, the
228 call is unconditional. |retloc| is set to indicate where the
229 return value is after the call. The caller (of this fn) must
230 generate code to add |stackAdjustAfterCall| to the stack pointer
231 after the call is done. */
static void doHelperCall(/*OUT*/ RetLoc* retloc,
                         ISelEnv* env,
                         IRExpr* guard,
                         IRCallee* cee,
                         IRType retty,
                         IRExpr** args )
{
   HReg argregs[8];
   HReg tmpregs[8];
   Bool go_fast;
   UInt n_args, i, argreg, nGSPTRs, argiregs;
   HReg cond = INVALID_HREG;

   vassert((retty == Ity_INVALID) ||
           (retty == Ity_I32) ||
           (retty == Ity_I64) ||
           (retty == Ity_I8) ||
           (retty == Ity_I16));

   /* NANOMIPS P32 calling convention: up to eight registers ($a0 ... $a7)
      are allowed to be used for passing integer arguments. */

   /* The return type can be I{32,16,8}.
      |args| may contain IRExpr_GSPTR(), in which case the value
      in the guest state pointer register is passed as the
      corresponding argument. */

   /* First pass over the args: count them, and count how many are
      IRExpr_GSPTR() (at most one is allowed). */
   *retloc = mk_RetLoc_INVALID();
   n_args = 0;
   nGSPTRs = 0;

   for (i = 0; args[i]; i++) {
      IRExpr* arg = args[i];

      if (UNLIKELY(arg->tag == Iex_GSPTR)) {
         nGSPTRs++;
      }

      n_args++;
   }

   vassert(nGSPTRs <= 1);
   vassert(n_args <= NANOMIPS_N_REGPARMS);

   /* The eight P32 integer argument registers, $a0 .. $a7. */
   argregs[0] = hregNANOMIPS_GPR4();
   argregs[1] = hregNANOMIPS_GPR5();
   argregs[2] = hregNANOMIPS_GPR6();
   argregs[3] = hregNANOMIPS_GPR7();
   argregs[4] = hregNANOMIPS_GPR8();
   argregs[5] = hregNANOMIPS_GPR9();
   argregs[6] = hregNANOMIPS_GPR10();
   argregs[7] = hregNANOMIPS_GPR11();
   argiregs = 0;   /* bitmask of arg regs used: bit (n+4) means $an */
   tmpregs[0] = tmpregs[1] = tmpregs[2] =
   tmpregs[3] = tmpregs[4] = tmpregs[5] =
   tmpregs[6] = tmpregs[7] = INVALID_HREG;

   /* First decide which scheme (slow or fast) is to be used. First assume the
      fast scheme, and select slow if any contraindications (wow) appear. */
   go_fast = True;

   /* A guard that is not the constant True forces the slow scheme and a
      conditional call on |cond|. */
   if (guard) {
      vassert(typeOfIRExpr(env->type_env, guard) == Ity_I1);

      if (guard->tag != Iex_Const || !guard->Iex.Const.con->Ico.U1) {
         go_fast = False;
         cond = iselWordExpr_R(env, guard);
      }
   }

   if (go_fast) {
      for (i = 0; i < n_args; i++) {
         if (mightRequireFixedRegs(args[i])) {
            go_fast = False;
            break;
         }
      }
   }

   /* At this point the scheme to use has been established. Generate
      code to get the arg values into the argument rregs. */
   if (go_fast) {
      /* FAST SCHEME: evaluate each arg directly into its real argument
         register, in order.  Safe because none of the args can require a
         fixed register (checked above). */
      argreg = 0;

      for (i = 0; i < n_args; i++) {
         IRExpr* arg = args[i];
         IRType aTy = Ity_INVALID;
         vassert(argreg < NANOMIPS_N_REGPARMS);

         if (LIKELY(!is_IRExpr_VECRET_or_GSPTR(arg)))
            aTy = typeOfIRExpr(env->type_env, arg);

         switch (aTy) {
            case Ity_I1:
            case Ity_I8:
            case Ity_I16:
            case Ity_I32:
               argiregs |= (1 << (argreg + 4));
               addInstr(env, mk_iMOVds_RR(argregs[argreg],
                                          iselWordExpr_R(env, arg)));
               argreg++;
               break;

            case Ity_I64:
               /* 64-bit args occupy an even/odd register pair; skip one
                  register if needed to reach even alignment. */
               if (argreg & 1) {
                  argreg++;
                  argiregs |= (1 << (argreg + 4));
               }

               vassert(argreg + 1 < NANOMIPS_N_REGPARMS);

               HReg rHi, rLo;
               iselInt64Expr(&rHi, &rLo, env, arg);
               /* NOTE(review): the high half is placed in the
                  lower-numbered register here, but the slow scheme below
                  places the low half first (raLo, then raHi).  One of the
                  two orderings looks inconsistent -- confirm against the
                  P32 ABI rules for 64-bit arguments. */
               argiregs |= (1 << (argreg + 4));
               addInstr(env, mk_iMOVds_RR(argregs[argreg++], rHi));
               argiregs |= (1 << (argreg + 4));
               addInstr(env, mk_iMOVds_RR(argregs[argreg], rLo));
               argreg++;
               break;

            case Ity_INVALID:
            default:
               /* Only GSPTR args land here: pass the guest state pointer
                  itself as the argument. */
               vassert(arg->tag == Iex_GSPTR);
               addInstr(env, mk_iMOVds_RR(argregs[argreg], GuestStatePointer));
               argreg++;
               break;
         }
      }
   } else {
      /* SLOW SCHEME: evaluate all args into temporaries first, then move
         them into the real argument registers in a second pass, so that
         evaluating a later arg cannot clobber an earlier one already
         placed in a fixed register. */
      argreg = 0;

      for (i = 0; i < n_args; i++) {
         IRExpr* arg = args[i];
         IRType aTy = Ity_INVALID;
         vassert(argreg < NANOMIPS_N_REGPARMS);

         if (LIKELY(!is_IRExpr_VECRET_or_GSPTR(arg)))
            aTy = typeOfIRExpr(env->type_env, arg);

         switch (aTy) {
            case Ity_I1:
            case Ity_I8:
            case Ity_I16:
            case Ity_I32:
               tmpregs[argreg] = iselWordExpr_R(env, arg);
               argreg++;
               break;

            case Ity_I64: {
               HReg raHi, raLo;

               /* Align to an even register for the 64-bit pair. */
               if (argreg & 1) {
                  argreg++;
               }

               vassert(argreg + 1 < NANOMIPS_N_REGPARMS);

               iselInt64Expr(&raHi, &raLo, env, arg);
               tmpregs[argreg] = raLo;
               argreg++;
               tmpregs[argreg] = raHi;
               argreg++;
               break;
            }

            case Ity_INVALID:
            default:
               vassert(arg->tag == Iex_GSPTR);
               tmpregs[argreg] = GuestStatePointer;
               argreg++;
               break;
         }
      }

      /* Second pass: move the temporaries into the real arg registers.
         (Skipped slots from 64-bit alignment remain INVALID_HREG.) */
      for (i = 0; i < argreg; i++) {
         if (hregIsInvalid(tmpregs[i]))
            continue;

         /* None of these insns, including any spill code that might
            be generated, may alter the condition codes. */
         argiregs |= (1 << (i + 4));
         addInstr(env, mk_iMOVds_RR(argregs[i], tmpregs[i]));
      }
   }

   /* Describe where the caller of this helper will find the return
      value. */
   switch (retty) {
      case Ity_INVALID:
         *retloc = mk_RetLoc_simple(RLPri_None);
         break;

      case Ity_I64:
         *retloc = mk_RetLoc_simple(RLPri_2Int);
         break;

      case Ity_I32:
      case Ity_I16:
      case Ity_I8:
         *retloc = mk_RetLoc_simple(RLPri_Int);
         break;

      default:
         vassert(0);
   }

   addInstr(env, NANOMIPSInstr_Call((Addr)cee->addr, argiregs, cond, *retloc));
}
439 /*---------------------------------------------------------*/
440 /*--- ISEL: Integer expressions (64/32/16/8 bit) ---*/
441 /*---------------------------------------------------------*/
443 /* Select insns for an integer-typed expression, and add them to the
444 code list. Return a reg holding the result. This reg will be a
445 virtual register. THE RETURNED REG MUST NOT BE MODIFIED. If you
446 want to modify it, ask for a new vreg, copy it in there, and modify
447 the copy. The register allocator will do its best to map both
448 vregs to the same real register, so the copies will often disappear
449 later in the game.
451 This should handle expressions of 64, 32, 16 and 8-bit type.
452 All results are returned in a (mode64 ? 64bit : 32bit) register.
453 For 16- and 8-bit expressions, the upper (32/48/56 : 16/24) bits
454 are arbitrary, so you should mask or sign extend partial values
455 if necessary.
457 static HReg iselWordExpr_R(ISelEnv * env, IRExpr * e)
459 HReg r = iselWordExpr_R_wrk(env, e);
460 /* sanity checks ... */
461 vassert(hregClass(r) == HRcGPR);
462 vassert(hregIsVirtual(r));
463 return r;
static HReg iselWordExpr_R_wrk(ISelEnv * env, IRExpr * e)
{
   IRType ty = typeOfIRExpr(env->type_env, e);
   vassert(ty == Ity_I1 || ty == Ity_I8 || ty == Ity_I16 || ty == Ity_I32);

   switch (e->tag) {
      /* Value of an IRTemp: just use its assigned vreg. */
      case Iex_RdTmp:
         return lookupIRTemp(env, e->Iex.RdTmp.tmp);

      /* Load from memory at an address given by a subexpression. */
      case Iex_Load: {
         HReg r_dst = newVRegI(env);
         HReg r_addr = iselWordExpr_R(env, e->Iex.Load.addr);
         addInstr(env, NANOMIPSInstr_Load(sizeofIRType(ty), r_dst, r_addr, 0));
         return r_dst;
      }

      /* Read from the guest state: load at GuestStatePointer + offset. */
      case Iex_Get: {
         vassert(ty == Ity_I8 || ty == Ity_I16 || ty == Ity_I32);
         HReg r_dst = newVRegI(env);
         /* Load offsets are limited to 12 bits here. */
         vassert((e->Iex.Get.offset < 0x1000) && (e->Iex.Get.offset >= 0));
         addInstr(env, NANOMIPSInstr_Load(sizeofIRType(ty), r_dst,
                                          GuestStatePointer,
                                          e->Iex.Get.offset));
         return r_dst;
      }

      case Iex_Binop: {
         NANOMIPSAluOp aluOp;
         NANOMIPSCondCode ccOp;

         /* 1. Binops with a direct reg-reg ALU equivalent. */
         switch (e->Iex.Binop.op) {
            case Iop_Add8:
            case Iop_Add16:
            case Iop_Add32:
               aluOp = NMalu_ADD;
               break;

            case Iop_Sub8:
            case Iop_Sub16:
            case Iop_Sub32:
               aluOp = NMalu_SUB;
               break;

            case Iop_And1:
            case Iop_And8:
            case Iop_And16:
            case Iop_And32:
               aluOp = NMalu_AND;
               break;

            case Iop_Or1:
            case Iop_Or8:
            case Iop_Or16:
            case Iop_Or32:
               aluOp = NMalu_OR;
               break;

            case Iop_Xor8:
            case Iop_Xor16:
            case Iop_Xor32:
               aluOp = NMalu_XOR;
               break;

            case Iop_Shl32:
               aluOp = NMalu_SLL;
               break;

            case Iop_Shr32:
               aluOp = NMalu_SRL;
               break;

            case Iop_Sar32:
               aluOp = NMalu_SRA;
               break;

            case Iop_Mul32:
               aluOp = NMalu_MULU;
               break;

            case Iop_MullS8:
            case Iop_MullS16:
               /* The widened result fits in 32 bits, and the contract
                  above allows upper bits of narrow results to be
                  arbitrary, so a plain signed MUL suffices. */
               aluOp = NMalu_MUL;
               break;

            case Iop_DivS32:
               aluOp = NMalu_DIV;
               break;

            case Iop_DivU32:
               aluOp = NMalu_DIVU;
               break;

            default:
               aluOp = NMalu_INVALID;
               break;
         }

         if (aluOp != NMalu_INVALID) {
            HReg r_dst = newVRegI(env);
            HReg r_srcL = iselWordExpr_R(env, e->Iex.Binop.arg1);

            /* Optimization: If second argument is Const and
               ALU operation can be converted to IMM operation.
               (Relies on the first NANOMIPSImmOp values mirroring the
               corresponding NANOMIPSAluOp values: shifts take a 5-bit
               immediate, OR/XOR/AND a 12-bit one.  Negative constants
               come back sign-extended from extractConst and so fail the
               unsigned range checks, as intended.) */
            if ((aluOp <= NMalu_AND) &&
                (e->Iex.Binop.arg2->tag == Iex_Const)) {

               UInt val = extractConst(e->Iex.Binop.arg2->Iex.Const.con);

               if ((val < 0x20) ||
                   ((val < 0x1000) && (aluOp >= NMalu_OR))) {
                  NANOMIPSImmOp immOp = (NANOMIPSImmOp)aluOp;
                  addInstr(env, NANOMIPSInstr_Imm(immOp, r_dst, r_srcL,
                                                  val));
                  return r_dst;
               }
            }

            HReg r_srcR = iselWordExpr_R(env, e->Iex.Binop.arg2);
            addInstr(env, NANOMIPSInstr_Alu(aluOp, r_dst, r_srcL, r_srcR));
            return r_dst;
         }

         /* 2. Comparison binops, mapped to a condition code. */
         switch (e->Iex.Binop.op) {
            case Iop_CmpEQ32:
            case Iop_CasCmpEQ32:
               ccOp = NMcc_EQ;
               break;

            case Iop_CmpNE32:
               ccOp = NMcc_NE;
               break;

            case Iop_CmpLT32S:
               ccOp = NMcc_LTS;
               break;

            case Iop_CmpLT32U:
               ccOp = NMcc_LTU;
               break;

            case Iop_CmpLE32S:
               ccOp = NMcc_LES;
               break;

            case Iop_CmpLE32U:
               ccOp = NMcc_LEU;
               break;

            default:
               ccOp = NMcc_INVALID;
               break;
         }

         if (ccOp != NMcc_INVALID) {
            HReg dst = newVRegI(env);
            HReg r1 = iselWordExpr_R(env, e->Iex.Binop.arg1);
            HReg r2 = iselWordExpr_R(env, e->Iex.Binop.arg2);
            addInstr(env, NANOMIPSInstr_Cmp(ccOp, dst, r1, r2));
            return dst;
         }

         /* 3. Remaining binops needing multi-insn sequences. */
         switch (e->Iex.Binop.op) {
            /* Unsigned widening multiply: zero the upper bits of both
               operands first, then multiply. */
            case Iop_MullU8: {
               HReg r_dst = newVRegI(env);
               HReg r_tmp = newVRegI(env);
               HReg r_srcL = iselWordExpr_R(env, e->Iex.Binop.arg1);
               HReg r_srcR = iselWordExpr_R(env, e->Iex.Binop.arg2);
               addInstr(env, NANOMIPSInstr_Imm(NMimm_ANDI, r_dst, r_srcL, 0xFF));
               addInstr(env, NANOMIPSInstr_Imm(NMimm_ANDI, r_tmp, r_srcR, 0xFF));
               addInstr(env, NANOMIPSInstr_Alu(NMalu_MULU, r_dst, r_dst, r_tmp));
               return r_dst;
            }

            case Iop_MullU16: {
               HReg r_dst = newVRegI(env);
               HReg r_tmp = newVRegI(env);
               HReg r_mask = newVRegI(env);
               HReg r_srcL = iselWordExpr_R(env, e->Iex.Binop.arg1);
               HReg r_srcR = iselWordExpr_R(env, e->Iex.Binop.arg2);
               /* 0xFFFF does not fit ANDI's immediate field, so build
                  the mask in a register. */
               addInstr(env, NANOMIPSInstr_Imm(NMimm_LI, r_mask, INVALID_HREG,
                                               0xFFFF));
               addInstr(env, NANOMIPSInstr_Alu(NMalu_AND, r_dst, r_srcL, r_mask));
               addInstr(env, NANOMIPSInstr_Alu(NMalu_AND, r_tmp, r_srcR, r_mask));
               addInstr(env, NANOMIPSInstr_Alu(NMalu_MULU, r_dst, r_dst, r_tmp));
               return r_dst;
            }

            /* Concatenate halves: dst = (argL << width) | zx(argR). */
            case Iop_8HLto16:
            case Iop_16HLto32: {
               HReg r_dst = newVRegI(env);
               HReg r_tmp = newVRegI(env);
               HReg r_srcL = iselWordExpr_R(env, e->Iex.Binop.arg1);
               HReg r_srcR = iselWordExpr_R(env, e->Iex.Binop.arg2);

               switch (e->Iex.Binop.op) {
                  case Iop_8HLto16:
                     addInstr(env, NANOMIPSInstr_Imm(NMimm_SLL, r_tmp, r_srcL, 8));
                     addInstr(env, NANOMIPSInstr_Imm(NMimm_ANDI, r_dst, r_srcR,
                                                     0xFF));
                     break;

                  case Iop_16HLto32: {
                     HReg r_mask = newVRegI(env);
                     addInstr(env, NANOMIPSInstr_Imm(NMimm_LI, r_mask,
                                                     INVALID_HREG, 0xFFFF));
                     addInstr(env, NANOMIPSInstr_Imm(NMimm_SLL, r_tmp,
                                                     r_srcL, 16));
                     addInstr(env, NANOMIPSInstr_Alu(NMalu_AND, r_dst, r_srcR,
                                                     r_mask));
                  }
                  break;

                  default:
                     vassert(0);
               }

               addInstr(env, NANOMIPSInstr_Alu(NMalu_OR, r_dst, r_dst, r_tmp));
               return r_dst;
            }

            default:
               break;
         }

         vex_printf("Unimplemented binop ");
         ppIROp(e->Iex.Binop.op);
         vpanic("\n");

         break;
      }

      case Iex_Unop: {
         IROp op_unop = e->Iex.Unop.op;

         switch (op_unop) {
            /* Sign-extend bit 0 across the whole register. */
            case Iop_1Sto8:
            case Iop_1Sto16:
            case Iop_1Sto32: {
               HReg r_dst = newVRegI(env);
               HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg);
               addInstr(env, NANOMIPSInstr_Imm(NMimm_SGN, r_dst, r_src, 1));
               return r_dst;
            }

            /* Narrowing: keep the low 8 bits, sign-extended (upper bits
               of narrow values are allowed to be arbitrary). */
            case Iop_16to8:
            case Iop_32to8: {
               HReg r_dst = newVRegI(env);
               HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg);
               addInstr(env, NANOMIPSInstr_Imm(NMimm_SGN, r_dst, r_src, 8));
               return r_dst;
            }

            case Iop_32to16: {
               HReg r_dst = newVRegI(env);
               HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg);
               addInstr(env, NANOMIPSInstr_Imm(NMimm_SGN, r_dst, r_src, 16));
               return r_dst;
            }

            /* No-ops: the source register already holds a correctly
               extended value under the narrow-value convention. */
            case Iop_1Uto8:
            case Iop_1Uto32:
            case Iop_8Sto16:
            case Iop_8Sto32:
            case Iop_16Sto32:
               return iselWordExpr_R(env, e->Iex.Unop.arg);

            case Iop_64to32: {
               HReg rHi, rLo;
               iselInt64Expr(&rHi, &rLo, env, e->Iex.Unop.arg);
               return rLo;
            }

            case Iop_64HIto32: {
               HReg rHi, rLo;
               iselInt64Expr(&rHi, &rLo, env, e->Iex.Unop.arg);
               return rHi;
            }

            case Iop_32to1: {
               HReg r_dst = newVRegI(env);
               HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg);
               addInstr(env, NANOMIPSInstr_Imm(NMimm_ANDI, r_dst, r_src, 1));
               return r_dst;
            }

            case Iop_8Uto16:
            case Iop_8Uto32: {
               HReg r_dst = newVRegI(env);
               HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg);
               addInstr(env, NANOMIPSInstr_Imm(NMimm_ANDI, r_dst, r_src,
                                               0xFF));
               return r_dst;
            }

            /* 0xFFFF does not fit ANDI's immediate, so materialize it
               and AND in a register. */
            case Iop_16Uto32: {
               HReg r_dst = newVRegI(env);
               HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg);
               addInstr(env, NANOMIPSInstr_Imm(NMimm_LI, r_dst, INVALID_HREG,
                                               0xFFFF));
               addInstr(env, NANOMIPSInstr_Alu(NMalu_AND, r_dst, r_dst, r_src));
               return r_dst;
            }

            /* Logical not of a 1-bit value: flip bit 0. */
            case Iop_Not1: {
               HReg r_dst = newVRegI(env);
               HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg);
               addInstr(env, NANOMIPSInstr_Imm(NMimm_XORI, r_dst, r_src, 1));
               return r_dst;
            }

            /* Bitwise not via NOR r, r. */
            case Iop_Not8:
            case Iop_Not16:
            case Iop_Not32: {
               HReg r_dst = newVRegI(env);
               HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg);
               addInstr(env, NANOMIPSInstr_Alu(NMalu_NOR, r_dst, r_src, r_src));
               return r_dst;
            }

            /* Extract the high half with an arithmetic shift (upper bits
               arbitrary, so sign-fill is acceptable). */
            case Iop_32HIto16: {
               HReg r_dst = newVRegI(env);
               HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg);
               addInstr(env, NANOMIPSInstr_Imm(NMimm_SRA, r_dst, r_src, 16));
               return r_dst;
            }

            case Iop_16HIto8: {
               HReg r_dst = newVRegI(env);
               HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg);
               addInstr(env, NANOMIPSInstr_Imm(NMimm_SRA, r_dst, r_src, 8));
               return r_dst;
            }

            /* dst = (src != 0) -- compares the whole register against
               $zero, which is fine under the narrow-value convention. */
            case Iop_CmpNEZ8:
            case Iop_CmpNEZ16:
            case Iop_CmpNEZ32: {
               HReg r_dst = newVRegI(env);
               HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg);
               addInstr(env, NANOMIPSInstr_Cmp(NMcc_NE, r_dst, r_src,
                                               Zero));
               return r_dst;
            }

            /* dst = (src != 0) ? 0xFFFFFFFF : 0. */
            case Iop_CmpwNEZ32: {
               HReg r_dst = newVRegI(env);
               HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg);
               addInstr(env, NANOMIPSInstr_Cmp(NMcc_NE, r_dst, r_src,
                                               Zero));
               addInstr(env, NANOMIPSInstr_Imm(NMimm_SGN, r_dst, r_dst, 1));
               return r_dst;
            }

            case Iop_Clz32: {
               HReg r_dst = newVRegI(env);
               HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg);
               addInstr(env, NANOMIPSInstr_Unary(NMun_CLZ, r_dst, r_src));
               return r_dst;
            }

            /* Left(x) = x | -x. */
            case Iop_Left8:
            case Iop_Left16:
            case Iop_Left32: {
               HReg r_dst = newVRegI(env);
               HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg);
               addInstr(env, NANOMIPSInstr_Alu(NMalu_SUB, r_dst, Zero, r_src));
               addInstr(env, NANOMIPSInstr_Alu(NMalu_OR, r_dst, r_dst,
                                               r_src));
               return r_dst;
            }

            default:
               break;
         }

         vex_printf("Unimplemented unop ");
         ppIROp(e->Iex.Unop.op);
         vpanic("\n");
      }

      /* Quaternary op: only Iop_Rotx32, whose last three args must be
         constant U8s (shift / shiftx / stripe). */
      case Iex_Qop: {
         HReg dst = newVRegI(env);
         HReg src1 = iselWordExpr_R(env, e->Iex.Qop.details->arg1);
         UChar src2 = e->Iex.Qop.details->arg2->Iex.Const.con->Ico.U8;
         UChar src3 = e->Iex.Qop.details->arg3->Iex.Const.con->Ico.U8;
         UChar src4 = e->Iex.Qop.details->arg4->Iex.Const.con->Ico.U8;
         /* NOTE(review): src3 and src4 are both shifted by 6 and so
            overlap in |imm|; one of them presumably belongs at a
            different bit position -- confirm against the NMimm_ROTX
            field layout in host_nanomips_defs.c and the nanoMIPS ROTX
            encoding. */
         UInt imm = (src3 << 6) | (src4 << 6) | src2;
         switch (e->Iex.Qop.details->op) {
            case Iop_Rotx32:
               addInstr(env, NANOMIPSInstr_Imm(NMimm_ROTX, dst, src1, imm));
               return dst;
            default:
               break;
         }
         break;
      }

      /* ITE: start with the 'iffalse' value, then conditionally
         overwrite with 'iftrue' via MOVN on the condition. */
      case Iex_ITE: {
         vassert(typeOfIRExpr(env->type_env, e->Iex.ITE.cond) == Ity_I1);
         HReg r0 = iselWordExpr_R(env, e->Iex.ITE.iffalse);
         HReg r1 = iselWordExpr_R(env, e->Iex.ITE.iftrue);
         HReg r_cond = iselWordExpr_R(env, e->Iex.ITE.cond);
         HReg r_dst = newVRegI(env);
         addInstr(env, mk_iMOVds_RR(r_dst, r0));
         addInstr(env, NANOMIPSInstr_MoveCond(NMMoveCond_movn, r_dst,
                                              r1, r_cond));
         return r_dst;
      }

      /* Materialize a constant with LI. */
      case Iex_Const: {
         HReg r_dst = newVRegI(env);
         addInstr(env, NANOMIPSInstr_Imm(NMimm_LI, r_dst, INVALID_HREG,
                                         extractConst(e->Iex.Const.con)));
         return r_dst;
      }

      /* Call to a pure helper; result comes back in $a0. */
      case Iex_CCall: {
         HReg r_dst = newVRegI(env);
         UInt addToSp = 0;
         RetLoc rloc = mk_RetLoc_INVALID();

         /* Be very restrictive for now. Only 32-bit ints allowed for
            args, and 32 bits for return type. Don't forget to change
            the RetLoc if more return types are allowed in future. */
         vassert(Ity_I32 == e->Iex.CCall.retty);

         /* Marshal args, do the call, clear stack. */
         doHelperCall(&rloc, env, NULL /*guard*/, e->Iex.CCall.cee,
                      e->Iex.CCall.retty, e->Iex.CCall.args);
         vassert(is_sane_RetLoc(rloc));
         vassert(rloc.pri == RLPri_Int);
         vassert(addToSp == 0);
         addInstr(env, mk_iMOVds_RR(r_dst, hregNANOMIPS_GPR4()));
         return r_dst;
      }

      default:
         break;
   }

   ppIRExpr(e);
   vpanic("iselWordExpr_R(NANOMIPS): cannot reduce tree");
}
909 /*---------------------------------------------------------*/
910 /*--- ISEL: Integer expressions (64 bit) ---*/
911 /*---------------------------------------------------------*/
913 /* Compute a 64-bit value into the register pair HI, LO.
914 HI and LO must not be changed by subsequent code emitted
915 by the caller. */
916 static void iselInt64Expr(HReg * rHi, HReg * rLo, ISelEnv * env, IRExpr * e)
918 iselInt64Expr_wrk(rHi, rLo, env, e);
919 vassert(hregClass(*rHi) == HRcInt32);
920 vassert(hregIsVirtual(*rHi));
921 vassert(hregClass(*rLo) == HRcInt32);
922 vassert(hregIsVirtual(*rLo));
static void iselInt64Expr_wrk(HReg * rHi, HReg * rLo, ISelEnv * env,
                              IRExpr * e)
{
   vassert(e);
   vassert(typeOfIRExpr(env->type_env, e) == Ity_I64);

   switch (e->tag) {
      case Iex_RdTmp:
         lookupIRTemp64(rHi, rLo, env, e->Iex.RdTmp.tmp);
         return;

      /* 64-bit load as two 32-bit loads: low word at offset 0, high word
         at offset 4 (assumes a little-endian layout -- TODO confirm for
         big-endian builds). */
      case Iex_Load: {
         HReg tLo = newVRegI(env);
         HReg tHi = newVRegI(env);
         HReg r_addr = iselWordExpr_R(env, e->Iex.Load.addr);
         addInstr(env, NANOMIPSInstr_Load(4, tLo, r_addr, 0));
         addInstr(env, NANOMIPSInstr_Load(4, tHi, r_addr, 4));
         *rHi = tHi;
         *rLo = tLo;
         return;
      }

      /* 64-bit guest-state read: same two-load scheme at
         GuestStatePointer + offset.  Offset must leave room for the
         second, +4 load within the 12-bit range. */
      case Iex_Get: {
         HReg tLo = newVRegI(env);
         HReg tHi = newVRegI(env);
         vassert((e->Iex.Get.offset < 0x1000 - 4) && (e->Iex.Get.offset >= 0));
         addInstr(env, NANOMIPSInstr_Load(4, tLo, GuestStatePointer,
                                          e->Iex.Get.offset));
         addInstr(env, NANOMIPSInstr_Load(4, tHi, GuestStatePointer,
                                          e->Iex.Get.offset + 4));
         *rHi = tHi;
         *rLo = tLo;
         return;
      }

      case Iex_Binop: {
         switch (e->Iex.Binop.op) {
            /* DivMod: quotient in the low half, remainder in the high
               half, computed with separate DIV/MOD insns. */
            case Iop_DivModS32to32: {
               HReg r_srcL = iselWordExpr_R(env, e->Iex.Binop.arg1);
               HReg r_srcR = iselWordExpr_R(env, e->Iex.Binop.arg2);
               HReg tLo = newVRegI(env);
               HReg tHi = newVRegI(env);
               addInstr(env, NANOMIPSInstr_Alu(NMalu_DIV, tLo, r_srcL, r_srcR));
               addInstr(env, NANOMIPSInstr_Alu(NMalu_MOD, tHi, r_srcL, r_srcR));
               *rHi = tHi;
               *rLo = tLo;
               return;
            }

            case Iop_DivModU32to32: {
               HReg r_srcL = iselWordExpr_R(env, e->Iex.Binop.arg1);
               HReg r_srcR = iselWordExpr_R(env, e->Iex.Binop.arg2);
               HReg tLo = newVRegI(env);
               HReg tHi = newVRegI(env);
               addInstr(env, NANOMIPSInstr_Alu(NMalu_DIVU, tLo, r_srcL, r_srcR));
               addInstr(env, NANOMIPSInstr_Alu(NMalu_MODU, tHi, r_srcL, r_srcR));
               *rHi = tHi;
               *rLo = tLo;
               return;
            }

            /* Widening multiply: MUL gives the low word, MUH(U) the high
               word. */
            case Iop_MullS32: {
               HReg r_srcL = iselWordExpr_R(env, e->Iex.Binop.arg1);
               HReg r_srcR = iselWordExpr_R(env, e->Iex.Binop.arg2);
               HReg tLo = newVRegI(env);
               HReg tHi = newVRegI(env);
               addInstr(env, NANOMIPSInstr_Alu(NMalu_MUL, tLo, r_srcL, r_srcR));
               addInstr(env, NANOMIPSInstr_Alu(NMalu_MUH, tHi, r_srcL, r_srcR));
               *rHi = tHi;
               *rLo = tLo;
               return;
            }

            case Iop_MullU32: {
               HReg r_srcL = iselWordExpr_R(env, e->Iex.Binop.arg1);
               HReg r_srcR = iselWordExpr_R(env, e->Iex.Binop.arg2);
               HReg tLo = newVRegI(env);
               HReg tHi = newVRegI(env);
               addInstr(env, NANOMIPSInstr_Alu(NMalu_MULU, tLo, r_srcL, r_srcR));
               addInstr(env, NANOMIPSInstr_Alu(NMalu_MUHU, tHi, r_srcL, r_srcR));
               *rHi = tHi;
               *rLo = tLo;
               return;
            }

            /* 64-bit logical shift right by a variable amount, composed
               from 32-bit shifts; the final MOVNs select between the
               shift-by-<32 and shift-by->=32 results. */
            case Iop_Shr64: {
#if defined (_MIPSEL)
               HReg a0, a1, sa;
               HReg a0tmp = newVRegI(env);
               HReg a1tmp = newVRegI(env);
               HReg a2 = newVRegI(env);
               HReg a3 = newVRegI(env);
               HReg a4 = newVRegI(env);

               iselInt64Expr(&a1, &a0, env, e->Iex.Binop.arg1);
               sa = iselWordExpr_R(env, e->Iex.Binop.arg2);

               /* andi a2, %sa, 0x3f */
               addInstr(env, NANOMIPSInstr_Imm(NMimm_ANDI, a2, sa, 0x3f));
               /* nor a4, zero, a2 */
               addInstr(env, NANOMIPSInstr_Alu(NMalu_NOR, a4, Zero, a2));
               /* sll a3, a1, 1 */
               addInstr(env, NANOMIPSInstr_Imm(NMimm_SLL, a3, a1, 0x1));
               /* sllv a3, a3, a4 */
               addInstr(env, NANOMIPSInstr_Alu(NMalu_SLL, a3, a3, a4));
               /* srlv a0, a0, a2 */
               addInstr(env, NANOMIPSInstr_Alu(NMalu_SRL, a0tmp, a0, a2));
               /* andi a4, a2, 0x20 */
               addInstr(env, NANOMIPSInstr_Imm(NMimm_ANDI, a4, a2, 0x20));
               /* srlv a2, a1, a2 */
               addInstr(env, NANOMIPSInstr_Alu(NMalu_SRL, a2, a1, a2));
               /* or a0, a0, a3 */
               addInstr(env, NANOMIPSInstr_Alu(NMalu_OR, a0tmp, a0tmp, a3));
               /* move a1, a2 */
               addInstr(env, mk_iMOVds_RR(a1tmp, a2));
               /* movn a1, zero, a4 */
               addInstr(env, NANOMIPSInstr_MoveCond(NMMoveCond_movn, a1tmp,
                                                    Zero, a4));
               /* movn a0, a2, a4 */
               addInstr(env, NANOMIPSInstr_MoveCond(NMMoveCond_movn, a0tmp,
                                                    a2, a4));

               *rHi = a1tmp;
               *rLo = a0tmp;
               return;
#elif defined (_MIPSEB)
               /* 64-bit logical shift right based on what gcc generates:
                  <shift>:
                  nor  v0, zero, a2
                  sll  a3, a0, 0x1
                  sllv a3, a3, v0
                  srlv v1, a1, a2
                  andi v0, a2, 0x20
                  or   v1, a3, v1
                  srlv a2, a0, a2
                  movn v1, a2, v0
                  movn a2, zero, v0
                  jr   ra
                  move v0, a2 */
               /* unimplemented yet */
               vassert(0);
#endif
            }

            /* Pair construction: arg1 is the high word, arg2 the low. */
            case Iop_32HLto64:
               *rHi = iselWordExpr_R(env, e->Iex.Binop.arg1);
               *rLo = iselWordExpr_R(env, e->Iex.Binop.arg2);

               return;

            /* 64-bit OR, done half by half. */
            case Iop_Or64: {
               HReg rHi_srcL, rLo_srcL;
               HReg rHi_srcR, rLo_srcR;
               HReg rHi_dst = newVRegI(env);
               HReg rLo_dst = newVRegI(env);
               iselInt64Expr(&rHi_srcL, &rLo_srcL, env, e->Iex.Binop.arg1);
               iselInt64Expr(&rHi_srcR, &rLo_srcR, env, e->Iex.Binop.arg2);
               addInstr(env, NANOMIPSInstr_Alu(NMalu_OR, rHi_dst, rHi_srcL,
                                               rHi_srcR));
               addInstr(env, NANOMIPSInstr_Alu(NMalu_OR, rLo_dst, rLo_srcL,
                                               rLo_srcR));
               *rHi = rHi_dst;
               *rLo = rLo_dst;

               return;
            }

            default:
               break;
         }

         vex_printf("Unimplemented binop ");
         ppIROp(e->Iex.Binop.op);
         vpanic("\n");

         break;
      }

      case Iex_Unop: {
         switch (e->Iex.Unop.op) {
            /* Sign-extend bit 0 into both words. */
            case Iop_1Sto64: {
               HReg rHi_dst = newVRegI(env);
               HReg rLo_dst = newVRegI(env);
               HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg);
               addInstr(env, NANOMIPSInstr_Imm(NMimm_SGN, rLo_dst, r_src, 1));
               addInstr(env, mk_iMOVds_RR(rHi_dst, rLo_dst));
               *rHi = rHi_dst;
               *rLo = rLo_dst;
               return;
            }

            default:
               break;
         }

         vex_printf("Unimplemented unop ");
         ppIROp(e->Iex.Unop.op);
         vpanic("\n");

         break;
      }

      default:
         break;
   }

   ppIRExpr(e);
   vpanic("iselInt64Expr(NANOMIPS): cannot reduce tree");
}
1137 /*---------------------------------------------------------*/
1138 /*--- ISEL: Statements ---*/
1139 /*---------------------------------------------------------*/
1140 static void iselStmt(ISelEnv * env, IRStmt * stmt)
1142 if (vex_traceflags & VEX_TRACE_VCODE) {
1143 vex_printf("\n-- ");
1144 ppIRStmt(stmt);
1145 vex_printf("\n");
1148 switch (stmt->tag) {
1149 case Ist_Store: {
1150 IRType tyd = typeOfIRExpr(env->type_env, stmt->Ist.Store.data);
1151 HReg r_addr = iselWordExpr_R(env, stmt->Ist.Store.addr);
1153 if (tyd == Ity_I8 || tyd == Ity_I16 || tyd == Ity_I32) {
1154 HReg r_src = iselWordExpr_R(env, stmt->Ist.Store.data);
1155 addInstr(env, NANOMIPSInstr_Store(sizeofIRType(tyd),
1156 r_addr, 0, r_src));
1157 return;
1158 } else if (tyd == Ity_I64) {
1159 HReg vHi, vLo;
1160 iselInt64Expr(&vHi, &vLo, env, stmt->Ist.Store.data);
1161 addInstr(env, NANOMIPSInstr_Store(4, r_addr, 0, vLo));
1162 addInstr(env, NANOMIPSInstr_Store(4, r_addr, 4, vHi));
1163 return;
1166 break;
1169 case Ist_Put: {
1170 IRType ty = typeOfIRExpr(env->type_env, stmt->Ist.Put.data);
1171 vassert(stmt->Ist.Put.offset >= 0);
1173 if (ty == Ity_I8 || ty == Ity_I16 || ty == Ity_I32) {
1174 HReg r_src = iselWordExpr_R(env, stmt->Ist.Put.data);
1175 vassert(stmt->Ist.Put.offset < 0x1000);
1176 addInstr(env, NANOMIPSInstr_Store(sizeofIRType(ty),
1177 GuestStatePointer,
1178 stmt->Ist.Put.offset, r_src));
1179 return;
1180 } else if (ty == Ity_I64) {
1181 HReg vHi, vLo;
1182 vassert(stmt->Ist.Put.offset < 0x1000 - 4);
1183 iselInt64Expr(&vHi, &vLo, env, stmt->Ist.Put.data);
1184 addInstr(env, NANOMIPSInstr_Store(4, GuestStatePointer,
1185 stmt->Ist.Put.offset, vLo));
1186 addInstr(env, NANOMIPSInstr_Store(4, GuestStatePointer,
1187 stmt->Ist.Put.offset + 4,
1188 vHi));
1189 return;
1192 break;
1195 case Ist_WrTmp: {
1196 IRTemp tmp = stmt->Ist.WrTmp.tmp;
1197 IRType ty = typeOfIRTemp(env->type_env, tmp);
1199 if (ty == Ity_I1 || ty == Ity_I8 || ty == Ity_I16 ||
1200 ty == Ity_I32) {
1201 HReg r_dst = lookupIRTemp(env, tmp);
1202 HReg r_src = iselWordExpr_R(env, stmt->Ist.WrTmp.data);
1203 addInstr(env, mk_iMOVds_RR(r_dst, r_src));
1204 return;
1205 } else if (ty == Ity_I64) {
1206 HReg rHi, rLo, dstHi, dstLo;
1207 iselInt64Expr(&rHi, &rLo, env, stmt->Ist.WrTmp.data);
1208 lookupIRTemp64(&dstHi, &dstLo, env, tmp);
1209 addInstr(env, mk_iMOVds_RR(dstHi, rHi));
1210 addInstr(env, mk_iMOVds_RR(dstLo, rLo));
1211 return;
1214 break;
1217 case Ist_Dirty: {
1218 IRDirty *d = stmt->Ist.Dirty.details;
1219 IRType retty = Ity_INVALID;
1221 if (d->tmp != IRTemp_INVALID)
1222 retty = typeOfIRTemp(env->type_env, d->tmp);
1224 vassert((retty == Ity_INVALID) ||
1225 (retty == Ity_I32) ||
1226 (retty == Ity_I64) ||
1227 (retty == Ity_I8) ||
1228 (retty == Ity_I16));
1230 /* Marshal args, do the call, clear stack, set the return value
1231 to 0x555..555 if this is a conditional call that returns a
1232 value and the call is skipped. */
1233 RetLoc rloc = mk_RetLoc_INVALID();
1234 doHelperCall(&rloc, env, d->guard, d->cee, retty, d->args);
1235 vassert(is_sane_RetLoc(rloc));
1237 /* Now figure out what to do with the returned value, if any. */
1238 switch (retty) {
1239 case Ity_INVALID: {
1240 vassert(d->tmp == IRTemp_INVALID);
1241 vassert(rloc.pri == RLPri_None);
1242 return;
1245 case Ity_I32:
1246 case Ity_I16:
1247 case Ity_I8: {
1248 HReg r_dst = lookupIRTemp(env, d->tmp);
1249 vassert(rloc.pri == RLPri_Int);
1250 addInstr(env, mk_iMOVds_RR(r_dst, hregNANOMIPS_GPR4()));
1251 return;
1254 case Ity_I64: {
1255 HReg rHi = newVRegI(env);
1256 HReg rLo = newVRegI(env);
1257 HReg dstHi, dstLo;
1258 vassert(rloc.pri == RLPri_2Int);
1259 addInstr(env, mk_iMOVds_RR(rLo, hregNANOMIPS_GPR4()));
1260 addInstr(env, mk_iMOVds_RR(rHi, hregNANOMIPS_GPR5()));
1261 lookupIRTemp64(&dstHi, &dstLo, env, d->tmp);
1262 addInstr(env, mk_iMOVds_RR(dstHi, rHi));
1263 addInstr(env, mk_iMOVds_RR(dstLo, rLo));
1264 return;
1267 default:
1268 vassert(0);
1271 break;
1274 case Ist_LLSC: {
1275 IRTemp res = stmt->Ist.LLSC.result;
1276 IRType tyAddr = typeOfIRExpr(env->type_env, stmt->Ist.LLSC.addr);
1278 if (tyAddr != Ity_I32)
1279 break;
1281 if (stmt->Ist.LLSC.storedata == NULL) {
1282 /* LL */
1283 HReg r_addr = iselWordExpr_R(env, stmt->Ist.LLSC.addr);
1284 HReg r_dst = lookupIRTemp(env, res);
1286 addInstr(env, NANOMIPSInstr_LoadL(4, r_dst, r_addr, 0));
1287 return;
1288 } else {
1289 /* SC */
1290 HReg r_addr = iselWordExpr_R(env, stmt->Ist.LLSC.addr);
1291 HReg r_src = iselWordExpr_R(env, stmt->Ist.LLSC.storedata);
1292 HReg r_dst = lookupIRTemp(env, res);
1294 addInstr(env, mk_iMOVds_RR(r_dst, r_src));
1295 addInstr(env, NANOMIPSInstr_StoreC(4, r_addr, 0, r_dst));
1296 return;
1298 break;
1299 /* NOTREACHED */}
1300 case Ist_CAS:
1301 if (stmt->Ist.CAS.details->oldHi == IRTemp_INVALID) {
1302 IRCAS *cas = stmt->Ist.CAS.details;
1303 HReg old = lookupIRTemp(env, cas->oldLo);
1304 HReg addr = iselWordExpr_R(env, cas->addr);
1305 HReg expd = iselWordExpr_R(env, cas->expdLo);
1306 HReg data = iselWordExpr_R(env, cas->dataLo);
1307 vassert(typeOfIRTemp(env->type_env, cas->oldLo) == Ity_I32);
1308 addInstr(env, NANOMIPSInstr_Cas(4, old, old, addr, expd, expd, data, data));
1310 else {
1311 IRCAS *cas = stmt->Ist.CAS.details;
1312 HReg oldHi = lookupIRTemp(env, cas->oldHi);
1313 HReg oldLo = lookupIRTemp(env, cas->oldLo);
1314 HReg addr = iselWordExpr_R(env, cas->addr);
1315 HReg expdHi = iselWordExpr_R(env, cas->expdHi);
1316 HReg expdLo = iselWordExpr_R(env, cas->expdLo);
1317 HReg dataHi = iselWordExpr_R(env, cas->dataHi);
1318 HReg dataLo = iselWordExpr_R(env, cas->dataLo);
1319 vassert(typeOfIRTemp(env->type_env, cas->oldLo) == Ity_I32);
1320 addInstr(env, NANOMIPSInstr_Cas(8, oldLo, oldHi, addr,
1321 expdLo, expdHi, dataLo, dataHi));
1323 return;
1325 case Ist_IMark:
1326 case Ist_AbiHint:
1327 case Ist_NoOp:
1328 return;
1330 case Ist_Exit: {
1331 Addr dst = extractConst(stmt->Ist.Exit.dst);
1332 HReg cond = iselWordExpr_R(env, stmt->Ist.Exit.guard);
1334 switch (stmt->Ist.Exit.jk) {
1335 /* case Ijk_Ret: */
1336 case Ijk_Boring:
1337 case Ijk_Call: {
1338 vassert(stmt->Ist.Exit.offsIP >= 0);
1339 vassert(stmt->Ist.Exit.offsIP <= 0x1000);
1341 if (env->chainingAllowed) {
1342 Bool toFastEP = (dst > (Addr)env->max_ga);
1343 addInstr(env, NANOMIPSInstr_XDirect(dst, GuestStatePointer,
1344 stmt->Ist.Exit.offsIP,
1345 cond, toFastEP));
1346 } else {
1347 HReg r = newVRegI(env);
1348 addInstr(env, NANOMIPSInstr_Imm(NMimm_LI, r, INVALID_HREG,
1349 dst));
1350 addInstr(env, NANOMIPSInstr_XAssisted(r, GuestStatePointer,
1351 stmt->Ist.Exit.offsIP,
1352 cond, Ijk_Boring));
1355 return;
1358 case Ijk_ClientReq:
1359 case Ijk_EmFail:
1360 case Ijk_EmWarn:
1361 case Ijk_NoDecode:
1362 case Ijk_NoRedir:
1363 case Ijk_SigBUS:
1364 case Ijk_Yield:
1365 case Ijk_SigTRAP:
1366 case Ijk_SigFPE_IntDiv:
1367 case Ijk_SigFPE_IntOvf:
1368 case Ijk_Sys_syscall:
1369 case Ijk_InvalICache: {
1370 HReg r = newVRegI(env);
1371 addInstr(env, NANOMIPSInstr_Imm(NMimm_LI, r, INVALID_HREG,
1372 dst));
1373 vassert(stmt->Ist.Exit.offsIP >= 0);
1374 vassert(stmt->Ist.Exit.offsIP <= 0x1000);
1375 addInstr(env, NANOMIPSInstr_XAssisted(r, GuestStatePointer,
1376 stmt->Ist.Exit.offsIP,
1377 cond, stmt->Ist.Exit.jk));
1378 return;
1381 default:
1382 vassert(0);
1385 break;
1388 default:
1389 break;
1392 vex_printf("stmt_fail tag: 0x%x\n", stmt->tag);
1393 ppIRStmt(stmt);
1394 vpanic("iselStmt:\n");
/*---------------------------------------------------------*/
/*--- ISEL: Basic block terminators (Nexts)             ---*/
/*---------------------------------------------------------*/
1401 static void iselNext(ISelEnv * env,
1402 IRExpr * next, IRJumpKind jk, Int offsIP)
1404 if (vex_traceflags & VEX_TRACE_VCODE) {
1405 vex_printf( "\n-- PUT(%d) = ", offsIP);
1406 ppIRExpr( next );
1407 vex_printf( "; exit-");
1408 ppIRJumpKind(jk);
1409 vex_printf( "\n");
1412 /* Case: boring transfer to known address */
1413 if (next->tag == Iex_Const) {
1414 IRConst* cdst = next->Iex.Const.con;
1415 vassert(cdst->tag == Ico_U32);
1417 if (jk == Ijk_Boring || jk == Ijk_Call) {
1418 vassert(offsIP >= 0);
1419 vassert(offsIP < 0x1000);
1421 /* Boring transfer to known address */
1422 if (env->chainingAllowed) {
1423 /* .. almost always true .. */
1424 /* Skip the event check at the dst if this is a forwards
1425 edge. */
1426 Bool toFastEP
1427 = (((Addr32)cdst->Ico.U32) > (Addr32)env->max_ga);
1428 addInstr(env, NANOMIPSInstr_XDirect((Addr)cdst->Ico.U32,
1429 GuestStatePointer, offsIP,
1430 INVALID_HREG, toFastEP));
1431 } else {
1432 /* .. very occasionally .. */
1433 /* We can't use chaining, so ask for an assisted transfer,
1434 as that's the only alternative that is allowable. */
1435 HReg r = iselWordExpr_R(env, next);
1436 addInstr(env, NANOMIPSInstr_XAssisted(r, GuestStatePointer, offsIP,
1437 INVALID_HREG, Ijk_Boring));
1440 return;
1444 /* Case: call/return (==boring) transfer to any address */
1445 switch (jk) {
1446 case Ijk_Boring:
1447 case Ijk_Ret:
1448 case Ijk_Call: {
1449 HReg r = iselWordExpr_R(env, next);
1450 vassert(offsIP >= 0);
1451 vassert(offsIP < 0x1000);
1453 if (env->chainingAllowed) {
1454 addInstr(env, NANOMIPSInstr_XIndir(r, GuestStatePointer, offsIP,
1455 INVALID_HREG));
1456 } else {
1457 addInstr(env, NANOMIPSInstr_XAssisted(r, GuestStatePointer, offsIP,
1458 INVALID_HREG, Ijk_Boring));
1461 return;
1464 default:
1465 break;
1468 /* Case: assisted transfer to arbitrary address */
1469 switch (jk) {
1470 /* Keep this list in sync with that for Ist_Exit above */
1471 case Ijk_ClientReq:
1472 case Ijk_EmFail:
1473 case Ijk_EmWarn:
1474 case Ijk_NoDecode:
1475 case Ijk_NoRedir:
1476 case Ijk_SigBUS:
1477 case Ijk_SigILL:
1478 case Ijk_SigTRAP:
1479 case Ijk_SigFPE_IntDiv:
1480 case Ijk_SigFPE_IntOvf:
1481 case Ijk_Sys_syscall:
1482 case Ijk_InvalICache: {
1483 HReg r = iselWordExpr_R(env, next);
1484 vassert(offsIP >= 0);
1485 vassert(offsIP < 0x1000);
1486 addInstr(env, NANOMIPSInstr_XAssisted(r, GuestStatePointer,
1487 offsIP, INVALID_HREG, jk));
1488 return;
1491 default:
1492 break;
1495 vex_printf("\n-- PUT(%d) = ", offsIP);
1496 ppIRExpr(next );
1497 vex_printf("; exit-");
1498 ppIRJumpKind(jk);
1499 vex_printf("\n");
1500 vassert(0); /* are we expecting any other kind? */
/*---------------------------------------------------------*/
/*--- Insn selector top-level                           ---*/
/*---------------------------------------------------------*/

/* Translate an entire BB to NANOMIPS code. */
1508 HInstrArray *iselSB_NANOMIPS(const IRSB * bb,
1509 VexArch arch_host,
1510 const VexArchInfo * archinfo_host,
1511 const VexAbiInfo * vbi,
1512 Int offs_Host_EvC_Counter,
1513 Int offs_Host_EvC_FailAddr,
1514 Bool chainingAllowed,
1515 Bool addProfInc,
1516 Addr max_ga)
1518 Int i, j;
1519 HReg hreg, hregHI;
1520 ISelEnv *env;
1521 hwcaps_host = archinfo_host->hwcaps;
1522 /* sanity ... */
1523 vassert(arch_host == VexArchNANOMIPS);
1524 /* Check that the host's endianness is as expected. */
1525 vassert(archinfo_host->endness == VexEndnessLE
1526 || archinfo_host->endness == VexEndnessBE);
1527 /* Make up an initial environment to use. */
1528 env = LibVEX_Alloc_inline(sizeof(ISelEnv));
1529 env->vreg_ctr = 0;
1530 /* Set up output code array. */
1531 env->code = newHInstrArray();
1532 /* Copy BB's type env. */
1533 env->type_env = bb->tyenv;
1534 /* Make up an IRTemp -> virtual HReg mapping. This doesn't
1535 change as we go along. */
1536 env->n_vregmap = bb->tyenv->types_used;
1537 env->vregmap = LibVEX_Alloc_inline(env->n_vregmap * sizeof(HReg));
1538 env->vregmapHI = LibVEX_Alloc_inline(env->n_vregmap * sizeof(HReg));
1539 env->hwcaps = hwcaps_host;
1540 env->chainingAllowed = chainingAllowed;
1541 env->max_ga = max_ga;
1542 /* For each IR temporary, allocate a suitably-kinded virtual
1543 register. */
1544 j = 0;
1546 for (i = 0; i < env->n_vregmap; i++) {
1547 hregHI = hreg = INVALID_HREG;
1549 switch (bb->tyenv->types[i]) {
1550 case Ity_I1:
1551 case Ity_I8:
1552 case Ity_I16:
1553 case Ity_I32:
1554 hreg = mkHReg(True, HRcInt32, 0, j++);
1555 break;
1557 case Ity_I64:
1558 hreg = mkHReg(True, HRcInt32, 0, j++);
1559 hregHI = mkHReg(True, HRcInt32, 0, j++);
1560 break;
1562 default:
1563 ppIRType(bb->tyenv->types[i]);
1564 vpanic("iselBB(nanomips): IRTemp type");
1565 break;
1568 env->vregmap[i] = hreg;
1569 env->vregmapHI[i] = hregHI;
1572 env->vreg_ctr = j;
1573 /* The very first instruction must be an event check. */
1574 vassert(offs_Host_EvC_Counter >= 0);
1575 vassert(offs_Host_EvC_FailAddr >= 0);
1576 vassert(offs_Host_EvC_Counter < 0x1000);
1577 vassert(offs_Host_EvC_FailAddr < 0x1000);
1578 addInstr(env, NANOMIPSInstr_EvCheck(GuestStatePointer,
1579 offs_Host_EvC_Counter,
1580 GuestStatePointer,
1581 offs_Host_EvC_FailAddr));
1583 /* Possibly a block counter increment (for profiling). At this
1584 point we don't know the address of the counter, so just pretend
1585 it is zero. It will have to be patched later, but before this
1586 translation is used, by a call to LibVEX_patchProfCtr. */
1587 if (addProfInc) {
1588 addInstr(env, NANOMIPSInstr_ProfInc());
1591 /* Ok, finally we can iterate over the statements. */
1592 for (i = 0; i < bb->stmts_used; i++)
1593 iselStmt(env, bb->stmts[i]);
1595 iselNext(env, bb->next, bb->jumpkind, bb->offsIP);
1596 /* record the number of vregs we used. */
1597 env->code->n_vregs = env->vreg_ctr;
1598 return env->code;
/*---------------------------------------------------------------*/
/*--- end                                 host_nanomips_isel.c ---*/
/*---------------------------------------------------------------*/