/* -*- Mode: C++; c-basic-offset: 4; indent-tabs-mode: nil; tab-width: 4 -*- */
/* vi: set ts=4 sw=4 expandtab: (add to ~/.vimrc: set modeline modelines=5) */
/* ***** BEGIN LICENSE BLOCK *****
 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
 *
 * The contents of this file are subject to the Mozilla Public License Version
 * 1.1 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * http://www.mozilla.org/MPL/
 *
 * Software distributed under the License is distributed on an "AS IS" basis,
 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
 * for the specific language governing rights and limitations under the
 * License.
 *
 * The Original Code is [Open Source Virtual Machine].
 *
 * The Initial Developer of the Original Code is
 * Adobe System Incorporated.
 * Portions created by the Initial Developer are Copyright (C) 2004-2007
 * the Initial Developer. All Rights Reserved.
 *
 * Alternatively, the contents of this file may be used under the terms of
 * either the GNU General Public License Version 2 or later (the "GPL"), or
 * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
 * in which case the provisions of the GPL or the LGPL are applicable instead
 * of those above. If you wish to allow use of your version of this file only
 * under the terms of either the GPL or the LGPL, and not to allow others to
 * use your version of this file under the terms of the MPL, indicate your
 * decision by deleting the provisions above and replace them with the notice
 * and other provisions required by the GPL or the LGPL. If you do not delete
 * the provisions above, a recipient may use your version of this file under
 * the terms of any one of the MPL, the GPL or the LGPL.
 *
 * ***** END LICENSE BLOCK ***** */
#include "nanojit.h"

#ifdef FEATURE_NANOJIT
const char *regNames[] = {
    "%g0", "%g1", "%g2", "%g3", "%g4", "%g5", "%g6", "%g7",
    "%o0", "%o1", "%o2", "%o3", "%o4", "%o5", "%sp", "%o7",
    "%l0", "%l1", "%l2", "%l3", "%l4", "%l5", "%l6", "%l7",
    "%i0", "%i1", "%i2", "%i3", "%i4", "%i5", "%fp", "%i7",
    "%f0", "%f1", "%f2", "%f3", "%f4", "%f5", "%f6", "%f7",
    "%f8", "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
    "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
    "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31"
};
const Register Assembler::argRegs[] = { I0, I1, I2, I3, I4, I5 };
const Register Assembler::retRegs[] = { O0 };
const Register Assembler::savedRegs[] = { L1 }; // Dummy element not used, as NumSavedRegs == 0
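// Note: after the prologue's SAVE rotates the register window, incoming
// arguments appear in %i0-%i5 from the callee's point of view, which is why
// argRegs uses I0..I5 rather than the caller-side %o registers.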
static const int kLinkageAreaSize = 68;
static const int kcalleeAreaSize = 80; // The max size.
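// 68 bytes matches the SPARC 32-bit ABI linkage area: 64 bytes (16 words)
// for the register-window save area plus 4 bytes for the hidden
// struct-return pointer slot.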
#define BIT_ROUND_UP(v,q) ( (((uintptr_t)v)+(q)-1) & ~((q)-1) )
#define TODO(x) do{ verbose_only(outputf(#x);) NanoAssertMsgf(false, "%s", #x); } while(0)
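// BIT_ROUND_UP rounds v up to the next multiple of q, where q must be a
// power of two; e.g. BIT_ROUND_UP(13, 8) == 16 and BIT_ROUND_UP(16, 8) == 16.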
void Assembler::nInit(AvmCore* core)

void Assembler::nBeginAssembly() {
NIns* Assembler::genPrologue()
    uint32_t stackNeeded = STACK_GRANULARITY * _activation.stackSlotsNeeded();
    uint32_t frameSize = stackNeeded + kcalleeAreaSize + kLinkageAreaSize;
    frameSize = BIT_ROUND_UP(frameSize, 8);
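    // SPARC requires SP to stay doubleword (8-byte) aligned, hence the
    // round-up above.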
    if (frameSize <= 4096)
        SUBI(FP, frameSize, SP);
    ORI(G1, frameSize & 0x3FF, G1);

    if (_logc->lcbits & LC_Native) {
        outputf(" %p:", _nIns);
        outputf(" patch entry:");
    }
    NIns *patchEntry = _nIns;
    // The frame size in SAVE is faked. We will still re-calculate SP later.
    // We can use 0 here, but it is not good for debuggers.
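    // (SAVE allocates the frame and rotates the register window in a single
    // instruction; the SUBI/ORI sequence above recomputes SP with the real
    // size.)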
    // align the entry point

void Assembler::asm_align_code() {
    while (uintptr_t(_nIns) & 15) {
void Assembler::nFragExit(LIns* guard)
    SideExit* exit = guard->record()->exit;
    Fragment *frag = exit->target;

    if (frag && frag->fragEntry)
        JMP(frag->fragEntry);

    // Target doesn't exist yet. Emit a jump to the epilogue, and set up to patch later.
    _epilogue = genEpilogue();
    lr = guard->record();
    JMP_long((intptr_t)_epilogue);
// return value is GuardRecord*
NIns *Assembler::genEpilogue()
    RESTORE(G0, G0, G0); // restore
    JMPLI(I7, 8, G0);    // ret
void Assembler::asm_call(LIns* ins)
    Register retReg = ( ins->isop(LIR_calld) ? F0 : retRegs[0] );
    deprecated_prepResultReg(ins, rmask(retReg));

    // Do this after we've handled the call result, so we don't
    // force the call result to be spilled unnecessarily.
    evictScratchRegsExcept(0);

    const CallInfo* ci = ins->callInfo();
    ArgType argTypes[MAXARGS];
    uint32_t argc = ci->getArgTypes(argTypes);

    NanoAssert(ins->isop(LIR_callp) || ins->isop(LIR_calld));
    verbose_only(if (_logc->lcbits & LC_Native)
        outputf(" %p:", _nIns);
    )

    bool indirect = ci->isIndirect();
    Register r = findSpecificRegFor(ins->arg(argc), I0);

    uint32_t GPRIndex = O0;
    uint32_t offset = kLinkageAreaSize; // start of the parameter area on the stack.
    for (int i = 0; i < argc; i++)
        uint32_t j = argc - i - 1;
        ArgType ty = argTypes[j];
        if (ty == ARGTYPE_D) {
            Register r = findRegFor(ins->arg(j), FpRegs);
            // We might be calling a varargs function.
            // So, make sure the GPRs are also loaded with
            // the value, or the stack contains it.
            if (GPRIndex-2 <= O5) {
                LDSW32(SP, offset-8, (Register)(GPRIndex-2));
            }
            if (GPRIndex-1 <= O5) {
                LDSW32(SP, offset-4, (Register)(GPRIndex-1));
            }
            STDF32(r, offset-8, SP);
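            // Under the 32-bit SPARC ABI, doubles are passed in two
            // consecutive integer registers (or on the stack), so the double
            // is stored to the stack and its two words are reloaded into the
            // corresponding GPRs by the loads above (which, with backwards
            // emission, execute after the store).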
            Register r = findRegFor(ins->arg(j), GpRegs);
            STW32(r, offset, SP);
            Register r = findSpecificRegFor(ins->arg(j), (Register)GPRIndex);
Register Assembler::nRegisterAllocFromSet(RegisterMask set)
    // need to implement a faster way
    while (!(set & rmask((Register)i)))
    _allocator.free &= ~rmask((Register)i);
void Assembler::nRegisterResetAll(RegAlloc& a)
    a.free = GpRegs | FpRegs;
void Assembler::nPatchBranch(NIns* branch, NIns* location)
    *(uint32_t*)&branch[0] &= 0xFFC00000;
    *(uint32_t*)&branch[0] |= ((intptr_t)location >> 10) & 0x3FFFFF;
    *(uint32_t*)&branch[1] &= 0xFFFFFC00;
    *(uint32_t*)&branch[1] |= (intptr_t)location & 0x3FF;
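// The patched branch is a SETHI/OR pair that materializes the target
// address: word 0 keeps its top 10 bits (SETHI opcode and rd) and receives
// the upper 22 bits of the address in its imm22 field; word 1 keeps all but
// its immediate field and receives the low 10 bits.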
RegisterMask Assembler::nHint(LIns* ins)
    // Never called, because no entries in nHints[] == PREFER_SPECIAL.
bool Assembler::canRemat(LIns* ins)
    return ins->isImmI() || ins->isop(LIR_allocp);
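// Immediates and stack-allocated addresses can be regenerated in an
// instruction or two, so they are rematerialized rather than spilled.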
void Assembler::asm_restore(LIns* i, Register r)
    if (i->isop(LIR_allocp)) {
        int32_t d = deprecated_disp(i);
    else if (i->isImmI()) {
        int d = findMemFor(i);
        if (rmask(r) & FpRegs) {
void Assembler::asm_store32(LOpcode op, LIns *value, int dr, LIns *base)
        // handled by mainline code below for now
        NanoAssertMsg(0, "NJ_EXPANDED_LOADSTORE_SUPPORTED not yet supported for this architecture");
        NanoAssertMsg(0, "asm_store32 should never receive this LIR opcode");
    Register rb = getBaseReg(base, dr, GpRegs);
    int c = value->immI();

    // make sure the value is in a register
    if (base->isImmI()) {
        ra = findRegFor(value, GpRegs);
    getBaseReg2(GpRegs, value, ra, GpRegs, base, rb, dr);
void Assembler::asm_spill(Register rr, int d, bool quad)
    if (rmask(rr) & FpRegs) {
void Assembler::asm_load64(LIns* ins)
    switch (ins->opcode()) {
        // handled by mainline code below for now
        NanoAssertMsg(0, "NJ_EXPANDED_LOADSTORE_SUPPORTED not yet supported for this architecture");
        NanoAssertMsg(0, "asm_load64 should never receive this LIR opcode");
    LIns* base = ins->oprnd1();
    int db = ins->disp();
    Register rr = ins->deprecated_getReg();
    int dr = deprecated_disp(ins);

    if (base->isop(LIR_allocp)) {
        db += findMemFor(base);
    rb = findRegFor(base, GpRegs);

    // don't use an fpu reg to simply load & store the value.
    asm_mmq(FP, dr, rb, db);
    deprecated_freeRsrcOf(ins);

    if (rr != deprecated_UnknownReg)
        NanoAssert(rmask(rr) & FpRegs);
        _allocator.retire(rr);
void Assembler::asm_store64(LOpcode op, LIns* value, int dr, LIns* base)
        // handled by mainline code below for now
        NanoAssertMsg(0, "NJ_EXPANDED_LOADSTORE_SUPPORTED not yet supported for this architecture");
        NanoAssertMsg(0, "asm_store64 should never receive this LIR opcode");
    // if it's a constant 64-bit value, just store it now rather than
    // generating a pointless store/load/store sequence
    Register rb = findRegFor(base, GpRegs);
    SET32(value->immDlo(), L2);
    SET32(value->immDhi(), L2);
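    // Each 32-bit half of the immediate is materialized through the scratch
    // register L2 and stored to memory as a separate word.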
    if (value->isop(LIR_ldd))
        // value is a 64-bit struct or int64_t, or maybe a double.
        // It may be live in an FPU reg. Either way, don't
        // put it in an FPU reg just to load & store it.
        // a) if we know it's not a double, this is right.
        // b) if we guarded that it's a double, this store could be on
        //    the side exit, copying a non-double.
        // c) maybe it's a double just being stored. Oh well.
        int da = findMemFor(value);
        if (base->isop(LIR_allocp)) {
            dr += findMemFor(base);
        rb = findRegFor(base, GpRegs);
        asm_mmq(rb, dr, FP, da);
    if (base->isop(LIR_allocp)) {
        dr += findMemFor(base);
    rb = findRegFor(base, GpRegs);

    // if value already in a reg, use that, otherwise
    // try to get it into XMM regs before FPU regs.
    Register rv = ( !value->isInReg()
                  ? findRegFor(value, FpRegs)
                  : value->deprecated_getReg() );
/**
 * copy 64 bits: (rd+dd) <- (rs+ds)
 */
void Assembler::asm_mmq(Register rd, int dd, Register rs, int ds)
    // value is either a 64-bit struct or maybe a float
    // that isn't live in an FPU reg. Either way, don't
    // put it in an FPU reg just to load & store it.
    Register t = registerAllocTmp(GpRegs & ~(rmask(rd)|rmask(rs)));
NIns* Assembler::asm_branch(bool branchOnFalse, LIns* cond, NIns* targ)
    LOpcode condop = cond->opcode();
    NanoAssert(cond->isCmp());
    if (isCmpDOpcode(condop))
        return asm_branchd(branchOnFalse, cond, targ);

    intptr_t tt = ((intptr_t)targ - (intptr_t)_nIns + 8) >> 2;
    // !targ means that it needs patching.
    if (!isIMM22((int32_t)tt) || !targ) {
        JMP_long_nocheck((intptr_t)targ);
    // produce the branch
    if (condop == LIR_eqi)
    else if (condop == LIR_lti)
    else if (condop == LIR_lei)
    else if (condop == LIR_gti)
    else if (condop == LIR_gei)
    else if (condop == LIR_ltui)
    else if (condop == LIR_leui)
    else if (condop == LIR_gtui)
    else //if (condop == LIR_geui)
    if (condop == LIR_eqi)
    else if (condop == LIR_lti)
    else if (condop == LIR_lei)
    else if (condop == LIR_gti)
    else if (condop == LIR_gei)
    else if (condop == LIR_ltui)
    else if (condop == LIR_leui)
    else if (condop == LIR_gtui)
    else //if (condop == LIR_geui)
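    // Two parallel chains: the first (branchOnFalse) emits the branch on the
    // negated condition; the second emits the branch on the condition itself.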
NIns* Assembler::asm_branch_ov(LOpcode op, NIns* targ)
    intptr_t tt = ((intptr_t)targ - (intptr_t)_nIns + 8) >> 2;
    // !targ means that it needs patching.
    if (!isIMM22((int32_t)tt) || !targ) {
        JMP_long_nocheck((intptr_t)targ);

    if (op == LIR_mulxovi || op == LIR_muljovi)
void Assembler::asm_cmp(LIns *cond)
    LIns* lhs = cond->oprnd1();
    LIns* rhs = cond->oprnd2();

    NanoAssert(lhs->isI() && rhs->isI());

    // ready to issue the compare
    Register r = findRegFor(lhs, GpRegs);
    if (c == 0 && cond->isop(LIR_eqi)) {
    findRegFor2(GpRegs, lhs, ra, GpRegs, rhs, rb);
void Assembler::asm_condd(LIns* ins)
    // only want certain regs
    Register r = deprecated_prepResultReg(ins, AllowableFlagRegs);

    LOpcode condop = ins->opcode();
    NanoAssert(isCmpDOpcode(condop));
    if (condop == LIR_eqd)
        MOVFEI(1, 0, 0, 0, r);
    else if (condop == LIR_led)
        MOVFLEI(1, 0, 0, 0, r);
    else if (condop == LIR_ltd)
        MOVFLI(1, 0, 0, 0, r);
    else if (condop == LIR_ged)
        MOVFGEI(1, 0, 0, 0, r);
    else // if (condop == LIR_gtd)
        MOVFGI(1, 0, 0, 0, r);
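    // The MOVF* forms conditionally move on the floating-point condition
    // codes set by the compare emitted in asm_cmpd(), so the boolean result
    // is materialized without a branch.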
void Assembler::asm_cond(LIns* ins)
    // only want certain regs
    LOpcode op = ins->opcode();
    Register r = deprecated_prepResultReg(ins, AllowableFlagRegs);
        MOVEI(1, 1, 0, 0, r);
    else if (op == LIR_lti)
        MOVLI(1, 1, 0, 0, r);
    else if (op == LIR_lei)
        MOVLEI(1, 1, 0, 0, r);
    else if (op == LIR_gti)
        MOVGI(1, 1, 0, 0, r);
    else if (op == LIR_gei)
        MOVGEI(1, 1, 0, 0, r);
    else if (op == LIR_ltui)
        MOVCSI(1, 1, 0, 0, r);
    else if (op == LIR_leui)
        MOVLEUI(1, 1, 0, 0, r);
    else if (op == LIR_gtui)
        MOVGUI(1, 1, 0, 0, r);
    else // if (op == LIR_geui)
        MOVCCI(1, 1, 0, 0, r);
void Assembler::asm_arith(LIns* ins)
    LOpcode op = ins->opcode();
    LIns* lhs = ins->oprnd1();
    LIns* rhs = ins->oprnd2();

    Register rb = deprecated_UnknownReg;
    RegisterMask allow = GpRegs;
    bool forceReg = (op == LIR_muli || op == LIR_mulxovi || op == LIR_muljovi || !rhs->isImmI());

    if (lhs != rhs && forceReg)
        if ((rb = asm_binop_rhs_reg(ins)) == deprecated_UnknownReg) {
            rb = findRegFor(rhs, allow);
    else if ((op == LIR_addi || op == LIR_addxovi) && lhs->isop(LIR_allocp) && rhs->isImmI()) {
        // add alloc+const, use lea
        Register rr = deprecated_prepResultReg(ins, allow);
        int d = findMemFor(lhs) + rhs->immI();

    Register rr = deprecated_prepResultReg(ins, allow);
    // if this is last use of lhs in reg, we can re-use result reg
    // else, lhs already has a register assigned.
    Register ra = ( !lhs->isInReg()
                  ? findSpecificRegFor(lhs, rr)
                  : lhs->deprecated_getReg() );
    if (op == LIR_addi || op == LIR_addxovi)
    else if (op == LIR_subi || op == LIR_subxovi)
    else if (op == LIR_muli)
    else if (op == LIR_mulxovi || op == LIR_muljovi) {
    else if (op == LIR_andi)
    else if (op == LIR_ori)
    else if (op == LIR_xori)
    else if (op == LIR_lshi)
    else if (op == LIR_rshi)
    else if (op == LIR_rshui)
        NanoAssertMsg(0, "Unsupported");
    if (op == LIR_addi || op == LIR_addxovi)
    else if (op == LIR_subi || op == LIR_subxovi)
    else if (op == LIR_andi)
    else if (op == LIR_ori)
    else if (op == LIR_xori)
    else if (op == LIR_lshi)
    else if (op == LIR_rshi)
    else if (op == LIR_rshui)
        NanoAssertMsg(0, "Unsupported");
void Assembler::asm_neg_not(LIns* ins)
    LOpcode op = ins->opcode();
    Register rr = deprecated_prepResultReg(ins, GpRegs);

    LIns* lhs = ins->oprnd1();
    // if this is last use of lhs in reg, we can re-use result reg
    // else, lhs already has a register assigned.
    Register ra = ( !lhs->isInReg()
                  ? findSpecificRegFor(lhs, rr)
                  : lhs->deprecated_getReg() );
void Assembler::asm_load32(LIns* ins)
    LOpcode op = ins->opcode();
    LIns* base = ins->oprnd1();
    Register rr = deprecated_prepResultReg(ins, GpRegs);
    Register ra = getBaseReg(base, d, GpRegs);
        NanoAssertMsg(0, "NJ_EXPANDED_LOADSTORE_SUPPORTED not yet supported for this architecture");
        NanoAssertMsg(0, "asm_load32 should never receive this LIR opcode");
void Assembler::asm_cmov(LIns* ins)
    LOpcode op = ins->opcode();
    LIns* condval = ins->oprnd1();
    LIns* iftrue = ins->oprnd2();
    LIns* iffalse = ins->oprnd3();

    NanoAssert(condval->isCmp());
    NanoAssert(op == LIR_cmovi && iftrue->isI() && iffalse->isI());

    const Register rr = deprecated_prepResultReg(ins, GpRegs);

    // this code assumes that neither LD nor MR nor MRcc set any of the condition flags.
    // (This is true on Intel, is it true on all architectures?)
    const Register iffalsereg = findRegFor(iffalse, GpRegs & ~rmask(rr));
    if (op == LIR_cmovi) {
        switch (condval->opcode()) {
            // note that these are all opposites...
            case LIR_eqi:  MOVNE (iffalsereg, 1, 0, 0, rr); break;
            case LIR_lti:  MOVGE (iffalsereg, 1, 0, 0, rr); break;
            case LIR_lei:  MOVG  (iffalsereg, 1, 0, 0, rr); break;
            case LIR_gti:  MOVLE (iffalsereg, 1, 0, 0, rr); break;
            case LIR_gei:  MOVL  (iffalsereg, 1, 0, 0, rr); break;
            case LIR_ltui: MOVCC (iffalsereg, 1, 0, 0, rr); break;
            case LIR_leui: MOVGU (iffalsereg, 1, 0, 0, rr); break;
            case LIR_gtui: MOVLEU(iffalsereg, 1, 0, 0, rr); break;
            case LIR_geui: MOVCS (iffalsereg, 1, 0, 0, rr); break;
            debug_only( default: NanoAssert(0); break; )
    /*const Register iftruereg =*/ findSpecificRegFor(iftrue, rr);
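    // Because code is emitted backwards, the unconditional move of iftrue
    // into rr (emitted last, above) executes first; the conditional move then
    // overwrites rr with iffalse when the condition fails, which is why every
    // case uses the opposite condition code.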
void Assembler::asm_param(LIns* ins)
    uint32_t a = ins->paramArg();
    uint32_t kind = ins->paramKind();
    deprecated_prepResultReg(ins, rmask(argRegs[a]));
void Assembler::asm_immi(LIns* ins)
    Register rr = deprecated_prepResultReg(ins, GpRegs);
    int32_t val = ins->immI();
void Assembler::asm_immd(LIns* ins)
    Register rr = ins->deprecated_getReg();
    if (rr != deprecated_UnknownReg)
        // @todo -- add special-cases for 0 and 1
        _allocator.retire(rr);
        NanoAssert((rmask(rr) & FpRegs) != 0);
        int d = deprecated_disp(ins);

    // @todo, if we used xor, ldsd, fldz, etc above, we don't need mem here
    int d = deprecated_disp(ins);
    deprecated_freeRsrcOf(ins);

    SET32(ins->immDlo(), L2);
    SET32(ins->immDhi(), L2);
void Assembler::asm_fneg(LIns* ins)
    Register rr = deprecated_prepResultReg(ins, FpRegs);
    LIns* lhs = ins->oprnd1();

    // lhs into reg, prefer same reg as result
    // if this is last use of lhs in reg, we can re-use result reg
    // else, lhs already has a different reg assigned
    Register ra = ( !lhs->isInReg()
                  ? findSpecificRegFor(lhs, rr)
                  : findRegFor(lhs, FpRegs) );
void Assembler::asm_fop(LIns* ins)
    LOpcode op = ins->opcode();
    LIns *lhs = ins->oprnd1();
    LIns *rhs = ins->oprnd2();

    RegisterMask allow = FpRegs;
    findRegFor2(allow, lhs, ra, allow, rhs, rb);
    Register rr = deprecated_prepResultReg(ins, allow);
    else if (op == LIR_subd)
    else if (op == LIR_muld)
    else //if (op == LIR_divd)
void Assembler::asm_i2d(LIns* ins)
    // where our result goes
    Register rr = deprecated_prepResultReg(ins, FpRegs);
    int d = findMemFor(ins->oprnd1());
void Assembler::asm_ui2d(LIns* ins)
    // where our result goes
    Register rr = deprecated_prepResultReg(ins, FpRegs);
    Register rt = registerAllocTmp(FpRegs & ~(rmask(rr)));
    Register gr = findRegFor(ins->oprnd1(), GpRegs);
    LDDF32(SP, disp, rr);
    STWI(G0, disp+4, SP);
    LDDF32(SP, disp, rt);
    STWI(gr, disp+4, SP);
    SETHI(0x43300000, G1);
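    // Classic uint32->double trick: 0x43300000 is the high word of 2^52, so
    // writing {0x43300000, gr} to the stack and reloading it as a double
    // yields 2^52 + gr exactly; subtracting 2^52 (in instructions elided
    // here) leaves the unsigned value without a signed-conversion error.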
void Assembler::asm_d2i(LIns* ins) {
    LIns *lhs = ins->oprnd1();
    Register rr = prepareResultReg(ins, GpRegs);
    Register ra = findRegFor(lhs, FpRegs);
    int d = findMemFor(ins);
void Assembler::asm_nongp_copy(Register r, Register s)
    NanoAssert((rmask(r) & FpRegs) && (rmask(s) & FpRegs));
NIns* Assembler::asm_branchd(bool branchOnFalse, LIns *cond, NIns *targ)
    LOpcode condop = cond->opcode();
    NanoAssert(isCmpDOpcode(condop));

    intptr_t tt = ((intptr_t)targ - (intptr_t)_nIns + 8) >> 2;
    // !targ means that it needs patching.
    if (!isIMM22((int32_t)tt) || !targ) {
        JMP_long_nocheck((intptr_t)targ);
    // produce the branch
    if (condop == LIR_eqd)
    else if (condop == LIR_led)
    else if (condop == LIR_ltd)
    else if (condop == LIR_ged)
    else //if (condop == LIR_gtd)

    if (condop == LIR_eqd)
    else if (condop == LIR_led)
    else if (condop == LIR_ltd)
    else if (condop == LIR_ged)
    else //if (condop == LIR_gtd)
void Assembler::asm_cmpd(LIns *cond)
    LIns* lhs = cond->oprnd1();
    LIns* rhs = cond->oprnd2();

    Register rLhs = findRegFor(lhs, FpRegs);
    Register rRhs = findRegFor(rhs, FpRegs);
void Assembler::nativePageReset()

Register Assembler::asm_binop_rhs_reg(LIns* ins)
    return deprecated_UnknownReg;
void Assembler::nativePageSetup()
    NanoAssert(!_inExit);
    codeAlloc(codeStart, codeEnd, _nIns verbose_only(, codeBytes));
// Increment the 32-bit profiling counter at pCtr, without
// changing any registers.
void Assembler::asm_inc_m32(uint32_t*)
    // todo: implement this
void Assembler::underrunProtect(int n)
    // This may be in a normal code chunk or an exit code chunk.
    if (eip - n < codeStart) {
        codeAlloc(codeStart, codeEnd, _nIns verbose_only(, codeBytes));
        JMP_long_nocheck((intptr_t)eip);
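// We are about to run off the start of the current chunk (emission is
// backwards): allocate a fresh chunk and emit a jump from it to the code
// already generated at eip, so execution continues seamlessly.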
void Assembler::asm_ret(LIns* ins)
    LIns *val = ins->oprnd1();
    if (ins->isop(LIR_reti)) {
        findSpecificRegFor(val, retRegs[0]);
    NanoAssert(ins->isop(LIR_retd));
    findSpecificRegFor(val, F0);
void Assembler::swapCodeChunks() {
    codeAlloc(exitStart, exitEnd, _nExitIns verbose_only(, exitBytes));
    SWAP(NIns*, _nIns, _nExitIns);
    SWAP(NIns*, codeStart, exitStart);
    SWAP(NIns*, codeEnd, exitEnd);
    verbose_only( SWAP(size_t, codeBytes, exitBytes); )
#endif /* FEATURE_NANOJIT */