/* -*- Mode: C++; c-basic-offset: 4; indent-tabs-mode: nil; tab-width: 4 -*- */
/* vi: set ts=4 sw=4 expandtab: (add to ~/.vimrc: set modeline modelines=5) */
/* ***** BEGIN LICENSE BLOCK *****
 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
 *
 * The contents of this file are subject to the Mozilla Public License Version
 * 1.1 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * http://www.mozilla.org/MPL/
 *
 * Software distributed under the License is distributed on an "AS IS" basis,
 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
 * for the specific language governing rights and limitations under the
 * License.
 *
 * The Original Code is [Open Source Virtual Machine].
 *
 * The Initial Developer of the Original Code is
 * Adobe System Incorporated.
 * Portions created by the Initial Developer are Copyright (C) 2004-2007
 * the Initial Developer. All Rights Reserved.
 *
 * Contributor(s):
 *   Adobe AS3 Team
 *   leon.sha@sun.com
 *
 * Alternatively, the contents of this file may be used under the terms of
 * either the GNU General Public License Version 2 or later (the "GPL"), or
 * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
 * in which case the provisions of the GPL or the LGPL are applicable instead
 * of those above. If you wish to allow use of your version of this file only
 * under the terms of either the GPL or the LGPL, and not to allow others to
 * use your version of this file under the terms of the MPL, indicate your
 * decision by deleting the provisions above and replace them with the notice
 * and other provisions required by the GPL or the LGPL. If you do not delete
 * the provisions above, a recipient may use your version of this file under
 * the terms of any one of the MPL, the GPL or the LGPL.
 *
 * ***** END LICENSE BLOCK ***** */

#include <sys/mman.h>
#include <errno.h>
#include "nanojit.h"

namespace nanojit
{
#ifdef FEATURE_NANOJIT

#ifdef NJ_VERBOSE
    const char *regNames[] = {
        "%g0", "%g1", "%g2", "%g3", "%g4", "%g5", "%g6", "%g7",
        "%o0", "%o1", "%o2", "%o3", "%o4", "%o5", "%sp", "%o7",
        "%l0", "%l1", "%l2", "%l3", "%l4", "%l5", "%l6", "%l7",
        "%i0", "%i1", "%i2", "%i3", "%i4", "%i5", "%fp", "%i7",
        "%f0", "%f1", "%f2", "%f3", "%f4", "%f5", "%f6", "%f7",
        "%f8", "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
        "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
        "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31"
    };
#endif

    const Register Assembler::argRegs[] = { I0, I1, I2, I3, I4, I5 };
    const Register Assembler::retRegs[] = { O0 };
    const Register Assembler::savedRegs[] = { L1 }; // Dummy element not used, as NumSavedRegs == 0

    static const int kLinkageAreaSize = 68;
    static const int kcalleeAreaSize = 80; // The max size.

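    // Frame-layout note (32-bit SPARC ABI): the 68-byte linkage area is the
    // 16-word register-window save area (64 bytes) plus one word reserved for
    // the hidden struct-return pointer. The argument area, including the home
    // slots for the register arguments %o0-%o5, starts right after it, which
    // is why asm_call begins its stack offsets at kLinkageAreaSize.
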
#define BIT_ROUND_UP(v,q)   ( (((uintptr_t)v)+(q)-1) & ~((q)-1) )
#define TODO(x) do{ verbose_only(outputf(#x);) NanoAssertMsgf(false, "%s", #x); } while(0)

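    // BIT_ROUND_UP(v,q) rounds v up to the next multiple of the power-of-two q,
    // e.g. BIT_ROUND_UP(13, 8) == 16 and BIT_ROUND_UP(16, 8) == 16; genPrologue
    // uses it to keep the frame size 8-byte aligned.
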
    void Assembler::nInit(AvmCore* core)
    {
        has_cmov = true;
    }

    void Assembler::nBeginAssembly() {
    }

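    // Note: nanojit's Assembler generates code backwards -- each emitter macro
    // writes at _nIns and moves it toward lower addresses -- so within every
    // function below the instructions appear in the reverse of their run-time
    // execution order.
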
    NIns* Assembler::genPrologue()
    {
        /**
         * Prologue
         */
        underrunProtect(16);
        uint32_t stackNeeded = STACK_GRANULARITY * _activation.stackSlotsNeeded();
        uint32_t frameSize = stackNeeded + kcalleeAreaSize + kLinkageAreaSize;
        frameSize = BIT_ROUND_UP(frameSize, 8);

        if (frameSize <= 4096)
            SUBI(FP, frameSize, SP);
        else {
            SUB(FP, G1, SP);
            ORI(G1, frameSize & 0x3FF, G1);
            SETHI(frameSize, G1);
        }

        verbose_only(
        if (_logc->lcbits & LC_Native) {
            outputf("        %p:",_nIns);
            outputf("        patch entry:");
        })
        NIns *patchEntry = _nIns;

        // The frame size in SAVE is faked. We will still re-calculate SP later.
        // We can use 0 here, but it is not good for debuggers.
        SAVEI(SP, -148, SP);

        // align the entry point
        asm_align_code();

        return patchEntry;
    }

    void Assembler::asm_align_code() {
        while(uintptr_t(_nIns) & 15) {
            NOP();
        }
    }

    void Assembler::nFragExit(LIns* guard)
    {
        SideExit* exit = guard->record()->exit;
        Fragment *frag = exit->target;
        GuardRecord *lr;
        if (frag && frag->fragEntry)
            {
                JMP(frag->fragEntry);
                lr = 0;
            }
        else
            {
                // Target doesn't exist yet. Emit a jump to the epilogue, and set up to patch later.
                if (!_epilogue)
                    _epilogue = genEpilogue();
                lr = guard->record();
                JMP_long((intptr_t)_epilogue);
                lr->jmp = _nIns;
            }

        // return value is GuardRecord*
        SET32(int(lr), O0);
    }

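    // In execution order the epilogue below is the standard SPARC return
    // sequence: move the result from %o0 into %i0 (so the caller sees it in
    // its %o0 once the register window is restored), then jmpl %i7+8, %g0
    // (ret) with restore in the delay slot.
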
    NIns *Assembler::genEpilogue()
    {
        underrunProtect(12);
        RESTORE(G0, G0, G0); //restore
        JMPLI(I7, 8, G0); //ret
        ORI(O0, 0, I0);
        return _nIns;
    }

    void Assembler::asm_call(LIns* ins)
    {
        Register retReg = ( ins->isop(LIR_calld) ? F0 : retRegs[0] );
        deprecated_prepResultReg(ins, rmask(retReg));

        // Do this after we've handled the call result, so we don't
        // force the call result to be spilled unnecessarily.

        evictScratchRegsExcept(0);

        const CallInfo* ci = ins->callInfo();

        underrunProtect(8);
        NOP();

        ArgType argTypes[MAXARGS];
        uint32_t argc = ci->getArgTypes(argTypes);

        NanoAssert(ins->isop(LIR_callp) || ins->isop(LIR_calld));
        verbose_only(if (_logc->lcbits & LC_Native)
                     outputf("        %p:", _nIns);
                     )

        bool indirect = ci->isIndirect();
        if (!indirect) {
            CALL(ci);
        }
        else {
            argc--;
            Register r = findSpecificRegFor(ins->arg(argc), I0);
            JMPL(G0, I0, 15);
        }

        uint32_t GPRIndex = O0;
        uint32_t offset = kLinkageAreaSize; // start of the stacked-parameter area.

        for(int i=0; i<argc; i++)
            {
                uint32_t j = argc-i-1;
                ArgType ty = argTypes[j];
                if (ty == ARGTYPE_D) {
                    Register r = findRegFor(ins->arg(j), FpRegs);
                    GPRIndex += 2;
                    offset += 8;

                    underrunProtect(48);
                    // We might be calling a varargs function.
                    // So, make sure the GPRs are also loaded with
                    // the value, or the stack contains it.
                    if (GPRIndex-2 <= O5) {
                        LDSW32(SP, offset-8, (Register)(GPRIndex-2));
                    }
                    if (GPRIndex-1 <= O5) {
                        LDSW32(SP, offset-4, (Register)(GPRIndex-1));
                    }
                    STDF32(r, offset-8, SP);
                } else {
                    if (GPRIndex > O5) {
                        underrunProtect(12);
                        Register r = findRegFor(ins->arg(j), GpRegs);
                        STW32(r, offset, SP);
                    } else {
                        Register r = findSpecificRegFor(ins->arg(j), (Register)GPRIndex);
                    }
                    GPRIndex++;
                    offset += 4;
                }
            }
    }

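    // Because emission is backwards, the NOP written before CALL/JMPL above ends
    // up immediately after the call instruction in memory and serves as its delay
    // slot; the argument loop walks the arguments last-to-first, so at run time
    // %o0-%o5 and the stack slots are filled before the call executes.
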
    Register Assembler::nRegisterAllocFromSet(RegisterMask set)
    {
        // need to implement faster way
        int i=0;
        while (!(set & rmask((Register)i)))
            i ++;
        _allocator.free &= ~rmask((Register)i);
        return (Register) i;
    }

    void Assembler::nRegisterResetAll(RegAlloc& a)
    {
        a.clear();
        a.free = GpRegs | FpRegs;
    }

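    // nPatchBranch rewrites a two-instruction sethi/or address-forming pair at
    // the patch site: the first word receives the upper 22 bits of the new
    // target in its imm22 field, the second word receives the lower 10 bits in
    // its immediate field.
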
    void Assembler::nPatchBranch(NIns* branch, NIns* location)
    {
        *(uint32_t*)&branch[0] &= 0xFFC00000;
        *(uint32_t*)&branch[0] |= ((intptr_t)location >> 10) & 0x3FFFFF;
        *(uint32_t*)&branch[1] &= 0xFFFFFC00;
        *(uint32_t*)&branch[1] |= (intptr_t)location & 0x3FF;
    }

    RegisterMask Assembler::nHint(LIns* ins)
    {
        // Never called, because no entries in nHints[] == PREFER_SPECIAL.
        NanoAssert(0);
        return 0;
    }

    bool Assembler::canRemat(LIns* ins)
    {
        return ins->isImmI() || ins->isop(LIR_allocp);
    }

    void Assembler::asm_restore(LIns* i, Register r)
    {
        underrunProtect(24);
        if (i->isop(LIR_allocp)) {
            ADD(FP, L2, r);
            int32_t d = deprecated_disp(i);
            SET32(d, L2);
        }
        else if (i->isImmI()) {
            int v = i->immI();
            SET32(v, r);
        } else {
            int d = findMemFor(i);
            if (rmask(r) & FpRegs) {
                LDDF32(FP, d, r);
            } else {
                LDSW32(FP, d, r);
            }
        }
    }

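    // A recurring pattern in this file: SET32(imm, L2) materializes a 32-bit
    // immediate into the scratch register %l2. Because code is emitted
    // backwards it is written after the instruction that uses %l2, but it
    // executes before it.
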
    void Assembler::asm_store32(LOpcode op, LIns *value, int dr, LIns *base)
    {
        switch (op) {
            case LIR_sti:
            case LIR_sti2c:
                // handled by mainline code below for now
                break;
            case LIR_sti2s:
                NanoAssertMsg(0, "NJ_EXPANDED_LOADSTORE_SUPPORTED not yet supported for this architecture");
                return;
            default:
                NanoAssertMsg(0, "asm_store32 should never receive this LIR opcode");
                return;
        }

        underrunProtect(20);
        if (value->isImmI())
            {
                Register rb = getBaseReg(base, dr, GpRegs);
                int c = value->immI();
                switch (op) {
                case LIR_sti:
                    STW32(L2, dr, rb);
                    break;
                case LIR_sti2c:
                    STB32(L2, dr, rb);
                    break;
                }
                SET32(c, L2);
            }
        else
            {
                // make sure the value is in a register
                Register ra, rb;
                if (base->isImmI()) {
                    // absolute address
                    dr += base->immI();
                    ra = findRegFor(value, GpRegs);
                    rb = G0;
                } else {
                    getBaseReg2(GpRegs, value, ra, GpRegs, base, rb, dr);
                }
                switch (op) {
                case LIR_sti:
                    STW32(ra, dr, rb);
                    break;
                case LIR_sti2c:
                    STB32(ra, dr, rb);
                    break;
                }
            }
    }

    void Assembler::asm_spill(Register rr, int d, bool quad)
    {
        underrunProtect(24);
        (void)quad;
        NanoAssert(d);
        if (rmask(rr) & FpRegs) {
            STDF32(rr, d, FP);
        } else {
            STW32(rr, d, FP);
        }
    }

    void Assembler::asm_load64(LIns* ins)
    {
        switch (ins->opcode()) {
            case LIR_ldd:
                // handled by mainline code below for now
                break;
            case LIR_ldf2d:
                NanoAssertMsg(0, "NJ_EXPANDED_LOADSTORE_SUPPORTED not yet supported for this architecture");
                return;
            default:
                NanoAssertMsg(0, "asm_load64 should never receive this LIR opcode");
                return;
        }

        underrunProtect(72);
        LIns* base = ins->oprnd1();
        int db = ins->disp();
        Register rr = ins->deprecated_getReg();

        int dr = deprecated_disp(ins);
        Register rb;
        if (base->isop(LIR_allocp)) {
            rb = FP;
            db += findMemFor(base);
        } else {
            rb = findRegFor(base, GpRegs);
        }
        ins->clearReg();

        // don't use an fpu reg to simply load & store the value.
        if (dr)
            asm_mmq(FP, dr, rb, db);

        deprecated_freeRsrcOf(ins);

        if (rr != deprecated_UnknownReg)
            {
                NanoAssert(rmask(rr)&FpRegs);
                _allocator.retire(rr);
                LDDF32(rb, db, rr);
            }
    }

    void Assembler::asm_store64(LOpcode op, LIns* value, int dr, LIns* base)
    {
        switch (op) {
            case LIR_std:
                // handled by mainline code below for now
                break;
            case LIR_std2f:
                NanoAssertMsg(0, "NJ_EXPANDED_LOADSTORE_SUPPORTED not yet supported for this architecture");
                return;
            default:
                NanoAssertMsg(0, "asm_store64 should never receive this LIR opcode");
                return;
        }

        underrunProtect(48);
        if (value->isImmD())
            {
                // if a constant 64-bit value just store it now rather than
                // generating a pointless store/load/store sequence
                Register rb = findRegFor(base, GpRegs);
                STW32(L2, dr+4, rb);
                SET32(value->immDlo(), L2);
                STW32(L2, dr, rb);
                SET32(value->immDhi(), L2);
                return;
            }

        if (value->isop(LIR_ldd))
            {
                // value is 64bit struct or int64_t, or maybe a double.
                // it may be live in an FPU reg. Either way, don't
                // put it in an FPU reg just to load & store it.

                // a) if we know it's not a double, this is right.
                // b) if we guarded that it's a double, this store could be on
                //    the side exit, copying a non-double.
                // c) maybe it's a double just being stored. oh well.

                int da = findMemFor(value);
                Register rb;
                if (base->isop(LIR_allocp)) {
                    rb = FP;
                    dr += findMemFor(base);
                } else {
                    rb = findRegFor(base, GpRegs);
                }
                asm_mmq(rb, dr, FP, da);
                return;
            }

        Register rb;
        if (base->isop(LIR_allocp)) {
            rb = FP;
            dr += findMemFor(base);
        } else {
            rb = findRegFor(base, GpRegs);
        }

        // if value is already in a reg, use that;
        // otherwise get it into an FPU reg.
        Register rv = ( !value->isInReg()
                      ? findRegFor(value, FpRegs)
                      : value->deprecated_getReg() );

        STDF32(rv, dr, rb);
    }

    /**
     * copy 64 bits: (rd+dd) <- (rs+ds)
     */
    void Assembler::asm_mmq(Register rd, int dd, Register rs, int ds)
    {
        // value is either a 64bit struct or maybe a float
        // that isn't live in an FPU reg. Either way, don't
        // put it in an FPU reg just to load & store it.
        Register t = registerAllocTmp(GpRegs & ~(rmask(rd)|rmask(rs)));
        STW32(t, dd+4, rd);
        LDSW32(rs, ds+4, t);
        STW32(t, dd, rd);
        LDSW32(rs, ds, t);
    }

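    // Read in execution order, asm_mmq above is two load/store pairs through the
    // temporary GPR: ld [rs+ds],t; st t,[rd+dd]; ld [rs+ds+4],t; st t,[rd+dd+4].
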
    NIns* Assembler::asm_branch(bool branchOnFalse, LIns* cond, NIns* targ)
    {
        NIns* at = 0;
        LOpcode condop = cond->opcode();
        NanoAssert(cond->isCmp());
        if (isCmpDOpcode(condop))
            {
                return asm_branchd(branchOnFalse, cond, targ);
            }

        underrunProtect(32);
        intptr_t tt = ((intptr_t)targ - (intptr_t)_nIns + 8) >> 2;
        // A null targ means the target is unknown and the branch will be patched later.
        if( !(isIMM22((int32_t)tt)) || !targ ) {
            JMP_long_nocheck((intptr_t)targ);
            at = _nIns;
            NOP();
            BA(0, 5);
            tt = 4;
        }
        NOP();

        // produce the branch
        if (branchOnFalse)
            {
                if (condop == LIR_eqi)
                    BNE(0, tt);
                else if (condop == LIR_lti)
                    BGE(0, tt);
                else if (condop == LIR_lei)
                    BG(0, tt);
                else if (condop == LIR_gti)
                    BLE(0, tt);
                else if (condop == LIR_gei)
                    BL(0, tt);
                else if (condop == LIR_ltui)
                    BCC(0, tt);
                else if (condop == LIR_leui)
                    BGU(0, tt);
                else if (condop == LIR_gtui)
                    BLEU(0, tt);
                else //if (condop == LIR_geui)
                    BCS(0, tt);
            }
        else // branch on true
            {
                if (condop == LIR_eqi)
                    BE(0, tt);
                else if (condop == LIR_lti)
                    BL(0, tt);
                else if (condop == LIR_lei)
                    BLE(0, tt);
                else if (condop == LIR_gti)
                    BG(0, tt);
                else if (condop == LIR_gei)
                    BGE(0, tt);
                else if (condop == LIR_ltui)
                    BCS(0, tt);
                else if (condop == LIR_leui)
                    BLEU(0, tt);
                else if (condop == LIR_gtui)
                    BGU(0, tt);
                else //if (condop == LIR_geui)
                    BCC(0, tt);
            }
        asm_cmp(cond);
        return at;
    }

    NIns* Assembler::asm_branch_ov(LOpcode op, NIns* targ)
    {
        NIns* at = 0;
        underrunProtect(32);
        intptr_t tt = ((intptr_t)targ - (intptr_t)_nIns + 8) >> 2;
        // A null targ means the target is unknown and the branch will be patched later.
        if( !(isIMM22((int32_t)tt)) || !targ ) {
            JMP_long_nocheck((intptr_t)targ);
            at = _nIns;
            NOP();
            BA(0, 5);
            tt = 4;
        }
        NOP();

        if( op == LIR_mulxovi || op == LIR_muljovi )
            BNE(0, tt);
        else
            BVS(0, tt);
        return at;
    }

    void Assembler::asm_cmp(LIns *cond)
    {
        underrunProtect(12);

        LIns* lhs = cond->oprnd1();
        LIns* rhs = cond->oprnd2();

        NanoAssert(lhs->isI() && rhs->isI());

        // ready to issue the compare
        if (rhs->isImmI())
            {
                int c = rhs->immI();
                Register r = findRegFor(lhs, GpRegs);
                if (c == 0 && cond->isop(LIR_eqi)) {
                    ANDCC(r, r, G0);
                }
                else {
                    SUBCC(r, L2, G0);
                    SET32(c, L2);
                }
            }
        else
            {
                Register ra, rb;
                findRegFor2(GpRegs, lhs, ra, GpRegs, rhs, rb);
                SUBCC(ra, rb, G0);
            }
    }

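    // asm_condd and asm_cond below materialize a boolean result. In execution
    // order the sequence is: the compare (emitted by asm_cmpd/asm_cmp), then
    // "or %g0, 0, r" to clear the result register, then a conditional move of 1
    // into it when the condition holds.
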
    void Assembler::asm_condd(LIns* ins)
    {
        // only want certain regs
        Register r = deprecated_prepResultReg(ins, AllowableFlagRegs);
        underrunProtect(8);
        LOpcode condop = ins->opcode();
        NanoAssert(isCmpDOpcode(condop));
        if (condop == LIR_eqd)
            MOVFEI(1, 0, 0, 0, r);
        else if (condop == LIR_led)
            MOVFLEI(1, 0, 0, 0, r);
        else if (condop == LIR_ltd)
            MOVFLI(1, 0, 0, 0, r);
        else if (condop == LIR_ged)
            MOVFGEI(1, 0, 0, 0, r);
        else // if (condop == LIR_gtd)
            MOVFGI(1, 0, 0, 0, r);
        ORI(G0, 0, r);
        asm_cmpd(ins);
    }

    void Assembler::asm_cond(LIns* ins)
    {
        underrunProtect(8);
        // only want certain regs
        LOpcode op = ins->opcode();
        Register r = deprecated_prepResultReg(ins, AllowableFlagRegs);

        if (op == LIR_eqi)
            MOVEI(1, 1, 0, 0, r);
        else if (op == LIR_lti)
            MOVLI(1, 1, 0, 0, r);
        else if (op == LIR_lei)
            MOVLEI(1, 1, 0, 0, r);
        else if (op == LIR_gti)
            MOVGI(1, 1, 0, 0, r);
        else if (op == LIR_gei)
            MOVGEI(1, 1, 0, 0, r);
        else if (op == LIR_ltui)
            MOVCSI(1, 1, 0, 0, r);
        else if (op == LIR_leui)
            MOVLEUI(1, 1, 0, 0, r);
        else if (op == LIR_gtui)
            MOVGUI(1, 1, 0, 0, r);
        else // if (op == LIR_geui)
            MOVCCI(1, 1, 0, 0, r);
        ORI(G0, 0, r);
        asm_cmp(ins);
    }

    void Assembler::asm_arith(LIns* ins)
    {
        underrunProtect(28);
        LOpcode op = ins->opcode();
        LIns* lhs = ins->oprnd1();
        LIns* rhs = ins->oprnd2();

        Register rb = deprecated_UnknownReg;
        RegisterMask allow = GpRegs;
        bool forceReg = (op == LIR_muli || op == LIR_mulxovi || op == LIR_muljovi || !rhs->isImmI());

        if (lhs != rhs && forceReg)
            {
                if ((rb = asm_binop_rhs_reg(ins)) == deprecated_UnknownReg) {
                    rb = findRegFor(rhs, allow);
                }
                allow &= ~rmask(rb);
            }
        else if ((op == LIR_addi || op == LIR_addxovi) && lhs->isop(LIR_allocp) && rhs->isImmI()) {
            // add alloc+const: form the address directly off FP (the x86 backend uses lea here)
            Register rr = deprecated_prepResultReg(ins, allow);
            int d = findMemFor(lhs) + rhs->immI();
            ADD(FP, L2, rr);
            SET32(d, L2);
            return;
        }

        Register rr = deprecated_prepResultReg(ins, allow);
        // if this is last use of lhs in reg, we can re-use result reg
        // else, lhs already has a register assigned.
        Register ra = ( !lhs->isInReg()
                      ? findSpecificRegFor(lhs, rr)
                      : lhs->deprecated_getReg() );

        if (forceReg)
            {
                if (lhs == rhs)
                    rb = ra;

                if (op == LIR_addi || op == LIR_addxovi)
                    ADDCC(rr, rb, rr);
                else if (op == LIR_subi || op == LIR_subxovi)
                    SUBCC(rr, rb, rr);
                else if (op == LIR_muli)
                    SMULCC(rr, rb, rr);
                else if (op == LIR_mulxovi || op == LIR_muljovi) {
                    SUBCC(L4, L6, L4);
                    SRAI(rr, 31, L6);
                    RDY(L4);
                    SMULCC(rr, rb, rr);
                }
                else if (op == LIR_andi)
                    AND(rr, rb, rr);
                else if (op == LIR_ori)
                    OR(rr, rb, rr);
                else if (op == LIR_xori)
                    XOR(rr, rb, rr);
                else if (op == LIR_lshi)
                    SLL(rr, rb, rr);
                else if (op == LIR_rshi)
                    SRA(rr, rb, rr);
                else if (op == LIR_rshui)
                    SRL(rr, rb, rr);
                else
                    NanoAssertMsg(0, "Unsupported");
            }
        else
            {
                int c = rhs->immI();
                if (op == LIR_addi || op == LIR_addxovi)
                    ADDCC(rr, L2, rr);
                else if (op == LIR_subi || op == LIR_subxovi)
                    SUBCC(rr, L2, rr);
                else if (op == LIR_andi)
                    AND(rr, L2, rr);
                else if (op == LIR_ori)
                    OR(rr, L2, rr);
                else if (op == LIR_xori)
                    XOR(rr, L2, rr);
                else if (op == LIR_lshi)
                    SLL(rr, L2, rr);
                else if (op == LIR_rshi)
                    SRA(rr, L2, rr);
                else if (op == LIR_rshui)
                    SRL(rr, L2, rr);
                else
                    NanoAssertMsg(0, "Unsupported");
                SET32(c, L2);
            }

        if ( rr != ra )
            ORI(ra, 0, rr);
    }

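    // For LIR_mulxovi / LIR_muljovi above, the run-time sequence is:
    //   smulcc rr, rb, rr      ! 32x32 signed multiply, low 32 bits in rr
    //   rd %y, %l4             ! high 32 bits of the product
    //   sra rr, 31, %l6        ! sign-extension of the low word
    //   subcc %l4, %l6, %l4    ! nonzero exactly when the multiply overflowed
    // asm_branch_ov then branches with BNE when that result is nonzero.
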
    void Assembler::asm_neg_not(LIns* ins)
    {
        underrunProtect(8);
        LOpcode op = ins->opcode();
        Register rr = deprecated_prepResultReg(ins, GpRegs);

        LIns* lhs = ins->oprnd1();
        // if this is last use of lhs in reg, we can re-use result reg
        // else, lhs already has a register assigned.
        Register ra = ( !lhs->isInReg()
                      ? findSpecificRegFor(lhs, rr)
                      : lhs->deprecated_getReg() );

        if (op == LIR_noti)
            ORN(G0, rr, rr);
        else
            SUB(G0, rr, rr);

        if ( rr != ra )
            ORI(ra, 0, rr);
    }

    void Assembler::asm_load32(LIns* ins)
    {
        underrunProtect(12);
        LOpcode op = ins->opcode();
        LIns* base = ins->oprnd1();
        int d = ins->disp();
        Register rr = deprecated_prepResultReg(ins, GpRegs);
        Register ra = getBaseReg(base, d, GpRegs);
        switch(op) {
            case LIR_lduc2ui:
                LDUB32(ra, d, rr);
                break;
            case LIR_ldus2ui:
                LDUH32(ra, d, rr);
                break;
            case LIR_ldi:
                LDSW32(ra, d, rr);
                break;
            case LIR_ldc2i:
            case LIR_lds2i:
                NanoAssertMsg(0, "NJ_EXPANDED_LOADSTORE_SUPPORTED not yet supported for this architecture");
                return;
            default:
                NanoAssertMsg(0, "asm_load32 should never receive this LIR opcode");
                return;
        }
    }

    void Assembler::asm_cmov(LIns* ins)
    {
        underrunProtect(4);
        LOpcode op = ins->opcode();
        LIns* condval = ins->oprnd1();
        LIns* iftrue  = ins->oprnd2();
        LIns* iffalse = ins->oprnd3();

        NanoAssert(condval->isCmp());
        NanoAssert(op == LIR_cmovi && iftrue->isI() && iffalse->isI());

        const Register rr = deprecated_prepResultReg(ins, GpRegs);

        // this code assumes that neither LD nor MR nor MRcc set any of the condition flags.
        // (This is true on Intel, is it true on all architectures?)
        const Register iffalsereg = findRegFor(iffalse, GpRegs & ~rmask(rr));
        if (op == LIR_cmovi) {
            switch (condval->opcode()) {
                // note that these are all opposites...
                case LIR_eqi:  MOVNE (iffalsereg, 1, 0, 0, rr); break;
                case LIR_lti:  MOVGE (iffalsereg, 1, 0, 0, rr); break;
                case LIR_lei:  MOVG  (iffalsereg, 1, 0, 0, rr); break;
                case LIR_gti:  MOVLE (iffalsereg, 1, 0, 0, rr); break;
                case LIR_gei:  MOVL  (iffalsereg, 1, 0, 0, rr); break;
                case LIR_ltui: MOVCC (iffalsereg, 1, 0, 0, rr); break;
                case LIR_leui: MOVGU (iffalsereg, 1, 0, 0, rr); break;
                case LIR_gtui: MOVLEU(iffalsereg, 1, 0, 0, rr); break;
                case LIR_geui: MOVCS (iffalsereg, 1, 0, 0, rr); break;
                debug_only( default: NanoAssert(0); break; )
            }
        }
        /*const Register iftruereg =*/ findSpecificRegFor(iftrue, rr);
        asm_cmp(condval);
    }

    void Assembler::asm_param(LIns* ins)
    {
        uint32_t a = ins->paramArg();
        uint32_t kind = ins->paramKind();
        deprecated_prepResultReg(ins, rmask(argRegs[a]));
    }

    void Assembler::asm_immi(LIns* ins)
    {
        underrunProtect(8);
        Register rr = deprecated_prepResultReg(ins, GpRegs);
        int32_t val = ins->immI();
        if (val == 0)
            XOR(rr, rr, rr);
        else
            SET32(val, rr);
    }

    void Assembler::asm_immd(LIns* ins)
    {
        underrunProtect(64);
        Register rr = ins->deprecated_getReg();
        if (rr != deprecated_UnknownReg)
            {
                // @todo -- add special-cases for 0 and 1
                _allocator.retire(rr);
                ins->clearReg();
                NanoAssert((rmask(rr) & FpRegs) != 0);
                findMemFor(ins);
                int d = deprecated_disp(ins);
                LDDF32(FP, d, rr);
            }

        // @todo, if we used xor, ldsd, fldz, etc above, we don't need mem here
        int d = deprecated_disp(ins);
        deprecated_freeRsrcOf(ins);
        if (d)
            {
                STW32(L2, d+4, FP);
                SET32(ins->immDlo(), L2);
                STW32(L2, d, FP);
                SET32(ins->immDhi(), L2);
            }
    }

    void Assembler::asm_fneg(LIns* ins)
    {
        underrunProtect(4);
        Register rr = deprecated_prepResultReg(ins, FpRegs);
        LIns* lhs = ins->oprnd1();

        // lhs into reg, prefer same reg as result
        // if this is last use of lhs in reg, we can re-use result reg
        // else, lhs already has a different reg assigned
        Register ra = ( !lhs->isInReg()
                      ? findSpecificRegFor(lhs, rr)
                      : findRegFor(lhs, FpRegs) );

        FNEGD(ra, rr);
    }

    void Assembler::asm_fop(LIns* ins)
    {
        underrunProtect(4);
        LOpcode op = ins->opcode();
        LIns *lhs = ins->oprnd1();
        LIns *rhs = ins->oprnd2();

        RegisterMask allow = FpRegs;
        Register ra, rb;
        findRegFor2(allow, lhs, ra, allow, rhs, rb);
        Register rr = deprecated_prepResultReg(ins, allow);

        if (op == LIR_addd)
            FADDD(ra, rb, rr);
        else if (op == LIR_subd)
            FSUBD(ra, rb, rr);
        else if (op == LIR_muld)
            FMULD(ra, rb, rr);
        else //if (op == LIR_divd)
            FDIVD(ra, rb, rr);
    }

    void Assembler::asm_i2d(LIns* ins)
    {
        underrunProtect(32);
        // where our result goes
        Register rr = deprecated_prepResultReg(ins, FpRegs);
        int d = findMemFor(ins->oprnd1());
        FITOD(rr, rr);
        LDDF32(FP, d, rr);
    }

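    // asm_ui2d below uses the classic bit-pattern trick: the 64-bit pattern with
    // high word 0x43300000 and low word x is the double 2^52 + x, so building
    // that value on the stack, loading it, and subtracting 2^52 recovers x
    // exactly for any unsigned 32-bit x.
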
    void Assembler::asm_ui2d(LIns* ins)
    {
        underrunProtect(72);
        // where our result goes
        Register rr = deprecated_prepResultReg(ins, FpRegs);
        Register rt = registerAllocTmp(FpRegs & ~(rmask(rr)));
        Register gr = findRegFor(ins->oprnd1(), GpRegs);
        int disp = -8;

        FABSS(rr, rr);
        FSUBD(rt, rr, rr);
        LDDF32(SP, disp, rr);
        STWI(G0, disp+4, SP);
        LDDF32(SP, disp, rt);
        STWI(gr, disp+4, SP);
        STWI(G1, disp, SP);
        SETHI(0x43300000, G1);
    }

    void Assembler::asm_d2i(LIns* ins) {
        LIns *lhs = ins->oprnd1();
        Register rr = prepareResultReg(ins, GpRegs);
        Register ra = findRegFor(lhs, FpRegs);
        int d = findMemFor(ins);
        LDSW32(FP, d, rr);
        STF32(ra, d, FP);
        FDTOI(ra, ra);
    }

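    // Note: SPARC V8 has no direct move between floating-point and integer
    // registers, so the fdtoi result above is stored to the instruction's stack
    // slot with stf and then reloaded into the integer result register with ld.
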
    void Assembler::asm_nongp_copy(Register r, Register s)
    {
        underrunProtect(4);
        NanoAssert((rmask(r) & FpRegs) && (rmask(s) & FpRegs));
        FMOVD(s, r);
    }

    NIns * Assembler::asm_branchd(bool branchOnFalse, LIns *cond, NIns *targ)
    {
        NIns *at = 0;
        LOpcode condop = cond->opcode();
        NanoAssert(isCmpDOpcode(condop));
        underrunProtect(32);
        intptr_t tt = ((intptr_t)targ - (intptr_t)_nIns + 8) >> 2;
        // A null targ means the target is unknown and the branch will be patched later.
        if( !(isIMM22((int32_t)tt)) || !targ ) {
            JMP_long_nocheck((intptr_t)targ);
            at = _nIns;
            NOP();
            BA(0, 5);
            tt = 4;
        }
        NOP();

        // produce the branch
        if (branchOnFalse)
            {
                if (condop == LIR_eqd)
                    FBNE(0, tt);
                else if (condop == LIR_led)
                    FBUG(0, tt);
                else if (condop == LIR_ltd)
                    FBUGE(0, tt);
                else if (condop == LIR_ged)
                    FBUL(0, tt);
                else //if (condop == LIR_gtd)
                    FBULE(0, tt);
            }
        else // branch on true
            {
                if (condop == LIR_eqd)
                    FBE(0, tt);
                else if (condop == LIR_led)
                    FBLE(0, tt);
                else if (condop == LIR_ltd)
                    FBL(0, tt);
                else if (condop == LIR_ged)
                    FBGE(0, tt);
                else //if (condop == LIR_gtd)
                    FBG(0, tt);
            }
        asm_cmpd(cond);
        return at;
    }

    void Assembler::asm_cmpd(LIns *cond)
    {
        underrunProtect(4);
        LIns* lhs = cond->oprnd1();
        LIns* rhs = cond->oprnd2();

        Register rLhs = findRegFor(lhs, FpRegs);
        Register rRhs = findRegFor(rhs, FpRegs);

        FCMPD(rLhs, rRhs);
    }

    void Assembler::nativePageReset()
    {
    }

    Register Assembler::asm_binop_rhs_reg(LIns* ins)
    {
        return deprecated_UnknownReg;
    }

    void Assembler::nativePageSetup()
    {
        NanoAssert(!_inExit);
        if (!_nIns)
            codeAlloc(codeStart, codeEnd, _nIns verbose_only(, codeBytes));
    }

    // Increment the 32-bit profiling counter at pCtr, without
    // changing any registers.
    verbose_only(
    void Assembler::asm_inc_m32(uint32_t*)
    {
        // todo: implement this
    }
    )

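    // underrunProtect(n) below guarantees there is room for at least n more
    // bytes of code below _nIns in the current chunk. Because code grows
    // downwards, when the chunk is exhausted a fresh chunk is allocated and a
    // jump to the code already emitted (eip) is written there, so execution
    // continues seamlessly into the old chunk.
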
    void
    Assembler::underrunProtect(int n)
    {
        NIns *eip = _nIns;
        // This may be in a normal code chunk or an exit code chunk.
        if (eip - n < codeStart) {
            codeAlloc(codeStart, codeEnd, _nIns verbose_only(, codeBytes));
            JMP_long_nocheck((intptr_t)eip);
        }
    }

    void Assembler::asm_ret(LIns* ins)
    {
        genEpilogue();
        releaseRegisters();
        assignSavedRegs();
        LIns *val = ins->oprnd1();
        if (ins->isop(LIR_reti)) {
            findSpecificRegFor(val, retRegs[0]);
        } else {
            NanoAssert(ins->isop(LIR_retd));
            findSpecificRegFor(val, F0);
        }
    }

    void Assembler::swapCodeChunks() {
        if (!_nExitIns)
            codeAlloc(exitStart, exitEnd, _nExitIns verbose_only(, exitBytes));
        SWAP(NIns*, _nIns, _nExitIns);
        SWAP(NIns*, codeStart, exitStart);
        SWAP(NIns*, codeEnd, exitEnd);
        verbose_only( SWAP(size_t, codeBytes, exitBytes); )
    }

#endif /* FEATURE_NANOJIT */
}