/* -*- Mode: C++; c-basic-offset: 4; indent-tabs-mode: t; tab-width: 4 -*- */
/* ***** BEGIN LICENSE BLOCK *****
 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
 *
 * The contents of this file are subject to the Mozilla Public License Version
 * 1.1 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * http://www.mozilla.org/MPL/
 *
 * Software distributed under the License is distributed on an "AS IS" basis,
 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
 * for the specific language governing rights and limitations under the
 * License.
 *
 * The Original Code is [Open Source Virtual Machine].
 *
 * The Initial Developer of the Original Code is
 * Adobe System Incorporated.
 * Portions created by the Initial Developer are Copyright (C) 2004-2007
 * the Initial Developer. All Rights Reserved.
 *
 * Alternatively, the contents of this file may be used under the terms of
 * either the GNU General Public License Version 2 or later (the "GPL"), or
 * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
 * in which case the provisions of the GPL or the LGPL are applicable instead
 * of those above. If you wish to allow use of your version of this file only
 * under the terms of either the GPL or the LGPL, and not to allow others to
 * use your version of this file under the terms of the MPL, indicate your
 * decision by deleting the provisions above and replace them with the notice
 * and other provisions required by the GPL or the LGPL. If you do not delete
 * the provisions above, a recipient may use your version of this file under
 * the terms of any one of the MPL, the GPL or the LGPL.
 *
 * ***** END LICENSE BLOCK ***** */
#include "nanojit.h"
#include "../vprof/vprof.h"

using namespace avmplus;

#ifdef FEATURE_NANOJIT
const uint8_t operandCount[] = {
#define OPDEF(op, number, operands) \
    operands,
#define OPDEF64(op, number, operands) \
    operands,
#include "LIRopcode.tbl"
#undef OPDEF
#undef OPDEF64
};

// LIR verbose specific
#ifdef NJ_VERBOSE
const char* lirNames[] = {
#define OPDEF(op, number, operands) \
    #op,
#define OPDEF64(op, number, operands) \
    #op,
#include "LIRopcode.tbl"
#undef OPDEF
#undef OPDEF64
};
#endif /* NJ_VERBOSE */
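
// Illustrative note (mine, not from the original source): LIRopcode.tbl is an
// X-macro table, so a hypothetical entry such as
//     OPDEF(add, 32, 2)
// would expand to "2," in operandCount[] and to "\"add\"," in lirNames[],
// keeping both arrays index-aligned with the LOpcode enumeration.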
// @todo fixup move to nanojit.h
#ifdef NJ_PROFILE
#define counter_value(x) x
#endif /* NJ_PROFILE */

//static int32_t buffer_count = 0;
LirBuffer::LirBuffer(Fragmento* frago, const CallInfo* functions)
    : _frago(frago),
      _functions(functions), abi(ABI_FASTCALL),
      state(NULL), param1(NULL), sp(NULL), rp(NULL),
      _pages(frago->core()->GetGC())
{
    rewind();
}

LirBuffer::~LirBuffer()
{
    clear();
    verbose_only(if (names) NJ_DELETE(names);)
}

void LirBuffer::clear()
{
    // free all the memory and clear the stats
    _frago->pagesRelease(_pages);
    NanoAssert(!_pages.size());
    for (int i = 0; i < NumSavedRegs; ++i)
        savedRegs[i] = NULL;
    explicitSavedRegs = false;
}

void LirBuffer::rewind()
{
    clear();
    // pre-allocate the current and the next page we will be using
    Page* start = pageAlloc();
    _unused = start ? &start->lir[0] : NULL;
    _nextPage = pageAlloc();
    NanoAssert((_unused && _nextPage) || _noMem);
}

int32_t LirBuffer::insCount()
{
    // Doesn't include LIR_skip payload or LIR_call arg slots.
    return _stats.lir;
}

int32_t LirBuffer::byteCount()
{
    return ((_pages.size() ? _pages.size()-1 : 0) * sizeof(Page)) +
        ((int32_t)_unused - (int32_t)pageTop(_unused));
}

Page* LirBuffer::pageAlloc()
{
    Page* page = _frago->pageAlloc();
    if (page)
        _pages.add(page);
    else
        _noMem = 1;
    return page;
}

LInsp LirBuffer::next()
{
    return _unused;
}
void LirBufWriter::ensureRoom(uint32_t count)
{
    NanoAssert(count <= NJ_PAGE_SIZE - sizeof(LIns));
    LInsp before = _buf->next();
    LInsp after = before+count+1;
    // transition to the next page?
    if (!samepage(before,after))
    {
        // we don't want this to fail, so we always have a page in reserve
        NanoAssert(_buf->_nextPage);
        _buf->_unused = &_buf->_nextPage->lir[0];
        // link LIR stream back to prior instruction (careful,
        // insSkipWithoutBuffer relies on _unused...)
        insSkipWithoutBuffer(before-1);
        _buf->_nextPage = _buf->pageAlloc();
        NanoAssert(_buf->_nextPage || _buf->_noMem);
    }
}
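
// Sketch of the paging invariant (illustrative, mine): _nextPage is held in
// reserve so the page transition itself can always be recorded. When a write
// would cross a page boundary, _unused jumps to the start of the reserve
// page, and the LIR_skip emitted there points back at the last real
// instruction on the old page, letting readers walk the stream backwards
// across pages; a fresh reserve page is then allocated immediately.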
LInsp LirBufWriter::insSkipWithoutBuffer(LInsp to)
{
    LInsp l = _buf->next();
    NanoAssert(samepage(l,l+1)); // make sure we have room
    l->initOpcodeAndClearResv(LIR_skip);
    l->setOprnd1(to);
    _buf->commit(1);
    _buf->_stats.lir++;
    return l;
}

LInsp LirBuffer::commit(uint32_t count)
{
    NanoAssertMsg( samepage(_unused, _unused+count), "You need to call ensureRoom first!" );
    return _unused += count;
}
LInsp LirBufWriter::insStorei(LInsp val, LInsp base, int32_t d)
{
    LOpcode op = val->isQuad() ? LIR_stqi : LIR_sti;
    ensureRoom(1);
    LInsp l = _buf->next();
    l->initOpcodeAndClearResv(op);
    l->setOprnd1(val);
    l->setOprnd2(base);
    l->setDisp(int8_t(d));
    _buf->commit(1);
    _buf->_stats.lir++;
    return l;
}

LInsp LirBufWriter::ins0(LOpcode op)
{
    ensureRoom(1);
    LirBuffer *b = this->_buf;
    LInsp l = b->next();
    l->initOpcodeAndClearResv(op);
    b->commit(1);
    b->_stats.lir++;
    return l;
}

LInsp LirBufWriter::ins1(LOpcode op, LInsp o1)
{
    ensureRoom(1);
    LInsp l = _buf->next();
    l->initOpcodeAndClearResv(op);
    l->setOprnd1(o1);
    _buf->commit(1);
    _buf->_stats.lir++;
    return l;
}

LInsp LirBufWriter::ins2(LOpcode op, LInsp o1, LInsp o2)
{
    ensureRoom(1);
    LInsp l = _buf->next();
    l->initOpcodeAndClearResv(op);
    l->setOprnd1(o1);
    l->setOprnd2(o2);
    _buf->commit(1);
    _buf->_stats.lir++;
    return l;
}

LInsp LirBufWriter::insLoad(LOpcode op, LInsp base, LInsp d)
{
    return ins2(op,base,d);
}

LInsp LirBufWriter::insGuard(LOpcode op, LInsp c, LInsp data)
{
    return ins2(op, c, data);
}

LInsp LirBufWriter::insBranch(LOpcode op, LInsp condition, LInsp toLabel)
{
    NanoAssert(condition);
    return ins2(op, condition, toLabel);
}

LInsp LirBufWriter::insAlloc(int32_t size)
{
    size = (size+3)>>2; // # of required 32bit words
    NanoAssert(isU16(size));
    ensureRoom(1);
    LInsp l = _buf->next();
    l->initOpcodeAndClearResv(LIR_alloc);
    l->i.imm16 = uint16_t(size);
    _buf->commit(1);
    _buf->_stats.lir++;
    return l;
}

LInsp LirBufWriter::insParam(int32_t arg, int32_t kind)
{
    ensureRoom(1);
    LirBuffer *b = this->_buf;
    LInsp l = b->next();
    l->initOpcodeAndClearResv(LIR_param);
    NanoAssert(isU8(arg) && isU8(kind));
    l->c.imm8a = arg;
    l->c.imm8b = kind;
    if (kind)
    {
        NanoAssert(arg < NumSavedRegs);
        b->savedRegs[arg] = l;
        b->explicitSavedRegs = true;
    }
    b->commit(1);
    b->_stats.lir++;
    return l;
}

LInsp LirBufWriter::insImm(int32_t imm)
{
    ensureRoom(1);
    LInsp l = _buf->next();
    l->initOpcodeAndClearResv(LIR_int);
    l->setimm32(imm);
    _buf->commit(1);
    _buf->_stats.lir++;
    return l;
}
LInsp LirBufWriter::insImmq(uint64_t imm)
{
    ensureRoom(1);
    LInsp l = _buf->next();
    l->initOpcodeAndClearResv(LIR_quad);
    l->i64.imm64_0 = int32_t(imm);
    l->i64.imm64_1 = int32_t(imm>>32);
    _buf->commit(1);
    _buf->_stats.lir++;
    return l;
}
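
// Worked example (illustrative): for imm = 0x1122334455667788ULL the two
// stores above split the constant as
//   i64.imm64_0 = int32_t(imm)       = 0x55667788   (low  32 bits)
//   i64.imm64_1 = int32_t(imm >> 32) = 0x11223344   (high 32 bits)
// and LIns::imm64()/imm64f() below reassemble them in the same order.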
LInsp LirBufWriter::insSkip(size_t size)
{
    const uint32_t nSlots = (size+sizeof(LIns)-1)/sizeof(LIns);
    ensureRoom(nSlots); // make room for it
    LInsp last = _buf->next()-1; // safe, next()-1+nSlots guaranteed to be on same page
    _buf->commit(nSlots);
    NanoAssert(samepage(last,_buf->next()));
    ensureRoom(1);
    return insSkipWithoutBuffer(last);
}

LInsp LirReader::read()
{
    LInsp cur = _i;
    if (!cur)
        return 0;
    LIns* i = cur;
    LOpcode iop = i->opcode();
    do
    {
        switch (iop)
        {
            default:
                i--;
                break;

#if defined NANOJIT_64BIT
            case LIR_callh:
#endif
            case LIR_call:
            case LIR_fcall:
            case LIR_calli:
            case LIR_fcalli:
                NanoAssert( samepage(i, i + 1 - i->callInsSlots()) );
                i -= i->callInsSlots();
                break;

            case LIR_skip:
                NanoAssert(i->oprnd1() != i);
                i = i->oprnd1();
                break;

            case LIR_start:
                _i = 0; // start of trace
                return cur;
        }
        iop = i->opcode();
    }
    while (iop==LIR_skip || iop==LIR_2);
    _i = i;
    return cur;
}
bool FASTCALL isCmp(LOpcode c) {
    return (c >= LIR_eq && c <= LIR_uge) || (c >= LIR_feq && c <= LIR_fge);
}

bool FASTCALL isCond(LOpcode c) {
    return (c == LIR_ov) || (c == LIR_cs) || isCmp(c);
}

bool FASTCALL isFloat(LOpcode c) {
    switch (c) {
        default:
            return false;
        case LIR_fadd:
        case LIR_fsub:
        case LIR_fmul:
        case LIR_fdiv:
        case LIR_i2f:
        case LIR_u2f:
            return true;
    }
}

bool LIns::isCmp() const {
    return nanojit::isCmp(u.code);
}

bool LIns::isCond() const {
    return nanojit::isCond(u.code);
}

bool LIns::isQuad() const {
#ifdef AVMPLUS_64BIT
    // callh in 64bit cpu's means a call that returns an int64 in a single register
    return (u.code & LIR64) != 0 || u.code == LIR_callh;
#else
    // callh in 32bit cpu's means the 32bit MSW of an int64 result in 2 registers
    return (u.code & LIR64) != 0;
#endif
}

bool LIns::isconstval(int32_t val) const
{
    return isconst() && imm32()==val;
}

bool LIns::isconstq() const
{
    return isop(LIR_quad);
}

bool LIns::isconstp() const
{
#ifdef AVMPLUS_64BIT
    return isconstq();
#else
    return isconst();
#endif
}

bool FASTCALL isCse(LOpcode op) {
    op = LOpcode(op & ~LIR64);
    return op >= LIR_ldcs && op <= LIR_uge;
}

bool LIns::isCse(const CallInfo *functions) const
{
    return nanojit::isCse(u.code) || (isCall() && callInfo()->_cse);
}

void LIns::initOpcodeAndClearResv(LOpcode op)
{
    NanoAssert(4*sizeof(void*) == sizeof(LIns));
    u.code = op;
    u.resv = 0; // have to zero this; the Assembler relies on it
}
void LIns::setTarget(LInsp label)
{
    NanoAssert(label && label->isop(LIR_label));
    NanoAssert(isBranch());
    t.target = label;
}

LInsp LIns::getTarget()
{
    NanoAssert(isBranch());
    return t.target;
}

void *LIns::payload() const
{
    NanoAssert(isop(LIR_skip));
    return (void*) (oprnd1()+1);
}

uint64_t LIns::imm64() const
{
    NanoAssert(isconstq());
#ifdef AVMPLUS_UNALIGNED_ACCESS
    return *(const uint64_t*)i64.imm32;
#else
    union { uint64_t tmp; int32_t dst[2]; } u;
#ifdef AVMPLUS_BIG_ENDIAN
    u.dst[0] = i64.imm64_1;
    u.dst[1] = i64.imm64_0;
#else
    u.dst[0] = i64.imm64_0;
    u.dst[1] = i64.imm64_1;
#endif
    return u.tmp;
#endif
}

double LIns::imm64f() const
{
    NanoAssert(isconstq());
#ifdef AVMPLUS_UNALIGNED_ACCESS
    return *(const double*)i64.imm32;
#else
    union { uint32_t dst[2]; double tmpf; } u;
#ifdef AVMPLUS_BIG_ENDIAN
    u.dst[0] = i64.imm64_1;
    u.dst[1] = i64.imm64_0;
#else
    u.dst[0] = i64.imm64_0;
    u.dst[1] = i64.imm64_1;
#endif
    return u.tmpf;
#endif
}
inline uint32_t argSlots(uint32_t argc) {
    NanoAssert(4*sizeof(void*) == sizeof(LIns));
    return (argc + 3) / 4; // we can fit four args per slot
}

size_t LIns::callInsSlots() const
{
    return argSlots(argc()) + 1;
}
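
// Worked example (illustrative): on a 32-bit build an LIns is four words, so
// four argument pointers fit per slot. For argc = 5:
//   argSlots(5)    = (5 + 3) / 4 = 2 slots of packed argument pointers
//   callInsSlots() = 2 + 1      = 3 slots, including the call LIns itself.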
const CallInfo* LIns::callInfo() const
{
    return c.ci;
}

// Index args in r-l order. arg(0) is rightmost arg.
// Nb: this must be kept in sync with insCall().
LInsp LIns::arg(uint32_t i)
{
    NanoAssert(i < argc());
    LInsp* offs = (LInsp*)this - (i+1);
    return *offs;
}

LIns* LirWriter::ins2i(LOpcode v, LIns* oprnd1, int32_t imm)
{
    return ins2(v, oprnd1, insImm(imm));
}

bool insIsS16(LInsp i)
{
    if (i->isconst()) {
        int c = i->imm32();
        return isS16(c);
    }
    if (i->isop(LIR_cmov) || i->isop(LIR_qcmov)) {
        LInsp vals = i->oprnd2();
        return insIsS16(vals->oprnd1()) && insIsS16(vals->oprnd2());
    }
    if (i->isCmp())
        return true;
    // many other possibilities too.
    return false;
}
LIns* ExprFilter::ins1(LOpcode v, LIns* i)
{
    if (v == LIR_qlo) {
        if (i->isconstq())
            return insImm(int32_t(i->imm64()));
        if (i->isop(LIR_qjoin))
            return i->oprnd1();
    }
    else if (v == LIR_qhi) {
        if (i->isconstq())
            return insImm(int32_t(i->imm64()>>32));
        if (i->isop(LIR_qjoin))
            return i->oprnd2();
    }
    else if (i->isconst()) {
        int32_t c = i->imm32();
        if (v == LIR_neg)
            return insImm(-c);
        if (v == LIR_not)
            return insImm(~c);
    }
    else if (v == i->opcode() && (v == LIR_not || v == LIR_neg || v == LIR_fneg)) {
        // not(not(x)) = x; neg(neg(x)) = x; fneg(fneg(x)) = x;
        return i->oprnd1();
    }
    /* [ed 8.27.08] this causes a big slowdown in gameoflife.as. why?
    else if (i->isconst()) {
        if (v == LIR_i2f) {
            return insImmf(i->imm32());
        }
        else if (v == LIR_u2f) {
            return insImmf((uint32_t)i->imm32());
        }
    }
    */
    return out->ins1(v, i);
}
LIns* ExprFilter::ins2(LOpcode v, LIns* oprnd1, LIns* oprnd2)
{
    NanoAssert(oprnd1 && oprnd2);
    if (v == LIR_cmov || v == LIR_qcmov) {
        if (oprnd2->oprnd1() == oprnd2->oprnd2()) {
            // c ? a : a => a
            return oprnd2->oprnd1();
        }
        if (oprnd1->isconst()) {
            // const ? x : y => return x or y depending on const
            return oprnd1->imm32() ? oprnd2->oprnd1() : oprnd2->oprnd2();
        }
    }
    if (oprnd1 == oprnd2)
    {
        if (v == LIR_xor || v == LIR_sub ||
            v == LIR_ult || v == LIR_ugt || v == LIR_gt || v == LIR_lt)
            return insImm(0);
        if (v == LIR_or || v == LIR_and)
            return oprnd1;
        if (v == LIR_le || v == LIR_ule || v == LIR_ge || v == LIR_uge) {
            // x <= x == 1; x >= x == 1
            return insImm(1);
        }
    }
    if (oprnd1->isconst() && oprnd2->isconst())
    {
        int32_t c1 = oprnd1->imm32();
        int32_t c2 = oprnd2->imm32();
        double d;
        if (v == LIR_qjoin) {
            uint64_t q = c1 | uint64_t(c2)<<32;
            return insImmq(q);
        }
        if (v == LIR_eq)
            return insImm(c1 == c2);
        if (v == LIR_ov)
            return insImm((c2 != 0) && ((c1 + c2) <= c1));
        if (v == LIR_cs)
            return insImm((c2 != 0) && ((uint32_t(c1) + uint32_t(c2)) <= uint32_t(c1)));
        if (v == LIR_lt)
            return insImm(c1 < c2);
        if (v == LIR_gt)
            return insImm(c1 > c2);
        if (v == LIR_le)
            return insImm(c1 <= c2);
        if (v == LIR_ge)
            return insImm(c1 >= c2);
        if (v == LIR_ult)
            return insImm(uint32_t(c1) < uint32_t(c2));
        if (v == LIR_ugt)
            return insImm(uint32_t(c1) > uint32_t(c2));
        if (v == LIR_ule)
            return insImm(uint32_t(c1) <= uint32_t(c2));
        if (v == LIR_uge)
            return insImm(uint32_t(c1) >= uint32_t(c2));
        if (v == LIR_rsh)
            return insImm(int32_t(c1) >> int32_t(c2));
        if (v == LIR_lsh)
            return insImm(int32_t(c1) << int32_t(c2));
        if (v == LIR_ush)
            return insImm(uint32_t(c1) >> int32_t(c2));
        if (v == LIR_or)
            return insImm(uint32_t(c1) | int32_t(c2));
        if (v == LIR_and)
            return insImm(uint32_t(c1) & int32_t(c2));
        if (v == LIR_xor)
            return insImm(uint32_t(c1) ^ int32_t(c2));
        if (v == LIR_add) {
            d = double(c1) + double(c2);
            if (d == double(int32_t(d)))
                return insImm(int32_t(d));
        }
        if (v == LIR_sub) {
            d = double(c1) - double(c2);
            if (d == double(int32_t(d)))
                return insImm(int32_t(d));
        }
        if (v == LIR_mul) {
            d = double(c1) * double(c2);
            if (d == double(int32_t(d)))
                return insImm(int32_t(d));
        }
    }
    else if (oprnd1->isconstq() && oprnd2->isconstq())
    {
        double c1 = oprnd1->imm64f();
        double c2 = oprnd2->imm64f();
        if (v == LIR_feq)
            return insImm(c1 == c2);
        if (v == LIR_flt)
            return insImm(c1 < c2);
        if (v == LIR_fgt)
            return insImm(c1 > c2);
        if (v == LIR_fle)
            return insImm(c1 <= c2);
        if (v == LIR_fge)
            return insImm(c1 >= c2);
        if (v == LIR_fadd)
            return insImmf(c1 + c2);
        if (v == LIR_fsub)
            return insImmf(c1 - c2);
        if (v == LIR_fmul)
            return insImmf(c1 * c2);
        if (v == LIR_fdiv)
            return insImmf(c1 / c2);
    }
    else if (oprnd1->isconst() && !oprnd2->isconst())
    {
        if (v == LIR_add || v == LIR_addp || v == LIR_mul ||
            v == LIR_fadd || v == LIR_fmul ||
            v == LIR_xor || v == LIR_or || v == LIR_and ||
            v == LIR_eq) {
            // move const to rhs
            LIns* t = oprnd2;
            oprnd2 = oprnd1;
            oprnd1 = t;
        }
        else if (v >= LIR_lt && v <= LIR_uge) {
            NanoStaticAssert((LIR_lt ^ 1) == LIR_gt);
            NanoStaticAssert((LIR_le ^ 1) == LIR_ge);
            NanoStaticAssert((LIR_ult ^ 1) == LIR_ugt);
            NanoStaticAssert((LIR_ule ^ 1) == LIR_uge);

            // move const to rhs, swap the operator
            LIns* t = oprnd2;
            oprnd2 = oprnd1;
            oprnd1 = t;
            v = LOpcode(v ^ 1);
        }
    }

    if (oprnd2->isconst())
    {
        int c = oprnd2->imm32();
        if (v == LIR_add && oprnd1->isop(LIR_add) && oprnd1->oprnd2()->isconst()) {
            // add(add(x,c1),c2) => add(x,c1+c2)
            c += oprnd1->oprnd2()->imm32();
            oprnd2 = insImm(c);
            oprnd1 = oprnd1->oprnd1();
        }
        else if (v == LIR_sub && oprnd1->isop(LIR_add) && oprnd1->oprnd2()->isconst()) {
            // sub(add(x,c1),c2) => add(x,c1-c2)
            c = oprnd1->oprnd2()->imm32() - c;
            oprnd2 = insImm(c);
            oprnd1 = oprnd1->oprnd1();
            v = LIR_add;
        }
        else if (v == LIR_rsh && c == 16 && oprnd1->isop(LIR_lsh) &&
            oprnd1->oprnd2()->isconstval(16)) {
            if (insIsS16(oprnd1->oprnd1())) {
                // rsh(lhs(x,16),16) == x, if x is S16
                return oprnd1->oprnd1();
            }
        }
        else if (v == LIR_ult) {
            if (oprnd1->isop(LIR_cmov) || oprnd1->isop(LIR_qcmov)) {
                LInsp a = oprnd1->oprnd2()->oprnd1();
                LInsp b = oprnd1->oprnd2()->oprnd2();
                if (a->isconst() && b->isconst()) {
                    bool a_lt = uint32_t(a->imm32()) < uint32_t(oprnd2->imm32());
                    bool b_lt = uint32_t(b->imm32()) < uint32_t(oprnd2->imm32());
                    if (a_lt == b_lt)
                        return insImm(a_lt);
                }
            }
        }

        if (c == 0)
        {
            if (v == LIR_add || v == LIR_addp || v == LIR_or || v == LIR_xor ||
                v == LIR_sub || v == LIR_lsh || v == LIR_rsh || v == LIR_ush)
                return oprnd1;
            else if (v == LIR_and || v == LIR_mul)
                return oprnd2;
            else if (v == LIR_eq && oprnd1->isop(LIR_or) &&
                oprnd1->oprnd2()->isconst() &&
                oprnd1->oprnd2()->imm32() != 0) {
                // (x or c) != 0 if c != 0
                return insImm(0);
            }
        }
        else if (c == -1 || (c == 1 && oprnd1->isCmp())) {
            if (v == LIR_or) {
                // x | -1 = -1, cmp | 1 = 1
                return oprnd2;
            }
            else if (v == LIR_and) {
                // x & -1 = x, cmp & 1 = cmp
                return oprnd1;
            }
        }
    }

    LInsp i;
    if (v == LIR_qjoin && oprnd1->isop(LIR_qlo) && oprnd2->isop(LIR_qhi)
        && (i = oprnd1->oprnd1()) == oprnd2->oprnd1()) {
        // qjoin(qlo(x),qhi(x)) == x
        return i;
    }

    return out->ins2(v, oprnd1, oprnd2);
}
LIns* ExprFilter::insGuard(LOpcode v, LInsp c, LInsp x)
{
    if (v == LIR_xt || v == LIR_xf) {
        if (c->isconst()) {
            if ((v == LIR_xt && !c->imm32()) || (v == LIR_xf && c->imm32())) {
                return 0; // no guard needed
            }
            else {
                // We're emitting a guard that will always fail. Any code
                // emitted after this guard is dead code. We could
                // silently optimize out the rest of the emitted code, but
                // this could indicate a performance problem or other bug,
                // so assert in debug builds.
                NanoAssertMsg(0, "Constantly false guard detected");
                return out->insGuard(LIR_x, out->insImm(1), x);
            }
        }
        else {
            NanoStaticAssert((LIR_xt ^ 1) == LIR_xf);
            while (c->isop(LIR_eq) && c->oprnd1()->isCmp() &&
                c->oprnd2()->isconstval(0)) {
                // xt(eq(cmp,0)) => xf(cmp) or xf(eq(cmp,0)) => xt(cmp)
                v = LOpcode(v ^ 1);
                c = c->oprnd1();
            }
        }
    }
    return out->insGuard(v, c, x);
}
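
// Illustrative example (mine): guards flip rather than nest. A guard such as
//   insGuard(LIR_xt, eq(someCmp, 0), rec)
// is rewritten by the loop above into insGuard(LIR_xf, someCmp, rec); the
// v ^ 1 trick relies on the adjacent-opcode layout checked by
// NanoStaticAssert((LIR_xt ^ 1) == LIR_xf).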
LIns* ExprFilter::insBranch(LOpcode v, LIns *c, LIns *t)
{
    if (v == LIR_jt || v == LIR_jf) {
        while (c->isop(LIR_eq) && c->oprnd1()->isCmp() && c->oprnd2()->isconstval(0)) {
            // jt(eq(cmp,0)) => jf(cmp) or jf(eq(cmp,0)) => jt(cmp)
            v = LOpcode(v ^ 1);
            c = c->oprnd1();
        }
    }
    return out->insBranch(v, c, t);
}

LIns* LirWriter::insLoadi(LIns *base, int disp)
{
    return insLoad(LIR_ld,base,disp);
}

LIns* LirWriter::insLoad(LOpcode op, LIns *base, int disp)
{
    return insLoad(op, base, insImm(disp));
}

LIns* LirWriter::ins_eq0(LIns* oprnd1)
{
    return ins2i(LIR_eq, oprnd1, 0);
}

LIns* LirWriter::insImmf(double f)
{
    union {
        double f;
        uint64_t q;
    } u;
    u.f = f;
    return insImmq(u.q);
}

LIns* LirWriter::qjoin(LInsp lo, LInsp hi)
{
    return ins2(LIR_qjoin, lo, hi);
}

LIns* LirWriter::insImmPtr(const void *ptr)
{
    return sizeof(ptr) == 8 ? insImmq((uintptr_t)ptr) : insImm((intptr_t)ptr);
}
LIns* LirWriter::ins_choose(LIns* cond, LIns* iftrue, LIns* iffalse)
{
    // if not a conditional, make it implicitly an ==0 test (then flop results)
    if (!cond->isCmp())
    {
        cond = ins_eq0(cond);
        LIns* tmp = iftrue;
        iftrue = iffalse;
        iffalse = tmp;
    }

    if (true/*avmplus::AvmCore::use_cmov()*/)
    {
        return ins2((iftrue->isQuad() || iffalse->isQuad()) ? LIR_qcmov : LIR_cmov, cond, ins2(LIR_2, iftrue, iffalse));
    }

    // @todo -- it might be better to use a short conditional branch rather than
    // the bit-twiddling on systems that don't provide a conditional move instruction.
    LInsp ncond = ins1(LIR_neg, cond); // cond ? -1 : 0
    return ins2(LIR_or,
        ins2(LIR_and, iftrue, ncond),
        ins2(LIR_and, iffalse, ins1(LIR_not, ncond)));
}
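
// Worked example (illustrative) for the branch-free fallback: once cond is
// normalized to 0 or 1, ncond = neg(cond) is either 0 or 0xFFFFFFFF, so
//   (iftrue & ncond) | (iffalse & ~ncond)
// selects iftrue when cond == 1 and iffalse when cond == 0. This only works
// because cond is first forced to a 0/1 value by the ==0 normalization above.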
LIns* LirBufWriter::insCall(const CallInfo *ci, LInsp args[])
{
    static const LOpcode k_callmap[] = { LIR_call, LIR_fcall, LIR_call, LIR_callh };
    static const LOpcode k_callimap[] = { LIR_calli, LIR_fcalli, LIR_calli, LIR_skip };

    uint32_t argt = ci->_argtypes;
    LOpcode op = (ci->isIndirect() ? k_callimap : k_callmap)[argt & 3];
    NanoAssert(op != LIR_skip); // LIR_skip here is just an error condition

    ArgSize sizes[MAXARGS];
    int32_t argc = ci->get_sizes(sizes);

    if (AvmCore::config.soft_float) {
        if (op == LIR_fcall)
            op = LIR_callh;
    }

    // An example of what we're trying to serialize (for a 32-bit machine):
    //
    // byte                                          LIns slot
    // ----                                          ---------
    // N     [ arg operand #3 ----------------------   K
    // N+4     arg operand #2 ----------------------
    // N+8     arg operand #1 ----------------------
    // N+12    arg operand #0 ---------------------- ]
    // N+16  [ code=LIR_call | resv | (pad16) ------   K+1
    //         imm8a | (pad24) ---------------------
    //         imm8b | (pad24) ---------------------
    //         ci ---------------------------------- ]
    //
    // In this example:
    //   argc = 4
    //   argSlots(argc) = 1

    NanoAssert(argc <= (int)MAXARGS);
    int32_t nSlots = argSlots(argc) + 1;
    ensureRoom(nSlots);

    // Skip slots needed for call parameters.
    LInsp l = _buf->next() + argSlots(argc);

    // Call parameters laid in reverse order.
    // Nb: this must be kept in sync with arg().
    LInsp* offs = (LInsp*)l;
    for (int32_t i=0; i < argc; i++)
        *--offs = args[i];
    NanoAssert((LInsp)offs >= _buf->next());

#ifndef NANOJIT_64BIT
    l->initOpcodeAndClearResv(op==LIR_callh ? LIR_call : op);
#else
    l->initOpcodeAndClearResv(op);
#endif
    l->c.imm8a = 0;
    l->c.imm8b = argc;
    l->c.ci = ci;
    _buf->commit(nSlots);
    _buf->_stats.lir++;
    return l;
}
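
// Worked example (illustrative): a three-argument call on a 32-bit build uses
// argSlots(3) = 1 slot of argument pointers immediately before the call
// instruction l. The loop above fills it in reverse, so afterwards:
//   ((LInsp*)l)[-1] == args[0]   // rightmost argument, == l->arg(0)
//   ((LInsp*)l)[-2] == args[1]
//   ((LInsp*)l)[-3] == args[2]
// which is exactly the indexing LIns::arg() relies on.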
using namespace avmplus;

StackFilter::StackFilter(LirFilter *in, GC *gc, LirBuffer *lirbuf, LInsp sp)
    : LirFilter(in), gc(gc), lirbuf(lirbuf), sp(sp), top(0)
{}

LInsp StackFilter::read()
{
    for (;;)
    {
        LInsp i = in->read();
        if (!i)
            return 0;
        if (i->isStore())
        {
            LInsp base = i->oprnd2();
            if (base == sp)
            {
                LInsp v = i->oprnd1();
                int d = i->immdisp() >> 2;
                // drop stores to stack slots that are already covered by a
                // later (earlier-read) store; quad stores cover two slots
                if (stk.get(d) && (!v->isQuad() || stk.get(d-1)))
                    continue;
                stk.set(gc, d);
                if (v->isQuad())
                    stk.set(gc, d-1);
            }
        }
        /*
         * NB: If there is a backward branch other than the loop-restart branch, this is
         * going to be wrong. Unfortunately there doesn't seem to be an easy way to detect
         * such branches. Just do not create any.
         */
        else if (i->isGuard())
        {
            stk.reset();
            top = getTop(i) >> 2;
        }
        return i;
    }
}
// inlined/separated version of SuperFastHash.
// This content is copyrighted by Paul Hsieh. For reference see: http://www.azillionmonkeys.com/qed/hash.html

inline uint32_t _hash8(uint32_t hash, const uint8_t data)
{
    hash += data;
    hash ^= hash << 10;
    hash += hash >> 1;
    return hash;
}

inline uint32_t _hash32(uint32_t hash, const uint32_t data)
{
    const uint32_t dlo = data & 0xffff;
    const uint32_t dhi = data >> 16;
    hash += dlo;
    const uint32_t tmp = (dhi << 11) ^ hash;
    hash = (hash << 16) ^ tmp;
    hash += hash >> 11;
    return hash;
}

inline uint32_t _hashptr(uint32_t hash, const void* data)
{
#ifdef NANOJIT_64BIT
    hash = _hash32(hash, uint32_t(uintptr_t(data) >> 32));
    hash = _hash32(hash, uint32_t(uintptr_t(data)));
    return hash;
#else
    return _hash32(hash, uint32_t(data));
#endif
}

inline uint32_t _hashfinish(uint32_t hash)
{
    /* Force "avalanching" of final 127 bits */
    hash ^= hash << 3;
    hash += hash >> 5;
    hash ^= hash << 4;
    hash += hash >> 17;
    hash ^= hash << 25;
    hash += hash >> 6;
    return hash;
}
LInsHashSet::LInsHashSet(GC* gc) :
    m_used(0), m_cap(kInitialCap), m_gc(gc)
{
#ifdef MEMORY_INFO
//  m_list.set_meminfo_name("LInsHashSet.list");
#endif
    LInsp *list = (LInsp*) gc->Alloc(sizeof(LInsp)*m_cap, GC::kZero);
    WB(gc, this, &m_list, list);
}

LInsHashSet::~LInsHashSet()
{
    m_gc->Free(m_list);
}

void LInsHashSet::clear() {
    memset(m_list, 0, sizeof(LInsp)*m_cap);
    m_used = 0;
}

/*static*/ uint32_t FASTCALL LInsHashSet::hashcode(LInsp i)
{
    const LOpcode op = i->opcode();
    switch (op)
    {
        case LIR_int:
            return hashimm(i->imm32());
        case LIR_quad:
            return hashimmq(i->imm64());
        case LIR_call:
        case LIR_fcall:
#if defined NANOJIT_64BIT
        case LIR_callh:
#endif
        {
            LInsp args[10];
            int32_t argc = i->argc();
            NanoAssert(argc < 10);
            for (int32_t j=0; j < argc; j++)
                args[j] = i->arg(j);
            return hashcall(i->callInfo(), argc, args);
        }
        default:
            if (operandCount[op] == 2)
                return hash2(op, i->oprnd1(), i->oprnd2());
            else
                return hash1(op, i->oprnd1());
    }
}
/*static*/ bool FASTCALL LInsHashSet::equals(LInsp a, LInsp b)
{
    if (a == b)
        return true;
    if (a->opcode() != b->opcode())
        return false;
    AvmAssert(a->opcode() == b->opcode());
    const LOpcode op = a->opcode();
    switch (op)
    {
        case LIR_int:
        {
            return a->imm32() == b->imm32();
        }
        case LIR_quad:
        {
            return a->imm64() == b->imm64();
        }
        case LIR_call:
        case LIR_fcall:
#if defined NANOJIT_64BIT
        case LIR_callh:
#endif
        {
            if (a->callInfo() != b->callInfo()) return false;
            uint32_t argc=a->argc();
            NanoAssert(argc == b->argc());
            for (uint32_t i=0; i < argc; i++)
                if (a->arg(i) != b->arg(i))
                    return false;
            return true;
        }
        default:
        {
            const uint32_t count = operandCount[op];
            if ((count >= 1 && a->oprnd1() != b->oprnd1()) ||
                (count >= 2 && a->oprnd2() != b->oprnd2()))
                return false;
            return true;
        }
    }
}

void FASTCALL LInsHashSet::grow()
{
    const uint32_t newcap = m_cap << 1;
    LInsp *newlist = (LInsp*) m_gc->Alloc(newcap * sizeof(LInsp), GC::kZero);
    LInsp *list = m_list;
#ifdef MEMORY_INFO
//  newlist.set_meminfo_name("LInsHashSet.list");
#endif
    for (uint32_t i=0, n=m_cap; i < n; i++) {
        LInsp name = list[i];
        if (!name) continue;
        uint32_t j = find(name, hashcode(name), newlist, newcap);
        newlist[j] = name;
    }
    m_cap = newcap;
    WB(m_gc, this, &m_list, newlist);
}
uint32_t FASTCALL LInsHashSet::find(LInsp name, uint32_t hash, const LInsp *list, uint32_t cap)
{
    const uint32_t bitmask = (cap - 1) & ~0x1;
    hash &= bitmask;
    uint32_t n = 7 << 1;
    LInsp k;
    while ((k = list[hash]) != NULL && !equals(k, name))
    {
        hash = (hash + (n += 2)) & bitmask; // quadratic probe
    }
    return hash;
}
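
// Illustrative probe trace: with cap = 64, bitmask = 0x3E keeps probes on
// even slots. Starting from hash h, successive collisions test
//   (h + 16) & 0x3E, (h + 16 + 18) & 0x3E, (h + 16 + 18 + 20) & 0x3E, ...
// since n starts at 7 << 1 and each miss adds n += 2 -- a quadratic probe
// whose stride grows by one slot pair per miss.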
LInsp LInsHashSet::add(LInsp name, uint32_t k)
{
    // this is relatively short-lived so let's try a more aggressive load factor
    // in the interest of improving performance
    if (((m_used+1)<<1) >= m_cap) // 0.50
    {
        grow();
        k = find(name, hashcode(name), m_list, m_cap);
    }
    NanoAssert(!m_list[k]);
    m_used++;
    return m_list[k] = name;
}

void LInsHashSet::replace(LInsp i)
{
    LInsp *list = m_list;
    uint32_t k = find(i, hashcode(i), list, m_cap);
    if (list[k]) {
        // already there, so replace it
        list[k] = i;
    } else {
        add(i, k);
    }
}

uint32_t LInsHashSet::hashimm(int32_t a) {
    return _hashfinish(_hash32(0,a));
}

uint32_t LInsHashSet::hashimmq(uint64_t a) {
    uint32_t hash = _hash32(0, uint32_t(a >> 32));
    return _hashfinish(_hash32(hash, uint32_t(a)));
}

uint32_t LInsHashSet::hash1(LOpcode op, LInsp a) {
    uint32_t hash = _hash8(0,uint8_t(op));
    return _hashfinish(_hashptr(hash, a));
}

uint32_t LInsHashSet::hash2(LOpcode op, LInsp a, LInsp b) {
    uint32_t hash = _hash8(0,uint8_t(op));
    hash = _hashptr(hash, a);
    return _hashfinish(_hashptr(hash, b));
}

uint32_t LInsHashSet::hashcall(const CallInfo *ci, uint32_t argc, LInsp args[]) {
    uint32_t hash = _hashptr(0, ci);
    for (int32_t j=argc-1; j >= 0; j--)
        hash = _hashptr(hash,args[j]);
    return _hashfinish(hash);
}
LInsp LInsHashSet::find32(int32_t a, uint32_t &i)
{
    uint32_t cap = m_cap;
    const LInsp *list = m_list;
    const uint32_t bitmask = (cap - 1) & ~0x1;
    uint32_t hash = hashimm(a) & bitmask;
    uint32_t n = 7 << 1;
    LInsp k;
    while ((k = list[hash]) != NULL &&
        (!k->isconst() || k->imm32() != a))
    {
        hash = (hash + (n += 2)) & bitmask; // quadratic probe
    }
    i = hash;
    return k;
}

LInsp LInsHashSet::find64(uint64_t a, uint32_t &i)
{
    uint32_t cap = m_cap;
    const LInsp *list = m_list;
    const uint32_t bitmask = (cap - 1) & ~0x1;
    uint32_t hash = hashimmq(a) & bitmask;
    uint32_t n = 7 << 1;
    LInsp k;
    while ((k = list[hash]) != NULL &&
        (!k->isconstq() || k->imm64() != a))
    {
        hash = (hash + (n += 2)) & bitmask; // quadratic probe
    }
    i = hash;
    return k;
}

LInsp LInsHashSet::find1(LOpcode op, LInsp a, uint32_t &i)
{
    uint32_t cap = m_cap;
    const LInsp *list = m_list;
    const uint32_t bitmask = (cap - 1) & ~0x1;
    uint32_t hash = hash1(op,a) & bitmask;
    uint32_t n = 7 << 1;
    LInsp k;
    while ((k = list[hash]) != NULL &&
        (k->opcode() != op || k->oprnd1() != a))
    {
        hash = (hash + (n += 2)) & bitmask; // quadratic probe
    }
    i = hash;
    return k;
}

LInsp LInsHashSet::find2(LOpcode op, LInsp a, LInsp b, uint32_t &i)
{
    uint32_t cap = m_cap;
    const LInsp *list = m_list;
    const uint32_t bitmask = (cap - 1) & ~0x1;
    uint32_t hash = hash2(op,a,b) & bitmask;
    uint32_t n = 7 << 1;
    LInsp k;
    while ((k = list[hash]) != NULL &&
        (k->opcode() != op || k->oprnd1() != a || k->oprnd2() != b))
    {
        hash = (hash + (n += 2)) & bitmask; // quadratic probe
    }
    i = hash;
    return k;
}

bool argsmatch(LInsp i, uint32_t argc, LInsp args[])
{
    for (uint32_t j=0; j < argc; j++)
        if (i->arg(j) != args[j])
            return false;
    return true;
}

LInsp LInsHashSet::findcall(const CallInfo *ci, uint32_t argc, LInsp args[], uint32_t &i)
{
    uint32_t cap = m_cap;
    const LInsp *list = m_list;
    const uint32_t bitmask = (cap - 1) & ~0x1;
    uint32_t hash = hashcall(ci, argc, args) & bitmask;
    uint32_t n = 7 << 1;
    LInsp k;
    while ((k = list[hash]) != NULL &&
        (!k->isCall() || k->callInfo() != ci || !argsmatch(k, argc, args)))
    {
        hash = (hash + (n += 2)) & bitmask; // quadratic probe
    }
    i = hash;
    return k;
}
GuardRecord *LIns::record()
{
    NanoAssert(isGuard());
    return (GuardRecord*)oprnd2()->payload();
}

#ifdef NJ_VERBOSE
class RetiredEntry: public GCObject
{
public:
    List<LInsp, LIST_NonGCObjects> live;
    LInsp i;
    RetiredEntry(GC *gc): live(gc) {}
};

class LiveTable
{
public:
    SortedMap<LInsp,LInsp,LIST_NonGCObjects> live;
    List<RetiredEntry*, LIST_GCObjects> retired;
    int maxlive;
    LiveTable(GC *gc) : live(gc), retired(gc), maxlive(0) {}
    ~LiveTable()
    {
        for (size_t i = 0; i < retired.size(); i++) {
            NJ_DELETE(retired.get(i));
        }
    }
    void add(LInsp i, LInsp use) {
        if (!i->isconst() && !i->isconstq() && !live.containsKey(i)) {
            NanoAssert(size_t(i->opcode()) < sizeof(lirNames) / sizeof(lirNames[0]));
            live.put(i,use);
        }
    }
    void retire(LInsp i, GC *gc) {
        RetiredEntry *e = NJ_NEW(gc, RetiredEntry)(gc);
        e->i = i;
        for (int j=0, n=live.size(); j < n; j++) {
            LInsp l = live.keyAt(j);
            if (!l->isStore() && !l->isGuard())
                e->live.add(l);
        }
        int size = 0;
        if ((size = e->live.size()) > maxlive)
            maxlive = size;
        live.remove(i);
        retired.add(e);
    }
    bool contains(LInsp i) {
        return live.containsKey(i);
    }
};
void live(GC *gc, LirBuffer *lirbuf)
{
    // traverse backwards to find live exprs and a few other stats.

    LiveTable live(gc);
    uint32_t exits = 0;
    int total = 0;
    LirReader br(lirbuf);
    StackFilter sf(&br, gc, lirbuf, lirbuf->sp);
    StackFilter r(&sf, gc, lirbuf, lirbuf->rp);
    live.add(lirbuf->state, r.pos());
    for (LInsp i = r.read(); i != 0; i = r.read())
    {
        total++;

        // first handle side-effect instructions
        if (!i->isCse(lirbuf->_functions))
        {
            live.add(i,0);
            if (i->isGuard())
                exits++;
        }

        // now propagate liveness
        if (live.contains(i))
        {
            live.retire(i,gc);
            NanoAssert(size_t(i->opcode()) < sizeof(operandCount) / sizeof(operandCount[0]));
            if (i->isStore()) {
                live.add(i->oprnd2(),i); // base
                live.add(i->oprnd1(),i); // val
            }
            else if (i->isop(LIR_cmov) || i->isop(LIR_qcmov)) {
                live.add(i->oprnd1(),i);
                live.add(i->oprnd2()->oprnd1(),i);
                live.add(i->oprnd2()->oprnd2(),i);
            }
            else if (operandCount[i->opcode()] == 1) {
                live.add(i->oprnd1(),i);
            }
            else if (operandCount[i->opcode()] == 2) {
                live.add(i->oprnd1(),i);
                live.add(i->oprnd2(),i);
            }
            else if (i->isCall()) {
                for (int j=0, c=i->argc(); j < c; j++)
                    live.add(i->arg(j),i);
            }
        }
    }

    printf("live instruction count %d, total %u, max pressure %d\n",
        live.retired.size(), total, live.maxlive);
    printf("side exits %u\n", exits);

    // print live exprs, going forwards
    LirNameMap *names = lirbuf->names;
    bool newblock = true;
    for (int j=live.retired.size()-1; j >= 0; j--)
    {
        RetiredEntry *e = live.retired[j];
        char livebuf[4000], *s=livebuf;
        *s = 0;
        if (!newblock && e->i->isop(LIR_label)) {
            printf("\n");
        }
        newblock = false;
        for (int k=0,n=e->live.size(); k < n; k++) {
            strcpy(s, names->formatRef(e->live[k]));
            s += strlen(s);
            *s++ = ' '; *s = 0;
            NanoAssert(s < livebuf+sizeof(livebuf));
        }
        printf("%-60s %s\n", livebuf, names->formatIns(e->i));
        if (e->i->isGuard() || e->i->isBranch() || isRet(e->i->opcode())) {
            printf("\n");
            newblock = true;
        }
    }
}
LabelMap::Entry::~Entry()
{
}

LirNameMap::Entry::~Entry()
{
}

LirNameMap::~LirNameMap()
{
    Entry *e;
    while ((e = names.removeLast()) != NULL) {
        labels->core->freeString(e->name);
        NJ_DELETE(e);
    }
}

bool LirNameMap::addName(LInsp i, Stringp name) {
    if (!names.containsKey(i)) {
        Entry *e = NJ_NEW(labels->core->gc, Entry)(name);
        names.put(i, e);
        return true;
    }
    return false;
}

void LirNameMap::addName(LInsp i, const char *name) {
    Stringp new_name = labels->core->newString(name);
    if (!addName(i, new_name)) {
        labels->core->freeString(new_name);
    }
}

void LirNameMap::copyName(LInsp i, const char *s, int suffix) {
    char s2[200];
    if (isdigit(s[strlen(s)-1])) {
        // if s ends with a digit, add '_' to clarify the suffix
        sprintf(s2,"%s_%d", s, suffix);
    } else {
        sprintf(s2,"%s%d", s, suffix);
    }
    addName(i, labels->core->newString(s2));
}

void LirNameMap::formatImm(int32_t c, char *buf) {
    if (c >= 10000 || c <= -10000)
        sprintf(buf,"#%s",labels->format((void*)c));
    else
        sprintf(buf,"%d", c);
}
const char* LirNameMap::formatRef(LIns *ref)
{
    char buffer[200], *buf=buffer;
    buf[0]=0;
    GC *gc = labels->core->gc;
    if (names.containsKey(ref)) {
        StringNullTerminatedUTF8 cname(gc, names.get(ref)->name);
        strcat(buf, cname.c_str());
    }
    else if (ref->isconstq()) {
#if defined NANOJIT_64BIT
        sprintf(buf, "#0x%lx", (nj_printf_ld)ref->imm64());
#else
        formatImm(uint32_t(ref->imm64()>>32), buf);
        buf += strlen(buf);
        *buf++ = ':';
        formatImm(uint32_t(ref->imm64()), buf);
#endif
    }
    else if (ref->isconst()) {
        formatImm(ref->imm32(), buf);
    }
    else {
        if (ref->isCall()) {
#if !defined NANOJIT_64BIT
            if (ref->isop(LIR_callh)) {
                // we've presumably seen the other half already
                ref = ref->oprnd1();
            } else
#endif
            {
                copyName(ref, ref->callInfo()->_name, funccounts.add(ref->callInfo()));
            }
        } else {
            NanoAssert(size_t(ref->opcode()) < sizeof(lirNames) / sizeof(lirNames[0]));
            copyName(ref, lirNames[ref->opcode()], lircounts.add(ref->opcode()));
        }
        StringNullTerminatedUTF8 cname(gc, names.get(ref)->name);
        strcat(buf, cname.c_str());
    }
    return labels->dup(buffer);
}
const char* LirNameMap::formatIns(LIns* i)
{
    char sbuf[200];
    char *s = sbuf;
    LOpcode op = i->opcode();
    switch(op)
    {
        case LIR_int:
            sprintf(s, "%s", formatRef(i));
            break;

        case LIR_alloc:
            sprintf(s, "%s = %s %d", formatRef(i), lirNames[op], i->size());
            break;

        case LIR_quad:
            sprintf(s, "#%X:%X /* %g */", i->imm64_1(), i->imm64_0(), i->imm64f());
            break;

        case LIR_loop:
        case LIR_start:
            sprintf(s, "%s", lirNames[op]);
            break;

#if defined NANOJIT_64BIT
        case LIR_callh:
#endif
        case LIR_fcall:
        case LIR_call: {
            sprintf(s, "%s = %s ( ", formatRef(i), i->callInfo()->_name);
            for (int32_t j=i->argc()-1; j >= 0; j--) {
                s += strlen(s);
                sprintf(s, "%s ",formatRef(i->arg(j)));
            }
            s += strlen(s);
            sprintf(s, ")");
            break;
        }
        case LIR_fcalli:
        case LIR_calli: {
            int32_t argc = i->argc();
            sprintf(s, "%s = [%s] ( ", formatRef(i), formatRef(i->arg(argc-1)));
            s += strlen(s);
            argc--;
            for (int32_t j=argc-1; j >= 0; j--) {
                s += strlen(s);
                sprintf(s, "%s ",formatRef(i->arg(j)));
            }
            s += strlen(s);
            sprintf(s, ")");
            break;
        }

        case LIR_param: {
            uint32_t arg = i->imm8();
            if (!i->imm8b()) {
                if (arg < sizeof(Assembler::argRegs)/sizeof(Assembler::argRegs[0])) {
                    sprintf(s, "%s = %s %d %s", formatRef(i), lirNames[op],
                        arg, gpn(Assembler::argRegs[arg]));
                } else {
                    sprintf(s, "%s = %s %d", formatRef(i), lirNames[op], arg);
                }
            } else {
                sprintf(s, "%s = %s %d %s", formatRef(i), lirNames[op],
                    arg, gpn(Assembler::savedRegs[arg]));
            }
            break;
        }

        case LIR_label:
            sprintf(s, "%s:", formatRef(i));
            break;

        case LIR_jt:
        case LIR_jf:
            sprintf(s, "%s %s -> %s", lirNames[op], formatRef(i->oprnd1()),
                i->oprnd2() ? formatRef(i->oprnd2()) : "unpatched");
            break;

        case LIR_j:
            sprintf(s, "%s -> %s", lirNames[op],
                i->oprnd2() ? formatRef(i->oprnd2()) : "unpatched");
            break;

        case LIR_live:
        case LIR_ret:
            sprintf(s, "%s %s", lirNames[op], formatRef(i->oprnd1()));
            break;

        case LIR_neg: case LIR_fneg: case LIR_not:
        case LIR_qlo: case LIR_qhi:
        case LIR_ov:  case LIR_cs:
        case LIR_i2f: case LIR_u2f:
            sprintf(s, "%s = %s %s", formatRef(i), lirNames[op], formatRef(i->oprnd1()));
            break;

        case LIR_add:  case LIR_addp: case LIR_sub:  case LIR_mul:
        case LIR_fadd: case LIR_fsub: case LIR_fmul: case LIR_fdiv:
        case LIR_and:  case LIR_or:   case LIR_xor:
        case LIR_lsh:  case LIR_rsh:  case LIR_ush:
        case LIR_eq:   case LIR_lt:   case LIR_le:   case LIR_gt:  case LIR_ge:
        case LIR_ult:  case LIR_ule:  case LIR_ugt:  case LIR_uge:
        case LIR_feq:  case LIR_flt:  case LIR_fle:  case LIR_fgt: case LIR_fge:
        case LIR_qjoin: case LIR_2:
            sprintf(s, "%s = %s %s, %s", formatRef(i), lirNames[op],
                formatRef(i->oprnd1()),
                formatRef(i->oprnd2()));
            break;

        case LIR_x:
        case LIR_xt:
        case LIR_xf:
            sprintf(s, "%s (%s), %s", lirNames[op],
                formatIns(i->oprnd1()),
                formatRef(i->oprnd2()));
            break;

        case LIR_cmov:
        case LIR_qcmov:
            sprintf(s, "%s = %s %s ? %s : %s", formatRef(i), lirNames[op],
                formatRef(i->oprnd1()),
                formatRef(i->oprnd2()->oprnd1()),
                formatRef(i->oprnd2()->oprnd2()));
            break;

        case LIR_ld:
        case LIR_ldc:
        case LIR_ldq:
        case LIR_ldcb:
        case LIR_ldcs:
            sprintf(s, "%s = %s %s[%s]", formatRef(i), lirNames[op],
                formatRef(i->oprnd1()),
                formatRef(i->oprnd2()));
            break;

        case LIR_sti:
        case LIR_stqi:
            sprintf(s, "%s %s[%d] = %s", lirNames[op],
                formatRef(i->oprnd2()),
                i->immdisp(),
                formatRef(i->oprnd1()));
            break;

        default:
            sprintf(s, "%s", lirNames[op]);
            break;
    }
    return labels->dup(sbuf);
}
#endif /* NJ_VERBOSE */
CseFilter::CseFilter(LirWriter *out, GC *gc)
    : LirWriter(out), exprs(gc) {}

LIns* CseFilter::insImm(int32_t imm)
{
    uint32_t k;
    LInsp found = exprs.find32(imm, k);
    if (found)
        return found;
    return exprs.add(out->insImm(imm), k);
}

LIns* CseFilter::insImmq(uint64_t q)
{
    uint32_t k;
    LInsp found = exprs.find64(q, k);
    if (found)
        return found;
    return exprs.add(out->insImmq(q), k);
}

LIns* CseFilter::ins0(LOpcode v)
{
    if (v == LIR_label)
        exprs.clear();
    return out->ins0(v);
}

LIns* CseFilter::ins1(LOpcode v, LInsp a)
{
    if (isCse(v)) {
        NanoAssert(operandCount[v]==1);
        uint32_t k;
        LInsp found = exprs.find1(v, a, k);
        if (found)
            return found;
        return exprs.add(out->ins1(v,a), k);
    }
    return out->ins1(v,a);
}

LIns* CseFilter::ins2(LOpcode v, LInsp a, LInsp b)
{
    if (isCse(v)) {
        NanoAssert(operandCount[v]==2);
        uint32_t k;
        LInsp found = exprs.find2(v, a, b, k);
        if (found)
            return found;
        return exprs.add(out->ins2(v,a,b), k);
    }
    return out->ins2(v,a,b);
}

LIns* CseFilter::insLoad(LOpcode v, LInsp base, LInsp disp)
{
    if (isCse(v)) {
        NanoAssert(operandCount[v]==2);
        uint32_t k;
        LInsp found = exprs.find2(v, base, disp, k);
        if (found)
            return found;
        return exprs.add(out->insLoad(v,base,disp), k);
    }
    return out->insLoad(v,base,disp);
}

LInsp CseFilter::insGuard(LOpcode v, LInsp c, LInsp x)
{
    if (isCse(v)) {
        // conditional guard
        NanoAssert(operandCount[v]==1);
        uint32_t k;
        LInsp found = exprs.find1(v, c, k);
        if (found)
            return 0;
        return exprs.add(out->insGuard(v,c,x), k);
    }
    return out->insGuard(v, c, x);
}

LInsp CseFilter::insCall(const CallInfo *ci, LInsp args[])
{
    if (ci->_cse) {
        uint32_t k;
        uint32_t argc = ci->count_args();
        LInsp found = exprs.findcall(ci, argc, args, k);
        if (found)
            return found;
        return exprs.add(out->insCall(ci, args), k);
    }
    return out->insCall(ci, args);
}

CseReader::CseReader(LirFilter *in, LInsHashSet *exprs, const CallInfo *functions)
    : LirFilter(in), exprs(exprs), functions(functions)
{}

LInsp CseReader::read()
{
    LInsp i = in->read();
    if (i) {
        if (i->isCse(functions))
            exprs->replace(i);
    }
    return i;
}
LIns* FASTCALL callArgN(LIns* i, uint32_t n)
{
    return i->arg(i->argc()-n-1);
}
void compile(Assembler* assm, Fragment* triggerFrag)
{
    Fragmento *frago = triggerFrag->lirbuf->_frago;
    AvmCore *core = frago->core();
    GC *gc = core->gc;

    verbose_only( StringList asmOutput(gc); )
    verbose_only( assm->_outputCache = &asmOutput; )

    verbose_only(if (assm->_verbose && core->config.verbose_live)
        live(gc, triggerFrag->lirbuf);)

    bool treeCompile = core->config.tree_opt && (triggerFrag->kind == BranchTrace);
    RegAllocMap regMap(gc);
    NInsList loopJumps(gc);
#ifdef MEMORY_INFO
//  loopJumps.set_meminfo_name("LIR loopjumps");
#endif
    assm->beginAssembly(triggerFrag, &regMap);
    if (assm->error())
        return;

    //fprintf(stderr, "recompile trigger %X kind %d\n", (int)triggerFrag, triggerFrag->kind);
    Fragment* root = triggerFrag;
    if (treeCompile)
    {
        // recompile the entire tree
        root = triggerFrag->root;
        root->fragEntry = 0;
        root->loopEntry = 0;
        root->releaseCode(frago);

        // do the tree branches
        Fragment* frag = root->treeBranches;
        while (frag)
        {
            // compile til no more frags
            if (frag->lastIns)
            {
                assm->assemble(frag, loopJumps);
                verbose_only(if (assm->_verbose)
                    assm->outputf("compiling branch %s ip %s",
                        frago->labels->format(frag),
                        frago->labels->format(frag->ip)); )

                NanoAssert(frag->kind == BranchTrace);
                RegAlloc* regs = NJ_NEW(gc, RegAlloc)();
                assm->copyRegisters(regs);
                assm->releaseRegisters();
                SideExit* exit = frag->spawnedFrom;
                regMap.put(exit, regs);
            }
            frag = frag->treeBranches;
        }
    }

    // now the main trunk
    assm->assemble(root, loopJumps);
    verbose_only(if (assm->_verbose)
        assm->outputf("compiling trunk %s",
            frago->labels->format(root));)
    NanoAssert(!frago->core()->config.tree_opt || root == root->anchor || root->kind == MergeTrace);
    assm->endAssembly(root, loopJumps);

    // reverse output so that assembly is displayed low-to-high
    verbose_only( assm->_outputCache = 0; )
    verbose_only(for(int i=asmOutput.size()-1; i>=0; --i) { assm->outputf("%s",asmOutput.get(i)); } );

    if (assm->error()) {
        root->fragEntry = 0;
        root->loopEntry = 0;
    }
}
LInsp LoadFilter::insLoad(LOpcode v, LInsp base, LInsp disp)
{
    if (base != sp && base != rp && (v == LIR_ld || v == LIR_ldq)) {
        uint32_t k;
        LInsp found = exprs.find2(v, base, disp, k);
        if (found)
            return found;
        return exprs.add(out->insLoad(v,base,disp), k);
    }
    return out->insLoad(v, base, disp);
}

void LoadFilter::clear(LInsp p)
{
    if (p != sp && p != rp)
        exprs.clear();
}

LInsp LoadFilter::insStorei(LInsp v, LInsp b, int32_t d)
{
    clear(b);
    return out->insStorei(v, b, d);
}

LInsp LoadFilter::insCall(const CallInfo *ci, LInsp args[])
{
    if (!ci->_cse)
        exprs.clear();
    return out->insCall(ci, args);
}

LInsp LoadFilter::ins0(LOpcode op)
{
    if (op == LIR_label)
        exprs.clear();
    return out->ins0(op);
}
#endif /* FEATURE_NANOJIT */

#if defined(NJ_VERBOSE)
LabelMap::LabelMap(AvmCore *core, LabelMap* parent)
    : parent(parent), names(core->gc), addrs(core->config.verbose_addrs), end(buf), core(core)
{}

LabelMap::~LabelMap()
{
    clear();
}

void LabelMap::clear()
{
    Entry *e;
    while ((e = names.removeLast()) != NULL) {
        core->freeString(e->name);
        NJ_DELETE(e);
    }
}
void LabelMap::add(const void *p, size_t size, size_t align, const char *name)
{
    if (!this || names.containsKey(p))
        return;
    add(p, size, align, core->newString(name));
}

void LabelMap::add(const void *p, size_t size, size_t align, Stringp name)
{
    if (!this || names.containsKey(p))
        return;
    Entry *e = NJ_NEW(core->gc, Entry)(name, size<<align, align);
    names.put(p, e);
}
const char *LabelMap::format(const void *p)
{
    char b[200];
    int i = names.findNear(p);
    if (i >= 0) {
        const void *start = names.keyAt(i);
        Entry *e = names.at(i);
        const void *end = (const char*)start + e->size;
        avmplus::StringNullTerminatedUTF8 cname(core->gc, e->name);
        const char *name = cname.c_str();
        if (p == start) {
            if (addrs)
                sprintf(b,"%p %s",p,name);
            else
                sprintf(b,"%s",name);
            return dup(b);
        }
        else if (p > start && p < end) {
            int32_t d = int32_t(intptr_t(p)-intptr_t(start)) >> e->align;
            if (addrs)
                sprintf(b, "%p %s+%d", p, name, d);
            else
                sprintf(b,"%s+%d", name, d);
            return dup(b);
        }
        else {
            if (parent)
                return parent->format(p);
            sprintf(b, "%p", p);
            return dup(b);
        }
    }
    if (parent)
        return parent->format(p);
    sprintf(b, "%p", p);
    return dup(b);
}
const char *LabelMap::dup(const char *b)
{
    size_t need = strlen(b)+1;
    char *s = end;
    end += need;
    if (end > buf+sizeof(buf)) {
        s = buf;
        end = s+need;
    }
    strcpy(s, b);
    return s;
}
// copy all labels to parent, adding newbase to label addresses
void LabelMap::promoteAll(const void *newbase)
{
    for (int i=0, n=names.size(); i < n; i++) {
        void *base = (char*)newbase + (intptr_t)names.keyAt(i);
        parent->names.put(base, names.at(i));
    }
}
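
// Worked example (illustrative, assuming keys are buffer-relative offsets):
// a label keyed at (void*)0x40 that is promoted with newbase = 0x10000 lands
// in the parent map under 0x10040, so parent->format() resolves addresses at
// the code's final location after it has been copied out of a scratch buffer.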
#endif // NJ_VERBOSE