/* -*- Mode: C++; c-basic-offset: 4; indent-tabs-mode: t; tab-width: 4 -*- */
/* ***** BEGIN LICENSE BLOCK *****
 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
 * The contents of this file are subject to the Mozilla Public License Version
 * 1.1 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * http://www.mozilla.org/MPL/
 * Software distributed under the License is distributed on an "AS IS" basis,
 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
 * for the specific language governing rights and limitations under the License.
 * The Original Code is [Open Source Virtual Machine].
 * The Initial Developer of the Original Code is
 * Adobe System Incorporated.
 * Portions created by the Initial Developer are Copyright (C) 2004-2007
 * the Initial Developer. All Rights Reserved.
 * Alternatively, the contents of this file may be used under the terms of
 * either the GNU General Public License Version 2 or later (the "GPL"), or
 * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
 * in which case the provisions of the GPL or the LGPL are applicable instead
 * of those above. If you wish to allow use of your version of this file only
 * under the terms of either the GPL or the LGPL, and not to allow others to
 * use your version of this file under the terms of the MPL, indicate your
 * decision by deleting the provisions above and replace them with the notice
 * and other provisions required by the GPL or the LGPL. If you do not delete
 * the provisions above, a recipient may use your version of this file under
 * the terms of any one of the MPL, the GPL or the LGPL.
 * ***** END LICENSE BLOCK ***** */
using namespace avmplus;
#ifdef FEATURE_NANOJIT

const uint8_t operandCount[] = {
/* 0 */ 2, 2, /*trace*/0, /*nearskip*/0, /*skip*/0, /*neartramp*/0, /*tramp*/0, 2, 2, 2,
/* 10 */ /*param*/0, 2, 2, 2, 2, 2, 2, 2, /*call*/0, /*loop*/0,
/* 20 */ /*x*/0, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* 30 */ 2, 2, /*short*/0, /*int*/0, 2, 2, /*neg*/1, 2, 2, 2,
/* 40 */ /*callh*/1, 2, 2, 2, /*not*/1, 2, 2, 2, /*xt*/1, /*xf*/1,
/* 50 */ /*qlo*/1, /*qhi*/1, 2, /*ov*/1, /*cs*/1, 2, 2, 2, 2, 2,
/* 60 */ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* 70 */ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* 80 */ 2, 2, /*fcall*/0, 2, 2, 2, 2, 2, 2, 2,
/* 90 */ 2, 2, 2, 2, 2, 2, 2, /*quad*/0, 2, 2,
/* 100 */ /*fneg*/1, 2, 2, 2, 2, 2, /*i2f*/1, /*u2f*/1, 2, 2,
/* 110 */ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* 120 */ 2, 2, 2, 2, 2, 2, 2, 2,
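// operandCount[op] gives the number of LIns operands each opcode carries; opcodes
// that encode immediates, calls, guards, skips and tramps are listed explicitly as
// 0 or 1. LInsHashSet::hashcode/equals and live() below use this table to walk an
// instruction's operands generically.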
// LIR verbose specific
const char* lirNames[] = {
/* 0-9 */ "0","1","trace","nearskip","skip","neartramp","tramp","7","8","9",
/* 10-19 */ "param","st","ld","13","sti","15","16","17","call","loop",
/* 20-29 */ "x","21","22","23","24","25","feq","flt","fgt","fle",
/* 30-39 */ "fge","cmov","short","int","ldc","","neg","add","sub","mul",
/* 40-49 */ "callh","and","or","xor","not","lsh","rsh","ush","xt","xf",
/* 50-59 */ "qlo","qhi","ldcb","ov","cs","eq","lt","gt","le","ge",
/* 60-63 */ "ult","ugt","ule","uge",
/* 64-69 */ "LIR64","65","66","67","68","69",
/* 70-79 */ "70","71","72","73","74","stq","ldq","77","stqi","79",
/* 80-89 */ "80","81","fcall","83","84","85","86","87","88","89",
/* 90-99 */ "90","91","92","93","94","95","96","quad","98","99",
/* 100-109 */ "fneg","fadd","fsub","fmul","fdiv","qjoin","i2f","u2f","108","109",
/* 110-119 */ "110","111","112","113","114","115","116","117","118","119",
/* 120-127 */ "120","121","122","123","124","125","126","127"
#endif /* NJ_VERBOSE */
// @todo fixup move to nanojit.h
#define counter_value(x) x
#endif /* NJ_PROFILE */

//static int32_t buffer_count = 0;

LirBuffer::LirBuffer(Fragmento* frago, const CallInfo* functions)
    : _frago(frago), _functions(functions)
    _start = pageAlloc();
    verbose_only(_start->seq = 0;)
    _unused = &_start->lir[0];
    //fprintf(stderr, "LirBuffer %x start %x\n", (int)this, (int)_start);

LirBuffer::~LirBuffer()
    //fprintf(stderr, "~LirBuffer %x start %x\n", (int)this, (int)_start);

void LirBuffer::clear()
    // free all the memory and clear the stats
    debug_only( if (_start) validate();)
    Page *next = _start->next;
    _frago->pageFree( _start );
    NanoAssert(_stats.pages == 0);

void LirBuffer::validate() const
    NanoAssert(count == _stats.pages);
    NanoAssert(_noMem || _unused->page()->next == 0);
    NanoAssert(_noMem || samepage(last,_unused));

int LirBuffer::insCount() {
int LirBuffer::byteCount() {
    return (_stats.pages-1) * (sizeof(Page)-sizeof(PageHeader)) +
        (_unused - &_unused->page()->lir[0]) * sizeof(LIns);

Page* LirBuffer::pageAlloc()
    Page* page = _frago->pageAlloc();
    page->next = 0;  // end of list marker for new page

LInsp LirBuffer::next()
    debug_only( validate(); )

bool LirBuffer::addPage()
    LInsp last = _unused;
    // we need to pull in a new page and stamp the old one with a link to it
    Page *lastPage = last->page();
    Page *page = pageAlloc();
    lastPage->next = page;  // forward link to next page
    _unused = &page->lir[0];
    verbose_only(page->seq = lastPage->seq+1;)
    //fprintf(stderr, "Fragmento::ensureRoom stamping %x with %x; start %x unused %x\n", (int)pageBottom(last), (int)page, (int)_start, (int)_unused);
    debug_only( validate(); )

    // mem failure, rewind pointer to top of page so that subsequent instruction works
    verbose_only(if (_frago->assm()->_verbose) _frago->assm()->outputf("page alloc failed");)
    _unused = &lastPage->lir[0];

bool LirBufWriter::ensureRoom(uint32_t count)
    LInsp last = _buf->next();
    if (!samepage(last,last+2*count))
        // link LIR stream back to prior instruction (careful insFar relies on _unused...)
        insFar(LIR_skip, last-1);
    return !_buf->outOmem();

LInsp LirBuffer::commit(uint32_t count)
    debug_only(validate();)
    NanoAssertMsg( samepage(_unused, _unused+count), "You need to call ensureRoom first!" );
    return _unused += count;

uint32_t LIns::reference(LIns *r) const
    int delta = this-r-1;
    NanoAssert(isU8(delta));

LIns* LIns::deref(int32_t off) const
    LInsp i = (LInsp) this-1 - off;
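// Operand references are stored as small unsigned deltas (isU8 above) back from the
// referring instruction, and deref() walks back by that delta; when a value lies
// further away than a byte-sized offset can reach, the writer plants a LIR_tramp so
// the reference can be made through the trampoline (see ensureReferenceable below).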
LInsp LirBufWriter::ensureReferenceable(LInsp i, int32_t addedDistance)
    NanoAssert(!i->isTramp());
    LInsp next = _buf->next();
    LInsp from = next + 2*addedDistance;
    if (canReference(from,i))
    if (i == _buf->sp && spref && canReference(from, spref))
    if (i == _buf->rp && rpref && canReference(from, rpref))

    // need a trampoline to get to i
    LInsp tramp = insFar(LIR_tramp, i);
    NanoAssert( tramp->ref() == i );

    else if (i == _buf->rp)
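// sp and rp are referenced by nearly every stack operation, so the writer remembers
// the most recent trampoline to each (spref/rpref above) and hands that back while it
// is still in range, rather than emitting a fresh LIR_tramp on every reference.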
LInsp LirBufWriter::insStore(LInsp val, LInsp base, LInsp off)
    LOpcode op = val->isQuad() ? LIR_stq : LIR_st;
    NanoAssert(val && base && off);
    LInsp r1 = ensureReferenceable(val,3);
    LInsp r2 = ensureReferenceable(base,2);
    LInsp r3 = ensureReferenceable(off,1);
    LInsp l = _buf->next();

LInsp LirBufWriter::insStorei(LInsp val, LInsp base, int32_t d)
    LOpcode op = val->isQuad() ? LIR_stqi : LIR_sti;
    NanoAssert(val && base && isS8(d));
    LInsp r1 = ensureReferenceable(val,2);
    LInsp r2 = ensureReferenceable(base,1);
    LInsp l = _buf->next();
    l->setDisp(int8_t(d));

LInsp LirBufWriter::ins0(LOpcode op)
    LInsp l = _buf->next();

LInsp LirBufWriter::ins1(LOpcode op, LInsp o1)
    LInsp r1 = ensureReferenceable(o1,1);
    LInsp l = _buf->next();

LInsp LirBufWriter::ins2(LOpcode op, LInsp o1, LInsp o2)
    LInsp r1 = ensureReferenceable(o1,2);
    LInsp r2 = ensureReferenceable(o2,1);
    LInsp l = _buf->next();

LInsp LirBufWriter::insLoad(LOpcode op, LInsp base, LInsp d)
    return ins2(op,base,d);

LInsp LirBufWriter::insGuard(LOpcode op, LInsp c, SideExit *x)
    LInsp data = skip(SideExitSize(x));
    *((SideExit*)data->payload()) = *x;
    return ins2(op, c, data);

LInsp LirBufWriter::insParam(int32_t arg)
    LInsp l = _buf->next();
    l->initOpcode(LIR_param);
    l->c.imm8a = Assembler::argRegs[arg];

#define isS24(x) (((int32_t(x)<<8)>>8) == (x))

LInsp LirBufWriter::insFar(LOpcode op, LInsp target)
    NanoAssert(op == LIR_skip || op == LIR_tramp);
    LInsp l = _buf->next();
    l->initOpcode(LOpcode(op-1)); // nearskip or neartramp
    // write the pointer and instruction
    *((LInsp*)(l-1)) = target;

LInsp LirBufWriter::insImm(int32_t imm)
    LInsp l = _buf->next();
    l->initOpcode(LIR_short);
    int32_t* l = (int32_t*)_buf->next();
    return ins0(LIR_int);

LInsp LirBufWriter::insImmq(uint64_t imm)
    int32_t* l = (int32_t*)_buf->next();
    l[1] = int32_t(imm>>32);
    return ins0(LIR_quad);

LInsp LirBufWriter::skip(size_t size)
    const uint32_t n = (size+sizeof(LIns)-1)/sizeof(LIns);
    LInsp last = _buf->next()-1;
    return insFar(LIR_skip, last);

LInsp LirReader::read()
    LOpcode iop = i->opcode();
    i -= argwords(i->argc())+1;
    NanoAssert(i->ref() != i);
    NanoAssert(samepage(i, i-2));
    NanoAssert(samepage(i, i-3));
    _i = 0;  // start of trace
    while (is_trace_skip_tramp(iop)||iop==LIR_2);
bool FASTCALL isCmp(LOpcode c) {
    return c >= LIR_eq && c <= LIR_uge || c >= LIR_feq && c <= LIR_fge;

bool FASTCALL isCond(LOpcode c) {
    return (c == LIR_ov) || (c == LIR_cs) || isCmp(c);

bool LIns::isCmp() const {
    return nanojit::isCmp(u.code);

bool LIns::isCond() const {
    return nanojit::isCond(u.code);

bool LIns::isCall() const
    return (u.code&~LIR64) == LIR_call;

bool LIns::isGuard() const
    return u.code==LIR_x || u.code==LIR_xf || u.code==LIR_xt || u.code==LIR_loop;

bool LIns::isStore() const
    int c = u.code & ~LIR64;
    return c == LIR_st || c == LIR_sti;

bool LIns::isLoad() const
    return u.code == LIR_ldq || u.code == LIR_ld || u.code == LIR_ldc;

bool LIns::isconst() const
    return (opcode()&~1) == LIR_short;

bool LIns::isconstval(int32_t val) const
    return isconst() && constval()==val;

bool LIns::isconstq() const
    return isop(LIR_quad);

bool LIns::isconstp() const

bool FASTCALL isCse(LOpcode op) {
    op = LOpcode(op & ~LIR64);
    return op >= LIR_feq && op <= LIR_uge;

bool LIns::isCse(const CallInfo *functions) const
    return nanojit::isCse(u.code) || isCall() && functions[fid()]._cse;
void LIns::setimm16(int32_t x)
    NanoAssert(isS16(x));
    i.imm16 = int16_t(x);

void LIns::setresv(uint32_t resv)
    NanoAssert(isU8(resv));

void LIns::initOpcode(LOpcode op)

void LIns::setOprnd1(LInsp r)
    u.oprnd_1 = reference(r);

void LIns::setOprnd2(LInsp r)
    u.oprnd_2 = reference(r);

void LIns::setOprnd3(LInsp r)
    u.oprnd_3 = reference(r);

void LIns::setDisp(int8_t d)

LInsp LIns::oprnd1() const
    return deref(u.oprnd_1);

LInsp LIns::oprnd2() const
    return deref(u.oprnd_2);

LInsp LIns::oprnd3() const
    return deref(u.oprnd_3);

void *LIns::payload() const
    NanoAssert(opcode()==LIR_skip || opcode()==LIR_nearskip);
    return (void*) (ref()+1);

LIns* LirWriter::ins2i(LOpcode v, LIns* oprnd1, int32_t imm)
    return ins2(v, oprnd1, insImm(imm));

bool insIsS16(LInsp i)
    int c = i->constval();
    if (i->isop(LIR_cmov)) {
        LInsp vals = i->oprnd2();
        return insIsS16(vals->oprnd1()) && insIsS16(vals->oprnd2());
    // many other possibilities too.

LIns* ExprFilter::ins1(LOpcode v, LIns* i)
        return insImm(int32_t(i->constvalq()));
        if (i->isop(LIR_qjoin))
    else if (v == LIR_qhi) {
        return insImm(int32_t(i->constvalq()>>32));
        if (i->isop(LIR_qjoin))
    else if (v == i->opcode() && (v == LIR_not || v == LIR_neg || v == LIR_fneg)) {
    return out->ins1(v, i);
LIns* ExprFilter::ins2(LOpcode v, LIns* oprnd1, LIns* oprnd2)
    NanoAssert(oprnd1 && oprnd2);
    if (oprnd2->oprnd1() == oprnd2->oprnd2()) {
        return oprnd2->oprnd1();
    if (oprnd1 == oprnd2)
        if (v == LIR_xor || v == LIR_sub ||
            v == LIR_ult || v == LIR_ugt || v == LIR_gt || v == LIR_lt)
        if (v == LIR_or || v == LIR_and)
        if (v == LIR_le || v == LIR_ule || v == LIR_ge || v == LIR_uge) {
            // x <= x == 1; x >= x == 1
    if (oprnd1->isconst() && oprnd2->isconst())
        int c1 = oprnd1->constval();
        int c2 = oprnd2->constval();
        if (v == LIR_qjoin) {
            uint64_t q = c1 | uint64_t(c2)<<32;
        return insImm(c1 == c2);
        return insImm((c2 != 0) && ((c1 + c2) <= c1));
        return insImm((c2 != 0) && ((uint32_t(c1) + uint32_t(c2)) <= uint32_t(c1)));
        return insImm(c1 < c2);
        return insImm(c1 > c2);
        return insImm(c1 <= c2);
        return insImm(c1 >= c2);
        return insImm(uint32_t(c1) < uint32_t(c2));
        return insImm(uint32_t(c1) > uint32_t(c2));
        return insImm(uint32_t(c1) <= uint32_t(c2));
        return insImm(uint32_t(c1) >= uint32_t(c2));
        return insImm(int32_t(c1) >> int32_t(c2));
        return insImm(int32_t(c1) << int32_t(c2));
        return insImm(uint32_t(c1) >> int32_t(c2));
    else if (oprnd1->isconstq() && oprnd2->isconstq())
        double c1 = oprnd1->constvalf();
        double c2 = oprnd2->constvalf();
        return insImm(c1 == c2);
        return insImm(c1 < c2);
        return insImm(c1 > c2);
        return insImm(c1 <= c2);
        return insImm(c1 >= c2);
    else if (oprnd1->isconst() && !oprnd2->isconst())
        if (v == LIR_add || v == LIR_mul ||
            v == LIR_fadd || v == LIR_fmul ||
            v == LIR_xor || v == LIR_or || v == LIR_and ||
        else if (v >= LIR_lt && v <= LIR_uge) {
            // move const to rhs, swap the operator
        else if (v == LIR_cmov) {
            // const ? x : y => return x or y depending on const
            return oprnd1->constval() ? oprnd2->oprnd1() : oprnd2->oprnd2();

    if (oprnd2->isconst())
        int c = oprnd2->constval();
        if (v == LIR_add && oprnd1->isop(LIR_add) && oprnd1->oprnd2()->isconst()) {
            // add(add(x,c1),c2) => add(x,c1+c2)
            c += oprnd1->oprnd2()->constval();
            oprnd1 = oprnd1->oprnd1();
        else if (v == LIR_sub && oprnd1->isop(LIR_add) && oprnd1->oprnd2()->isconst()) {
            // sub(add(x,c1),c2) => add(x,c1-c2)
            c = oprnd1->oprnd2()->constval() - c;
            oprnd1 = oprnd1->oprnd1();
        else if (v == LIR_rsh && c == 16 && oprnd1->isop(LIR_lsh) &&
                 oprnd1->oprnd2()->isconstval(16)) {
            if (insIsS16(oprnd1->oprnd1())) {
779 return oprnd1
->oprnd1();
782 else if (v
== LIR_ult
) {
783 if (oprnd1
->isop(LIR_cmov
)) {
784 LInsp a
= oprnd1
->oprnd2()->oprnd1();
785 LInsp b
= oprnd1
->oprnd2()->oprnd2();
786 if (a
->isconst() && b
->isconst()) {
787 bool a_lt
= uint32_t(a
->constval()) < uint32_t(oprnd2
->constval());
788 bool b_lt
= uint32_t(b
->constval()) < uint32_t(oprnd2
->constval());
797 if (v
== LIR_add
|| v
== LIR_or
|| v
== LIR_xor
||
798 v
== LIR_sub
|| v
== LIR_lsh
|| v
== LIR_rsh
|| v
== LIR_ush
)
800 else if (v
== LIR_and
|| v
== LIR_mul
)
802 else if (v
== LIR_eq
&& oprnd1
->isop(LIR_or
) &&
803 oprnd1
->oprnd2()->isconst() &&
804 oprnd1
->oprnd2()->constval() != 0) {
805 // (x or c) != 0 if c != 0
809 else if (c
== -1 || c
== 1 && oprnd1
->isCmp()) {
811 // x | -1 = -1, cmp | 1 = 1
814 else if (v
== LIR_and
) {
815 // x & -1 = x, cmp & 1 = cmp
    if (v == LIR_qjoin && oprnd1->isop(LIR_qlo) && oprnd2->isop(LIR_qhi)
        && (i = oprnd1->oprnd1()) == oprnd2->oprnd1()) {
        // qjoin(qlo(x),qhi(x)) == x

    return out->ins2(v, oprnd1, oprnd2);

LIns* ExprFilter::insGuard(LOpcode v, LInsp c, SideExit *x)
    if (v == LIR_xt || v == LIR_xf) {
        if (v == LIR_xt && !c->constval() || v == LIR_xf && c->constval()) {
            return 0; // no guard needed
            // need a way to EOT now, since this is trace end.
            return out->insGuard(LIR_x, out->insImm(1), x);
        while (c->isop(LIR_eq) && c->oprnd1()->isCmp() &&
               c->oprnd2()->isconstval(0)) {
            // xt(eq(cmp,0)) => xf(cmp) or xf(eq(cmp,0)) => xt(cmp)
    return out->insGuard(v, c, x);

LIns* LirWriter::insLoadi(LIns *base, int disp)
    return insLoad(LIR_ld,base,disp);

LIns* LirWriter::insLoad(LOpcode op, LIns *base, int disp)
    return insLoad(op, base, insImm(disp));

LIns* LirWriter::ins_eq0(LIns* oprnd1)
    return ins2i(LIR_eq, oprnd1, 0);

LIns* LirWriter::qjoin(LInsp lo, LInsp hi)
    return ins2(LIR_qjoin, lo, hi);

LIns* LirWriter::insImmPtr(const void *ptr)
    return sizeof(ptr) == 8 ? insImmq((uintptr_t)ptr) : insImm((intptr_t)ptr);

LIns* LirWriter::ins_choose(LIns* cond, LIns* iftrue, LIns* iffalse, bool hasConditionalMove)
    // if not a conditional, make it implicitly an ==0 test (then flop results)
        cond = ins_eq0(cond);
    if (hasConditionalMove)
        return ins2(LIR_cmov, cond, ins2(LIR_2, iftrue, iffalse));

    // @todo -- it might be better to use a short conditional branch rather than
    // the bit-twiddling on systems that don't provide a conditional move instruction.
    LInsp ncond = ins1(LIR_neg, cond); // cond ? -1 : 0
        ins2(LIR_and, iftrue, ncond),
        ins2(LIR_and, iffalse, ins1(LIR_not, ncond)));

LIns* LirBufWriter::insCall(uint32_t fid, LInsp args[])
    static const LOpcode k_callmap[] = { LIR_call, LIR_fcall, LIR_call, LIR_callh };
    const CallInfo& ci = _functions[fid];
    uint32_t argt = ci._argtypes;
    LOpcode op = k_callmap[argt & 3];
    uint32_t argc = ci.get_sizes(sizes);

    LInsp args2[5*2]; // arm could require 2 args per double
    for (int32_t i = 0; i < 5; i++) {
        ArgSize a = ArgSize(argt&3);
        if (a == ARGSIZE_F) {
            args2[j++] = ins1(LIR_qhi, q);
            args2[j++] = ins1(LIR_qlo, q);
        } else if (a != ARGSIZE_NONE) {
            args2[j++] = args[i];
    NanoAssert(j == argc);

    NanoAssert(argc < 8);
    uint32_t words = argwords(argc);
    ensureRoom(words+argc+1); // ins size + possible tramps
    for (uint32_t i=0; i < argc; i++)
        args[i] = ensureReferenceable(args[i], argc-i);
    uint8_t* offs = (uint8_t*)_buf->next();
    LIns *l = _buf->next() + words;
    for (uint32_t i=0; i < argc; i++)
        offs[i] = (uint8_t) l->reference(args[i]);
    l->initOpcode(op==LIR_callh ? LIR_call : op);
    _buf->commit(words+1);
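// A call is laid out as `words` argument-offset words immediately before the call
// LIns itself: each byte in offs[] records the 8-bit backward reference to one
// argument, and a double (ARGSIZE_F) argument is first split into a qhi/qlo pair
// so that every entry fits a one-word argument slot.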
using namespace avmplus;

StackFilter::StackFilter(LirFilter *in, GC *gc, Fragment *frag, LInsp sp)
    : LirFilter(in), gc(gc), frag(frag), sp(sp), top(0)

LInsp StackFilter::read()
    LInsp i = in->read();
    LInsp base = i->oprnd2();
    LInsp v = i->oprnd1();
    int d = i->immdisp() >> 2;
    if (stk.get(d) && stk.get(d-1)) {
    else if (i->isGuard())
        top = getTop(i) >> 2;
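// Reading the stream backwards, StackFilter drops stores into the stack area
// addressed off sp: stk tracks which word-sized slots have already been stored
// later in the stream, so an earlier store to the same slot(s) is dead and is
// filtered out; each guard resets the tracked frame top from its exit (getTop).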
// inlined/separated version of SuperFastHash
// This content is copyrighted by Paul Hsieh, For reference see : http://www.azillionmonkeys.com/qed/hash.html
inline uint32_t _hash8(uint32_t hash, const uint8_t data)

inline uint32_t _hash32(uint32_t hash, const uint32_t data)
    const uint32_t dlo = data & 0xffff;
    const uint32_t dhi = data >> 16;
    const uint32_t tmp = (dhi << 11) ^ hash;
    hash = (hash << 16) ^ tmp;

inline uint32_t _hashptr(uint32_t hash, const void* data)
#ifdef NANOJIT_64BIT
    hash = _hash32(hash, uint32_t(uintptr_t(data) >> 32));
    hash = _hash32(hash, uint32_t(uintptr_t(data)));
    return _hash32(hash, uint32_t(data));

inline uint32_t _hashfinish(uint32_t hash)
    /* Force "avalanching" of final 127 bits */

LInsHashSet::LInsHashSet(GC* gc) :
    m_list(gc, kInitialCap), m_used(0), m_gc(gc)
    m_list.set_meminfo_name("LInsHashSet.list");
    m_list.set(kInitialCap-1, 0);
/*static*/ uint32_t FASTCALL LInsHashSet::hashcode(LInsp i)
    const LOpcode op = i->opcode();
    return hashimm(i->imm16());
    return hashimm(i->imm32());
    return hashimmq(i->constvalq());
    int32_t argc = i->argc();
    NanoAssert(argc < 10);
    for (int32_t j=0; j < argc; j++)
        args[j] = i->arg(j);
    return hashcall(i->fid(), argc, args);
    if (operandCount[op] == 2)
        return hash2(op, i->oprnd1(), i->oprnd2());
    return hash1(op, i->oprnd1());

/*static*/ bool FASTCALL LInsHashSet::equals(LInsp a, LInsp b)
    AvmAssert(a->opcode() == b->opcode());
    const LOpcode op = a->opcode();
    return a->imm16() == b->imm16();
    return a->imm32() == b->imm32();
    return a->constvalq() == b->constvalq();
    if (a->fid() != b->fid()) return false;
    uint32_t argc=a->argc();
    NanoAssert(argc == b->argc());
    for (uint32_t i=0; i < argc; i++)
        if (a->arg(i) != b->arg(i))
    const uint32_t count = operandCount[op];
    if ((count >= 1 && a->oprnd1() != b->oprnd1()) ||
        (count >= 2 && a->oprnd2() != b->oprnd2()))

void FASTCALL LInsHashSet::grow()
    const uint32_t newcap = m_list.size() << 1;
    InsList newlist(m_gc, newcap);
    newlist.set_meminfo_name("LInsHashSet.list");
    newlist.set(newcap-1, 0);
    for (uint32_t i=0, n=m_list.size(); i < n; i++)
        LInsp name = m_list.get(i);
        if (!name) continue;
        uint32_t j = find(name, hashcode(name), newlist, newcap);
        newlist.set(j, name);
    m_list.become(newlist);

uint32_t FASTCALL LInsHashSet::find(LInsp name, uint32_t hash, const InsList& list, uint32_t cap)
    const uint32_t bitmask = (cap - 1) & ~0x1;
    uint32_t n = 7 << 1;
    while ((k = list.get(hash)) != NULL &&
           (!LIns::sameop(k,name) || !equals(k, name)))
        hash = (hash + (n += 2)) & bitmask; // quadratic probe
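// The table capacity is kept a power of two (grow() doubles it), so masking with
// (cap - 1) wraps each probe back into the table; the step (n += 2) grows by two
// on every collision, which is the "quadratic probe" the comment above refers to.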
LInsp LInsHashSet::add(LInsp name, uint32_t k)
    // this is relatively short-lived so let's try a more aggressive load factor
    // in the interest of improving performance
    if (((m_used+1)<<1) >= m_list.size()) // 0.50
        k = find(name, hashcode(name), m_list, m_list.size());
    NanoAssert(!m_list.get(k));
    m_list.set(k, name);

void LInsHashSet::replace(LInsp i)
    uint32_t k = find(i, hashcode(i), m_list, m_list.size());
    if (m_list.get(k)) {
        // already there, so replace it

uint32_t LInsHashSet::hashimm(int32_t a) {
    return _hashfinish(_hash32(0,a));

uint32_t LInsHashSet::hashimmq(uint64_t a) {
    uint32_t hash = _hash32(0, uint32_t(a >> 32));
    return _hashfinish(_hash32(hash, uint32_t(a)));

uint32_t LInsHashSet::hash1(LOpcode op, LInsp a) {
    uint32_t hash = _hash8(0,uint8_t(op));
    return _hashfinish(_hashptr(hash, a));

uint32_t LInsHashSet::hash2(LOpcode op, LInsp a, LInsp b) {
    uint32_t hash = _hash8(0,uint8_t(op));
    hash = _hashptr(hash, a);
    return _hashfinish(_hashptr(hash, b));

uint32_t LInsHashSet::hashcall(uint32_t fid, uint32_t argc, LInsp args[]) {
    uint32_t hash = _hash32(0,fid);
    for (int32_t j=argc-1; j >= 0; j--)
        hash = _hashptr(hash,args[j]);
    return _hashfinish(hash);

LInsp LInsHashSet::find32(int32_t a, uint32_t &i)
    uint32_t cap = m_list.size();
    const InsList& list = m_list;
    const uint32_t bitmask = (cap - 1) & ~0x1;
    uint32_t hash = hashimm(a) & bitmask;
    uint32_t n = 7 << 1;
    while ((k = list.get(hash)) != NULL &&
           (!k->isconst() || k->constval() != a))
        hash = (hash + (n += 2)) & bitmask; // quadratic probe

LInsp LInsHashSet::find64(uint64_t a, uint32_t &i)
    uint32_t cap = m_list.size();
    const InsList& list = m_list;
    const uint32_t bitmask = (cap - 1) & ~0x1;
    uint32_t hash = hashimmq(a) & bitmask;
    uint32_t n = 7 << 1;
    while ((k = list.get(hash)) != NULL &&
           (!k->isconstq() || k->constvalq() != a))
        hash = (hash + (n += 2)) & bitmask; // quadratic probe

LInsp LInsHashSet::find1(LOpcode op, LInsp a, uint32_t &i)
    uint32_t cap = m_list.size();
    const InsList& list = m_list;
    const uint32_t bitmask = (cap - 1) & ~0x1;
    uint32_t hash = hash1(op,a) & bitmask;
    uint32_t n = 7 << 1;
    while ((k = list.get(hash)) != NULL &&
           (k->opcode() != op || k->oprnd1() != a))
        hash = (hash + (n += 2)) & bitmask; // quadratic probe

LInsp LInsHashSet::find2(LOpcode op, LInsp a, LInsp b, uint32_t &i)
    uint32_t cap = m_list.size();
    const InsList& list = m_list;
    const uint32_t bitmask = (cap - 1) & ~0x1;
    uint32_t hash = hash2(op,a,b) & bitmask;
    uint32_t n = 7 << 1;
    while ((k = list.get(hash)) != NULL &&
           (k->opcode() != op || k->oprnd1() != a || k->oprnd2() != b))
        hash = (hash + (n += 2)) & bitmask; // quadratic probe

bool argsmatch(LInsp i, uint32_t argc, LInsp args[])
    for (uint32_t j=0; j < argc; j++)
        if (i->arg(j) != args[j])

LInsp LInsHashSet::findcall(uint32_t fid, uint32_t argc, LInsp args[], uint32_t &i)
    uint32_t cap = m_list.size();
    const InsList& list = m_list;
    const uint32_t bitmask = (cap - 1) & ~0x1;
    uint32_t hash = hashcall(fid, argc, args) & bitmask;
    uint32_t n = 7 << 1;
    while ((k = list.get(hash)) != NULL &&
           (!k->isCall() || k->fid() != fid || !argsmatch(k, argc, args)))
        hash = (hash + (n += 2)) & bitmask; // quadratic probe

SideExit *LIns::exit()
    NanoAssert(isGuard());
    return (SideExit*)oprnd2()->payload();
class RetiredEntry: public GCObject
    List<LInsp, LIST_NonGCObjects> live;
    RetiredEntry(GC *gc): live(gc) {}

    SortedMap<LInsp,LInsp,LIST_NonGCObjects> live;
    List<RetiredEntry*, LIST_GCObjects> retired;
    LiveTable(GC *gc) : live(gc), retired(gc), maxlive(0) {}
        for (size_t i = 0; i < retired.size(); i++) {
            delete retired.get(i);

    void add(LInsp i, LInsp use) {
        if (!i->isconst() && !i->isconstq() && !live.containsKey(i)) {

    void retire(LInsp i, GC *gc) {
        RetiredEntry *e = new (gc) RetiredEntry(gc);
        for (int j=0, n=live.size(); j < n; j++) {
            LInsp l = live.keyAt(j);
            if (!l->isStore() && !l->isGuard())
        if ((size = e->live.size()) > maxlive)

    bool contains(LInsp i) {
        return live.containsKey(i);
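// LiveTable maintains the set of expressions that are live while live() below walks
// the LIR stream backwards; retire() snapshots that set (minus stores and guards)
// into a RetiredEntry per instruction so the results can be printed in forward order,
// and maxlive records the peak size as a rough measure of register pressure.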
void live(GC *gc, Assembler *assm, Fragment *frag)
    // traverse backwards to find live exprs and a few other stats.
    LInsp sp = frag->lirbuf->sp;
    LInsp rp = frag->lirbuf->rp;
    LirBuffer *lirbuf = frag->lirbuf;
    LirReader br(lirbuf);
    StackFilter sf(&br, gc, frag, sp);
    StackFilter r(&sf, gc, frag, rp);
    live.add(frag->lirbuf->state, r.pos());
    for (LInsp i = r.read(); i != 0; i = r.read())
        // first handle side-effect instructions
        if (i->isStore() || i->isGuard() ||
            i->isCall() && !assm->callInfoFor(i->fid())->_cse)

        // now propagate liveness
        if (live.contains(i))
            live.add(i->oprnd2(),i); // base
            live.add(i->oprnd1(),i); // val
        else if (i->isop(LIR_cmov)) {
            live.add(i->oprnd1(),i);
            live.add(i->oprnd2()->oprnd1(),i);
            live.add(i->oprnd2()->oprnd2(),i);
        else if (operandCount[i->opcode()] == 1) {
            live.add(i->oprnd1(),i);
        else if (operandCount[i->opcode()] == 2) {
            live.add(i->oprnd1(),i);
            live.add(i->oprnd2(),i);
        else if (i->isCall()) {
            for (int j=0, c=i->argc(); j < c; j++)
                live.add(i->arg(j),i);

    assm->outputf("live instruction count %ld, total %ld, max pressure %d",
        live.retired.size(), total, live.maxlive);
    assm->outputf("side exits %ld", exits);

    // print live exprs, going forwards
    LirNameMap *names = frag->lirbuf->names;
    for (int j=live.retired.size()-1; j >= 0; j--)
        RetiredEntry *e = live.retired[j];
        char livebuf[1000], *s=livebuf;
        for (int k=0,n=e->live.size(); k < n; k++) {
            strcpy(s, names->formatRef(e->live[k]));
            NanoAssert(s < livebuf+sizeof(livebuf));
        printf("%-60s %s\n", livebuf, names->formatIns(e->i));
        if (e->i->isGuard())

LabelMap::Entry::~Entry()

LirNameMap::Entry::~Entry()

LirNameMap::~LirNameMap()
    while ((e = names.removeLast()) != NULL) {

void LirNameMap::addName(LInsp i, Stringp name) {
    if (!names.containsKey(i)) {
        Entry *e = new (labels->core->gc) Entry(name);

void LirNameMap::addName(LInsp i, const char *name) {
    addName(i, labels->core->newString(name));

void LirNameMap::copyName(LInsp i, const char *s, int suffix) {
    sprintf(s2,"%s%d", s,suffix);
    addName(i, labels->core->newString(s2));

void LirNameMap::formatImm(int32_t c, char *buf) {
    if (c >= 10000 || c <= -10000)
        sprintf(buf,"#%s",labels->format((void*)c));
        sprintf(buf,"%d", c);

const char* LirNameMap::formatRef(LIns *ref)
    char buffer[200], *buf=buffer;
    GC *gc = labels->core->gc;
    if (names.containsKey(ref)) {
        StringNullTerminatedUTF8 cname(gc, names.get(ref)->name);
        strcat(buf, cname.c_str());
    else if (ref->isconstq()) {
        formatImm(uint32_t(ref->constvalq()>>32), buf);
        formatImm(uint32_t(ref->constvalq()), buf);
    else if (ref->isconst()) {
        formatImm(ref->constval(), buf);
        if (ref->isCall()) {
            copyName(ref, _functions[ref->fid()]._name, funccounts.add(ref->fid()));
            copyName(ref, lirNames[ref->opcode()], lircounts.add(ref->opcode()));
        StringNullTerminatedUTF8 cname(gc, names.get(ref)->name);
        strcat(buf, cname.c_str());
    return labels->dup(buffer);
const char* LirNameMap::formatIns(LIns* i)
    if (!i->isStore() && !i->isGuard() && !i->isop(LIR_trace)) {
        sprintf(s, "%s = ", formatRef(i));

    LOpcode op = i->opcode();
        sprintf(s, "%s", formatRef(i));

        int32_t *p = (int32_t*) (i-2);
        sprintf(s, "#%X:%X", p[1], p[0]);

        sprintf(s, "%s", lirNames[op]);

        sprintf(s, "%s ( ", _functions[i->fid()]._name);
        for (int32_t j=i->argc()-1; j >= 0; j--) {
            sprintf(s, "%s ",formatRef(i->arg(j)));

        sprintf(s, "%s %s", lirNames[op], gpn(i->imm8()));

        sprintf(s, "%s %s", lirNames[op], formatRef(i->oprnd1()));

        sprintf(s, "%s %s, %s", lirNames[op],
            formatRef(i->oprnd1()),
            formatRef(i->oprnd2()));

        sprintf(s, "%s ? %s : %s",
            formatRef(i->oprnd1()),
            formatRef(i->oprnd2()->oprnd1()),
            formatRef(i->oprnd2()->oprnd2()));

        sprintf(s, "%s %s[%s]", lirNames[op],
            formatRef(i->oprnd1()),
            formatRef(i->oprnd2()));

        sprintf(s, "%s[%d] = %s",
            formatRef(i->oprnd2()),
            formatRef(i->oprnd1()));

    return labels->dup(sbuf);
CseFilter::CseFilter(LirWriter *out, GC *gc)
    : LirWriter(out), exprs(gc) {}

LIns* CseFilter::insImm(int32_t imm)
    LInsp found = exprs.find32(imm, k);
    return exprs.add(out->insImm(imm), k);

LIns* CseFilter::insImmq(uint64_t q)
    LInsp found = exprs.find64(q, k);
    return exprs.add(out->insImmq(q), k);

LIns* CseFilter::ins1(LOpcode v, LInsp a)
    NanoAssert(operandCount[v]==1);
    LInsp found = exprs.find1(v, a, k);
    return exprs.add(out->ins1(v,a), k);
    return out->ins1(v,a);

LIns* CseFilter::ins2(LOpcode v, LInsp a, LInsp b)
    NanoAssert(operandCount[v]==2);
    LInsp found = exprs.find2(v, a, b, k);
    return exprs.add(out->ins2(v,a,b), k);
    return out->ins2(v,a,b);

LIns* CseFilter::insLoad(LOpcode v, LInsp base, LInsp disp)
    NanoAssert(operandCount[v]==2);
    LInsp found = exprs.find2(v, base, disp, k);
    return exprs.add(out->insLoad(v,base,disp), k);
    return out->insLoad(v,base,disp);

LInsp CseFilter::insGuard(LOpcode v, LInsp c, SideExit *x)
    // conditional guard
    NanoAssert(operandCount[v]==1);
    LInsp found = exprs.find1(v, c, k);
    return exprs.add(out->insGuard(v,c,x), k);
    return out->insGuard(v, c, x);

LInsp CseFilter::insCall(uint32_t fid, LInsp args[])
    const CallInfo *c = &_functions[fid];
    uint32_t argc = c->count_args();
    LInsp found = exprs.findcall(fid, argc, args, k);
    return exprs.add(out->insCall(fid, args), k);
    return out->insCall(fid, args);
CseReader::CseReader(LirFilter *in, LInsHashSet *exprs, const CallInfo *functions)
    : LirFilter(in), exprs(exprs), functions(functions)

LInsp CseReader::read()
    LInsp i = in->read();
    if (i->isCse(functions))

LIns* FASTCALL callArgN(LIns* i, uint32_t n)
    return i->arg(i->argc()-n-1);

void compile(Assembler* assm, Fragment* triggerFrag)
    Fragmento *frago = triggerFrag->lirbuf->_frago;
    AvmCore *core = frago->core();
    verbose_only( StringList asmOutput(gc); )
    verbose_only( assm->_outputCache = &asmOutput; )

    verbose_only(if (assm->_verbose && core->config.verbose_live)
        live(gc, assm, triggerFrag);)

    bool treeCompile = core->config.tree_opt && (triggerFrag->kind == BranchTrace);
    RegAllocMap regMap(gc);
    NInsList loopJumps(gc);
    loopJumps.set_meminfo_name("LIR loopjumps");
    assm->beginAssembly(triggerFrag, &regMap);

    //fprintf(stderr, "recompile trigger %X kind %d\n", (int)triggerFrag, triggerFrag->kind);
    Fragment* root = triggerFrag;
        // recompile the entire tree
        root = triggerFrag->root;
        root->removeIntraLinks();
        root->unlink(assm);         // unlink all incoming jumps ; since the compile() can fail
        root->unlinkBranches(assm); // no one jumps into a branch (except from within the tree) so safe to clear the links table
        root->fragEntry = 0;
        root->releaseCode(frago);

        // do the tree branches
        Fragment* frag = root->treeBranches;
        // compile til no more frags
            assm->assemble(frag, loopJumps);
            verbose_only(if (assm->_verbose)
                assm->outputf("compiling branch %s ip %s",
                    frago->labels->format(frag),
                    frago->labels->format(frag->ip)); )

            NanoAssert(frag->kind == BranchTrace);
            RegAlloc* regs = new (gc) RegAlloc();
            assm->copyRegisters(regs);
            assm->releaseRegisters();
            SideExit* exit = frag->spawnedFrom->exit();
            regMap.put(exit, regs);
            frag = frag->treeBranches;
    // now the main trunk
    assm->assemble(root, loopJumps);
    verbose_only(if (assm->_verbose)
        assm->outputf("compiling trunk %s",
            frago->labels->format(root));)
    assm->endAssembly(root, loopJumps);

    // reverse output so that assembly is displayed low-to-high
    verbose_only( assm->_outputCache = 0; )
    verbose_only(for(int i=asmOutput.size()-1; i>=0; --i) { assm->outputf("%s",asmOutput.get(i)); } );

        root->fragEntry = 0;

    if (treeCompile) root->linkBranches(assm);

#if defined(NJ_VERBOSE)
    for (size_t i = 0; i < asmOutput.size(); i++) {
        gc->Free(asmOutput.get(i));

#endif /* FEATURE_NANOJIT */

#if defined(NJ_VERBOSE)
LabelMap::LabelMap(AvmCore *core, LabelMap* parent)
    : parent(parent), names(core->gc), addrs(core->config.verbose_addrs), end(buf), core(core)

LabelMap::~LabelMap()
    while ((e = names.removeLast()) != NULL) {

void LabelMap::add(const void *p, size_t size, size_t align, const char *name)
    if (!this || names.containsKey(p))
    add(p, size, align, core->newString(name));

void LabelMap::add(const void *p, size_t size, size_t align, Stringp name)
    if (!this || names.containsKey(p))
    Entry *e = new (core->gc) Entry(name, size<<align, align);

const char *LabelMap::format(const void *p)
    int i = names.findNear(p);
    const void *start = names.keyAt(i);
    Entry *e = names.at(i);
    const void *end = (const char*)start + e->size;
    avmplus::StringNullTerminatedUTF8 cname(core->gc, e->name);
    const char *name = cname.c_str();
        sprintf(b,"%p %s",p,name);
    else if (p > start && p < end) {
        int d = (intptr_t(p)-intptr_t(start)) >> e->align;
            sprintf(b, "%p %s+%d", p, name, d);
            sprintf(b,"%s+%d", name, d);
        return parent->format(p);
        sprintf(b, "%p", p);
    return parent->format(p);
    sprintf(b, "%p", p);

const char *LabelMap::dup(const char *b)
    int need = strlen(b)+1;
    if (end > buf+sizeof(buf)) {

// copy all labels to parent, adding newbase to label addresses
void LabelMap::promoteAll(const void *newbase)
    for (int i=0, n=names.size(); i < n; i++) {
        void *base = (char*)newbase + (intptr_t)names.keyAt(i);
        parent->names.put(base, names.at(i));

#endif // NJ_VERBOSE