/*
** IR assembler (SSA IR -> machine code).
** Copyright (C) 2005-2023 Mike Pall. See Copyright Notice in luajit.h
*/

#include "lj_ircall.h"
#include "lj_dispatch.h"
#include "lj_target.h"
/* -- Assembler state and common macros ----------------------------------- */

/* Assembler state. */
typedef struct ASMState {
  RegCost cost[RID_MAX];  /* Reference and blended allocation cost for regs. */

  MCode *mcp;		/* Current MCode pointer (grows down). */
  MCode *mclim;		/* Lower limit for MCode memory + red zone. */
#ifdef LUA_USE_ASSERT
  MCode *mcp_prev;	/* Red zone overflow check. */
#endif

  IRIns *ir;		/* Copy of pointer to IR instructions/constants. */
  jit_State *J;		/* JIT compiler state. */

#if LJ_TARGET_X86ORX64
  x86ModRM mrm;		/* Fused x86 address operand. */
#endif

  RegSet freeset;	/* Set of free registers. */
  RegSet modset;	/* Set of registers modified inside the loop. */
  RegSet weakset;	/* Set of weakly referenced registers. */
  RegSet phiset;	/* Set of PHI registers. */

  uint32_t flags;	/* Copy of JIT compiler flags. */
  int loopinv;		/* Loop branch inversion (0:no, 1:yes, 2:yes+CC_P). */

  int32_t evenspill;	/* Next even spill slot. */
  int32_t oddspill;	/* Next odd spill slot (or 0). */

  IRRef curins;		/* Reference of current instruction. */
  IRRef stopins;	/* Stop assembly before hitting this instruction. */
  IRRef orignins;	/* Original T->nins. */

  IRRef snapref;	/* Current snapshot is active after this reference. */
  IRRef snaprename;	/* Rename highwater mark for snapshot check. */
  SnapNo snapno;	/* Current snapshot number. */
  SnapNo loopsnapno;	/* Loop snapshot number. */
  int snapalloc;	/* Current snapshot needs allocation. */
  BloomFilter snapfilt1, snapfilt2;	/* Filled with snapshot refs. */

  IRRef fuseref;	/* Fusion limit (loopref, 0 or FUSE_DISABLED). */
  IRRef sectref;	/* Section base reference (loopref or 0). */
  IRRef loopref;	/* Reference of LOOP instruction (or 0). */

  BCReg topslot;	/* Number of slots for stack check (unless 0). */
  int32_t gcsteps;	/* Accumulated number of GC steps (per section). */

  GCtrace *T;		/* Trace to assemble. */
  GCtrace *parent;	/* Parent trace (or NULL). */

  MCode *mcbot;		/* Bottom of reserved MCode. */
  MCode *mctop;		/* Top of generated MCode. */
  MCode *mctoporig;	/* Original top of generated MCode. */
  MCode *mcloop;	/* Pointer to loop MCode (or NULL). */
  MCode *invmcp;	/* Points to invertible loop branch (or NULL). */
  MCode *flagmcp;	/* Pending opportunity to merge flag setting ins. */
  MCode *realign;	/* Realign loop if not NULL. */

#ifdef LUAJIT_RANDOM_RA
  /* Randomize register allocation. OK for fuzz testing, not for production. */
  uint64_t prngbits;
  PRNGState prngstate;
#endif

  intptr_t krefk[RID_NUM_KREF];

  IRRef1 phireg[RID_MAX];  /* PHI register references. */
  uint16_t parentmap[LJ_MAX_JSLOTS];  /* Parent instruction to RegSP map. */
} ASMState;

#ifdef LUA_USE_ASSERT
#define lj_assertA(c, ...)	lj_assertG_(J2G(as->J), (c), __VA_ARGS__)
#else
#define lj_assertA(c, ...)	((void)as)
#endif
#define IR(ref)			(&as->ir[(ref)])

#define ASMREF_TMP1		REF_TRUE	/* Temp. register. */
#define ASMREF_TMP2		REF_FALSE	/* Temp. register. */
#define ASMREF_L		REF_NIL		/* Stores register for L. */

/* Check for variant to invariant references. */
#define iscrossref(as, ref)	((ref) < as->sectref)

/* Inhibit memory op fusion from variant to invariant references. */
#define FUSE_DISABLED		(~(IRRef)0)
#define mayfuse(as, ref)	((ref) > as->fuseref)
#define neverfuse(as)		(as->fuseref == FUSE_DISABLED)
#define canfuse(as, ir)		(!neverfuse(as) && !irt_isphi((ir)->t))
#define opisfusableload(o) \
  ((o) == IR_ALOAD || (o) == IR_HLOAD || (o) == IR_ULOAD || \
   (o) == IR_FLOAD || (o) == IR_XLOAD || (o) == IR_SLOAD || (o) == IR_VLOAD)

/* Sparse limit checks using a red zone before the actual limit. */
#define MCLIM_REDZONE	64
static LJ_NORET LJ_NOINLINE void asm_mclimit(ASMState *as)
{
  lj_mcode_limiterr(as->J, (size_t)(as->mctop - as->mcp + 4*MCLIM_REDZONE));
}
static LJ_AINLINE void checkmclim(ASMState *as)
{
#ifdef LUA_USE_ASSERT
  if (as->mcp + MCLIM_REDZONE < as->mcp_prev) {
    IRIns *ir = IR(as->curins+1);
    lj_assertA(0, "red zone overflow: %p IR %04d %02d %04d %04d\n", as->mcp,
	       as->curins+1-REF_BIAS, ir->o, ir->op1-REF_BIAS, ir->op2-REF_BIAS);
  }
#endif
  if (LJ_UNLIKELY(as->mcp < as->mclim)) asm_mclimit(as);
#ifdef LUA_USE_ASSERT
  as->mcp_prev = as->mcp;
#endif
}
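/* Usage sketch (illustrative, not code from this file): code is generated
** backwards, so emitters decrement as->mcp first and check afterwards. The
** red zone lets a single emit step overshoot as->mclim by up to
** MCLIM_REDZONE bytes before the sparse check fires, assuming no single
** step writes more than that:
**
**   *--as->mcp = op;     (emit one hypothetical opcode byte, growing down)
**   checkmclim(as);      (throws via asm_mclimit() if the limit is hit)
*/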
#if RID_NUM_KREF > 0
#define ra_iskref(ref)		((ref) < RID_NUM_KREF)
#define ra_krefreg(ref)		((Reg)(RID_MIN_KREF + (Reg)(ref)))
#define ra_krefk(as, ref)	(as->krefk[(ref)])

static LJ_AINLINE void ra_setkref(ASMState *as, Reg r, intptr_t k)
{
  IRRef ref = (IRRef)(r - RID_MIN_KREF);
  as->krefk[ref] = k;
  as->cost[r] = REGCOST(ref, ref);
}
#else
#define ra_iskref(ref)		0
#define ra_krefreg(ref)		RID_MIN_GPR
#define ra_krefk(as, ref)	0
#endif
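/* Example (illustrative): ra_allock() further down stores an arbitrary
** constant in as->krefk and tags the register's cost word with a kref
** below RID_NUM_KREF, so a later eviction can rematerialize the constant
** instead of spilling it:
**
**   ra_setkref(as, r, k);        (cost[r] = REGCOST(ref, ref), kref'd)
**   ...
**   if (ra_iskref(ref))          (later, in ra_rematk(): just reload k)
**     emit_loadu64(as, r, ra_krefk(as, ref));
*/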
/* Arch-specific field offsets. */
static const uint8_t field_ofs[IRFL__MAX+1] = {
#define FLOFS(name, ofs)	(uint8_t)(ofs),
IRFLDEF(FLOFS)
#undef FLOFS
  0
};
#ifdef LUAJIT_RANDOM_RA
/* Return a fixed number of random bits from the local PRNG state. */
static uint32_t ra_random_bits(ASMState *as, uint32_t nbits) {
  uint64_t b = as->prngbits;
  uint32_t res = (1u << nbits) - 1u;
  if (b <= res) b = lj_prng_u64(&as->prngstate) | (1ull << 63);
  res &= (uint32_t)b;
  as->prngbits = b >> nbits;
  return res;
}

/* Pick a random register from a register set. */
static Reg rset_pickrandom(ASMState *as, RegSet rs)
{
  Reg r = rset_pickbot_(rs);
  rs >>= r;
  if (rs > 1) {  /* More than one bit set? */
    while (1) {
      /* We need to sample max. the GPR or FPR half of the set. */
      uint32_t d = ra_random_bits(as, RSET_BITS-1);
      if ((rs >> d) & 1) {
	r += d;
	break;
      }
    }
  }
  return r;
}
#define rset_picktop(rs)	rset_pickrandom(as, rs)
#define rset_pickbot(rs)	rset_pickrandom(as, rs)
#else
#define rset_picktop(rs)	rset_picktop_(rs)
#define rset_pickbot(rs)	rset_pickbot_(rs)
#endif
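/* Sketch of the sampling scheme (illustrative): ra_random_bits() keeps a
** buffer of PRNG bits; the sentinel bit set on refill (1ull << 63) encodes
** how many buffered bits remain, so no separate counter is needed.
** rset_pickrandom() draws enough bits to index one half (GPR or FPR) of
** the register set and rejection-samples until the index hits a set bit,
** giving a uniformly random member of rs.
*/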
/* -- Target-specific instruction emitter --------------------------------- */

#if LJ_TARGET_X86ORX64
#include "lj_emit_x86.h"
#elif LJ_TARGET_ARM
#include "lj_emit_arm.h"
#elif LJ_TARGET_ARM64
#include "lj_emit_arm64.h"
#elif LJ_TARGET_PPC
#include "lj_emit_ppc.h"
#elif LJ_TARGET_MIPS
#include "lj_emit_mips.h"
#else
#error "Missing instruction emitter for target CPU"
#endif

/* Generic load/store of register from/to stack slot. */
#define emit_spload(as, ir, r, ofs) \
  emit_loadofs(as, ir, (r), RID_SP, (ofs))
#define emit_spstore(as, ir, r, ofs) \
  emit_storeofs(as, ir, (r), RID_SP, (ofs))
/* -- Register allocator debugging ---------------------------------------- */

/* #define LUAJIT_DEBUG_RA */

#ifdef LUAJIT_DEBUG_RA

#include <stdio.h>
#include <stdarg.h>

#define RIDNAME(name)	#name,
static const char *const ra_regname[] = {
  GPRDEF(RIDNAME)
  FPRDEF(RIDNAME)
  VRIDDEF(RIDNAME)
  NULL
};
#undef RIDNAME

static char ra_dbg_buf[65536];
static char *ra_dbg_p;
static char *ra_dbg_merge;
static MCode *ra_dbg_mcp;

static void ra_dstart(void)
{
  ra_dbg_p = ra_dbg_buf;
  ra_dbg_merge = NULL;
  ra_dbg_mcp = NULL;
}

static void ra_dflush(void)
{
  fwrite(ra_dbg_buf, 1, (size_t)(ra_dbg_p-ra_dbg_buf), stdout);
  ra_dstart();
}

static void ra_dprintf(ASMState *as, const char *fmt, ...)
{
  char *p;
  va_list argp;
  va_start(argp, fmt);
  p = ra_dbg_mcp == as->mcp ? ra_dbg_merge : ra_dbg_p;
  ra_dbg_mcp = NULL;
  p += sprintf(p, "%08x \e[36m%04d ", (uintptr_t)as->mcp, as->curins-REF_BIAS);
  for (;;) {
    const char *e = strchr(fmt, '$');
    if (e == NULL) break;
    memcpy(p, fmt, (size_t)(e-fmt));
    p += e-fmt;
    if (e[1] == 'r') {
      Reg r = va_arg(argp, Reg) & RID_MASK;
      if (r <= RID_MAX) {
	const char *q;
	for (q = ra_regname[r]; *q; q++)
	  *p++ = *q >= 'A' && *q <= 'Z' ? *q + 0x20 : *q;
      } else {
	*p++ = '?';
	lj_assertA(0, "bad register %d for debug format \"%s\"", r, fmt);
      }
    } else if (e[1] == 'f' || e[1] == 'i') {
      IRRef ref;
      if (e[1] == 'f')
	ref = va_arg(argp, IRRef);
      else
	ref = va_arg(argp, IRIns *) - as->ir;
      if (ref >= REF_BIAS)
	p += sprintf(p, "%04d", ref - REF_BIAS);
      else
	p += sprintf(p, "K%03d", REF_BIAS - ref);
    } else if (e[1] == 's') {
      uint32_t slot = va_arg(argp, uint32_t);
      p += sprintf(p, "[sp+0x%x]", sps_scale(slot));
    } else if (e[1] == 'x') {
      p += sprintf(p, "%08x", va_arg(argp, int32_t));
    } else {
      lj_assertA(0, "bad debug format code");
    }
    fmt = e+2;
  }
  va_end(argp);
  while (*fmt)
    *p++ = *fmt++;
  *p++ = '\e'; *p++ = '['; *p++ = 'm'; *p++ = '\n';
  if (p > ra_dbg_buf+sizeof(ra_dbg_buf)-256) {
    fwrite(ra_dbg_buf, 1, (size_t)(p-ra_dbg_buf), stdout);
    p = ra_dbg_buf;
  }
  ra_dbg_p = p;
}

#define RA_DBG_START()	ra_dstart()
#define RA_DBG_FLUSH()	ra_dflush()
#define RA_DBG_REF() \
  do { char *_p = ra_dbg_p; ra_dprintf(as, ""); \
       ra_dbg_merge = _p; ra_dbg_mcp = as->mcp; } while (0)
#define RA_DBGX(x)	ra_dprintf x

#else
#define RA_DBG_START()	((void)0)
#define RA_DBG_FLUSH()	((void)0)
#define RA_DBG_REF()	((void)0)
#define RA_DBGX(x)	((void)0)
#endif
/* -- Register allocator -------------------------------------------------- */

#define ra_free(as, r)		rset_set(as->freeset, (r))
#define ra_modified(as, r)	rset_set(as->modset, (r))
#define ra_weak(as, r)		rset_set(as->weakset, (r))
#define ra_noweak(as, r)	rset_clear(as->weakset, (r))

#define ra_used(ir)		(ra_hasreg((ir)->r) || ra_hasspill((ir)->s))
/* Setup register allocator. */
static void ra_setup(ASMState *as)
{
  Reg r;
  /* Initially all regs (except the stack pointer) are free for use. */
  as->freeset = RSET_INIT;
  as->modset = RSET_EMPTY;
  as->weakset = RSET_EMPTY;
  as->phiset = RSET_EMPTY;
  memset(as->phireg, 0, sizeof(as->phireg));
  for (r = RID_MIN_GPR; r < RID_MAX; r++)
    as->cost[r] = REGCOST(~0u, 0u);
}
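/* Cost encoding sketch (illustrative): REGCOST(cost, ref) packs the owning
** reference into the low half of a RegCost and a blended eviction priority
** into the high half; regcost_ref() recovers the reference. ra_setup()
** seeds every slot with REGCOST(~0u, 0u) (maximal cost, no ref), while
** ra_setkref() above uses REGCOST(ref, ref): constants get a low cost and
** are evicted first, since ra_rematk() can reload them without a spill.
*/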
/* Rematerialize constants. */
static Reg ra_rematk(ASMState *as, IRRef ref)
{
  IRIns *ir;
  Reg r;
  if (ra_iskref(ref)) {
    r = ra_krefreg(ref);
    lj_assertA(!rset_test(as->freeset, r), "rematk of free reg %d", r);
    ra_modified(as, r);
#if LJ_64
    emit_loadu64(as, r, ra_krefk(as, ref));
#else
    emit_loadi(as, r, ra_krefk(as, ref));
#endif
    return r;
  }
  ir = IR(ref);
  r = ir->r;
  lj_assertA(ra_hasreg(r), "rematk of K%03d has no reg", REF_BIAS - ref);
  lj_assertA(!ra_hasspill(ir->s),
	     "rematk of K%03d has spill slot [%x]", REF_BIAS - ref, ir->s);
  ra_free(as, r);
  ra_modified(as, r);
  ir->r = RID_INIT;  /* Do not keep any hint. */
  RA_DBGX((as, "remat     $i $r", ir, r));
#if !LJ_SOFTFP32
  if (ir->o == IR_KNUM) {
    emit_loadk64(as, r, ir);
  } else
#endif
  if (emit_canremat(REF_BASE) && ir->o == IR_BASE) {
    ra_sethint(ir->r, RID_BASE);  /* Restore BASE register hint. */
    emit_getgl(as, r, jit_base);
  } else if (emit_canremat(ASMREF_L) && ir->o == IR_KPRI) {
    /* REF_NIL stores ASMREF_L register. */
    lj_assertA(irt_isnil(ir->t), "rematk of bad ASMREF_L");
    emit_getgl(as, r, cur_L);
#if LJ_64
  } else if (ir->o == IR_KINT64) {
    emit_loadu64(as, r, ir_kint64(ir)->u64);
#if LJ_GC64
  } else if (ir->o == IR_KGC) {
    emit_loadu64(as, r, (uintptr_t)ir_kgc(ir));
  } else if (ir->o == IR_KPTR || ir->o == IR_KKPTR) {
    emit_loadu64(as, r, (uintptr_t)ir_kptr(ir));
#endif
#endif
  } else {
    lj_assertA(ir->o == IR_KINT || ir->o == IR_KGC ||
	       ir->o == IR_KPTR || ir->o == IR_KKPTR || ir->o == IR_KNULL,
	       "rematk of bad IR op %d", ir->o);
    emit_loadi(as, r, ir->i);
  }
  return r;
}
/* Force a spill. Allocate a new spill slot if needed. */
static int32_t ra_spill(ASMState *as, IRIns *ir)
{
  int32_t slot = ir->s;
  lj_assertA(ir >= as->ir + REF_TRUE,
	     "spill of K%03d", REF_BIAS - (int)(ir - as->ir));
  if (!ra_hasspill(slot)) {
    if (irt_is64(ir->t)) {
      slot = as->evenspill;
      as->evenspill += 2;
    } else if (as->oddspill) {
      slot = as->oddspill;
      as->oddspill = 0;
    } else {
      slot = as->evenspill;
      as->oddspill = slot+1;
      as->evenspill += 2;
    }
    if (as->evenspill > 256)
      lj_trace_err(as->J, LJ_TRERR_SPILLOV);
    ir->s = (uint8_t)slot;
  }
  return sps_scale(slot);
}
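/* Slot accounting example (illustrative): 64-bit values always consume an
** aligned even/odd slot pair starting at as->evenspill. A 32-bit spill
** first reuses a pending odd slot, otherwise it takes the even slot of a
** fresh pair and leaves the odd one pending:
**
**   64-bit:  slot = evenspill, evenspill += 2
**   32-bit:  slot = oddspill (if pending, then oddspill = 0)
**       or:  slot = evenspill, oddspill = slot+1, evenspill += 2
*/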
/* Release the temporarily allocated register in ASMREF_TMP1/ASMREF_TMP2. */
static Reg ra_releasetmp(ASMState *as, IRRef ref)
{
  IRIns *ir = IR(ref);
  Reg r = ir->r;
  lj_assertA(ra_hasreg(r), "release of TMP%d has no reg", ref-ASMREF_TMP1+1);
  lj_assertA(!ra_hasspill(ir->s),
	     "release of TMP%d has spill slot [%x]", ref-ASMREF_TMP1+1, ir->s);
  ra_free(as, r);
  ra_modified(as, r);
  ir->r = RID_INIT;
  return r;
}
/* Restore a register (marked as free). Rematerialize or force a spill. */
static Reg ra_restore(ASMState *as, IRRef ref)
{
  if (emit_canremat(ref)) {
    return ra_rematk(as, ref);
  } else {
    IRIns *ir = IR(ref);
    int32_t ofs = ra_spill(as, ir);  /* Force a spill slot. */
    Reg r = ir->r;
    lj_assertA(ra_hasreg(r), "restore of IR %04d has no reg", ref - REF_BIAS);
    ra_sethint(ir->r, r);  /* Keep hint. */
    ra_free(as, r);
    if (!rset_test(as->weakset, r)) {  /* Only restore non-weak references. */
      ra_modified(as, r);
      RA_DBGX((as, "restore   $i $r", ir, r));
      emit_spload(as, ir, r, ofs);
    }
    return r;
  }
}
/* Save a register to a spill slot. */
static void ra_save(ASMState *as, IRIns *ir, Reg r)
{
  RA_DBGX((as, "save      $i $r", ir, r));
  emit_spstore(as, ir, r, sps_scale(ir->s));
}
#define MINCOST(name) \
  if (rset_test(RSET_ALL, RID_##name) && \
      LJ_LIKELY(allow&RID2RSET(RID_##name)) && as->cost[RID_##name] < cost) \
    cost = as->cost[RID_##name];

/* Evict the register with the lowest cost, forcing a restore. */
static Reg ra_evict(ASMState *as, RegSet allow)
{
  IRRef ref;
  RegCost cost = ~(RegCost)0;
  lj_assertA(allow != RSET_EMPTY, "evict from empty set");
  if (RID_NUM_FPR == 0 || allow < RID2RSET(RID_MAX_GPR)) {
    GPRDEF(MINCOST)
  } else {
    FPRDEF(MINCOST)
  }
  ref = regcost_ref(cost);
  lj_assertA(ra_iskref(ref) || (ref >= as->T->nk && ref < as->T->nins),
	     "evict of out-of-range IR %04d", ref - REF_BIAS);
  /* Preferably pick any weak ref instead of a non-weak, non-const ref. */
  if (!irref_isk(ref) && (as->weakset & allow)) {
    IRIns *ir = IR(ref);
    if (!rset_test(as->weakset, ir->r))
      ref = regcost_ref(as->cost[rset_pickbot((as->weakset & allow))]);
  }
  return ra_restore(as, ref);
}
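/* Expansion sketch (illustrative): GPRDEF(MINCOST) applies MINCOST to every
** GPR name, unrolling the minimum search over as->cost at compile time.
** For a hypothetical two-register file it would expand to roughly:
**
**   if (rset_test(RSET_ALL, RID_R0) &&
**       LJ_LIKELY(allow&RID2RSET(RID_R0)) && as->cost[RID_R0] < cost)
**     cost = as->cost[RID_R0];
**   if (rset_test(RSET_ALL, RID_R1) && ...
**
** (RID_R0/RID_R1 stand in for the target's actual register names.)
*/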
/* Pick any register (marked as free). Evict on-demand. */
static Reg ra_pick(ASMState *as, RegSet allow)
{
  RegSet pick = as->freeset & allow;
  if (!pick)
    return ra_evict(as, allow);
  else
    return rset_picktop(pick);
}
/* Get a scratch register (marked as free). */
static Reg ra_scratch(ASMState *as, RegSet allow)
{
  Reg r = ra_pick(as, allow);
  ra_modified(as, r);
  RA_DBGX((as, "scratch        $r", r));
  return r;
}
/* Evict all registers from a set (if not free). */
static void ra_evictset(ASMState *as, RegSet drop)
{
  RegSet work;
  as->modset |= drop;
#if !LJ_SOFTFP
  work = (drop & ~as->freeset) & RSET_FPR;
  while (work) {
    Reg r = rset_pickbot(work);
    ra_restore(as, regcost_ref(as->cost[r]));
    rset_clear(work, r);
    checkmclim(as);
  }
#endif
  work = (drop & ~as->freeset);
  while (work) {
    Reg r = rset_pickbot(work);
    ra_restore(as, regcost_ref(as->cost[r]));
    rset_clear(work, r);
    checkmclim(as);
  }
}
/* Evict (rematerialize) all registers allocated to constants. */
static void ra_evictk(ASMState *as)
{
  RegSet work;
#if !LJ_SOFTFP
  work = ~as->freeset & RSET_FPR;
  while (work) {
    Reg r = rset_pickbot(work);
    IRRef ref = regcost_ref(as->cost[r]);
    if (emit_canremat(ref) && irref_isk(ref)) {
      ra_rematk(as, ref);
      checkmclim(as);
    }
    rset_clear(work, r);
  }
#endif
  work = ~as->freeset & RSET_GPR;
  while (work) {
    Reg r = rset_pickbot(work);
    IRRef ref = regcost_ref(as->cost[r]);
    if (emit_canremat(ref) && irref_isk(ref)) {
      ra_rematk(as, ref);
      checkmclim(as);
    }
    rset_clear(work, r);
  }
}
#if RID_NUM_KREF > 0
/* Allocate a register for a constant. */
static Reg ra_allock(ASMState *as, intptr_t k, RegSet allow)
{
  /* First try to find a register which already holds the same constant. */
  RegSet pick, work = ~as->freeset & RSET_GPR;
  Reg r;
  while (work) {
    IRRef ref;
    r = rset_pickbot(work);
    ref = regcost_ref(as->cost[r]);
#if LJ_64
    if (ref < ASMREF_L) {
      if (ra_iskref(ref)) {
	if (k == ra_krefk(as, ref))
	  return r;
      } else {
	IRIns *ir = IR(ref);
	if ((ir->o == IR_KINT64 && k == (int64_t)ir_kint64(ir)->u64) ||
#if LJ_GC64
	    (ir->o == IR_KINT && (uint64_t)k == (uint32_t)ir->i) ||
	    (ir->o == IR_KINT && k == ir->i) ||
	    (ir->o == IR_KGC && k == (intptr_t)ir_kgc(ir)) ||
	    ((ir->o == IR_KPTR || ir->o == IR_KKPTR) &&
	     k == (intptr_t)ir_kptr(ir))
#else
	    (ir->o != IR_KINT64 && k == ir->i)
#endif
	   )
	  return r;
      }
    }
#else
    if (ref < ASMREF_L &&
	k == (ra_iskref(ref) ? ra_krefk(as, ref) : IR(ref)->i))
      return r;
#endif
    rset_clear(work, r);
  }
  pick = as->freeset & allow;
  if (pick) {
    /* Constants should preferably get unmodified registers. */
    if ((pick & ~as->modset))
      pick &= ~as->modset;
    r = rset_pickbot(pick);  /* Reduce conflicts with inverse allocation. */
  } else {
    r = ra_evict(as, allow);
  }
  RA_DBGX((as, "allock    $x $r", k, r));
  ra_setkref(as, r, k);
  rset_clear(as->freeset, r);
  ra_noweak(as, r);
  return r;
}

/* Allocate a specific register for a constant. */
static void ra_allockreg(ASMState *as, intptr_t k, Reg r)
{
  Reg kr = ra_allock(as, k, RID2RSET(r));
  if (kr != r) {
    IRIns irdummy;
    irdummy.t.irt = IRT_INT;
    ra_scratch(as, RID2RSET(r));
    emit_movrr(as, &irdummy, r, kr);
  }
}
#else
#define ra_allockreg(as, k, r)		emit_loadi(as, (r), (k))
#endif
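/* Usage sketch (illustrative): callers ask for an arbitrary constant in
** some register class and get sharing for free, e.g. a hypothetical target
** helper materializing the global_State pointer:
**
**   Reg r = ra_allock(as, (intptr_t)J2G(as->J), RSET_GPR);
**
** A second request for the same constant while it is still live returns
** the same register instead of allocating a new one.
*/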
/* Allocate a register for ref from the allowed set of registers.
** Note: this function assumes the ref does NOT have a register yet!
** Picks an optimal register, sets the cost and marks the register as non-free.
*/
static Reg ra_allocref(ASMState *as, IRRef ref, RegSet allow)
{
  IRIns *ir = IR(ref);
  RegSet pick = as->freeset & allow;
  Reg r;
  lj_assertA(ra_noreg(ir->r),
	     "IR %04d already has reg %d", ref - REF_BIAS, ir->r);
  if (pick) {
    /* First check register hint from propagation or PHI. */
    if (ra_hashint(ir->r)) {
      r = ra_gethint(ir->r);
      if (rset_test(pick, r))  /* Use hint register if possible. */
	goto found;
      /* Rematerialization is cheaper than missing a hint. */
      if (rset_test(allow, r) && emit_canremat(regcost_ref(as->cost[r]))) {
	ra_rematk(as, regcost_ref(as->cost[r]));
	goto found;
      }
      RA_DBGX((as, "hintmiss  $f $r", ref, r));
    }
    /* Invariants should preferably get unmodified registers. */
    if (ref < as->loopref && !irt_isphi(ir->t)) {
      if ((pick & ~as->modset))
	pick &= ~as->modset;
      r = rset_pickbot(pick);  /* Reduce conflicts with inverse allocation. */
    } else {
      /* We've got plenty of regs, so get callee-save regs if possible. */
      if (RID_NUM_GPR > 8 && (pick & ~RSET_SCRATCH))
	pick &= ~RSET_SCRATCH;
      r = rset_picktop(pick);
    }
  } else {
    r = ra_evict(as, allow);
  }
found:
  RA_DBGX((as, "alloc     $f $r", ref, r));
  ir->r = (uint8_t)r;
  rset_clear(as->freeset, r);
  ra_noweak(as, r);
  as->cost[r] = REGCOST_REF_T(ref, irt_t(ir->t));
  return r;
}
710 static Reg
ra_alloc1(ASMState
*as
, IRRef ref
, RegSet allow
)
713 /* Note: allow is ignored if the register is already allocated. */
714 if (ra_noreg(r
)) r
= ra_allocref(as
, ref
, allow
);
/* Add a register rename to the IR. */
static void ra_addrename(ASMState *as, Reg down, IRRef ref, SnapNo snapno)
{
  IRRef ren;
  lj_ir_set(as->J, IRT(IR_RENAME, IRT_NIL), ref, snapno);
  ren = tref_ref(lj_ir_emit(as->J));
  as->J->cur.ir[ren].r = (uint8_t)down;
  as->J->cur.ir[ren].s = SPS_NONE;
}
/* Rename register allocation and emit move. */
static void ra_rename(ASMState *as, Reg down, Reg up)
{
  IRRef ref = regcost_ref(as->cost[up] = as->cost[down]);
  IRIns *ir = IR(ref);
  ir->r = (uint8_t)up;
  as->cost[down] = 0;
  lj_assertA((down < RID_MAX_GPR) == (up < RID_MAX_GPR),
	     "rename between GPR/FPR %d and %d", down, up);
  lj_assertA(!rset_test(as->freeset, down), "rename from free reg %d", down);
  lj_assertA(rset_test(as->freeset, up), "rename to non-free reg %d", up);
  ra_free(as, down);  /* 'down' is free ... */
  ra_modified(as, down);
  rset_clear(as->freeset, up);  /* ... and 'up' is now allocated. */
  ra_noweak(as, up);
  RA_DBGX((as, "rename    $f $r $r", regcost_ref(as->cost[up]), down, up));
  emit_movrr(as, ir, down, up);  /* Backwards codegen needs inverse move. */
  if (!ra_hasspill(IR(ref)->s)) {  /* Add the rename to the IR. */
    /*
    ** The rename is effective at the subsequent (already emitted) exit
    ** branch. This is for the current snapshot (as->snapno). Except if we
    ** haven't yet allocated any refs for the snapshot (as->snapalloc == 1),
    ** then it belongs to the next snapshot.
    ** See also the discussion at asm_snap_checkrename().
    */
    ra_addrename(as, down, ref, as->snapno + as->snapalloc);
  }
}
/* Pick a destination register (marked as free).
** Caveat: allow is ignored if there's already a destination register.
** Use ra_destreg() to get a specific register.
*/
static Reg ra_dest(ASMState *as, IRIns *ir, RegSet allow)
{
  Reg dest = ir->r;
  if (ra_hasreg(dest)) {
    ra_free(as, dest);
    ra_modified(as, dest);
  } else {
    if (ra_hashint(dest) && rset_test((as->freeset&allow), ra_gethint(dest))) {
      dest = ra_gethint(dest);
      ra_modified(as, dest);
      RA_DBGX((as, "dest           $r", dest));
    } else {
      dest = ra_scratch(as, allow);
    }
    ir->r = dest;
  }
  if (LJ_UNLIKELY(ra_hasspill(ir->s))) ra_save(as, ir, dest);
  return dest;
}
/* Force a specific destination register (marked as free). */
static void ra_destreg(ASMState *as, IRIns *ir, Reg r)
{
  Reg dest = ra_dest(as, ir, RID2RSET(r));
  if (dest != r) {
    lj_assertA(rset_test(as->freeset, r), "dest reg %d is not free", r);
    ra_scratch(as, RID2RSET(r));
    emit_movrr(as, ir, dest, r);
  }
}
#if LJ_TARGET_X86ORX64
/* Propagate dest register to left reference. Emit moves as needed.
** This is a required fixup step for all 2-operand machine instructions.
*/
static void ra_left(ASMState *as, Reg dest, IRRef lref)
{
  IRIns *ir = IR(lref);
  Reg left = ir->r;
  if (ra_noreg(left)) {
    if (irref_isk(lref)) {
      if (ir->o == IR_KNUM) {
	/* FP remat needs a load except for +0. Still better than eviction. */
	if (tvispzero(ir_knum(ir)) || !(as->freeset & RSET_FPR)) {
	  emit_loadk64(as, dest, ir);
	  return;
	}
#if LJ_64
      } else if (ir->o == IR_KINT64) {
	emit_loadk64(as, dest, ir);
	return;
#if LJ_GC64
      } else if (ir->o == IR_KGC || ir->o == IR_KPTR || ir->o == IR_KKPTR) {
	emit_loadk64(as, dest, ir);
	return;
#endif
#endif
      } else if (ir->o != IR_KPRI) {
	lj_assertA(ir->o == IR_KINT || ir->o == IR_KGC ||
		   ir->o == IR_KPTR || ir->o == IR_KKPTR || ir->o == IR_KNULL,
		   "K%03d has bad IR op %d", REF_BIAS - lref, ir->o);
	emit_loadi(as, dest, ir->i);
	return;
      }
    }
    if (!ra_hashint(left) && !iscrossref(as, lref))
      ra_sethint(ir->r, dest);  /* Propagate register hint. */
    left = ra_allocref(as, lref, dest < RID_MAX_GPR ? RSET_GPR : RSET_FPR);
  }
  ra_noweak(as, left);
  /* Move needed for true 3-operand instruction: y=a+b ==> y=a; y+=b. */
  if (dest != left) {
    /* Use register renaming if dest is the PHI reg. */
    if (irt_isphi(ir->t) && as->phireg[dest] == lref) {
      ra_modified(as, left);
      ra_rename(as, left, dest);
    } else {
      emit_movrr(as, ir, dest, left);
    }
  }
}
#else
/* Similar to ra_left, except we override any hints. */
static void ra_leftov(ASMState *as, Reg dest, IRRef lref)
{
  IRIns *ir = IR(lref);
  Reg left = ir->r;
  if (ra_noreg(left)) {
    ra_sethint(ir->r, dest);  /* Propagate register hint. */
    left = ra_allocref(as, lref,
		       (LJ_SOFTFP || dest < RID_MAX_GPR) ? RSET_GPR : RSET_FPR);
  }
  ra_noweak(as, left);
  if (dest != left) {
    /* Use register renaming if dest is the PHI reg. */
    if (irt_isphi(ir->t) && as->phireg[dest] == lref) {
      ra_modified(as, left);
      ra_rename(as, left, dest);
    } else {
      emit_movrr(as, ir, dest, left);
    }
  }
}
#endif
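/* Pattern sketch (illustrative): since code is generated backwards, a
** typical 2-operand target instruction first picks the destination, then
** emits the op, and finally lets ra_left() (or ra_leftov() on non-x86
** targets) arrange for the left operand to end up in the same register.
** asm_example and emit_example_op are hypothetical names:
**
**   static void asm_example(ASMState *as, IRIns *ir)
**   {
**     Reg dest = ra_dest(as, ir, RSET_GPR);
**     Reg right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, dest));
**     emit_example_op(as, dest, right);
**     ra_left(as, dest, ir->op1);      (y=a+b ==> y=a; y+=b)
**   }
*/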
/* Force a RID_RETLO/RID_RETHI destination register pair (marked as free). */
static void ra_destpair(ASMState *as, IRIns *ir)
{
  Reg destlo = ir->r, desthi = (ir+1)->r;
  IRIns *irx = (LJ_64 && !irt_is64(ir->t)) ? ir+1 : ir;
  /* First spill unrelated refs blocking the destination registers. */
  if (!rset_test(as->freeset, RID_RETLO) &&
      destlo != RID_RETLO && desthi != RID_RETLO)
    ra_restore(as, regcost_ref(as->cost[RID_RETLO]));
  if (!rset_test(as->freeset, RID_RETHI) &&
      destlo != RID_RETHI && desthi != RID_RETHI)
    ra_restore(as, regcost_ref(as->cost[RID_RETHI]));
  /* Next free the destination registers (if any). */
  if (ra_hasreg(destlo)) {
    ra_free(as, destlo);
    ra_modified(as, destlo);
  } else {
    destlo = RID_RETLO;
  }
  if (ra_hasreg(desthi)) {
    ra_free(as, desthi);
    ra_modified(as, desthi);
  } else {
    desthi = RID_RETHI;
  }
  /* Check for conflicts and shuffle the registers as needed. */
  if (destlo == RID_RETHI) {
    if (desthi == RID_RETLO) {
#if LJ_TARGET_X86ORX64
      *--as->mcp = XI_XCHGa + RID_RETHI;
      if (LJ_64 && irt_is64(irx->t)) *--as->mcp = 0x48;
#else
      emit_movrr(as, irx, RID_RETHI, RID_TMP);
      emit_movrr(as, irx, RID_RETLO, RID_RETHI);
      emit_movrr(as, irx, RID_TMP, RID_RETLO);
#endif
    } else {
      emit_movrr(as, irx, RID_RETHI, RID_RETLO);
      if (desthi != RID_RETHI) emit_movrr(as, irx, desthi, RID_RETHI);
    }
  } else if (desthi == RID_RETLO) {
    emit_movrr(as, irx, RID_RETLO, RID_RETHI);
    if (destlo != RID_RETLO) emit_movrr(as, irx, destlo, RID_RETLO);
  } else {
    if (desthi != RID_RETHI) emit_movrr(as, irx, desthi, RID_RETHI);
    if (destlo != RID_RETLO) emit_movrr(as, irx, destlo, RID_RETLO);
  }
  /* Restore spill slots (if any). */
  if (ra_hasspill((ir+1)->s)) ra_save(as, ir+1, RID_RETHI);
  if (ra_hasspill(ir->s)) ra_save(as, ir, RID_RETLO);
}
/* -- Snapshot handling --------------------------------------------------- */

/* Can we rematerialize a KNUM instead of forcing a spill? */
static int asm_snap_canremat(ASMState *as)
{
  Reg r;
  for (r = RID_MIN_FPR; r < RID_MAX_FPR; r++)
    if (irref_isk(regcost_ref(as->cost[r])))
      return 1;
  return 0;
}
/* Check whether a sunk store corresponds to an allocation. */
static int asm_sunk_store(ASMState *as, IRIns *ira, IRIns *irs)
{
  if (irs->s == 255) {
    if (irs->o == IR_ASTORE || irs->o == IR_HSTORE ||
	irs->o == IR_FSTORE || irs->o == IR_XSTORE) {
      IRIns *irk = IR(irs->op1);
      if (irk->o == IR_AREF || irk->o == IR_HREFK)
	irk = IR(irk->op1);
      return (IR(irk->op1) == ira);
    }
    return 0;
  } else {
    return (ira + irs->s == irs);  /* Quick check. */
  }
}
/* Allocate register or spill slot for a ref that escapes to a snapshot. */
static void asm_snap_alloc1(ASMState *as, IRRef ref)
{
  IRIns *ir = IR(ref);
  if (!irref_isk(ref) && ir->r != RID_SUNK) {
    bloomset(as->snapfilt1, ref);
    bloomset(as->snapfilt2, hashrot(ref, ref + HASH_BIAS));
    if (ra_used(ir)) return;
    if (ir->r == RID_SINK) {
      ir->r = RID_SUNK;
#if LJ_HASFFI
      if (ir->o == IR_CNEWI) {  /* Allocate CNEWI value. */
	asm_snap_alloc1(as, ir->op2);
	if (LJ_32 && (ir+1)->o == IR_HIOP)
	  asm_snap_alloc1(as, (ir+1)->op2);
      } else
#endif
      {  /* Allocate stored values for TNEW, TDUP and CNEW. */
	IRIns *irs;
	lj_assertA(ir->o == IR_TNEW || ir->o == IR_TDUP || ir->o == IR_CNEW,
		   "sink of IR %04d has bad op %d", ref - REF_BIAS, ir->o);
	for (irs = IR(as->snapref-1); irs > ir; irs--)
	  if (irs->r == RID_SINK && asm_sunk_store(as, ir, irs)) {
	    lj_assertA(irs->o == IR_ASTORE || irs->o == IR_HSTORE ||
		       irs->o == IR_FSTORE || irs->o == IR_XSTORE,
		       "sunk store IR %04d has bad op %d",
		       (int)(irs - as->ir) - REF_BIAS, irs->o);
	    asm_snap_alloc1(as, irs->op2);
	    if (LJ_32 && (irs+1)->o == IR_HIOP)
	      asm_snap_alloc1(as, (irs+1)->op2);
	  }
      }
    } else {
      RegSet allow;
      if (ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT) {
	IRIns *irc;
	for (irc = IR(as->curins); irc > ir; irc--)
	  if ((irc->op1 == ref || irc->op2 == ref) &&
	      !(irc->r == RID_SINK || irc->r == RID_SUNK))
	    goto nosink;  /* Don't sink conversion if result is used. */
	asm_snap_alloc1(as, ir->op1);
	return;
      }
    nosink:
      allow = (!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR;
      if ((as->freeset & allow) ||
	  (allow == RSET_FPR && asm_snap_canremat(as))) {
	/* Get a weak register if we have a free one or can rematerialize. */
	Reg r = ra_allocref(as, ref, allow);  /* Allocate a register. */
	if (!irt_isphi(ir->t))
	  ra_weak(as, r);  /* But mark it as weakly referenced. */
	checkmclim(as);
	RA_DBGX((as, "snapreg   $f $r", ref, ir->r));
      } else {
	ra_spill(as, ir);  /* Otherwise force a spill slot. */
	RA_DBGX((as, "snapspill $f $s", ref, ir->s));
      }
    }
  }
}
/* Allocate refs escaping to a snapshot. */
static void asm_snap_alloc(ASMState *as, int snapno)
{
  SnapShot *snap = &as->T->snap[snapno];
  SnapEntry *map = &as->T->snapmap[snap->mapofs];
  MSize n, nent = snap->nent;
  as->snapfilt1 = as->snapfilt2 = 0;
  for (n = 0; n < nent; n++) {
    SnapEntry sn = map[n];
    IRRef ref = snap_ref(sn);
    if (!irref_isk(ref)) {
      asm_snap_alloc1(as, ref);
      if (LJ_SOFTFP && (sn & SNAP_SOFTFPNUM)) {
	lj_assertA(irt_type(IR(ref+1)->t) == IRT_SOFTFP,
		   "snap %d[%d] points to bad SOFTFP IR %04d",
		   snapno, n, ref - REF_BIAS);
	asm_snap_alloc1(as, ref+1);
      }
    }
  }
}
/* All guards for a snapshot use the same exitno. This is currently the
** same as the snapshot number. Since the exact origin of the exit cannot
** be determined, all guards for the same snapshot must exit with the same
** RegSP mapping.
** A renamed ref which has been used in a prior guard for the same snapshot
** would cause an inconsistency. The easy way out is to force a spill slot.
*/
static int asm_snap_checkrename(ASMState *as, IRRef ren)
{
  if (bloomtest(as->snapfilt1, ren) &&
      bloomtest(as->snapfilt2, hashrot(ren, ren + HASH_BIAS))) {
    IRIns *ir = IR(ren);
    ra_spill(as, ir);  /* Register renamed, so force a spill slot. */
    RA_DBGX((as, "snaprensp $f $s", ren, ir->s));
    return 1;  /* Found. */
  }
  return 0;  /* Not found. */
}
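/* Filter sketch (illustrative): asm_snap_alloc1() records every escaping
** ref in two Bloom filters with independent hashes; a rename is treated as
** conflicting only if both filters match. A false positive merely forces a
** harmless spill slot, while false negatives cannot occur:
**
**   bloomset(as->snapfilt1, ref);
**   bloomset(as->snapfilt2, hashrot(ref, ref + HASH_BIAS));
**   ...
**   if (bloomtest(as->snapfilt1, ren) && bloomtest(as->snapfilt2, ...))
*/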
/* Prepare snapshot for next guard or throwing instruction. */
static void asm_snap_prep(ASMState *as)
{
  if (as->snapalloc) {
    /* Alloc on first invocation for each snapshot. */
    as->snapalloc = 0;
    asm_snap_alloc(as, as->snapno);
    as->snaprename = as->T->nins;
  } else {
    /* Check any renames above the highwater mark. */
    for (; as->snaprename < as->T->nins; as->snaprename++) {
      IRIns *ir = &as->T->ir[as->snaprename];
      if (asm_snap_checkrename(as, ir->op1))
	ir->op2 = REF_BIAS-1;  /* Kill rename. */
    }
  }
}
/* Move to previous snapshot when we cross the current snapshot ref. */
static void asm_snap_prev(ASMState *as)
{
  if (as->curins < as->snapref) {
    uintptr_t ofs = (uintptr_t)(as->mctoporig - as->mcp);
    if (ofs >= 0x10000) lj_trace_err(as->J, LJ_TRERR_MCODEOV);
    do {
      if (as->snapno == 0) return;
      as->snapno--;
      as->snapref = as->T->snap[as->snapno].ref;
      as->T->snap[as->snapno].mcofs = (uint16_t)ofs;  /* Remember mcode ofs. */
    } while (as->curins < as->snapref);  /* May have no ins in between. */
    as->snapalloc = 1;
  }
}
/* Fixup snapshot mcode offsets. */
static void asm_snap_fixup_mcofs(ASMState *as)
{
  uint32_t sz = (uint32_t)(as->mctoporig - as->mcp);
  SnapShot *snap = as->T->snap;
  SnapNo i;
  for (i = as->T->nsnap-1; i > 0; i--) {
    /* Compute offset from mcode start and store in correct snapshot. */
    snap[i].mcofs = (uint16_t)(sz - snap[i-1].mcofs);
  }
  snap[0].mcofs = 0;
}
/* -- Miscellaneous helpers ----------------------------------------------- */

/* Calculate stack adjustment. */
static int32_t asm_stack_adjust(ASMState *as)
{
  if (as->evenspill <= SPS_FIXED)
    return 0;
  return sps_scale(sps_align(as->evenspill));
}
/* Must match with hash*() in lj_tab.c. */
static uint32_t ir_khash(ASMState *as, IRIns *ir)
{
  uint32_t lo, hi;
  UNUSED(as);
  if (irt_isstr(ir->t)) {
    return ir_kstr(ir)->sid;
  } else if (irt_isnum(ir->t)) {
    lo = ir_knum(ir)->u32.lo;
    hi = ir_knum(ir)->u32.hi << 1;
  } else if (irt_ispri(ir->t)) {
    lj_assertA(!irt_isnil(ir->t), "hash of nil key");
    return irt_type(ir->t)-IRT_FALSE;
  } else {
    lj_assertA(irt_isgcv(ir->t), "hash of bad IR type %d", irt_type(ir->t));
    lo = u32ptr(ir_kgc(ir));
#if LJ_GC64
    hi = (uint32_t)(u64ptr(ir_kgc(ir)) >> 32) | (irt_toitype(ir->t) << 15);
#else
    hi = lo + HASH_BIAS;
#endif
  }
  return hashrot(lo, hi);
}
/* -- Allocations --------------------------------------------------------- */

static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args);
static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci);
static void asm_snew(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_new];
  IRRef args[3];
  asm_snap_prep(as);
  args[0] = ASMREF_L;  /* lua_State *L    */
  args[1] = ir->op1;   /* const char *str */
  args[2] = ir->op2;   /* size_t len      */
  as->gcsteps++;
  asm_setupresult(as, ir, ci);  /* GCstr * */
  asm_gencall(as, ci, args);
}
static void asm_tnew(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_new1];
  IRRef args[2];
  asm_snap_prep(as);
  args[0] = ASMREF_L;     /* lua_State *L    */
  args[1] = ASMREF_TMP1;  /* uint32_t ahsize */
  as->gcsteps++;
  asm_setupresult(as, ir, ci);  /* GCtab * */
  asm_gencall(as, ci, args);
  ra_allockreg(as, ir->op1 | (ir->op2 << 24), ra_releasetmp(as, ASMREF_TMP1));
}
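/* Operand packing example (illustrative): TNEW carries the array size in
** op1 and the hash size (as log2) in op2. The single uint32_t 'ahsize'
** argument is rebuilt as op1 | (op2 << 24); e.g. an array size of 3 with
** one hash bit yields 3 | (1 << 24) == 0x01000003.
*/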
static void asm_tdup(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_dup];
  IRRef args[2];
  asm_snap_prep(as);
  args[0] = ASMREF_L;  /* lua_State *L */
  args[1] = ir->op1;   /* const GCtab *kt */
  as->gcsteps++;
  asm_setupresult(as, ir, ci);  /* GCtab * */
  asm_gencall(as, ci, args);
}
static void asm_gc_check(ASMState *as);

/* Explicit GC step. */
static void asm_gcstep(ASMState *as, IRIns *ir)
{
  IRIns *ira;
  for (ira = IR(as->stopins+1); ira < ir; ira++)
    if ((ira->o == IR_TNEW || ira->o == IR_TDUP ||
	 (LJ_HASFFI && (ira->o == IR_CNEW || ira->o == IR_CNEWI))) &&
	ra_used(ira))
      as->gcsteps++;
  if (as->gcsteps)
    asm_gc_check(as);
  as->gcsteps = 0x80000000;  /* Prevent implicit GC check further up. */
}
/* -- Buffer operations --------------------------------------------------- */

static void asm_tvptr(ASMState *as, Reg dest, IRRef ref, MSize mode);

static void asm_bufhdr_write(ASMState *as, Reg sb);

static void asm_bufhdr(ASMState *as, IRIns *ir)
{
  Reg sb = ra_dest(as, ir, RSET_GPR);
  switch (ir->op2) {
  case IRBUFHDR_RESET: {
    Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, sb));
    IRIns irbp;
    irbp.ot = IRT(0, IRT_PTR);  /* Buffer data pointer type. */
    emit_storeofs(as, &irbp, tmp, sb, offsetof(SBuf, w));
    emit_loadofs(as, &irbp, tmp, sb, offsetof(SBuf, b));
    break;
    }
  case IRBUFHDR_APPEND: {
    /* Rematerialize const buffer pointer instead of likely spill. */
    IRIns *irp = IR(ir->op1);
    if (!(ra_hasreg(irp->r) || irp == ir-1 ||
	  (irp == ir-2 && !ra_used(ir-1)))) {
      while (!(irp->o == IR_BUFHDR && irp->op2 == IRBUFHDR_RESET))
	irp = IR(irp->op1);
      if (irref_isk(irp->op1)) {
	ra_weak(as, ra_allocref(as, ir->op1, RSET_GPR));
	ir = irp;
      }
    }
    break;
    }
  case IRBUFHDR_WRITE:
    asm_bufhdr_write(as, sb);
    break;
  default: lj_assertA(0, "bad BUFHDR op2 %d", ir->op2); break;
  }
#if LJ_TARGET_X86ORX64
  ra_left(as, sb, ir->op1);
#else
  ra_leftov(as, sb, ir->op1);
#endif
}
static void asm_bufput(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_buf_putstr];
  IRRef args[3];
  IRIns *irs;
  int kchar = -129;
  args[0] = ir->op1;  /* SBuf * */
  args[1] = ir->op2;  /* GCstr * */
  irs = IR(ir->op2);
  lj_assertA(irt_isstr(irs->t),
	     "BUFPUT of non-string IR %04d", ir->op2 - REF_BIAS);
  if (irs->o == IR_KGC) {
    GCstr *s = ir_kstr(irs);
    if (s->len == 1) {  /* Optimize put of single-char string constant. */
      kchar = (int8_t)strdata(s)[0];  /* Signed! */
      args[1] = ASMREF_TMP1;  /* int, truncated to char */
      ci = &lj_ir_callinfo[IRCALL_lj_buf_putchar];
    }
  } else if (mayfuse(as, ir->op2) && ra_noreg(irs->r)) {
    if (irs->o == IR_TOSTR) {  /* Fuse number to string conversions. */
      if (irs->op2 == IRTOSTR_NUM) {
	args[1] = ASMREF_TMP1;  /* TValue * */
	ci = &lj_ir_callinfo[IRCALL_lj_strfmt_putnum];
      } else {
	lj_assertA(irt_isinteger(IR(irs->op1)->t),
		   "TOSTR of non-numeric IR %04d", irs->op1);
	args[1] = irs->op1;  /* int */
	if (irs->op2 == IRTOSTR_INT)
	  ci = &lj_ir_callinfo[IRCALL_lj_strfmt_putint];
	else
	  ci = &lj_ir_callinfo[IRCALL_lj_buf_putchar];
      }
    } else if (irs->o == IR_SNEW) {  /* Fuse string allocation. */
      args[1] = irs->op1;  /* const void * */
      args[2] = irs->op2;  /* MSize */
      ci = &lj_ir_callinfo[IRCALL_lj_buf_putmem];
    }
  }
  asm_setupresult(as, ir, ci);  /* SBuf * */
  asm_gencall(as, ci, args);
  if (args[1] == ASMREF_TMP1) {
    Reg tmp = ra_releasetmp(as, ASMREF_TMP1);
    if (kchar == -129)
      asm_tvptr(as, tmp, irs->op1, IRTMPREF_IN1);
    else
      ra_allockreg(as, kchar, tmp);
  }
}
static void asm_bufstr(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_buf_tostr];
  IRRef args[1];
  args[0] = ir->op1;  /* SBuf *sb */
  as->gcsteps++;
  asm_setupresult(as, ir, ci);  /* GCstr * */
  asm_gencall(as, ci, args);
}
/* -- Type conversions ---------------------------------------------------- */

static void asm_tostr(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci;
  IRRef args[2];
  asm_snap_prep(as);
  args[0] = ASMREF_L;
  as->gcsteps++;
  if (ir->op2 == IRTOSTR_NUM) {
    args[1] = ASMREF_TMP1;  /* cTValue * */
    ci = &lj_ir_callinfo[IRCALL_lj_strfmt_num];
  } else {
    args[1] = ir->op1;  /* int32_t k */
    if (ir->op2 == IRTOSTR_INT)
      ci = &lj_ir_callinfo[IRCALL_lj_strfmt_int];
    else
      ci = &lj_ir_callinfo[IRCALL_lj_strfmt_char];
  }
  asm_setupresult(as, ir, ci);  /* GCstr * */
  asm_gencall(as, ci, args);
  if (ir->op2 == IRTOSTR_NUM)
    asm_tvptr(as, ra_releasetmp(as, ASMREF_TMP1), ir->op1, IRTMPREF_IN1);
}
#if LJ_32 && LJ_HASFFI && !LJ_SOFTFP && !LJ_TARGET_X86
static void asm_conv64(ASMState *as, IRIns *ir)
{
  IRType st = (IRType)((ir-1)->op2 & IRCONV_SRCMASK);
  IRType dt = (((ir-1)->op2 & IRCONV_DSTMASK) >> IRCONV_DSH);
  IRCallID id;
  IRRef args[2];
  lj_assertA((ir-1)->o == IR_CONV && ir->o == IR_HIOP,
	     "not a CONV/HIOP pair at IR %04d", (int)(ir - as->ir) - REF_BIAS);
  args[LJ_BE] = (ir-1)->op1;
  args[LJ_LE] = ir->op1;
  if (st == IRT_NUM || st == IRT_FLOAT) {
    id = IRCALL_fp64_d2l + ((st == IRT_FLOAT) ? 2 : 0) + (dt - IRT_I64);
    ir--;
  } else {
    id = IRCALL_fp64_l2d + ((dt == IRT_FLOAT) ? 2 : 0) + (st - IRT_I64);
  }
  {
#if LJ_TARGET_ARM && !LJ_ABI_SOFTFP
    CCallInfo cim = lj_ir_callinfo[id], *ci = &cim;
    cim.flags |= CCI_VARARG;  /* These calls don't use the hard-float ABI! */
#else
    const CCallInfo *ci = &lj_ir_callinfo[id];
#endif
    asm_setupresult(as, ir, ci);
    asm_gencall(as, ci, args);
  }
}
#endif
/* -- Memory references --------------------------------------------------- */

static void asm_newref(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_newkey];
  IRRef args[3];
  if (ir->r == RID_SINK)
    return;
  asm_snap_prep(as);
  args[0] = ASMREF_L;     /* lua_State *L */
  args[1] = ir->op1;      /* GCtab *t     */
  args[2] = ASMREF_TMP1;  /* cTValue *key */
  asm_setupresult(as, ir, ci);  /* TValue * */
  asm_gencall(as, ci, args);
  asm_tvptr(as, ra_releasetmp(as, ASMREF_TMP1), ir->op2, IRTMPREF_IN1);
}
static void asm_tmpref(ASMState *as, IRIns *ir)
{
  Reg r = ra_dest(as, ir, RSET_GPR);
  asm_tvptr(as, r, ir->op1, ir->op2);
}
static void asm_lref(ASMState *as, IRIns *ir)
{
  Reg r = ra_dest(as, ir, RSET_GPR);
#if LJ_TARGET_X86ORX64
  ra_left(as, r, ASMREF_L);
#else
  ra_leftov(as, r, ASMREF_L);
#endif
}
/* -- Calls --------------------------------------------------------------- */

/* Collect arguments from CALL* and CARG instructions. */
static void asm_collectargs(ASMState *as, IRIns *ir,
			    const CCallInfo *ci, IRRef *args)
{
  uint32_t n = CCI_XNARGS(ci);
  /* Account for split args. */
  lj_assertA(n <= CCI_NARGS_MAX*2, "too many args %d to collect", n);
  if ((ci->flags & CCI_L)) { *args++ = ASMREF_L; n--; }
  while (n-- > 1) {
    ir = IR(ir->op1);
    lj_assertA(ir->o == IR_CARG, "malformed CALL arg tree");
    args[n] = ir->op2 == REF_NIL ? 0 : ir->op2;
  }
  args[0] = ir->op1 == REF_NIL ? 0 : ir->op1;
  lj_assertA(IR(ir->op1)->o != IR_CARG, "malformed CALL arg tree");
}
/* Reconstruct CCallInfo flags for CALLX*. */
static uint32_t asm_callx_flags(ASMState *as, IRIns *ir)
{
  uint32_t nargs = 0;
  if (ir->op1 != REF_NIL) {  /* Count number of arguments first. */
    IRIns *ira = IR(ir->op1);
    nargs++;
    while (ira->o == IR_CARG) { nargs++; ira = IR(ira->op1); }
  }
#if LJ_HASFFI
  if (IR(ir->op2)->o == IR_CARG) {  /* Copy calling convention info. */
    CTypeID id = (CTypeID)IR(IR(ir->op2)->op2)->i;
    CType *ct = ctype_get(ctype_ctsG(J2G(as->J)), id);
    nargs |= ((ct->info & CTF_VARARG) ? CCI_VARARG : 0);
#if LJ_TARGET_X86
    nargs |= (ctype_cconv(ct->info) << CCI_CC_SHIFT);
#endif
  }
#endif
  return (nargs | (ir->t.irt << CCI_OTSHIFT));
}
static void asm_callid(ASMState *as, IRIns *ir, IRCallID id)
{
  const CCallInfo *ci = &lj_ir_callinfo[id];
  IRRef args[2];
  args[0] = ir->op1;
  args[1] = ir->op2;
  asm_setupresult(as, ir, ci);
  asm_gencall(as, ci, args);
}
static void asm_call(ASMState *as, IRIns *ir)
{
  IRRef args[CCI_NARGS_MAX];
  const CCallInfo *ci = &lj_ir_callinfo[ir->op2];
  asm_collectargs(as, ir, ci, args);
  asm_setupresult(as, ir, ci);
  asm_gencall(as, ci, args);
}
/* -- PHI and loop handling ----------------------------------------------- */

/* Break a PHI cycle by renaming to a free register (evict if needed). */
static void asm_phi_break(ASMState *as, RegSet blocked, RegSet blockedby,
			  RegSet allow)
{
  RegSet candidates = blocked & allow;
  if (candidates) {  /* If this register file has candidates. */
    /* Note: the set for ra_pick cannot be empty, since each register file
    ** has some registers never allocated to PHIs.
    */
    Reg down, up = ra_pick(as, ~blocked & allow);  /* Get a free register. */
    if (candidates & ~blockedby)  /* Optimize shifts, else it's a cycle. */
      candidates = candidates & ~blockedby;
    down = rset_picktop(candidates);  /* Pick candidate PHI register. */
    ra_rename(as, down, up);  /* And rename it to the free register. */
  }
}
/* PHI register shuffling.
**
** The allocator tries hard to preserve PHI register assignments across
** the loop body. Most of the time this loop does nothing, since there
** are no register mismatches.
**
** If a register mismatch is detected and ...
** - the register is currently free: rename it.
** - the register is blocked by an invariant: restore/remat and rename it.
** - Otherwise the register is used by another PHI, so mark it as blocked.
**
** The renames are order-sensitive, so just retry the loop if a register
** is marked as blocked, but has been freed in the meantime. A cycle is
** detected if all of the blocked registers are allocated. To break the
** cycle rename one of them to a free register and retry.
**
** Note that PHI spill slots are kept in sync and don't need to be shuffled.
*/
static void asm_phi_shuffle(ASMState *as)
{
  RegSet work;

  /* Find and resolve PHI register mismatches. */
  for (;;) {
    RegSet blocked = RSET_EMPTY;
    RegSet blockedby = RSET_EMPTY;
    RegSet phiset = as->phiset;
    while (phiset) {  /* Check all left PHI operand registers. */
      Reg r = rset_pickbot(phiset);
      IRIns *irl = IR(as->phireg[r]);
      Reg left = irl->r;
      if (r != left) {  /* Mismatch? */
	if (!rset_test(as->freeset, r)) {  /* PHI register blocked? */
	  IRRef ref = regcost_ref(as->cost[r]);
	  /* Blocked by other PHI (w/reg)? */
	  if (!ra_iskref(ref) && irt_ismarked(IR(ref)->t)) {
	    rset_set(blocked, r);
	    if (ra_hasreg(left))
	      rset_set(blockedby, left);
	    left = RID_NONE;
	  } else {  /* Otherwise grab register from invariant. */
	    ra_restore(as, ref);
	    checkmclim(as);
	  }
	}
	if (ra_hasreg(left)) {
	  ra_rename(as, left, r);
	  checkmclim(as);
	}
      }
      rset_clear(phiset, r);
    }
    if (!blocked) break;  /* Finished. */
    if (!(as->freeset & blocked)) {  /* Break cycles if none are free. */
      asm_phi_break(as, blocked, blockedby, RSET_GPR);
      if (!LJ_SOFTFP) asm_phi_break(as, blocked, blockedby, RSET_FPR);
      checkmclim(as);
    }  /* Else retry some more renames. */
  }

  /* Restore/remat invariants whose registers are modified inside the loop. */
#if !LJ_SOFTFP
  work = as->modset & ~(as->freeset | as->phiset) & RSET_FPR;
  while (work) {
    Reg r = rset_pickbot(work);
    ra_restore(as, regcost_ref(as->cost[r]));
    rset_clear(work, r);
    checkmclim(as);
  }
#endif
  work = as->modset & ~(as->freeset | as->phiset);
  while (work) {
    Reg r = rset_pickbot(work);
    ra_restore(as, regcost_ref(as->cost[r]));
    rset_clear(work, r);
    checkmclim(as);
  }

  /* Allocate and save all unsaved PHI regs and clear marks. */
  work = as->phiset;
  while (work) {
    Reg r = rset_picktop(work);
    IRRef lref = as->phireg[r];
    IRIns *ir = IR(lref);
    if (ra_hasspill(ir->s)) {  /* Left PHI gained a spill slot? */
      irt_clearmark(ir->t);  /* Handled here, so clear marker now. */
      ra_alloc1(as, lref, RID2RSET(r));
      ra_save(as, ir, r);  /* Save to spill slot inside the loop. */
      checkmclim(as);
    }
    rset_clear(work, r);
  }
}
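/* Cycle example (illustrative): suppose PHI x is assigned r1 but its left
** value currently sits in r2, while PHI y is assigned r2 with its left
** value in r1. Neither rename can go first, so both registers end up in
** 'blocked' with no free member. asm_phi_break() then renames one of them
** to a free register, after which the remaining moves form a simple chain
** and the retry loop resolves them in order.
*/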
/* Copy unsynced left/right PHI spill slots. Rarely needed. */
static void asm_phi_copyspill(ASMState *as)
{
  int need = 0;
  IRIns *ir;
  for (ir = IR(as->orignins-1); ir->o == IR_PHI; ir--)
    if (ra_hasspill(ir->s) && ra_hasspill(IR(ir->op1)->s))
      need |= irt_isfp(ir->t) ? 2 : 1;  /* Unsynced spill slot? */
  if ((need & 1)) {  /* Copy integer spill slots. */
#if !LJ_TARGET_X86ORX64
    Reg r = RID_TMP;
#else
    Reg r = RID_RET;
    if ((as->freeset & RSET_GPR))
      r = rset_pickbot((as->freeset & RSET_GPR));
    else
      emit_spload(as, IR(regcost_ref(as->cost[r])), r, SPOFS_TMP);
#endif
    for (ir = IR(as->orignins-1); ir->o == IR_PHI; ir--) {
      if (ra_hasspill(ir->s)) {
	IRIns *irl = IR(ir->op1);
	if (ra_hasspill(irl->s) && !irt_isfp(ir->t)) {
	  emit_spstore(as, irl, r, sps_scale(irl->s));
	  emit_spload(as, ir, r, sps_scale(ir->s));
	  checkmclim(as);
	}
      }
    }
#if LJ_TARGET_X86ORX64
    if (!rset_test(as->freeset, r))
      emit_spstore(as, IR(regcost_ref(as->cost[r])), r, SPOFS_TMP);
#endif
  }
#if !LJ_SOFTFP
  if ((need & 2)) {  /* Copy FP spill slots. */
#if LJ_TARGET_X86
    Reg r = RID_XMM0;
#else
    Reg r = RID_FPRET;
#endif
    if ((as->freeset & RSET_FPR))
      r = rset_pickbot((as->freeset & RSET_FPR));
    if (!rset_test(as->freeset, r))
      emit_spload(as, IR(regcost_ref(as->cost[r])), r, SPOFS_TMP);
    for (ir = IR(as->orignins-1); ir->o == IR_PHI; ir--) {
      if (ra_hasspill(ir->s)) {
	IRIns *irl = IR(ir->op1);
	if (ra_hasspill(irl->s) && irt_isfp(ir->t)) {
	  emit_spstore(as, irl, r, sps_scale(irl->s));
	  emit_spload(as, ir, r, sps_scale(ir->s));
	  checkmclim(as);
	}
      }
    }
    if (!rset_test(as->freeset, r))
      emit_spstore(as, IR(regcost_ref(as->cost[r])), r, SPOFS_TMP);
  }
#endif
}
/* Emit renames for left PHIs which are only spilled outside the loop. */
static void asm_phi_fixup(ASMState *as)
{
  RegSet work = as->phiset;
  while (work) {
    Reg r = rset_picktop(work);
    IRRef lref = as->phireg[r];
    IRIns *ir = IR(lref);
    if (irt_ismarked(ir->t)) {
      irt_clearmark(ir->t);
      /* Left PHI gained a spill slot before the loop? */
      if (ra_hasspill(ir->s)) {
	ra_addrename(as, r, lref, as->loopsnapno);
      }
    }
    rset_clear(work, r);
  }
}
/* Setup right PHI reference. */
static void asm_phi(ASMState *as, IRIns *ir)
{
  RegSet allow = ((!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR) &
		 ~as->phiset;
  RegSet afree = (as->freeset & allow);
  IRIns *irl = IR(ir->op1);
  IRIns *irr = IR(ir->op2);
  if (ir->r == RID_SINK)  /* Sink PHI. */
    return;
  /* Spill slot shuffling is not implemented yet (but rarely needed). */
  if (ra_hasspill(irl->s) || ra_hasspill(irr->s))
    lj_trace_err(as->J, LJ_TRERR_NYIPHI);
  /* Leave at least one register free for non-PHIs (and PHI cycle breaking). */
  if ((afree & (afree-1))) {  /* Two or more free registers? */
    Reg r;
    if (ra_noreg(irr->r)) {  /* Get a register for the right PHI. */
      r = ra_allocref(as, ir->op2, allow);
    } else {  /* Duplicate right PHI, need a copy (rare). */
      r = ra_scratch(as, allow);
      emit_movrr(as, irr, r, irr->r);
    }
    ir->r = (uint8_t)r;
    rset_set(as->phiset, r);
    as->phireg[r] = (IRRef1)ir->op1;
    irt_setmark(irl->t);  /* Marks left PHIs _with_ register. */
    if (ra_noreg(irl->r))
      ra_sethint(irl->r, r);  /* Set register hint for left PHI. */
  } else {  /* Otherwise allocate a spill slot. */
    /* This is overly restrictive, but it triggers only on synthetic code. */
    if (ra_hasreg(irl->r) || ra_hasreg(irr->r))
      lj_trace_err(as->J, LJ_TRERR_NYIPHI);
    ra_spill(as, ir);
    irr->s = ir->s;  /* Set right PHI spill slot. Sync left slot later. */
  }
}
static void asm_loop_fixup(ASMState *as);

/* Middle part of a loop. */
static void asm_loop(ASMState *as)
{
  MCode *mcspill;
  /* LOOP is a guard, so the snapno is up to date. */
  as->loopsnapno = as->snapno;
  if (as->gcsteps)
    asm_gc_check(as);
  /* LOOP marks the transition from the variant to the invariant part. */
  as->flagmcp = as->invmcp = NULL;
  as->sectref = 0;
  if (!neverfuse(as)) as->fuseref = 0;
  asm_phi_shuffle(as);
  mcspill = as->mcp;
  asm_phi_copyspill(as);
  asm_loop_fixup(as);
  as->mcloop = as->mcp;
  RA_DBGX((as, "===== LOOP ====="));
  if (!as->realign) RA_DBG_FLUSH();
  if (as->mcp != mcspill)
    emit_jmp(as, mcspill);
}
/* -- Target-specific assembler ------------------------------------------- */

#if LJ_TARGET_X86ORX64
#include "lj_asm_x86.h"
#elif LJ_TARGET_ARM
#include "lj_asm_arm.h"
#elif LJ_TARGET_ARM64
#include "lj_asm_arm64.h"
#elif LJ_TARGET_PPC
#include "lj_asm_ppc.h"
#elif LJ_TARGET_MIPS
#include "lj_asm_mips.h"
#else
#error "Missing assembler for target CPU"
#endif
/* -- Common instruction helpers ------------------------------------------ */

#if !LJ_SOFTFP32
#if !LJ_TARGET_X86ORX64
#define asm_ldexp(as, ir)	asm_callid(as, ir, IRCALL_ldexp)
#endif
static void asm_pow(ASMState *as, IRIns *ir)
{
#if LJ_64 && LJ_HASFFI
  if (!irt_isnum(ir->t))
    asm_callid(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_powi64 :
					  IRCALL_lj_carith_powu64);
  else
#endif
    asm_callid(as, ir, IRCALL_pow);
}
static void asm_div(ASMState *as, IRIns *ir)
{
#if LJ_64 && LJ_HASFFI
  if (!irt_isnum(ir->t))
    asm_callid(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_divi64 :
					  IRCALL_lj_carith_divu64);
  else
#endif
    asm_fpdiv(as, ir);
}
#endif
static void asm_mod(ASMState *as, IRIns *ir)
{
#if LJ_64 && LJ_HASFFI
  if (!irt_isint(ir->t))
    asm_callid(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_modi64 :
					  IRCALL_lj_carith_modu64);
  else
#endif
    asm_callid(as, ir, IRCALL_lj_vm_modi);
}
static void asm_fuseequal(ASMState *as, IRIns *ir)
{
  /* Fuse HREF + EQ/NE. */
  if ((ir-1)->o == IR_HREF && ir->op1 == as->curins-1) {
    as->curins--;
    asm_href(as, ir-1, (IROp)ir->o);
  } else {
    asm_equal(as, ir);
  }
}
*as
, IRIns
*ir
)
1769 asm_callid(as
, ir
, ir
->op2
== REF_NIL
? IRCALL_lj_tab_len
:
1770 IRCALL_lj_tab_len_hint
);
/* -- Instruction dispatch ------------------------------------------------ */

/* Assemble a single instruction. */
static void asm_ir(ASMState *as, IRIns *ir)
{
  switch ((IROp)ir->o) {
  /* Miscellaneous ops. */
  case IR_LOOP: asm_loop(as); break;
  case IR_NOP: case IR_XBAR:
    lj_assertA(!ra_used(ir),
	       "IR %04d not unused", (int)(ir - as->ir) - REF_BIAS);
    break;
  case IR_USE:
    ra_alloc1(as, ir->op1, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR); break;
  case IR_PHI: asm_phi(as, ir); break;
  case IR_HIOP: asm_hiop(as, ir); break;
  case IR_GCSTEP: asm_gcstep(as, ir); break;
  case IR_PROF: asm_prof(as, ir); break;

  /* Guarded assertions. */
  case IR_LT: case IR_GE: case IR_LE: case IR_GT:
  case IR_ULT: case IR_UGE: case IR_ULE: case IR_UGT:
  case IR_ABC:
    asm_comp(as, ir);
    break;
  case IR_EQ: case IR_NE: asm_fuseequal(as, ir); break;

  case IR_RETF: asm_retf(as, ir); break;

  /* Bit ops. */
  case IR_BNOT: asm_bnot(as, ir); break;
  case IR_BSWAP: asm_bswap(as, ir); break;
  case IR_BAND: asm_band(as, ir); break;
  case IR_BOR: asm_bor(as, ir); break;
  case IR_BXOR: asm_bxor(as, ir); break;
  case IR_BSHL: asm_bshl(as, ir); break;
  case IR_BSHR: asm_bshr(as, ir); break;
  case IR_BSAR: asm_bsar(as, ir); break;
  case IR_BROL: asm_brol(as, ir); break;
  case IR_BROR: asm_bror(as, ir); break;

  /* Arithmetic ops. */
  case IR_ADD: asm_add(as, ir); break;
  case IR_SUB: asm_sub(as, ir); break;
  case IR_MUL: asm_mul(as, ir); break;
  case IR_MOD: asm_mod(as, ir); break;
  case IR_NEG: asm_neg(as, ir); break;
#if LJ_SOFTFP32
  case IR_DIV: case IR_POW: case IR_ABS:
  case IR_LDEXP: case IR_FPMATH: case IR_TOBIT:
    /* Unused for LJ_SOFTFP32. */
    lj_assertA(0, "IR %04d with unused op %d",
	       (int)(ir - as->ir) - REF_BIAS, ir->o);
    break;
#else
  case IR_DIV: asm_div(as, ir); break;
  case IR_POW: asm_pow(as, ir); break;
  case IR_ABS: asm_abs(as, ir); break;
  case IR_LDEXP: asm_ldexp(as, ir); break;
  case IR_FPMATH: asm_fpmath(as, ir); break;
  case IR_TOBIT: asm_tobit(as, ir); break;
#endif
  case IR_MIN: asm_min(as, ir); break;
  case IR_MAX: asm_max(as, ir); break;

  /* Overflow-checking arithmetic ops. */
  case IR_ADDOV: asm_addov(as, ir); break;
  case IR_SUBOV: asm_subov(as, ir); break;
  case IR_MULOV: asm_mulov(as, ir); break;

  /* Memory references. */
  case IR_AREF: asm_aref(as, ir); break;
  case IR_HREF: asm_href(as, ir, 0); break;
  case IR_HREFK: asm_hrefk(as, ir); break;
  case IR_NEWREF: asm_newref(as, ir); break;
  case IR_UREFO: case IR_UREFC: asm_uref(as, ir); break;
  case IR_FREF: asm_fref(as, ir); break;
  case IR_TMPREF: asm_tmpref(as, ir); break;
  case IR_STRREF: asm_strref(as, ir); break;
  case IR_LREF: asm_lref(as, ir); break;

  /* Loads and stores. */
  case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
    asm_ahuvload(as, ir);
    break;
  case IR_FLOAD: asm_fload(as, ir); break;
  case IR_XLOAD: asm_xload(as, ir); break;
  case IR_SLOAD: asm_sload(as, ir); break;
  case IR_ALEN: asm_alen(as, ir); break;

  case IR_ASTORE: case IR_HSTORE: case IR_USTORE: asm_ahustore(as, ir); break;
  case IR_FSTORE: asm_fstore(as, ir); break;
  case IR_XSTORE: asm_xstore(as, ir); break;

  /* Allocations. */
  case IR_SNEW: case IR_XSNEW: asm_snew(as, ir); break;
  case IR_TNEW: asm_tnew(as, ir); break;
  case IR_TDUP: asm_tdup(as, ir); break;
  case IR_CNEW: case IR_CNEWI:
#if LJ_HASFFI
    asm_cnew(as, ir);
#else
    lj_assertA(0, "IR %04d with unused op %d",
	       (int)(ir - as->ir) - REF_BIAS, ir->o);
#endif
    break;

  /* Buffer operations. */
  case IR_BUFHDR: asm_bufhdr(as, ir); break;
  case IR_BUFPUT: asm_bufput(as, ir); break;
  case IR_BUFSTR: asm_bufstr(as, ir); break;

  /* Write barriers. */
  case IR_TBAR: asm_tbar(as, ir); break;
  case IR_OBAR: asm_obar(as, ir); break;

  /* Type conversions. */
  case IR_CONV: asm_conv(as, ir); break;
  case IR_TOSTR: asm_tostr(as, ir); break;
  case IR_STRTO: asm_strto(as, ir); break;

  /* Calls. */
  case IR_CALLA:
    as->gcsteps++;
    /* fallthrough */
  case IR_CALLN: case IR_CALLL: case IR_CALLS: asm_call(as, ir); break;
  case IR_CALLXS: asm_callx(as, ir); break;
  case IR_CARG: break;

  default:
    setintV(&as->J->errinfo, ir->o);
    lj_trace_err_info(as->J, LJ_TRERR_NYIIR);
    break;
  }
}
/* -- Head of trace ------------------------------------------------------- */

/* Head of a root trace. */
static void asm_head_root(ASMState *as)
{
  int32_t spadj;
  asm_head_root_base(as);
  emit_setvmstate(as, (int32_t)as->T->traceno);
  spadj = asm_stack_adjust(as);
  as->T->spadjust = (uint16_t)spadj;
  emit_spsub(as, spadj);
  /* Root traces assume a checked stack for the starting proto. */
  as->T->topslot = gcref(as->T->startpt)->pt.framesize;
}
1924 /* Head of a side trace.
1926 ** The current simplistic algorithm requires that all slots inherited
1927 ** from the parent are live in a register between pass 2 and pass 3. This
1928 ** avoids the complexity of stack slot shuffling. But of course this may
1929 ** overflow the register set in some cases and cause the dreaded error:
1930 ** "NYI: register coalescing too complex". A refined algorithm is needed.
1932 static void asm_head_side(ASMState
*as
)
1934 IRRef1 sloadins
[RID_MAX
];
1935 RegSet allow
= RSET_ALL
; /* Inverse of all coalesced registers. */
1936 RegSet live
= RSET_EMPTY
; /* Live parent registers. */
1937 RegSet pallow
= RSET_GPR
; /* Registers needed by the parent stack check. */
1939 IRIns
*irp
= &as
->parent
->ir
[REF_BASE
]; /* Parent base. */
1940 int32_t spadj
, spdelta
;
1945 if (as
->snapno
&& as
->topslot
> as
->parent
->topslot
) {
1946 /* Force snap #0 alloc to prevent register overwrite in stack check. */
1947 asm_snap_alloc(as
, 0);
1949 pbase
= asm_head_side_base(as
, irp
);
1950 if (pbase
!= RID_NONE
) {
1951 rset_clear(allow
, pbase
);
1952 rset_clear(pallow
, pbase
);
  /* Scan all parent SLOADs and collect register dependencies. */
  for (i = as->stopins; i > REF_BASE; i--) {
    IRIns *ir = IR(i);
    RegSP rs;
    lj_assertA((ir->o == IR_SLOAD && (ir->op2 & IRSLOAD_PARENT)) ||
               (LJ_SOFTFP && ir->o == IR_HIOP) || ir->o == IR_PVAL,
               "IR %04d has bad parent op %d",
               (int)(ir - as->ir) - REF_BIAS, ir->o);
    rs = as->parentmap[i - REF_FIRST];
    if (ra_hasreg(ir->r)) {
      rset_clear(allow, ir->r);
      if (ra_hasspill(ir->s)) {
        ra_save(as, ir, ir->r);
        checkmclim(as);
      }
    } else if (ra_hasspill(ir->s)) {
      irt_setmark(ir->t);
      pass2 = 1;
    }
    if (ir->r == rs) {  /* Coalesce matching registers right now. */
      ra_free(as, ir->r);
    } else if (ra_hasspill(regsp_spill(rs))) {
      if (ra_hasreg(ir->r))
        pass3 = 1;
    } else if (ra_used(ir)) {
      sloadins[rs] = (IRRef1)i;
      rset_set(live, rs);  /* Block live parent register. */
    }
    if (!ra_hasspill(regsp_spill(rs))) rset_clear(pallow, regsp_reg(rs));
  }

  /* Calculate stack frame adjustment. */
  spadj = asm_stack_adjust(as);
  spdelta = spadj - (int32_t)as->parent->spadjust;
  if (spdelta < 0) {  /* Don't shrink the stack frame. */
    spadj = (int32_t)as->parent->spadjust;
    spdelta = 0;
  }
  as->T->spadjust = (uint16_t)spadj;

  /* Reload spilled target registers. */
  if (pass2) {
    for (i = as->stopins; i > REF_BASE; i--) {
      IRIns *ir = IR(i);
      if (irt_ismarked(ir->t)) {
        RegSet mask;
        Reg r;
        RegSP rs;
        irt_clearmark(ir->t);
        rs = as->parentmap[i - REF_FIRST];
        if (!ra_hasspill(regsp_spill(rs)))
          ra_sethint(ir->r, rs);  /* Hint may be gone, set it again. */
        else if (sps_scale(regsp_spill(rs))+spdelta == sps_scale(ir->s))
          continue;  /* Same spill slot, do nothing. */
        mask = ((!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR) & allow;
        if (mask == RSET_EMPTY)
          lj_trace_err(as->J, LJ_TRERR_NYICOAL);
        r = ra_allocref(as, i, mask);
        ra_save(as, ir, r);
        rset_clear(allow, r);
        if (r == rs) {  /* Coalesce matching registers right now. */
          ra_free(as, r);
          rset_clear(live, r);
        } else if (ra_hasspill(regsp_spill(rs))) {
          pass3 = 1;
        }
        checkmclim(as);
      }
    }
  }

  /* Store trace number and adjust stack frame relative to the parent. */
  emit_setvmstate(as, (int32_t)as->T->traceno);
  emit_spsub(as, spdelta);

#if !LJ_TARGET_X86ORX64
  /* Restore BASE register from parent spill slot. */
  if (ra_hasspill(irp->s))
    emit_spload(as, IR(REF_BASE), IR(REF_BASE)->r, sps_scale(irp->s));
#endif

  /* Restore target registers from parent spill slots. */
  if (pass3) {
    RegSet work = ~as->freeset & RSET_ALL;
    while (work) {
      Reg r = rset_pickbot(work);
      IRRef ref = regcost_ref(as->cost[r]);
      RegSP rs = as->parentmap[ref - REF_FIRST];
      rset_clear(work, r);
      if (ra_hasspill(regsp_spill(rs))) {
        int32_t ofs = sps_scale(regsp_spill(rs));
        ra_free(as, r);
        emit_spload(as, IR(ref), r, ofs);
        checkmclim(as);
      }
    }
  }
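  /* RegSet primer (illustrative): a RegSet is a bitmask with one bit per
  ** register. E.g. for work == 0x14 (bits 2 and 4 set), rset_pickbot(work)
  ** yields register 2 and rset_clear(work, 2) leaves 0x10. The shuffle
  ** loop below walks such sets bottom-up until no live parent registers
  ** remain.
  */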
  /* Shuffle registers to match up target regs with parent regs. */
  for (;;) {
    RegSet work;

    /* Repeatedly coalesce free live registers by moving to their target. */
    while ((work = as->freeset & live) != RSET_EMPTY) {
      Reg rp = rset_pickbot(work);
      IRIns *ir = IR(sloadins[rp]);
      rset_clear(live, rp);
      rset_clear(allow, rp);
      ra_free(as, ir->r);
      emit_movrr(as, ir, ir->r, rp);
      checkmclim(as);
    }

    /* We're done if no live registers remain. */
    if (live == RSET_EMPTY)
      break;

    /* Break cycles by renaming one target to a temp. register. */
    if (live & RSET_GPR) {
      RegSet tmpset = as->freeset & ~live & allow & RSET_GPR;
      if (tmpset == RSET_EMPTY)
        lj_trace_err(as->J, LJ_TRERR_NYICOAL);
      ra_rename(as, rset_pickbot(live & RSET_GPR), rset_pickbot(tmpset));
    }
    if (!LJ_SOFTFP && (live & RSET_FPR)) {
      RegSet tmpset = as->freeset & ~live & allow & RSET_FPR;
      if (tmpset == RSET_EMPTY)
        lj_trace_err(as->J, LJ_TRERR_NYICOAL);
      ra_rename(as, rset_pickbot(live & RSET_FPR), rset_pickbot(tmpset));
    }
    checkmclim(as);
    /* Continue with coalescing to fix up the broken cycle(s). */
  }

  /* Inherit top stack slot already checked by parent trace. */
  as->T->topslot = as->parent->topslot;
  if (as->topslot > as->T->topslot) {  /* Need to check for higher slot? */
#ifdef EXITSTATE_CHECKEXIT
    /* Highest exit + 1 indicates stack check. */
    ExitNo exitno = as->T->nsnap;
#else
    /* Reuse the parent exit in the context of the parent trace. */
    ExitNo exitno = as->J->exitno;
#endif
    as->T->topslot = (uint8_t)as->topslot;  /* Remember for child traces. */
    asm_stack_check(as, as->topslot, irp, pallow, exitno);
  }
}
/* -- Tail of trace ------------------------------------------------------- */

/* Get base slot for a snapshot. */
static BCReg asm_baseslot(ASMState *as, SnapShot *snap, int *gotframe)
{
  SnapEntry *map = &as->T->snapmap[snap->mapofs];
  MSize n;
  for (n = snap->nent; n > 0; n--) {
    SnapEntry sn = map[n-1];
    if ((sn & SNAP_FRAME)) {
      *gotframe = 1;
      return snap_slot(sn) - LJ_FR2;
    }
  }
  return 0;
}
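/* Example (illustrative): if the snapshot maps slots [0..5] and the entry
** for slot #3 carries the SNAP_FRAME flag, the backwards scan above finds
** it first, sets *gotframe and returns 3 - LJ_FR2. Without any frame entry
** the base slot stays 0.
*/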
/* Link to another trace. */
static void asm_tail_link(ASMState *as)
{
  SnapNo snapno = as->T->nsnap-1;  /* Last snapshot. */
  SnapShot *snap = &as->T->snap[snapno];
  int gotframe = 0;
  BCReg baseslot = asm_baseslot(as, snap, &gotframe);

  as->topslot = snap->topslot;
  checkmclim(as);
  ra_allocref(as, REF_BASE, RID2RSET(RID_BASE));

  if (as->T->link == 0) {
    /* Setup fixed registers for exit to interpreter. */
    const BCIns *pc = snap_pc(&as->T->snapmap[snap->mapofs + snap->nent]);
    int32_t mres;
    if (bc_op(*pc) == BC_JLOOP) {  /* NYI: find a better way to do this. */
      BCIns *retpc = &traceref(as->J, bc_d(*pc))->startins;
      if (bc_isret(bc_op(*retpc)))
        pc = retpc;
    }
#if LJ_GC64
    emit_loadu64(as, RID_LPC, u64ptr(pc));
#else
    ra_allockreg(as, i32ptr(J2GG(as->J)->dispatch), RID_DISPATCH);
    ra_allockreg(as, i32ptr(pc), RID_LPC);
#endif
    mres = (int32_t)(snap->nslots - baseslot - LJ_FR2);
    switch (bc_op(*pc)) {
    case BC_CALLM: case BC_CALLMT:
      mres -= (int32_t)(1 + LJ_FR2 + bc_a(*pc) + bc_c(*pc)); break;
    case BC_RETM: mres -= (int32_t)(bc_a(*pc) + bc_d(*pc)); break;
    case BC_TSETM: mres -= (int32_t)bc_a(*pc); break;
    default: if (bc_op(*pc) < BC_FUNCF) mres = 0; break;
    }
    ra_allockreg(as, mres, RID_RET);  /* Return MULTRES or 0. */
  } else if (baseslot) {
    /* Save modified BASE for linking to trace with higher start frame. */
    emit_setgl(as, RID_BASE, jit_base);
  }
  emit_addptr(as, RID_BASE, 8*(int32_t)baseslot);
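  /* Worked example for the MULTRES computation above (hypothetical
  ** numbers): a BC_CALLM with A=2, C=1, snap->nslots=8 and baseslot=0 on
  ** a non-GC64 build (LJ_FR2=0) gives mres = (8-0-0) - (1+0+2+1) = 4,
  ** which is handed to the interpreter in RID_RET as the MULTRES value.
  */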

  if (as->J->ktrace) {  /* Patch ktrace slot with the final GCtrace pointer. */
    setgcref(IR(as->J->ktrace)[LJ_GC64].gcr, obj2gco(as->J->curfinal));
    IR(as->J->ktrace)->o = IR_KGC;
  }

  /* Sync the interpreter state with the on-trace state. */
  asm_stack_restore(as, snap);

  /* Root traces that add frames need to check the stack at the end. */
  if (!as->parent && gotframe)
    asm_stack_check(as, as->topslot, NULL, as->freeset & RSET_GPR, snapno);
}
/* -- Trace setup --------------------------------------------------------- */

/* Clear reg/sp for all instructions and add register hints. */
static void asm_setup_regsp(ASMState *as)
{
  GCtrace *T = as->T;
  int sink = T->sinktags;
  IRRef nins = T->nins;
  IRIns *ir, *lastir;
  int inloop;
#if LJ_TARGET_ARM
  uint32_t rload = 0xa6402a64;
#endif

  ra_setup(as);
#if LJ_TARGET_ARM64
  ra_setkref(as, RID_GL, (intptr_t)J2G(as->J));
#endif

  /* Clear reg/sp for constants. */
  for (ir = IR(T->nk), lastir = IR(REF_BASE); ir < lastir; ir++) {
    ir->prev = REGSP_INIT;
    if (irt_is64(ir->t) && ir->o != IR_KNULL) {
#if LJ_GC64
      /* The false-positive of irt_is64() for ASMREF_L (REF_NIL) is OK here. */
      ir->i = 0;  /* Will become non-zero only for RIP-relative addresses. */
#else
      /* Make life easier for backends by putting address of constant in i. */
      ir->i = (int32_t)(intptr_t)(ir+1);
#endif
      ir++;
    }
  }

  /* REF_BASE is used for implicit references to the BASE register. */
  lastir->prev = REGSP_HINT(RID_BASE);

  as->snaprename = nins;
  as->snapref = nins;
  as->snapno = T->nsnap;
  as->snapalloc = 0;

  as->stopins = REF_BASE;
  as->orignins = nins;
  as->curins = nins;

  /* Setup register hints for parent link instructions. */
  ir = IR(REF_FIRST);
  if (as->parent) {
    uint16_t *p;
    lastir = lj_snap_regspmap(as->J, as->parent, as->J->exitno, ir);
    if (lastir - ir > LJ_MAX_JSLOTS)
      lj_trace_err(as->J, LJ_TRERR_NYICOAL);
    as->stopins = (IRRef)((lastir-1) - as->ir);
    for (p = as->parentmap; ir < lastir; ir++) {
      RegSP rs = ir->prev;
      *p++ = (uint16_t)rs;  /* Copy original parent RegSP to parentmap. */
      if (!ra_hasspill(regsp_spill(rs)))
        ir->prev = (uint16_t)REGSP_HINT(regsp_reg(rs));
      else
        ir->prev = REGSP_INIT;
    }
  }

  inloop = 0;
  as->evenspill = SPS_FIRST;
  for (lastir = IR(nins); ir < lastir; ir++) {
    if (sink) {
      if (ir->r == RID_SINK)
        continue;
      if (ir->r == RID_SUNK) {  /* Revert after ASM restart. */
        ir->r = RID_SINK;
        continue;
      }
    }
    switch (ir->o) {
    case IR_LOOP:
      inloop = 1;
      break;
#if LJ_TARGET_ARM
    case IR_SLOAD:
      if (!((ir->op2 & IRSLOAD_TYPECHECK) || (ir+1)->o == IR_HIOP))
        break;
      /* fallthrough */
    case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
      if (!LJ_SOFTFP && irt_isnum(ir->t)) break;
      ir->prev = (uint16_t)REGSP_HINT((rload & 15));
      rload = lj_ror(rload, 4);
      continue;
#endif
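    /* The rload constant above is a round-robin hint table: the low nibble
    ** is handed out via (rload & 15), then rotated away with lj_ror(rload,
    ** 4). Starting from 0xa6402a64 this yields the hint cycle 4, 6, 10, 2,
    ** 0, 4, 6, 10, ... spreading load result hints over several registers.
    */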
    case IR_TMPREF:
      if ((ir->op2 & IRTMPREF_OUT2) && as->evenspill < 4)
        as->evenspill = 4;  /* TMPREF OUT2 needs two TValues on the stack. */
      break;
    case IR_CALLXS: {
      CCallInfo ci;
      ci.flags = asm_callx_flags(as, ir);
      ir->prev = asm_setup_call_slots(as, ir, &ci);
      if (inloop)
        as->modset |= RSET_SCRATCH;
      continue;
      }
    case IR_CALLL:
      /* lj_vm_next needs two TValues on the stack. */
#if LJ_TARGET_X64 && LJ_ABI_WIN
      if (ir->op2 == IRCALL_lj_vm_next && as->evenspill < SPS_FIRST + 4)
        as->evenspill = SPS_FIRST + 4;
#else
      if (SPS_FIRST < 4 && ir->op2 == IRCALL_lj_vm_next && as->evenspill < 4)
        as->evenspill = 4;
#endif
      /* fallthrough */
    case IR_CALLN: case IR_CALLA: case IR_CALLS: {
      const CCallInfo *ci = &lj_ir_callinfo[ir->op2];
      ir->prev = asm_setup_call_slots(as, ir, ci);
      if (inloop)
        as->modset |= (ci->flags & CCI_NOFPRCLOBBER) ?
                      (RSET_SCRATCH & ~RSET_FPR) : RSET_SCRATCH;
      continue;
      }
    case IR_HIOP:
      switch ((ir-1)->o) {
#if LJ_SOFTFP && LJ_TARGET_ARM
      case IR_SLOAD: case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
        if (ra_hashint((ir-1)->r)) {
          ir->prev = (ir-1)->prev + 1;
          continue;
        }
        break;
#endif
#if !LJ_SOFTFP && LJ_NEED_FP64 && LJ_32 && LJ_HASFFI
      case IR_CONV:
        if (irt_isfp((ir-1)->t)) {
          ir->prev = REGSP_HINT(RID_FPRET);
          continue;
        }
#endif
        /* fallthrough */
      case IR_CALLN: case IR_CALLL: case IR_CALLS: case IR_CALLXS:
#if LJ_SOFTFP
      case IR_MIN: case IR_MAX:
#endif
        (ir-1)->prev = REGSP_HINT(RID_RETLO);
        ir->prev = REGSP_HINT(RID_RETHI);
        continue;
      default:
        break;
      }
      break;
#if LJ_SOFTFP
    case IR_MIN: case IR_MAX:
      if ((ir+1)->o != IR_HIOP) break;
      /* fallthrough */
#endif
    /* C calls evict all scratch regs and return results in RID_RET. */
    case IR_SNEW: case IR_XSNEW: case IR_NEWREF: case IR_BUFPUT:
      if (REGARG_NUMGPR < 3 && as->evenspill < 3)
        as->evenspill = 3;  /* lj_str_new and lj_tab_newkey need 3 args. */
#if LJ_TARGET_X86 && LJ_HASFFI
      if (0) {
    case IR_CNEW:
        if (ir->op2 != REF_NIL && as->evenspill < 4)
          as->evenspill = 4;  /* lj_cdata_newv needs 4 args. */
      }
#else
    case IR_CNEW:
#endif
      /* fallthrough */
    case IR_TNEW: case IR_TDUP: case IR_CNEWI: case IR_TOSTR:
    case IR_BUFSTR:
      ir->prev = REGSP_HINT(RID_RET);
      if (inloop)
        as->modset = RSET_SCRATCH;
      continue;
    case IR_STRTO: case IR_OBAR:
      if (inloop)
        as->modset = RSET_SCRATCH;
      break;
#if !LJ_SOFTFP
#if !LJ_TARGET_X86ORX64
    case IR_LDEXP:
#endif
#endif
      /* fallthrough */
    case IR_POW:
      if (!LJ_SOFTFP && irt_isnum(ir->t)) {
        if (inloop)
          as->modset |= RSET_SCRATCH;
#if LJ_TARGET_X86ORX64
        if (irt_isnum(IR(ir->op2)->t)) {
          if (as->evenspill < 4)  /* Leave room to call pow(). */
            as->evenspill = 4;
        }
        break;
#else
        ir->prev = REGSP_HINT(RID_FPRET);
        continue;
#endif
      }
      /* fallthrough */ /* for integer POW */
    case IR_DIV: case IR_MOD:
      if ((LJ_64 && LJ_SOFTFP) || !irt_isnum(ir->t)) {
        ir->prev = REGSP_HINT(RID_RET);
        if (inloop)
          as->modset |= (RSET_SCRATCH & RSET_GPR);
        continue;
      }
      break;
#if LJ_64 && LJ_SOFTFP
    case IR_ADD: case IR_SUB: case IR_MUL:
      if (irt_isnum(ir->t)) {
        ir->prev = REGSP_HINT(RID_RET);
        if (inloop)
          as->modset |= (RSET_SCRATCH & RSET_GPR);
        continue;
      }
      break;
#endif
    case IR_FPMATH:
#if LJ_TARGET_X86ORX64
      if (ir->op2 <= IRFPM_TRUNC) {
        if (!(as->flags & JIT_F_SSE4_1)) {
          ir->prev = REGSP_HINT(RID_XMM0);
          if (inloop)
            as->modset |= RSET_RANGE(RID_XMM0, RID_XMM3+1)|RID2RSET(RID_EAX);
          continue;
        }
        break;
      }
#endif
      if (inloop)
        as->modset |= RSET_SCRATCH;
#if LJ_TARGET_X86ORX64
      break;
#else
      ir->prev = REGSP_HINT(RID_FPRET);
      continue;
#endif
#if LJ_TARGET_X86ORX64
    /* Non-constant shift counts need to be in RID_ECX on x86/x64. */
    case IR_BSHL: case IR_BSHR: case IR_BSAR:
      if ((as->flags & JIT_F_BMI2))  /* Except if BMI2 is available. */
        break;
      /* fallthrough */
    case IR_BROL: case IR_BROR:
      if (!irref_isk(ir->op2) && !ra_hashint(IR(ir->op2)->r)) {
        IR(ir->op2)->r = REGSP_HINT(RID_ECX);
        if (inloop)
          rset_set(as->modset, RID_ECX);
      }
      break;
#endif
    /* Do not propagate hints across type conversions or loads. */
    case IR_TOBIT:
    case IR_XLOAD:
#if !LJ_TARGET_ARM
    case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
#endif
      break;
    case IR_CONV:
      if (irt_isfp(ir->t) || (ir->op2 & IRCONV_SRCMASK) == IRT_NUM ||
          (ir->op2 & IRCONV_SRCMASK) == IRT_FLOAT)
        break;
      /* fallthrough */
    default:
      /* Propagate hints across likely 'op reg, imm' or 'op reg'. */
      if (irref_isk(ir->op2) && !irref_isk(ir->op1) &&
          ra_hashint(regsp_reg(IR(ir->op1)->prev))) {
        ir->prev = IR(ir->op1)->prev;
        continue;
      }
      break;
    }
    ir->prev = REGSP_INIT;
  }
  if ((as->evenspill & 1))
    as->oddspill = as->evenspill++;
  else
    as->oddspill = 0;
}
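/* Parity note (illustrative): 64 bit spills occupy an aligned even/odd
** slot pair, so evenspill is kept even. E.g. if the scan above ends with
** evenspill == 5, slot 5 is set aside in oddspill for single-slot spills
** and evenspill is bumped to 6.
*/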
/* -- Assembler core ------------------------------------------------------ */

/* Assemble a trace. */
void lj_asm_trace(jit_State *J, GCtrace *T)
{
  ASMState as_;
  ASMState *as = &as_;

  /* Remove nops/renames left over from ASM restart due to LJ_TRERR_MCODELM. */
  {
    IRRef nins = T->nins;
    IRIns *ir = &T->ir[nins-1];
    if (ir->o == IR_NOP || ir->o == IR_RENAME) {
      do { ir--; nins--; } while (ir->o == IR_NOP || ir->o == IR_RENAME);
      T->nins = nins;
    }
  }

  /* Ensure an initialized instruction beyond the last one for HIOP checks. */
  /* This also allows one RENAME to be added without reallocating curfinal. */
  as->orignins = lj_ir_nextins(J);
  lj_ir_nop(&J->cur.ir[as->orignins]);

  /* Setup initial state. Copy some fields to reduce indirections. */
  as->J = J;
  as->T = T;
  J->curfinal = lj_trace_alloc(J->L, T);  /* This copies the IR, too. */
  as->flags = J->flags;
  as->loopref = J->loopref;
  as->realign = NULL;
  as->loopinv = 0;
  as->parent = J->parent ? traceref(J, J->parent) : NULL;
#ifdef LUAJIT_RANDOM_RA
  (void)lj_prng_u64(&J2G(J)->prng);  /* Ensure PRNG step between traces. */
#endif

  /* Reserve MCode memory. */
  as->mctop = as->mctoporig = lj_mcode_reserve(J, &as->mcbot);
  as->mcp = as->mctop;
  as->mclim = as->mcbot + MCLIM_REDZONE;
  asm_setup_target(as);

  /*
  ** This is a loop, because the MCode may have to be (re-)assembled
  ** multiple times:
  **
  ** 1. as->realign is set (and the assembly aborted), if the arch-specific
  **    backend wants the MCode to be aligned differently.
  **
  **    This is currently only the case on x86/x64, where small loops get
  **    an aligned loop body plus a short branch. Not much effort is wasted,
  **    because the abort happens very quickly and only once.
  **
  ** 2. The IR is immovable, since the MCode embeds pointers to various
  **    constants inside the IR. But RENAMEs may need to be added to the IR
  **    during assembly, which might grow and reallocate the IR. We check
  **    at the end if the IR (in J->cur.ir) has actually grown, resize the
  **    copy (in J->curfinal.ir) and try again.
  **
  **    95% of all traces have zero RENAMEs, 3% have one RENAME, 1.5% have
  **    2 RENAMEs and only 0.5% have more than that. That's why we opt to
  **    always have one spare slot in the IR (see above), which means we
  **    have to redo the assembly for only ~2% of all traces.
  **
  **    Very, very rarely, this needs to be done repeatedly, since the
  **    location of constants inside the IR (actually, reachability from
  **    a global pointer) may affect register allocation and thus the
  **    number of RENAMEs.
  */
  for (;;) {
    as->mcp = as->mctop;
#ifdef LUA_USE_ASSERT
    as->mcp_prev = as->mcp;
#endif
    as->ir = J->curfinal->ir;  /* Use the copied IR. */
    as->curins = J->cur.nins = as->orignins;
#ifdef LUAJIT_RANDOM_RA
    as->prngstate = J2G(J)->prng;  /* Must (re)start from identical state. */
#endif

    RA_DBG_START();
    RA_DBGX((as, "===== STOP ====="));

    /* General trace setup. Emit tail of trace. */
    asm_tail_prep(as);
    as->mcloop = NULL;
    as->flagmcp = NULL;
    as->topslot = 0;
    as->gcsteps = 0;
    as->sectref = as->loopref;
    as->fuseref = (as->flags & JIT_F_OPT_FUSE) ? as->loopref : FUSE_DISABLED;
    asm_setup_regsp(as);
    if (!as->loopref)
      asm_tail_link(as);

    /* Assemble a trace in linear backwards order. */
    for (as->curins--; as->curins > as->stopins; as->curins--) {
      IRIns *ir = IR(as->curins);
      /* 64 bit types handled by SPLIT for 32 bit archs. */
      lj_assertA(!(LJ_32 && irt_isint64(ir->t)),
                 "IR %04d has unsplit 64 bit type",
                 (int)(ir - as->ir) - REF_BIAS);
      asm_snap_prev(as);
      if (!ra_used(ir) && !ir_sideeff(ir) && (as->flags & JIT_F_OPT_DCE))
        continue;  /* Dead-code elimination can be soooo easy. */
      if (irt_isguard(ir->t))
        asm_snap_prep(as);
      RA_DBG_REF();
      checkmclim(as);
      asm_ir(as, ir);
    }

    if (as->realign && J->curfinal->nins >= T->nins)
      continue;  /* Retry in case only the MCode needs to be realigned. */

    /* Emit head of trace. */
    RA_DBG_REF();
    checkmclim(as);
    if (as->gcsteps > 0) {
      as->curins = as->T->snap[0].ref;
      asm_snap_prep(as);  /* The GC check is a guard. */
      asm_gc_check(as);
      as->curins = as->stopins;
    }
    ra_evictk(as);
    if (as->parent)
      asm_head_side(as);
    else
      asm_head_root(as);
    asm_phi_fixup(as);

    if (J->curfinal->nins >= T->nins) {  /* IR didn't grow? */
      lj_assertA(J->curfinal->nk == T->nk, "unexpected IR constant growth");
      memcpy(J->curfinal->ir + as->orignins, T->ir + as->orignins,
             (T->nins - as->orignins) * sizeof(IRIns));  /* Copy RENAMEs. */
      T->nins = J->curfinal->nins;
      /* Fill mcofs of any unprocessed snapshots. */
      as->curins = REF_FIRST;
      asm_snap_prev(as);
      break;  /* Done. */
    }

    /* Otherwise try again with a bigger IR. */
    lj_trace_free(J2G(J), J->curfinal);
    J->curfinal = NULL;  /* In case lj_trace_alloc() OOMs. */
    J->curfinal = lj_trace_alloc(J->L, T);
  }

  RA_DBGX((as, "===== START ===="));
  RA_DBG_FLUSH();
  if (as->freeset != RSET_ALL)
    lj_trace_err(as->J, LJ_TRERR_BADRA);  /* Ouch! Should never happen. */

  /* Set trace entry point before fixing up tail to allow link to self. */
  T->mcode = as->mcp;
  T->mcloop = as->mcloop ? (MSize)((char *)as->mcloop - (char *)as->mcp) : 0;
  if (as->loopref)
    asm_loop_tail_fixup(as);
  else
    asm_tail_fixup(as, T->link);  /* Note: this may change as->mctop! */
  T->szmcode = (MSize)((char *)as->mctop - (char *)as->mcp);
  asm_snap_fixup_mcofs(as);
#if LJ_TARGET_MCODE_FIXUP
  asm_mcode_fixup(T->mcode, T->szmcode);
#endif
  lj_mcode_sync(T->mcode, as->mctoporig);
}