/*
** IR assembler (SSA IR -> machine code).
** Copyright (C) 2005-2023 Mike Pall. See Copyright Notice in luajit.h
*/

#include "lj_ircall.h"
#include "lj_dispatch.h"
#include "lj_target.h"
/* -- Assembler state and common macros ----------------------------------- */

/* Assembler state. */
typedef struct ASMState {
  RegCost cost[RID_MAX];  /* Reference and blended allocation cost for regs. */

  MCode *mcp;		/* Current MCode pointer (grows down). */
  MCode *mclim;		/* Lower limit for MCode memory + red zone. */
  MCode *mcp_prev;	/* Red zone overflow check. */

  IRIns *ir;		/* Copy of pointer to IR instructions/constants. */
  jit_State *J;		/* JIT compiler state. */

#if LJ_TARGET_X86ORX64
  x86ModRM mrm;		/* Fused x86 address operand. */
#endif

  RegSet freeset;	/* Set of free registers. */
  RegSet modset;	/* Set of registers modified inside the loop. */
  RegSet weakset;	/* Set of weakly referenced registers. */
  RegSet phiset;	/* Set of PHI registers. */

  uint32_t flags;	/* Copy of JIT compiler flags. */
  int loopinv;		/* Loop branch inversion (0:no, 1:yes, 2:yes+CC_P). */

  int32_t evenspill;	/* Next even spill slot. */
  int32_t oddspill;	/* Next odd spill slot (or 0). */

  IRRef curins;		/* Reference of current instruction. */
  IRRef stopins;	/* Stop assembly before hitting this instruction. */
  IRRef orignins;	/* Original T->nins. */

  IRRef snapref;	/* Current snapshot is active after this reference. */
  IRRef snaprename;	/* Rename highwater mark for snapshot check. */
  SnapNo snapno;	/* Current snapshot number. */
  SnapNo loopsnapno;	/* Loop snapshot number. */
  BloomFilter snapfilt1, snapfilt2;	/* Filled with snapshot refs. */

  IRRef fuseref;	/* Fusion limit (loopref, 0 or FUSE_DISABLED). */
  IRRef sectref;	/* Section base reference (loopref or 0). */
  IRRef loopref;	/* Reference of LOOP instruction (or 0). */

  BCReg topslot;	/* Number of slots for stack check (unless 0). */
  int32_t gcsteps;	/* Accumulated number of GC steps (per section). */

  GCtrace *T;		/* Trace to assemble. */
  GCtrace *parent;	/* Parent trace (or NULL). */

  MCode *mcbot;		/* Bottom of reserved MCode. */
  MCode *mctop;		/* Top of generated MCode. */
  MCode *mcloop;	/* Pointer to loop MCode (or NULL). */
  MCode *invmcp;	/* Points to invertible loop branch (or NULL). */
  MCode *flagmcp;	/* Pending opportunity to merge flag setting ins. */
  MCode *realign;	/* Realign loop if not NULL. */

  int32_t krefk[RID_NUM_KREF];
  IRRef1 phireg[RID_MAX];	/* PHI register references. */
  uint16_t parentmap[LJ_MAX_JSLOTS];	/* Parent instruction to RegSP map. */
} ASMState;
#define IR(ref)			(&as->ir[(ref)])

#define ASMREF_TMP1		REF_TRUE	/* Temp. register. */
#define ASMREF_TMP2		REF_FALSE	/* Temp. register. */
#define ASMREF_L		REF_NIL		/* Stores register for L. */

/* Check for variant to invariant references. */
#define iscrossref(as, ref)	((ref) < as->sectref)

/* Inhibit memory op fusion from variant to invariant references. */
#define FUSE_DISABLED		(~(IRRef)0)
#define mayfuse(as, ref)	((ref) > as->fuseref)
#define neverfuse(as)		(as->fuseref == FUSE_DISABLED)
#define canfuse(as, ir)		(!neverfuse(as) && !irt_isphi((ir)->t))
#define opisfusableload(o) \
  ((o) == IR_ALOAD || (o) == IR_HLOAD || (o) == IR_ULOAD || \
   (o) == IR_FLOAD || (o) == IR_XLOAD || (o) == IR_SLOAD || (o) == IR_VLOAD)
/* Sparse limit checks using a red zone before the actual limit. */
#define MCLIM_REDZONE	64
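/*
** Note on the red zone scheme: MCode is emitted top-down (as->mcp only
** ever decreases), so a single lower-bound check of as->mcp against
** as->mclim = as->mcbot + MCLIM_REDZONE covers all emission since the
** previous check, provided no intervening code sequence emits more than
** MCLIM_REDZONE bytes between two checkmclim() calls.
*/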
static LJ_NORET LJ_NOINLINE void asm_mclimit(ASMState *as)
{
  lj_mcode_limiterr(as->J, (size_t)(as->mctop - as->mcp + 4*MCLIM_REDZONE));
}
static LJ_AINLINE void checkmclim(ASMState *as)
{
#ifdef LUA_USE_ASSERT
  if (as->mcp + MCLIM_REDZONE < as->mcp_prev) {
    IRIns *ir = IR(as->curins+1);
    fprintf(stderr, "RED ZONE OVERFLOW: %p IR %04d %02d %04d %04d\n", as->mcp,
	    as->curins+1-REF_BIAS, ir->o, ir->op1-REF_BIAS, ir->op2-REF_BIAS);
    lua_assert(0);
  }
#endif
  if (LJ_UNLIKELY(as->mcp < as->mclim)) asm_mclimit(as);
#ifdef LUA_USE_ASSERT
  as->mcp_prev = as->mcp;
#endif
}
#ifdef RID_NUM_KREF
#define ra_iskref(ref)		((ref) < RID_NUM_KREF)
#define ra_krefreg(ref)		((Reg)(RID_MIN_KREF + (Reg)(ref)))
#define ra_krefk(as, ref)	(as->krefk[(ref)])

static LJ_AINLINE void ra_setkref(ASMState *as, Reg r, int32_t k)
{
  IRRef ref = (IRRef)(r - RID_MIN_KREF);
  as->krefk[ref] = k;
  as->cost[r] = REGCOST(ref, ref);
}
#else
#define ra_iskref(ref)		0
#define ra_krefreg(ref)		RID_MIN_GPR
#define ra_krefk(as, ref)	0
#endif
/* Arch-specific field offsets. */
static const uint8_t field_ofs[IRFL__MAX+1] = {
#define FLOFS(name, ofs)	(uint8_t)(ofs),
IRFLDEF(FLOFS)
#undef FLOFS
  0
};
/* -- Target-specific instruction emitter --------------------------------- */

#if LJ_TARGET_X86ORX64
#include "lj_emit_x86.h"
#elif LJ_TARGET_ARM
#include "lj_emit_arm.h"
#elif LJ_TARGET_PPC
#include "lj_emit_ppc.h"
#elif LJ_TARGET_MIPS
#include "lj_emit_mips.h"
#else
#error "Missing instruction emitter for target CPU"
#endif
/* -- Register allocator debugging ---------------------------------------- */

/* #define LUAJIT_DEBUG_RA */

#ifdef LUAJIT_DEBUG_RA

#include <stdio.h>
#include <stdarg.h>

#define RIDNAME(name)	#name,
static const char *const ra_regname[] = {
  GPRDEF(RIDNAME)
  FPRDEF(RIDNAME)
  VRIDDEF(RIDNAME)
  NULL
};
#undef RIDNAME

static char ra_dbg_buf[65536];
static char *ra_dbg_p;
static char *ra_dbg_merge;
static MCode *ra_dbg_mcp;
static void ra_dstart(void)
{
  ra_dbg_p = ra_dbg_buf;
  ra_dbg_merge = NULL;
  ra_dbg_mcp = NULL;
}

static void ra_dflush(void)
{
  fwrite(ra_dbg_buf, 1, (size_t)(ra_dbg_p-ra_dbg_buf), stdout);
  ra_dstart();
}
static void ra_dprintf(ASMState *as, const char *fmt, ...)
{
  char *p;
  va_list argp;
  va_start(argp, fmt);
  p = ra_dbg_mcp == as->mcp ? ra_dbg_merge : ra_dbg_p;
  ra_dbg_mcp = NULL;
  p += sprintf(p, "%08x \e[36m%04d ", (uintptr_t)as->mcp, as->curins-REF_BIAS);
  for (;;) {
    const char *e = strchr(fmt, '$');
    if (e == NULL) break;
    memcpy(p, fmt, (size_t)(e-fmt));
    p += e-fmt;
    if (e[1] == 'r') {  /* Print lowercased register name. */
      Reg r = va_arg(argp, Reg) & RID_MASK;
      if (r <= RID_MAX) {
	const char *q;
	for (q = ra_regname[r]; *q; q++)
	  *p++ = *q >= 'A' && *q <= 'Z' ? *q + 0x20 : *q;
      } else {
	*p++ = '?';
	lua_assert(0);
      }
    } else if (e[1] == 'f' || e[1] == 'i') {  /* Print IR ref or constant. */
      IRRef ref;
      if (e[1] == 'f')
	ref = va_arg(argp, IRRef);
      else
	ref = va_arg(argp, IRIns *) - as->ir;
      if (ref >= REF_BIAS)
	p += sprintf(p, "%04d", ref - REF_BIAS);
      else
	p += sprintf(p, "K%03d", REF_BIAS - ref);
    } else if (e[1] == 's') {  /* Print stack slot. */
      uint32_t slot = va_arg(argp, uint32_t);
      p += sprintf(p, "[sp+0x%x]", sps_scale(slot));
    } else if (e[1] == 'x') {  /* Print 32 bit hex number. */
      p += sprintf(p, "%08x", va_arg(argp, int32_t));
    } else {
      lua_assert(0);
    }
    fmt = e+2;
  }
  va_end(argp);
  while (*fmt)
    *p++ = *fmt++;
  *p++ = '\e'; *p++ = '['; *p++ = 'm'; *p++ = '\n';
  if (p > ra_dbg_buf+sizeof(ra_dbg_buf)-256) {
    fwrite(ra_dbg_buf, 1, (size_t)(p-ra_dbg_buf), stdout);
    p = ra_dbg_buf;
  }
  ra_dbg_p = p;
}
#define RA_DBG_START()	ra_dstart()
#define RA_DBG_FLUSH()	ra_dflush()
#define RA_DBG_REF() \
  do { char *_p = ra_dbg_p; ra_dprintf(as, ""); \
       ra_dbg_merge = _p; ra_dbg_mcp = as->mcp; } while (0)
#define RA_DBGX(x)	ra_dprintf x

#else
#define RA_DBG_START()	((void)0)
#define RA_DBG_FLUSH()	((void)0)
#define RA_DBG_REF()	((void)0)
#define RA_DBGX(x)	((void)0)
#endif
/* -- Register allocator -------------------------------------------------- */

#define ra_free(as, r)		rset_set(as->freeset, (r))
#define ra_modified(as, r)	rset_set(as->modset, (r))
#define ra_weak(as, r)		rset_set(as->weakset, (r))
#define ra_noweak(as, r)	rset_clear(as->weakset, (r))

#define ra_used(ir)		(ra_hasreg((ir)->r) || ra_hasspill((ir)->s))
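/*
** Informal note on the register sets: weakset holds registers whose
** contents are only needed by snapshots; ra_restore() below reloads
** non-weak references only, so weak registers can be reused without
** emitting a restore. modset tracks registers clobbered inside the loop,
** which forces invariants held in them to be restored or rematerialized.
*/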
/* Setup register allocator. */
static void ra_setup(ASMState *as)
{
  Reg r;
  RA_DBG_START();
  /* Initially all regs (except the stack pointer) are free for use. */
  as->freeset = RSET_INIT;
  as->modset = RSET_EMPTY;
  as->weakset = RSET_EMPTY;
  as->phiset = RSET_EMPTY;
  memset(as->phireg, 0, sizeof(as->phireg));
  for (r = RID_MIN_GPR; r < RID_MAX; r++)
    as->cost[r] = REGCOST(~0u, 0u);
}
/* Rematerialize constants. */
static Reg ra_rematk(ASMState *as, IRRef ref)
{
  IRIns *ir;
  Reg r;
  if (ra_iskref(ref)) {
    r = ra_krefreg(ref);
    lua_assert(!rset_test(as->freeset, r));
    ra_free(as, r);
    ra_modified(as, r);
    emit_loadi(as, r, ra_krefk(as, ref));
    return r;
  }
  ir = IR(ref);
  r = ir->r;
  lua_assert(ra_hasreg(r) && !ra_hasspill(ir->s));
  ra_free(as, r);
  ra_modified(as, r);
  ir->r = RID_INIT;  /* Do not keep any hint. */
  RA_DBGX((as, "remat $i $r", ir, r));
#if !LJ_SOFTFP
  if (ir->o == IR_KNUM) {
    emit_loadn(as, r, ir_knum(ir));
  } else
#endif
  if (emit_canremat(REF_BASE) && ir->o == IR_BASE) {
    ra_sethint(ir->r, RID_BASE);  /* Restore BASE register hint. */
    emit_getgl(as, r, jit_base);
  } else if (emit_canremat(ASMREF_L) && ir->o == IR_KPRI) {
    lua_assert(irt_isnil(ir->t));  /* REF_NIL stores ASMREF_L register. */
    emit_getgl(as, r, jit_L);
#if LJ_64
  } else if (ir->o == IR_KINT64) {
    emit_loadu64(as, r, ir_kint64(ir)->u64);
#endif
  } else {
    lua_assert(ir->o == IR_KINT || ir->o == IR_KGC ||
	       ir->o == IR_KPTR || ir->o == IR_KKPTR || ir->o == IR_KNULL);
    emit_loadi(as, r, ir->i);
  }
  return r;
}
/* Force a spill. Allocate a new spill slot if needed. */
static int32_t ra_spill(ASMState *as, IRIns *ir)
{
  int32_t slot = ir->s;
  lua_assert(ir >= as->ir + REF_TRUE);
  if (!ra_hasspill(slot)) {
    if (irt_is64(ir->t)) {
      slot = as->evenspill;
      as->evenspill += 2;
    } else if (as->oddspill) {
      slot = as->oddspill;
      as->oddspill = 0;
    } else {
      slot = as->evenspill;
      as->oddspill = slot+1;
      as->evenspill += 2;
    }
    if (as->evenspill > 256)
      lj_trace_err(as->J, LJ_TRERR_SPILLOV);
    ir->s = (uint8_t)slot;
  }
  return sps_scale(slot);
}
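/*
** Spill slot parity, restated: 64-bit values must start at an even
** (aligned) slot, so allocating a pair from evenspill may leave an odd
** slot behind. That leftover is remembered in oddspill and handed to the
** next 32-bit value before a fresh even pair is carved out.
*/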
/* Release the temporarily allocated register in ASMREF_TMP1/ASMREF_TMP2. */
static Reg ra_releasetmp(ASMState *as, IRRef ref)
{
  IRIns *ir = IR(ref);
  Reg r = ir->r;
  lua_assert(ra_hasreg(r) && !ra_hasspill(ir->s));
  ra_free(as, r);
  ra_modified(as, r);
  ir->r = RID_INIT;
  return r;
}
/* Restore a register (marked as free). Rematerialize or force a spill. */
static Reg ra_restore(ASMState *as, IRRef ref)
{
  if (emit_canremat(ref)) {
    return ra_rematk(as, ref);
  } else {
    IRIns *ir = IR(ref);
    int32_t ofs = ra_spill(as, ir);  /* Force a spill slot. */
    Reg r = ir->r;
    lua_assert(ra_hasreg(r));
    ra_sethint(ir->r, r);  /* Keep hint. */
    ra_free(as, r);
    if (!rset_test(as->weakset, r)) {  /* Only restore non-weak references. */
      ra_modified(as, r);
      RA_DBGX((as, "restore $i $r", ir, r));
      emit_spload(as, ir, r, ofs);
    }
    return r;
  }
}
/* Save a register to a spill slot. */
static void ra_save(ASMState *as, IRIns *ir, Reg r)
{
  RA_DBGX((as, "save $i $r", ir, r));
  emit_spstore(as, ir, r, sps_scale(ir->s));
}
#define MINCOST(name) \
  if (rset_test(RSET_ALL, RID_##name) && \
      LJ_LIKELY(allow&RID2RSET(RID_##name)) && as->cost[RID_##name] < cost) \
    cost = as->cost[RID_##name];
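/*
** MINCOST is expanded once per register name via the GPRDEF()/FPRDEF()
** X-macro lists inside ra_evict() below, unrolling the "find the cheapest
** allowed register" scan at compile time instead of looping over the
** register file at runtime.
*/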
/* Evict the register with the lowest cost, forcing a restore. */
static Reg ra_evict(ASMState *as, RegSet allow)
{
  IRRef ref;
  RegCost cost = ~(RegCost)0;
  lua_assert(allow != RSET_EMPTY);
  if (RID_NUM_FPR == 0 || allow < RID2RSET(RID_MAX_GPR)) {
    GPRDEF(MINCOST)
  } else {
    FPRDEF(MINCOST)
  }
  ref = regcost_ref(cost);
  lua_assert(ra_iskref(ref) || (ref >= as->T->nk && ref < as->T->nins));
  /* Preferably pick any weak ref instead of a non-weak, non-const ref. */
  if (!irref_isk(ref) && (as->weakset & allow)) {
    IRIns *ir = IR(ref);
    if (!rset_test(as->weakset, ir->r))
      ref = regcost_ref(as->cost[rset_pickbot((as->weakset & allow))]);
  }
  return ra_restore(as, ref);
}
/* Pick any register (marked as free). Evict on-demand. */
static Reg ra_pick(ASMState *as, RegSet allow)
{
  RegSet pick = as->freeset & allow;
  if (!pick)
    return ra_evict(as, allow);
  else
    return rset_picktop(pick);
}
/* Get a scratch register (marked as free). */
static Reg ra_scratch(ASMState *as, RegSet allow)
{
  Reg r = ra_pick(as, allow);
  ra_modified(as, r);
  RA_DBGX((as, "scratch $r", r));
  return r;
}
/* Evict all registers from a set (if not free). */
static void ra_evictset(ASMState *as, RegSet drop)
{
  RegSet work;
  as->modset |= drop;
#if !LJ_SOFTFP
  work = (drop & ~as->freeset) & RSET_FPR;
  while (work) {
    Reg r = rset_pickbot(work);
    ra_restore(as, regcost_ref(as->cost[r]));
    rset_clear(work, r);
    checkmclim(as);
  }
#endif
  work = (drop & ~as->freeset);
  while (work) {
    Reg r = rset_pickbot(work);
    ra_restore(as, regcost_ref(as->cost[r]));
    rset_clear(work, r);
    checkmclim(as);
  }
}
/* Evict (rematerialize) all registers allocated to constants. */
static void ra_evictk(ASMState *as)
{
  RegSet work;
#if !LJ_SOFTFP
  work = ~as->freeset & RSET_FPR;
  while (work) {
    Reg r = rset_pickbot(work);
    IRRef ref = regcost_ref(as->cost[r]);
    if (emit_canremat(ref) && irref_isk(ref)) {
      ra_rematk(as, ref);
      checkmclim(as);
    }
    rset_clear(work, r);
  }
#endif
  work = ~as->freeset & RSET_GPR;
  while (work) {
    Reg r = rset_pickbot(work);
    IRRef ref = regcost_ref(as->cost[r]);
    if (emit_canremat(ref) && irref_isk(ref)) {
      ra_rematk(as, ref);
      checkmclim(as);
    }
    rset_clear(work, r);
  }
}
#ifdef RID_NUM_KREF
/* Allocate a register for a constant. */
static Reg ra_allock(ASMState *as, int32_t k, RegSet allow)
{
  /* First try to find a register which already holds the same constant. */
  RegSet pick, work = ~as->freeset & RSET_GPR;
  Reg r;
  while (work) {
    IRRef ref;
    r = rset_pickbot(work);
    ref = regcost_ref(as->cost[r]);
    if (ref < ASMREF_L &&
	k == (ra_iskref(ref) ? ra_krefk(as, ref) : IR(ref)->i))
      return r;
    rset_clear(work, r);
  }
  pick = as->freeset & allow;
  if (pick) {
    /* Constants should preferably get unmodified registers. */
    if ((pick & ~as->modset))
      pick &= ~as->modset;
    r = rset_pickbot(pick);  /* Reduce conflicts with inverse allocation. */
  } else {
    r = ra_evict(as, allow);
  }
  RA_DBGX((as, "allock $x $r", k, r));
  ra_setkref(as, r, k);
  rset_clear(as->freeset, r);
  ra_noweak(as, r);
  return r;
}
/* Allocate a specific register for a constant. */
static void ra_allockreg(ASMState *as, int32_t k, Reg r)
{
  Reg kr = ra_allock(as, k, RID2RSET(r));
  if (kr != r) {
    IRIns irdummy;
    irdummy.t.irt = IRT_INT;
    ra_scratch(as, RID2RSET(r));
    emit_movrr(as, &irdummy, r, kr);
  }
}
#else
#define ra_allockreg(as, k, r)		emit_loadi(as, (r), (k))
#endif
/* Allocate a register for ref from the allowed set of registers.
** Note: this function assumes the ref does NOT have a register yet!
** Picks an optimal register, sets the cost and marks the register as non-free.
*/
static Reg ra_allocref(ASMState *as, IRRef ref, RegSet allow)
{
  IRIns *ir = IR(ref);
  RegSet pick = as->freeset & allow;
  Reg r;
  lua_assert(ra_noreg(ir->r));
  if (pick) {
    /* First check register hint from propagation or PHI. */
    if (ra_hashint(ir->r)) {
      r = ra_gethint(ir->r);
      if (rset_test(pick, r))  /* Use hint register if possible. */
	goto found;
      /* Rematerialization is cheaper than missing a hint. */
      if (rset_test(allow, r) && emit_canremat(regcost_ref(as->cost[r]))) {
	ra_rematk(as, regcost_ref(as->cost[r]));
	goto found;
      }
      RA_DBGX((as, "hintmiss $f $r", ref, r));
    }
    /* Invariants should preferably get unmodified registers. */
    if (ref < as->loopref && !irt_isphi(ir->t)) {
      if ((pick & ~as->modset))
	pick &= ~as->modset;
      r = rset_pickbot(pick);  /* Reduce conflicts with inverse allocation. */
    } else {
      /* We've got plenty of regs, so get callee-save regs if possible. */
      if (RID_NUM_GPR > 8 && (pick & ~RSET_SCRATCH))
	pick &= ~RSET_SCRATCH;
      r = rset_picktop(pick);
    }
  } else {
    r = ra_evict(as, allow);
  }
found:
  RA_DBGX((as, "alloc $f $r", ref, r));
  ir->r = (uint8_t)r;
  rset_clear(as->freeset, r);
  ra_noweak(as, r);
  as->cost[r] = REGCOST_REF_T(ref, irt_t(ir->t));
  return r;
}
/* Allocate a register on-demand. */
static Reg ra_alloc1(ASMState *as, IRRef ref, RegSet allow)
{
  Reg r = IR(ref)->r;
  /* Note: allow is ignored if the register is already allocated. */
  if (ra_noreg(r)) r = ra_allocref(as, ref, allow);
  ra_noweak(as, r);
  return r;
}
/* Rename register allocation and emit move. */
static void ra_rename(ASMState *as, Reg down, Reg up)
{
  IRRef ren, ref = regcost_ref(as->cost[up] = as->cost[down]);
  IRIns *ir = IR(ref);
  ir->r = (uint8_t)up;
  as->cost[down] = 0;
  lua_assert((down < RID_MAX_GPR) == (up < RID_MAX_GPR));
  lua_assert(!rset_test(as->freeset, down) && rset_test(as->freeset, up));
  ra_free(as, down);  /* 'down' is free ... */
  ra_modified(as, down);
  rset_clear(as->freeset, up);  /* ... and 'up' is now allocated. */
  ra_noweak(as, up);
  RA_DBGX((as, "rename $f $r $r", regcost_ref(as->cost[up]), down, up));
  emit_movrr(as, ir, down, up);  /* Backwards codegen needs inverse move. */
  if (!ra_hasspill(IR(ref)->s)) {  /* Add the rename to the IR. */
    lj_ir_set(as->J, IRT(IR_RENAME, IRT_NIL), ref, as->snapno);
    ren = tref_ref(lj_ir_emit(as->J));
    as->ir = as->T->ir;  /* The IR may have been reallocated. */
    IR(ren)->r = (uint8_t)down;
    IR(ren)->s = SPS_NONE;
  }
}
/* Pick a destination register (marked as free).
** Caveat: allow is ignored if there's already a destination register.
** Use ra_destreg() to get a specific register.
*/
static Reg ra_dest(ASMState *as, IRIns *ir, RegSet allow)
{
  Reg dest = ir->r;
  if (ra_hasreg(dest)) {
    ra_free(as, dest);
    ra_modified(as, dest);
  } else {
    if (ra_hashint(dest) && rset_test((as->freeset&allow), ra_gethint(dest))) {
      dest = ra_gethint(dest);
      ra_modified(as, dest);
      RA_DBGX((as, "dest $r", dest));
    } else {
      dest = ra_scratch(as, allow);
    }
    ir->r = dest;
  }
  if (LJ_UNLIKELY(ra_hasspill(ir->s))) ra_save(as, ir, dest);
  return dest;
}
/* Force a specific destination register (marked as free). */
static void ra_destreg(ASMState *as, IRIns *ir, Reg r)
{
  Reg dest = ra_dest(as, ir, RID2RSET(r));
  if (dest != r) {
    lua_assert(rset_test(as->freeset, r));
    ra_modified(as, r);
    emit_movrr(as, ir, dest, r);
  }
}
#if LJ_TARGET_X86ORX64
/* Propagate dest register to left reference. Emit moves as needed.
** This is a required fixup step for all 2-operand machine instructions.
*/
static void ra_left(ASMState *as, Reg dest, IRRef lref)
{
  IRIns *ir = IR(lref);
  Reg left = ir->r;
  if (ra_noreg(left)) {
    if (irref_isk(lref)) {
      if (ir->o == IR_KNUM) {
	cTValue *tv = ir_knum(ir);
	/* FP remat needs a load except for +0. Still better than eviction. */
	if (tvispzero(tv) || !(as->freeset & RSET_FPR)) {
	  emit_loadn(as, dest, tv);
	  return;
	}
#if LJ_64
      } else if (ir->o == IR_KINT64) {
	emit_loadu64(as, dest, ir_kint64(ir)->u64);
	return;
#endif
      } else {
	lua_assert(ir->o == IR_KINT || ir->o == IR_KGC ||
		   ir->o == IR_KPTR || ir->o == IR_KKPTR || ir->o == IR_KNULL);
	emit_loadi(as, dest, ir->i);
	return;
      }
    }
    if (!ra_hashint(left) && !iscrossref(as, lref))
      ra_sethint(ir->r, dest);  /* Propagate register hint. */
    left = ra_allocref(as, lref, dest < RID_MAX_GPR ? RSET_GPR : RSET_FPR);
  }
  ra_noweak(as, left);
  /* Move needed for true 3-operand instruction: y=a+b ==> y=a; y+=b. */
  if (dest != left) {
    /* Use register renaming if dest is the PHI reg. */
    if (irt_isphi(ir->t) && as->phireg[dest] == lref) {
      ra_modified(as, left);
      ra_rename(as, left, dest);
    } else {
      emit_movrr(as, ir, dest, left);
    }
  }
}
#else
/* Similar to ra_left, except we override any hints. */
static void ra_leftov(ASMState *as, Reg dest, IRRef lref)
{
  IRIns *ir = IR(lref);
  Reg left = ir->r;
  if (ra_noreg(left)) {
    ra_sethint(ir->r, dest);  /* Propagate register hint. */
    left = ra_allocref(as, lref,
		       (LJ_SOFTFP || dest < RID_MAX_GPR) ? RSET_GPR : RSET_FPR);
  }
  ra_noweak(as, left);
  if (dest != left) {
    /* Use register renaming if dest is the PHI reg. */
    if (irt_isphi(ir->t) && as->phireg[dest] == lref) {
      ra_modified(as, left);
      ra_rename(as, left, dest);
    } else {
      emit_movrr(as, ir, dest, left);
    }
  }
}
#endif
/* Force a RID_RETLO/RID_RETHI destination register pair (marked as free). */
static void ra_destpair(ASMState *as, IRIns *ir)
{
  Reg destlo = ir->r, desthi = (ir+1)->r;
  /* First spill unrelated refs blocking the destination registers. */
  if (!rset_test(as->freeset, RID_RETLO) &&
      destlo != RID_RETLO && desthi != RID_RETLO)
    ra_restore(as, regcost_ref(as->cost[RID_RETLO]));
  if (!rset_test(as->freeset, RID_RETHI) &&
      destlo != RID_RETHI && desthi != RID_RETHI)
    ra_restore(as, regcost_ref(as->cost[RID_RETHI]));
  /* Next free the destination registers (if any). */
  if (ra_hasreg(destlo)) {
    ra_free(as, destlo);
    ra_modified(as, destlo);
  } else {
    destlo = RID_RETLO;
  }
  if (ra_hasreg(desthi)) {
    ra_free(as, desthi);
    ra_modified(as, desthi);
  } else {
    desthi = RID_RETHI;
  }
  /* Check for conflicts and shuffle the registers as needed. */
  if (destlo == RID_RETHI) {
    if (desthi == RID_RETLO) {
#if LJ_TARGET_X86
      *--as->mcp = XI_XCHGa + RID_RETHI;
#else
      emit_movrr(as, ir, RID_RETHI, RID_TMP);
      emit_movrr(as, ir, RID_RETLO, RID_RETHI);
      emit_movrr(as, ir, RID_TMP, RID_RETLO);
#endif
    } else {
      emit_movrr(as, ir, RID_RETHI, RID_RETLO);
      if (desthi != RID_RETHI) emit_movrr(as, ir, desthi, RID_RETHI);
    }
  } else if (desthi == RID_RETLO) {
    emit_movrr(as, ir, RID_RETLO, RID_RETHI);
    if (destlo != RID_RETLO) emit_movrr(as, ir, destlo, RID_RETLO);
  } else {
    if (desthi != RID_RETHI) emit_movrr(as, ir, desthi, RID_RETHI);
    if (destlo != RID_RETLO) emit_movrr(as, ir, destlo, RID_RETLO);
  }
  /* Restore spill slots (if any). */
  if (ra_hasspill((ir+1)->s)) ra_save(as, ir+1, RID_RETHI);
  if (ra_hasspill(ir->s)) ra_save(as, ir, RID_RETLO);
}
/* -- Snapshot handling --------------------------------------------------- */

/* Can we rematerialize a KNUM instead of forcing a spill? */
static int asm_snap_canremat(ASMState *as)
{
  Reg r;
  for (r = RID_MIN_FPR; r < RID_MAX_FPR; r++)
    if (irref_isk(regcost_ref(as->cost[r])))
      return 1;
  return 0;
}
/* Check whether a sunk store corresponds to an allocation. */
static int asm_sunk_store(ASMState *as, IRIns *ira, IRIns *irs)
{
  if (irs->s == 255) {
    if (irs->o == IR_ASTORE || irs->o == IR_HSTORE ||
	irs->o == IR_FSTORE || irs->o == IR_XSTORE) {
      IRIns *irk = IR(irs->op1);
      if (irk->o == IR_AREF || irk->o == IR_HREFK)
	irk = IR(irk->op1);
      return (IR(irk->op1) == ira);
    }
    return 0;
  } else {
    return (ira + irs->s == irs);  /* Quick check. */
  }
}
/* Allocate register or spill slot for a ref that escapes to a snapshot. */
static void asm_snap_alloc1(ASMState *as, IRRef ref)
{
  IRIns *ir = IR(ref);
  if (!irref_isk(ref) && ir->r != RID_SUNK) {
    bloomset(as->snapfilt1, ref);
    bloomset(as->snapfilt2, hashrot(ref, ref + HASH_BIAS));
    if (ra_used(ir)) return;
    if (ir->r == RID_SINK) {
      ir->r = RID_SUNK;
#if LJ_HASFFI
      if (ir->o == IR_CNEWI) {  /* Allocate CNEWI value. */
	asm_snap_alloc1(as, ir->op2);
	if (LJ_32 && (ir+1)->o == IR_HIOP)
	  asm_snap_alloc1(as, (ir+1)->op2);
      } else
#endif
      {  /* Allocate stored values for TNEW, TDUP and CNEW. */
	IRIns *irs;
	lua_assert(ir->o == IR_TNEW || ir->o == IR_TDUP || ir->o == IR_CNEW);
	for (irs = IR(as->snapref-1); irs > ir; irs--)
	  if (irs->r == RID_SINK && asm_sunk_store(as, ir, irs)) {
	    lua_assert(irs->o == IR_ASTORE || irs->o == IR_HSTORE ||
		       irs->o == IR_FSTORE || irs->o == IR_XSTORE);
	    asm_snap_alloc1(as, irs->op2);
	    if (LJ_32 && (irs+1)->o == IR_HIOP)
	      asm_snap_alloc1(as, (irs+1)->op2);
	  }
      }
    } else {
      RegSet allow;
      if (ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT) {
	IRIns *irc;
	for (irc = IR(as->curins); irc > ir; irc--)
	  if ((irc->op1 == ref || irc->op2 == ref) &&
	      !(irc->r == RID_SINK || irc->r == RID_SUNK))
	    goto nosink;  /* Don't sink conversion if result is used. */
	asm_snap_alloc1(as, ir->op1);
	return;
      }
    nosink:
      allow = (!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR;
      if ((as->freeset & allow) ||
	  (allow == RSET_FPR && asm_snap_canremat(as))) {
	/* Get a weak register if we have a free one or can rematerialize. */
	Reg r = ra_allocref(as, ref, allow);  /* Allocate a register. */
	if (!irt_isphi(ir->t))
	  ra_weak(as, r);  /* But mark it as weakly referenced. */
	checkmclim(as);
	RA_DBGX((as, "snapreg $f $r", ref, ir->r));
      } else {
	ra_spill(as, ir);  /* Otherwise force a spill slot. */
	RA_DBGX((as, "snapspill $f $s", ref, ir->s));
      }
    }
  }
}
/* Allocate refs escaping to a snapshot. */
static void asm_snap_alloc(ASMState *as)
{
  SnapShot *snap = &as->T->snap[as->snapno];
  SnapEntry *map = &as->T->snapmap[snap->mapofs];
  MSize n, nent = snap->nent;
  as->snapfilt1 = as->snapfilt2 = 0;
  for (n = 0; n < nent; n++) {
    SnapEntry sn = map[n];
    IRRef ref = snap_ref(sn);
    if (!irref_isk(ref)) {
      asm_snap_alloc1(as, ref);
      if (LJ_SOFTFP && (sn & SNAP_SOFTFPNUM)) {
	lua_assert(irt_type(IR(ref+1)->t) == IRT_SOFTFP);
	asm_snap_alloc1(as, ref+1);
      }
    }
  }
}
/* All guards for a snapshot use the same exitno. This is currently the
** same as the snapshot number. Since the exact origin of the exit cannot
** be determined, all guards for the same snapshot must exit with the same
** RegSP mapping.
** A renamed ref which has been used in a prior guard for the same snapshot
** would cause an inconsistency. The easy way out is to force a spill slot.
*/
static int asm_snap_checkrename(ASMState *as, IRRef ren)
{
  if (bloomtest(as->snapfilt1, ren) &&
      bloomtest(as->snapfilt2, hashrot(ren, ren + HASH_BIAS))) {
    IRIns *ir = IR(ren);
    ra_spill(as, ir);  /* Register renamed, so force a spill slot. */
    RA_DBGX((as, "snaprensp $f $s", ren, ir->s));
    return 1;  /* Found. */
  }
  return 0;  /* Not found. */
}
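/*
** Informal example of the inconsistency avoided above: two guards G1 and
** G2 both exit through snapshot #n. If a ref lives in r1 at G1 but has
** been renamed to r2 by the time G2 is reached, one shared exit mapping
** cannot name both locations. Forcing the ref into a spill slot gives
** every guard for that snapshot the same (memory) location to restore.
*/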
/* Prepare snapshot for next guard instruction. */
static void asm_snap_prep(ASMState *as)
{
  if (as->curins < as->snapref) {
    do {
      if (as->snapno == 0) return;  /* Called by sunk stores before snap #0. */
      as->snapno--;
      as->snapref = as->T->snap[as->snapno].ref;
    } while (as->curins < as->snapref);
    asm_snap_alloc(as);
    as->snaprename = as->T->nins;
  } else {
    /* Process any renames above the highwater mark. */
    for (; as->snaprename < as->T->nins; as->snaprename++) {
      IRIns *ir = IR(as->snaprename);
      if (asm_snap_checkrename(as, ir->op1))
	ir->op2 = REF_BIAS-1;  /* Kill rename. */
    }
  }
}
/* -- Miscellaneous helpers ----------------------------------------------- */

/* Collect arguments from CALL* and CARG instructions. */
static void asm_collectargs(ASMState *as, IRIns *ir,
			    const CCallInfo *ci, IRRef *args)
{
  uint32_t n = CCI_NARGS(ci);
  lua_assert(n <= CCI_NARGS_MAX*2);  /* Account for split args. */
  if ((ci->flags & CCI_L)) { *args++ = ASMREF_L; n--; }
  while (n-- > 1) {
    ir = IR(ir->op1);
    lua_assert(ir->o == IR_CARG);
    args[n] = ir->op2 == REF_NIL ? 0 : ir->op2;
  }
  args[0] = ir->op1 == REF_NIL ? 0 : ir->op1;
  lua_assert(IR(ir->op1)->o != IR_CARG);
}
/* Reconstruct CCallInfo flags for CALLX*. */
static uint32_t asm_callx_flags(ASMState *as, IRIns *ir)
{
  uint32_t nargs = 0;
  if (ir->op1 != REF_NIL) {  /* Count number of arguments first. */
    IRIns *ira = IR(ir->op1);
    nargs++;
    while (ira->o == IR_CARG) { nargs++; ira = IR(ira->op1); }
  }
#if LJ_HASFFI
  if (IR(ir->op2)->o == IR_CARG) {  /* Copy calling convention info. */
    CTypeID id = (CTypeID)IR(IR(ir->op2)->op2)->i;
    CType *ct = ctype_get(ctype_ctsG(J2G(as->J)), id);
    nargs |= ((ct->info & CTF_VARARG) ? CCI_VARARG : 0);
#if LJ_TARGET_X86
    nargs |= (ctype_cconv(ct->info) << CCI_CC_SHIFT);
#endif
  }
#endif
  return (nargs | (ir->t.irt << CCI_OTSHIFT));
}
/* Calculate stack adjustment. */
static int32_t asm_stack_adjust(ASMState *as)
{
  if (as->evenspill <= SPS_FIXED)
    return 0;
  return sps_scale(sps_align(as->evenspill));
}
/* Must match with hash*() in lj_tab.c. */
static uint32_t ir_khash(IRIns *ir)
{
  uint32_t lo, hi;
  if (irt_isstr(ir->t)) {
    return ir_kstr(ir)->hash;
  } else if (irt_isnum(ir->t)) {
    lo = ir_knum(ir)->u32.lo;
    hi = ir_knum(ir)->u32.hi << 1;
  } else if (irt_ispri(ir->t)) {
    lua_assert(!irt_isnil(ir->t));
    return irt_type(ir->t)-IRT_FALSE;
  } else {
    lua_assert(irt_isgcv(ir->t));
    lo = u32ptr(ir_kgc(ir));
    hi = lo + HASH_BIAS;
  }
  return hashrot(lo, hi);
}
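/*
** Assumed rationale for the "must match" comment above: constant table
** keys let the assembler precompute the hash slot at compile time, but
** that only stays valid as long as this function mirrors the hash*()
** helpers used by the interpreter's table implementation in lj_tab.c.
*/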
/* -- Allocations --------------------------------------------------------- */

static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args);
static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci);
static void asm_snew(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_new];
  IRRef args[3];
  args[0] = ASMREF_L;  /* lua_State *L    */
  args[1] = ir->op1;   /* const char *str */
  args[2] = ir->op2;   /* size_t len      */
  as->gcsteps++;
  asm_setupresult(as, ir, ci);  /* GCstr * */
  asm_gencall(as, ci, args);
}
static void asm_tnew(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_new1];
  IRRef args[2];
  args[0] = ASMREF_L;     /* lua_State *L    */
  args[1] = ASMREF_TMP1;  /* uint32_t ahsize */
  as->gcsteps++;
  asm_setupresult(as, ir, ci);  /* GCtab * */
  asm_gencall(as, ci, args);
  ra_allockreg(as, ir->op1 | (ir->op2 << 24), ra_releasetmp(as, ASMREF_TMP1));
}
static void asm_tdup(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_dup];
  IRRef args[2];
  args[0] = ASMREF_L;  /* lua_State *L    */
  args[1] = ir->op1;   /* const GCtab *kt */
  as->gcsteps++;
  asm_setupresult(as, ir, ci);  /* GCtab * */
  asm_gencall(as, ci, args);
}
static void asm_gc_check(ASMState *as);

/* Explicit GC step. */
static void asm_gcstep(ASMState *as, IRIns *ir)
{
  IRIns *ira;
  for (ira = IR(as->stopins+1); ira < ir; ira++)
    if ((ira->o == IR_TNEW || ira->o == IR_TDUP ||
	 (LJ_HASFFI && (ira->o == IR_CNEW || ira->o == IR_CNEWI))) &&
	ra_used(ira))
      as->gcsteps++;
  if (as->gcsteps)
    asm_gc_check(as);
  as->gcsteps = 0x80000000;  /* Prevent implicit GC check further up. */
}
/* -- PHI and loop handling ----------------------------------------------- */

/* Break a PHI cycle by renaming to a free register (evict if needed). */
static void asm_phi_break(ASMState *as, RegSet blocked, RegSet blockedby,
			  RegSet allow)
{
  RegSet candidates = blocked & allow;
  if (candidates) {  /* If this register file has candidates. */
    /* Note: the set for ra_pick cannot be empty, since each register file
    ** has some registers never allocated to PHIs.
    */
    Reg down, up = ra_pick(as, ~blocked & allow);  /* Get a free register. */
    if (candidates & ~blockedby)  /* Optimize shifts, else it's a cycle. */
      candidates = candidates & ~blockedby;
    down = rset_picktop(candidates);  /* Pick candidate PHI register. */
    ra_rename(as, down, up);  /* And rename it to the free register. */
  }
}
/* PHI register shuffling.
**
** The allocator tries hard to preserve PHI register assignments across
** the loop body. Most of the time this loop does nothing, since there
** are no register mismatches.
**
** If a register mismatch is detected and ...
** - the register is currently free: rename it.
** - the register is blocked by an invariant: restore/remat and rename it.
** - Otherwise the register is used by another PHI, so mark it as blocked.
**
** The renames are order-sensitive, so just retry the loop if a register
** is marked as blocked, but has been freed in the meantime. A cycle is
** detected if all of the blocked registers are allocated. To break the
** cycle rename one of them to a free register and retry.
**
** Note that PHI spill slots are kept in sync and don't need to be shuffled.
*/
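/*
** Illustrative cycle (not from the original comments): suppose PHI x wants
** r1 but currently sits in r2, while PHI y wants r2 but sits in r1. Both
** registers end up blocked and neither can be renamed directly.
** asm_phi_break() renames one of them to some free register r3, which
** unblocks the other; the retry loop then completes the remaining renames.
*/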
static void asm_phi_shuffle(ASMState *as)
{
  RegSet work;

  /* Find and resolve PHI register mismatches. */
  for (;;) {
    RegSet blocked = RSET_EMPTY;
    RegSet blockedby = RSET_EMPTY;
    RegSet phiset = as->phiset;
    while (phiset) {  /* Check all left PHI operand registers. */
      Reg r = rset_pickbot(phiset);
      IRIns *irl = IR(as->phireg[r]);
      Reg left = irl->r;
      if (r != left) {  /* Mismatch? */
	if (!rset_test(as->freeset, r)) {  /* PHI register blocked? */
	  IRRef ref = regcost_ref(as->cost[r]);
	  /* Blocked by other PHI (w/reg)? */
	  if (!ra_iskref(ref) && irt_ismarked(IR(ref)->t)) {
	    rset_set(blocked, r);
	    if (ra_hasreg(left))
	      rset_set(blockedby, left);
	    left = RID_NONE;
	  } else {  /* Otherwise grab register from invariant. */
	    ra_restore(as, ref);
	    checkmclim(as);
	  }
	}
	if (ra_hasreg(left)) {
	  ra_rename(as, left, r);
	  checkmclim(as);
	}
      }
      rset_clear(phiset, r);
    }
    if (!blocked) break;  /* Finished. */
    if (!(as->freeset & blocked)) {  /* Break cycles if none are free. */
      asm_phi_break(as, blocked, blockedby, RSET_GPR);
      if (!LJ_SOFTFP) asm_phi_break(as, blocked, blockedby, RSET_FPR);
      checkmclim(as);
    }  /* Else retry some more renames. */
  }

  /* Restore/remat invariants whose registers are modified inside the loop. */
#if !LJ_SOFTFP
  work = as->modset & ~(as->freeset | as->phiset) & RSET_FPR;
  while (work) {
    Reg r = rset_pickbot(work);
    ra_restore(as, regcost_ref(as->cost[r]));
    rset_clear(work, r);
    checkmclim(as);
  }
#endif
  work = as->modset & ~(as->freeset | as->phiset);
  while (work) {
    Reg r = rset_pickbot(work);
    ra_restore(as, regcost_ref(as->cost[r]));
    rset_clear(work, r);
    checkmclim(as);
  }

  /* Allocate and save all unsaved PHI regs and clear marks. */
  work = as->phiset;
  while (work) {
    Reg r = rset_picktop(work);
    IRRef lref = as->phireg[r];
    IRIns *ir = IR(lref);
    if (ra_hasspill(ir->s)) {  /* Left PHI gained a spill slot? */
      irt_clearmark(ir->t);  /* Handled here, so clear marker now. */
      ra_alloc1(as, lref, RID2RSET(r));
      ra_save(as, ir, r);  /* Save to spill slot inside the loop. */
      checkmclim(as);
    }
    rset_clear(work, r);
  }
}
/* Copy unsynced left/right PHI spill slots. Rarely needed. */
static void asm_phi_copyspill(ASMState *as)
{
  int need = 0;
  IRIns *ir;
  for (ir = IR(as->orignins-1); ir->o == IR_PHI; ir--)
    if (ra_hasspill(ir->s) && ra_hasspill(IR(ir->op1)->s))
      need |= irt_isfp(ir->t) ? 2 : 1;  /* Unsynced spill slot? */
  if ((need & 1)) {  /* Copy integer spill slots. */
#if !LJ_TARGET_X86ORX64
    Reg r = RID_TMP;
#else
    Reg r = RID_RET;
    if ((as->freeset & RSET_GPR))
      r = rset_pickbot((as->freeset & RSET_GPR));
    else
      emit_spload(as, IR(regcost_ref(as->cost[r])), r, SPOFS_TMP);
#endif
    for (ir = IR(as->orignins-1); ir->o == IR_PHI; ir--) {
      if (ra_hasspill(ir->s)) {
	IRIns *irl = IR(ir->op1);
	if (ra_hasspill(irl->s) && !irt_isfp(ir->t)) {
	  emit_spstore(as, irl, r, sps_scale(irl->s));
	  emit_spload(as, ir, r, sps_scale(ir->s));
	}
      }
    }
#if LJ_TARGET_X86ORX64
    if (!rset_test(as->freeset, r))
      emit_spstore(as, IR(regcost_ref(as->cost[r])), r, SPOFS_TMP);
#endif
  }
#if !LJ_SOFTFP
  if ((need & 2)) {  /* Copy FP spill slots. */
#if LJ_TARGET_X86
    Reg r = RID_XMM0;
#else
    Reg r = RID_FPRET;
#endif
    if ((as->freeset & RSET_FPR))
      r = rset_pickbot((as->freeset & RSET_FPR));
    if (!rset_test(as->freeset, r))
      emit_spload(as, IR(regcost_ref(as->cost[r])), r, SPOFS_TMP);
    for (ir = IR(as->orignins-1); ir->o == IR_PHI; ir--) {
      if (ra_hasspill(ir->s)) {
	IRIns *irl = IR(ir->op1);
	if (ra_hasspill(irl->s) && irt_isfp(ir->t)) {
	  emit_spstore(as, irl, r, sps_scale(irl->s));
	  emit_spload(as, ir, r, sps_scale(ir->s));
	}
      }
    }
    if (!rset_test(as->freeset, r))
      emit_spstore(as, IR(regcost_ref(as->cost[r])), r, SPOFS_TMP);
  }
#endif
}
/* Emit renames for left PHIs which are only spilled outside the loop. */
static void asm_phi_fixup(ASMState *as)
{
  RegSet work = as->phiset;
  while (work) {
    Reg r = rset_picktop(work);
    IRRef lref = as->phireg[r];
    IRIns *ir = IR(lref);
    if (irt_ismarked(ir->t)) {
      irt_clearmark(ir->t);
      /* Left PHI gained a spill slot before the loop? */
      if (ra_hasspill(ir->s)) {
	IRRef ren;
	lj_ir_set(as->J, IRT(IR_RENAME, IRT_NIL), lref, as->loopsnapno);
	ren = tref_ref(lj_ir_emit(as->J));
	as->ir = as->T->ir;  /* The IR may have been reallocated. */
	IR(ren)->r = (uint8_t)r;
	IR(ren)->s = SPS_NONE;
      }
    }
    rset_clear(work, r);
  }
}
/* Setup right PHI reference. */
static void asm_phi(ASMState *as, IRIns *ir)
{
  RegSet allow = ((!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR) &
		 ~as->phiset;
  RegSet afree = (as->freeset & allow);
  IRIns *irl = IR(ir->op1);
  IRIns *irr = IR(ir->op2);
  if (ir->r == RID_SINK)  /* Sink PHI. */
    return;
  /* Spill slot shuffling is not implemented yet (but rarely needed). */
  if (ra_hasspill(irl->s) || ra_hasspill(irr->s))
    lj_trace_err(as->J, LJ_TRERR_NYIPHI);
  /* Leave at least one register free for non-PHIs (and PHI cycle breaking). */
  if ((afree & (afree-1))) {  /* Two or more free registers? */
    Reg r;
    if (ra_noreg(irr->r)) {  /* Get a register for the right PHI. */
      r = ra_allocref(as, ir->op2, allow);
    } else {  /* Duplicate right PHI, need a copy (rare). */
      r = ra_scratch(as, allow);
      emit_movrr(as, irr, r, irr->r);
    }
    ir->r = (uint8_t)r;
    rset_set(as->phiset, r);
    as->phireg[r] = (IRRef1)ir->op1;
    irt_setmark(irl->t);  /* Marks left PHIs _with_ register. */
    if (ra_noreg(irl->r))
      ra_sethint(irl->r, r);  /* Set register hint for left PHI. */
  } else {  /* Otherwise allocate a spill slot. */
    /* This is overly restrictive, but it triggers only on synthetic code. */
    if (ra_hasreg(irl->r) || ra_hasreg(irr->r))
      lj_trace_err(as->J, LJ_TRERR_NYIPHI);
    ra_spill(as, ir);
    irr->s = ir->s;  /* Set right PHI spill slot. Sync left slot later. */
  }
}
static void asm_loop_fixup(ASMState *as);

/* Middle part of a loop. */
static void asm_loop(ASMState *as)
{
  MCode *mcspill;
  /* LOOP is a guard, so the snapno is up to date. */
  as->loopsnapno = as->snapno;
  if (as->gcsteps)
    asm_gc_check(as);
  /* LOOP marks the transition from the variant to the invariant part. */
  as->flagmcp = as->invmcp = NULL;
  as->sectref = 0;
  if (!neverfuse(as)) as->fuseref = 0;
  asm_phi_shuffle(as);
  mcspill = as->mcp;
  asm_phi_copyspill(as);
  asm_loop_fixup(as);
  as->mcloop = as->mcp;
  RA_DBGX((as, "===== LOOP ====="));
  if (!as->realign) RA_DBG_FLUSH();
  if (as->mcp != mcspill)
    emit_jmp(as, mcspill);
}
/* -- Target-specific assembler ------------------------------------------- */

#if LJ_TARGET_X86ORX64
#include "lj_asm_x86.h"
#elif LJ_TARGET_ARM
#include "lj_asm_arm.h"
#elif LJ_TARGET_PPC
#include "lj_asm_ppc.h"
#elif LJ_TARGET_MIPS
#include "lj_asm_mips.h"
#else
#error "Missing assembler for target CPU"
#endif
/* -- Head of trace ------------------------------------------------------- */

/* Head of a root trace. */
static void asm_head_root(ASMState *as)
{
  int32_t spadj;
  asm_head_root_base(as);
  emit_setvmstate(as, (int32_t)as->T->traceno);
  spadj = asm_stack_adjust(as);
  as->T->spadjust = (uint16_t)spadj;
  emit_spsub(as, spadj);
  /* Root traces assume a checked stack for the starting proto. */
  as->T->topslot = gcref(as->T->startpt)->pt.framesize;
}
/* Head of a side trace.
**
** The current simplistic algorithm requires that all slots inherited
** from the parent are live in a register between pass 2 and pass 3. This
** avoids the complexity of stack slot shuffling. But of course this may
** overflow the register set in some cases and cause the dreaded error:
** "NYI: register coalescing too complex". A refined algorithm is needed.
*/
static void asm_head_side(ASMState *as)
{
  IRRef1 sloadins[RID_MAX];
  RegSet allow = RSET_ALL;   /* Inverse of all coalesced registers. */
  RegSet live = RSET_EMPTY;  /* Live parent registers. */
  RegSet pallow = RSET_GPR;  /* Registers needed by the parent stack check. */
  Reg pbase;
  IRIns *irp = &as->parent->ir[REF_BASE];  /* Parent base. */
  int32_t spadj, spdelta;
  int pass2 = 0;
  int pass3 = 0;
  IRRef i;

  if (as->snapno && as->topslot > as->parent->topslot) {
    /* Force snap #0 alloc to prevent register overwrite in stack check. */
    as->snapno = 0;
    asm_snap_alloc(as);
  }
  pbase = asm_head_side_base(as, irp);
  if (pbase != RID_NONE) {
    rset_clear(allow, pbase);
    rset_clear(pallow, pbase);
  }

  /* Scan all parent SLOADs and collect register dependencies. */
  for (i = as->stopins; i > REF_BASE; i--) {
    IRIns *ir = IR(i);
    RegSP rs;
    lua_assert((ir->o == IR_SLOAD && (ir->op2 & IRSLOAD_PARENT)) ||
	       (LJ_SOFTFP && ir->o == IR_HIOP) || ir->o == IR_PVAL);
    rs = as->parentmap[i - REF_FIRST];
    if (ra_hasreg(ir->r)) {
      rset_clear(allow, ir->r);
      if (ra_hasspill(ir->s)) {
	ra_save(as, ir, ir->r);
	checkmclim(as);
      }
    } else if (ra_hasspill(ir->s)) {
      irt_setmark(ir->t);
      pass2 = 1;
    }
    if (ir->r == rs) {  /* Coalesce matching registers right now. */
      ra_free(as, ir->r);
    } else if (ra_hasspill(regsp_spill(rs))) {
      if (ra_hasreg(ir->r))
	pass3 = 1;
    } else if (ra_used(ir)) {
      sloadins[rs] = (IRRef1)i;
      rset_set(live, rs);  /* Block live parent register. */
    }
    if (!ra_hasspill(regsp_spill(rs))) rset_clear(pallow, regsp_reg(rs));
  }

  /* Calculate stack frame adjustment. */
  spadj = asm_stack_adjust(as);
  spdelta = spadj - (int32_t)as->parent->spadjust;
  if (spdelta < 0) {  /* Don't shrink the stack frame. */
    spadj = (int32_t)as->parent->spadjust;
    spdelta = 0;
  }
  as->T->spadjust = (uint16_t)spadj;

  /* Reload spilled target registers. */
  if (pass2) {
    for (i = as->stopins; i > REF_BASE; i--) {
      IRIns *ir = IR(i);
      if (irt_ismarked(ir->t)) {
	RegSet mask;
	Reg r;
	RegSP rs;
	irt_clearmark(ir->t);
	rs = as->parentmap[i - REF_FIRST];
	if (!ra_hasspill(regsp_spill(rs)))
	  ra_sethint(ir->r, rs);  /* Hint may be gone, set it again. */
	else if (sps_scale(regsp_spill(rs))+spdelta == sps_scale(ir->s))
	  continue;  /* Same spill slot, do nothing. */
	mask = ((!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR) & allow;
	if (mask == RSET_EMPTY)
	  lj_trace_err(as->J, LJ_TRERR_NYICOAL);
	r = ra_allocref(as, i, mask);
	ra_save(as, ir, r);
	rset_clear(allow, r);
	if (r == rs) {  /* Coalesce matching registers right now. */
	  ra_free(as, r);
	  rset_clear(live, r);
	} else if (ra_hasspill(regsp_spill(rs))) {
	  pass3 = 1;
	}
	checkmclim(as);
      }
    }
  }

  /* Store trace number and adjust stack frame relative to the parent. */
  emit_setvmstate(as, (int32_t)as->T->traceno);
  emit_spsub(as, spdelta);

#if !LJ_TARGET_X86ORX64
  /* Restore BASE register from parent spill slot. */
  if (ra_hasspill(irp->s))
    emit_spload(as, IR(REF_BASE), IR(REF_BASE)->r, sps_scale(irp->s));
#endif

  /* Restore target registers from parent spill slots. */
  if (pass3) {
    RegSet work = ~as->freeset & RSET_ALL;
    while (work) {
      Reg r = rset_pickbot(work);
      IRRef ref = regcost_ref(as->cost[r]);
      RegSP rs = as->parentmap[ref - REF_FIRST];
      rset_clear(work, r);
      if (ra_hasspill(regsp_spill(rs))) {
	int32_t ofs = sps_scale(regsp_spill(rs));
	ra_free(as, r);
	emit_spload(as, IR(ref), r, ofs);
	checkmclim(as);
      }
    }
  }

  /* Shuffle registers to match up target regs with parent regs. */
  for (;;) {
    RegSet work;

    /* Repeatedly coalesce free live registers by moving to their target. */
    while ((work = as->freeset & live) != RSET_EMPTY) {
      Reg rp = rset_pickbot(work);
      IRIns *ir = IR(sloadins[rp]);
      rset_clear(live, rp);
      rset_clear(allow, rp);
      ra_free(as, ir->r);
      emit_movrr(as, ir, ir->r, rp);
      checkmclim(as);
    }

    /* We're done if no live registers remain. */
    if (live == RSET_EMPTY)
      break;

    /* Break cycles by renaming one target to a temp. register. */
    if (live & RSET_GPR) {
      RegSet tmpset = as->freeset & ~live & allow & RSET_GPR;
      if (tmpset == RSET_EMPTY)
	lj_trace_err(as->J, LJ_TRERR_NYICOAL);
      ra_rename(as, rset_pickbot(live & RSET_GPR), rset_pickbot(tmpset));
    }
    if (!LJ_SOFTFP && (live & RSET_FPR)) {
      RegSet tmpset = as->freeset & ~live & allow & RSET_FPR;
      if (tmpset == RSET_EMPTY)
	lj_trace_err(as->J, LJ_TRERR_NYICOAL);
      ra_rename(as, rset_pickbot(live & RSET_FPR), rset_pickbot(tmpset));
    }
    checkmclim(as);
    /* Continue with coalescing to fix up the broken cycle(s). */
  }

  /* Inherit top stack slot already checked by parent trace. */
  as->T->topslot = as->parent->topslot;
  if (as->topslot > as->T->topslot) {  /* Need to check for higher slot? */
#ifdef EXITSTATE_CHECKEXIT
    /* Highest exit + 1 indicates stack check. */
    ExitNo exitno = as->T->nsnap;
#else
    /* Reuse the parent exit in the context of the parent trace. */
    ExitNo exitno = as->J->exitno;
#endif
    as->T->topslot = (uint8_t)as->topslot;  /* Remember for child traces. */
    asm_stack_check(as, as->topslot, irp, pallow, exitno);
  }
}
/* -- Tail of trace ------------------------------------------------------- */

/* Get base slot for a snapshot. */
static BCReg asm_baseslot(ASMState *as, SnapShot *snap, int *gotframe)
{
  SnapEntry *map = &as->T->snapmap[snap->mapofs];
  MSize n;
  for (n = snap->nent; n > 0; n--) {
    SnapEntry sn = map[n-1];
    if ((sn & SNAP_FRAME)) {
      *gotframe = 1;
      return snap_slot(sn);
    }
  }
  return 0;
}
1552 static void asm_tail_link(ASMState
*as
)
1554 SnapNo snapno
= as
->T
->nsnap
-1; /* Last snapshot. */
1555 SnapShot
*snap
= &as
->T
->snap
[snapno
];
1557 BCReg baseslot
= asm_baseslot(as
, snap
, &gotframe
);
1559 as
->topslot
= snap
->topslot
;
1561 ra_allocref(as
, REF_BASE
, RID2RSET(RID_BASE
));
1563 if (as
->T
->link
== 0) {
1564 /* Setup fixed registers for exit to interpreter. */
1565 const BCIns
*pc
= snap_pc(as
->T
->snapmap
[snap
->mapofs
+ snap
->nent
]);
1567 if (bc_op(*pc
) == BC_JLOOP
) { /* NYI: find a better way to do this. */
1568 BCIns
*retpc
= &traceref(as
->J
, bc_d(*pc
))->startins
;
1569 if (bc_isret(bc_op(*retpc
)))
1572 ra_allockreg(as
, i32ptr(J2GG(as
->J
)->dispatch
), RID_DISPATCH
);
1573 ra_allockreg(as
, i32ptr(pc
), RID_LPC
);
1574 mres
= (int32_t)(snap
->nslots
- baseslot
);
1575 switch (bc_op(*pc
)) {
1576 case BC_CALLM
: case BC_CALLMT
:
1577 mres
-= (int32_t)(1 + bc_a(*pc
) + bc_c(*pc
)); break;
1578 case BC_RETM
: mres
-= (int32_t)(bc_a(*pc
) + bc_d(*pc
)); break;
1579 case BC_TSETM
: mres
-= (int32_t)bc_a(*pc
); break;
1580 default: if (bc_op(*pc
) < BC_FUNCF
) mres
= 0; break;
1582 ra_allockreg(as
, mres
, RID_RET
); /* Return MULTRES or 0. */
1583 } else if (baseslot
) {
1584 /* Save modified BASE for linking to trace with higher start frame. */
1585 emit_setgl(as
, RID_BASE
, jit_base
);
1587 emit_addptr(as
, RID_BASE
, 8*(int32_t)baseslot
);
1589 /* Sync the interpreter state with the on-trace state. */
1590 asm_stack_restore(as
, snap
);
1592 /* Root traces that add frames need to check the stack at the end. */
1593 if (!as
->parent
&& gotframe
)
1594 asm_stack_check(as
, as
->topslot
, NULL
, as
->freeset
& RSET_GPR
, snapno
);
/* -- Trace setup --------------------------------------------------------- */

/* Clear reg/sp for all instructions and add register hints. */
static void asm_setup_regsp(ASMState *as)
{
  GCtrace *T = as->T;
  int sink = T->sinktags;
  IRRef nins = T->nins;
  IRIns *ir, *lastir;
  int inloop;
#if LJ_TARGET_ARM
  uint32_t rload = 0xa6402a64;
#endif

  ra_setup(as);

  /* Clear reg/sp for constants. */
  for (ir = IR(T->nk), lastir = IR(REF_BASE); ir < lastir; ir++)
    ir->prev = REGSP_INIT;

  /* REF_BASE is used for implicit references to the BASE register. */
  lastir->prev = REGSP_HINT(RID_BASE);

  ir = IR(nins-1);
  if (ir->o == IR_RENAME) {
    do { ir--; nins--; } while (ir->o == IR_RENAME);
    T->nins = nins;  /* Remove any renames left over from ASM restart. */
  }
  as->snaprename = nins;
  as->snapref = nins;
  as->snapno = T->nsnap;

  as->stopins = REF_BASE;
  as->orignins = nins;
  as->curins = nins;

  /* Setup register hints for parent link instructions. */
  ir = IR(REF_FIRST);
  if (as->parent) {
    uint16_t *p;
    lastir = lj_snap_regspmap(as->parent, as->J->exitno, ir);
    if (lastir - ir > LJ_MAX_JSLOTS)
      lj_trace_err(as->J, LJ_TRERR_NYICOAL);
    as->stopins = (IRRef)((lastir-1) - as->ir);
    for (p = as->parentmap; ir < lastir; ir++) {
      RegSP rs = ir->prev;
      *p++ = (uint16_t)rs;  /* Copy original parent RegSP to parentmap. */
      if (!ra_hasspill(regsp_spill(rs)))
	ir->prev = (uint16_t)REGSP_HINT(regsp_reg(rs));
      else
	ir->prev = REGSP_INIT;
    }
  }

  inloop = 0;
  as->evenspill = SPS_FIRST;
  for (lastir = IR(nins); ir < lastir; ir++) {
    if (sink) {
      if (ir->r == RID_SINK)
	continue;
      if (ir->r == RID_SUNK) {  /* Revert after ASM restart. */
	ir->r = RID_SINK;
	continue;
      }
    }
    switch (ir->o) {
    case IR_LOOP:
      inloop = 1;
      break;
#if LJ_TARGET_ARM
    case IR_SLOAD:
      if (!((ir->op2 & IRSLOAD_TYPECHECK) || (ir+1)->o == IR_HIOP))
	break;
      /* fallthrough */
    case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
      if (!LJ_SOFTFP && irt_isnum(ir->t)) break;
      ir->prev = (uint16_t)REGSP_HINT((rload & 15));
      rload = lj_ror(rload, 4);
      continue;
#endif
    case IR_CALLXS: {
      CCallInfo ci;
      ci.flags = asm_callx_flags(as, ir);
      ir->prev = asm_setup_call_slots(as, ir, &ci);
      if (inloop)
	as->modset |= RSET_SCRATCH;
      continue;
      }
    case IR_CALLN: case IR_CALLL: case IR_CALLS: {
      const CCallInfo *ci = &lj_ir_callinfo[ir->op2];
      ir->prev = asm_setup_call_slots(as, ir, ci);
      if (inloop)
	as->modset |= (ci->flags & CCI_NOFPRCLOBBER) ?
		      (RSET_SCRATCH & ~RSET_FPR) : RSET_SCRATCH;
      continue;
      }
#if LJ_SOFTFP || (LJ_32 && LJ_HASFFI)
    case IR_HIOP:
      switch ((ir-1)->o) {
#if LJ_SOFTFP && LJ_TARGET_ARM
      case IR_SLOAD: case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
	if (ra_hashint((ir-1)->r)) {
	  ir->prev = (ir-1)->prev + 1;
	  continue;
	}
	break;
#endif
#if !LJ_SOFTFP && LJ_NEED_FP64
      case IR_CONV:
	if (irt_isfp((ir-1)->t)) {
	  ir->prev = REGSP_HINT(RID_FPRET);
	  continue;
	}
	/* fallthrough */
#endif
      case IR_CALLN: case IR_CALLXS:
#if LJ_SOFTFP
      case IR_MIN: case IR_MAX:
#endif
	(ir-1)->prev = REGSP_HINT(RID_RETLO);
	ir->prev = REGSP_HINT(RID_RETHI);
	continue;
      default:
	break;
      }
      break;
#endif
#if LJ_SOFTFP
    case IR_MIN: case IR_MAX:
      if ((ir+1)->o != IR_HIOP) break;
      /* fallthrough */
#endif
    /* C calls evict all scratch regs and return results in RID_RET. */
    case IR_SNEW: case IR_XSNEW: case IR_NEWREF:
      if (REGARG_NUMGPR < 3 && as->evenspill < 3)
	as->evenspill = 3;  /* lj_str_new and lj_tab_newkey need 3 args. */
      /* fallthrough */
    case IR_TNEW: case IR_TDUP: case IR_CNEW: case IR_CNEWI: case IR_TOSTR:
      ir->prev = REGSP_HINT(RID_RET);
      if (inloop)
	as->modset = RSET_SCRATCH;
      continue;
    case IR_STRTO: case IR_OBAR:
      if (inloop)
	as->modset = RSET_SCRATCH;
      break;
#if !LJ_TARGET_X86ORX64 && !LJ_SOFTFP
    case IR_ATAN2: case IR_LDEXP:
#endif
    case IR_POW:
      if (!LJ_SOFTFP && irt_isnum(ir->t)) {
#if LJ_TARGET_X86ORX64
	ir->prev = REGSP_HINT(RID_XMM0);
	if (inloop)
	  as->modset |= RSET_RANGE(RID_XMM0, RID_XMM1+1)|RID2RSET(RID_EAX);
#else
	ir->prev = REGSP_HINT(RID_FPRET);
	if (inloop)
	  as->modset |= RSET_SCRATCH;
#endif
	continue;
      }
      /* fallthrough */ /* for integer POW */
    case IR_DIV: case IR_MOD:
      if (!irt_isnum(ir->t)) {
	ir->prev = REGSP_HINT(RID_RET);
	if (inloop)
	  as->modset |= (RSET_SCRATCH & RSET_GPR);
	continue;
      }
      break;
    case IR_FPMATH:
#if LJ_TARGET_X86ORX64
      if (ir->op2 == IRFPM_EXP2) {  /* May be joined to lj_vm_pow_sse. */
	ir->prev = REGSP_HINT(RID_XMM0);
	if (as->evenspill < 4)  /* Leave room for 16 byte scratch area. */
	  as->evenspill = 4;
	if (inloop)
	  as->modset |= RSET_RANGE(RID_XMM0, RID_XMM2+1)|RID2RSET(RID_EAX);
	continue;
      } else if (ir->op2 <= IRFPM_TRUNC && !(as->flags & JIT_F_SSE4_1)) {
	ir->prev = REGSP_HINT(RID_XMM0);
	if (inloop)
	  as->modset |= RSET_RANGE(RID_XMM0, RID_XMM3+1)|RID2RSET(RID_EAX);
	continue;
      }
      break;
#else
      ir->prev = REGSP_HINT(RID_FPRET);
      if (inloop)
	as->modset |= RSET_SCRATCH;
      continue;
#endif
#if LJ_TARGET_X86ORX64
    /* Non-constant shift counts need to be in RID_ECX on x86/x64. */
    case IR_BSHL: case IR_BSHR: case IR_BSAR: case IR_BROL: case IR_BROR:
      if (!irref_isk(ir->op2) && !ra_hashint(IR(ir->op2)->r)) {
	IR(ir->op2)->r = REGSP_HINT(RID_ECX);
	if (inloop)
	  rset_set(as->modset, RID_ECX);
      }
      break;
#endif
    /* Do not propagate hints across type conversions or loads. */
    case IR_TOBIT:
    case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
      break;
    case IR_CONV:
      if (irt_isfp(ir->t) || (ir->op2 & IRCONV_SRCMASK) == IRT_NUM ||
	  (ir->op2 & IRCONV_SRCMASK) == IRT_FLOAT)
	break;
      /* fallthrough */
    default:
      /* Propagate hints across likely 'op reg, imm' or 'op reg'. */
      if (irref_isk(ir->op2) && !irref_isk(ir->op1) &&
	  ra_hashint(regsp_reg(IR(ir->op1)->prev))) {
	ir->prev = IR(ir->op1)->prev;
	continue;
      }
      break;
    }
    ir->prev = REGSP_INIT;
  }
  if ((as->evenspill & 1))
    as->oddspill = as->evenspill++;
  else
    as->oddspill = 0;
}
/* -- Assembler core ------------------------------------------------------ */

/* Assemble a trace. */
void lj_asm_trace(jit_State *J, GCtrace *T)
{
  ASMState as_;
  ASMState *as = &as_;
  MCode *origtop;

  /* Ensure an initialized instruction beyond the last one for HIOP checks. */
  J->cur.nins = lj_ir_nextins(J);
  lj_ir_nop(&J->cur.ir[J->cur.nins]);

  /* Setup initial state. Copy some fields to reduce indirections. */
  as->J = J;
  as->T = T;
  as->ir = T->ir;
  as->flags = J->flags;
  as->loopref = J->loopref;
  as->realign = NULL;
  as->loopinv = 0;
  as->parent = J->parent ? traceref(J, J->parent) : NULL;

  /* Reserve MCode memory. */
  as->mctop = origtop = lj_mcode_reserve(J, &as->mcbot);
  as->mcp = as->mctop;
  as->mclim = as->mcbot + MCLIM_REDZONE;
  asm_setup_target(as);

  do {
    as->mcp = as->mctop;
#ifdef LUA_USE_ASSERT
    as->mcp_prev = as->mcp;
#endif
    as->curins = T->nins;
    RA_DBG_START();
    RA_DBGX((as, "===== STOP ====="));

    /* General trace setup. Emit tail of trace. */
    asm_tail_prep(as);
    as->mcloop = NULL;
    as->flagmcp = NULL;
    as->topslot = 0;
    as->gcsteps = 0;
    as->sectref = as->loopref;
    as->fuseref = (as->flags & JIT_F_OPT_FUSE) ? as->loopref : FUSE_DISABLED;
    asm_setup_regsp(as);
    if (!as->loopref)
      asm_tail_link(as);

    /* Assemble a trace in linear backwards order. */
    for (as->curins--; as->curins > as->stopins; as->curins--) {
      IRIns *ir = IR(as->curins);
      lua_assert(!(LJ_32 && irt_isint64(ir->t)));  /* Handled by SPLIT. */
      if (!ra_used(ir) && !ir_sideeff(ir) && (as->flags & JIT_F_OPT_DCE))
	continue;  /* Dead-code elimination can be soooo easy. */
      if (irt_isguard(ir->t))
	asm_snap_prep(as);
      RA_DBG_REF();
      checkmclim(as);
      asm_ir(as, ir);
    }
  } while (as->realign);  /* Retry in case the MCode needs to be realigned. */
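  /*
  ** Reminder on ordering: since as->mcp grows downwards, the loop above
  ** visits IR instructions from last to first and emits each one's machine
  ** code in front of the code emitted so far. The trace head below is thus
  ** assembled last, even though it executes first at runtime.
  */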
  /* Emit head of trace. */
  RA_DBG_REF();
  checkmclim(as);
  if (as->gcsteps > 0) {
    as->curins = as->T->snap[0].ref;
    asm_snap_prep(as);  /* The GC check is a guard. */
    asm_gc_check(as);
  }
  ra_evictk(as);
  if (as->parent)
    asm_head_side(as);
  else
    asm_head_root(as);
  asm_phi_fixup(as);

  RA_DBGX((as, "===== START ===="));
  RA_DBG_FLUSH();
  if (as->freeset != RSET_ALL)
    lj_trace_err(as->J, LJ_TRERR_BADRA);  /* Ouch! Should never happen. */

  /* Set trace entry point before fixing up tail to allow link to self. */
  T->mcode = as->mcp;
  T->mcloop = as->mcloop ? (MSize)((char *)as->mcloop - (char *)as->mcp) : 0;
  if (!as->loopref)
    asm_tail_fixup(as, T->link);  /* Note: this may change as->mctop! */
  T->szmcode = (MSize)((char *)as->mctop - (char *)as->mcp);
  lj_mcode_sync(T->mcode, origtop);
}