/*
** IR assembler (SSA IR -> machine code).
** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
*/
#include "lj_ircall.h"
#include "lj_dispatch.h"
#include "lj_target.h"

/* -- Assembler state and common macros ----------------------------------- */
/* Assembler state. */
typedef struct ASMState {
  RegCost cost[RID_MAX];  /* Reference and blended allocation cost for regs. */

  MCode *mcp;		/* Current MCode pointer (grows down). */
  MCode *mclim;		/* Lower limit for MCode memory + red zone. */

  IRIns *ir;		/* Copy of pointer to IR instructions/constants. */
  jit_State *J;		/* JIT compiler state. */

#if LJ_TARGET_X86ORX64
  x86ModRM mrm;		/* Fused x86 address operand. */
#endif

  RegSet freeset;	/* Set of free registers. */
  RegSet modset;	/* Set of registers modified inside the loop. */
  RegSet weakset;	/* Set of weakly referenced registers. */
  RegSet phiset;	/* Set of PHI registers. */

  uint32_t flags;	/* Copy of JIT compiler flags. */
  int loopinv;		/* Loop branch inversion (0:no, 1:yes, 2:yes+CC_P). */

  int32_t evenspill;	/* Next even spill slot. */
  int32_t oddspill;	/* Next odd spill slot (or 0). */

  IRRef curins;		/* Reference of current instruction. */
  IRRef stopins;	/* Stop assembly before hitting this instruction. */
  IRRef orignins;	/* Original T->nins. */

  IRRef snapref;	/* Current snapshot is active after this reference. */
  IRRef snaprename;	/* Rename highwater mark for snapshot check. */
  SnapNo snapno;	/* Current snapshot number. */
  SnapNo loopsnapno;	/* Loop snapshot number. */

  IRRef fuseref;	/* Fusion limit (loopref, 0 or FUSE_DISABLED). */
  IRRef sectref;	/* Section base reference (loopref or 0). */
  IRRef loopref;	/* Reference of LOOP instruction (or 0). */

  BCReg topslot;	/* Number of slots for stack check (unless 0). */
  MSize gcsteps;	/* Accumulated number of GC steps (per section). */

  GCtrace *T;		/* Trace to assemble. */
  GCtrace *parent;	/* Parent trace (or NULL). */

  MCode *mcbot;		/* Bottom of reserved MCode. */
  MCode *mctop;		/* Top of generated MCode. */
  MCode *mcloop;	/* Pointer to loop MCode (or NULL). */
  MCode *invmcp;	/* Points to invertible loop branch (or NULL). */
  MCode *flagmcp;	/* Pending opportunity to merge flag setting ins. */
  MCode *realign;	/* Realign loop if not NULL. */

  int32_t krefk[RID_NUM_KREF];
  IRRef1 phireg[RID_MAX];  /* PHI register references. */
  uint16_t parentmap[LJ_MAX_JSLOTS];  /* Parent slot to RegSP map. */
  uint16_t parentmaphi[LJ_MAX_JSLOTS];  /* Parent slot to hi RegSP map. */
} ASMState;
#define IR(ref)			(&as->ir[(ref)])

#define ASMREF_TMP1		REF_TRUE	/* Temp. register. */
#define ASMREF_TMP2		REF_FALSE	/* Temp. register. */
#define ASMREF_L		REF_NIL		/* Stores register for L. */

/* Check for variant to invariant references. */
#define iscrossref(as, ref)	((ref) < as->sectref)

/* Inhibit memory op fusion from variant to invariant references. */
#define FUSE_DISABLED		(~(IRRef)0)
#define mayfuse(as, ref)	((ref) > as->fuseref)
#define neverfuse(as)		(as->fuseref == FUSE_DISABLED)
#define canfuse(as, ir)		(!neverfuse(as) && !irt_isphi((ir)->t))
#define opisfusableload(o) \
  ((o) == IR_ALOAD || (o) == IR_HLOAD || (o) == IR_ULOAD || \
   (o) == IR_FLOAD || (o) == IR_XLOAD || (o) == IR_SLOAD || (o) == IR_VLOAD)
/* Sparse limit checks using a red zone before the actual limit. */
#define MCLIM_REDZONE	64
#define checkmclim(as) \
  if (LJ_UNLIKELY(as->mcp < as->mclim)) asm_mclimit(as)

static LJ_NORET LJ_NOINLINE void asm_mclimit(ASMState *as)
{
  lj_mcode_limiterr(as->J, (size_t)(as->mctop - as->mcp + 4*MCLIM_REDZONE));
}
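/* Illustrative usage sketch (not from the original source): the emit loop only
** re-checks the limit once per IR instruction and relies on the red zone to
** absorb the bytes emitted in between, e.g.
**
**   while (more_instructions(as)) {   // hypothetical driver loop
**     emit_one_ins(as);               // writes at most MCLIM_REDZONE bytes
**     checkmclim(as);                 // cheap sparse check
**   }
*/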
#ifdef RID_NUM_KREF
#define ra_iskref(ref)		((ref) < RID_NUM_KREF)
#define ra_krefreg(ref)		((Reg)(RID_MIN_KREF + (Reg)(ref)))
#define ra_krefk(as, ref)	(as->krefk[(ref)])

static LJ_AINLINE void ra_setkref(ASMState *as, Reg r, int32_t k)
{
  IRRef ref = (IRRef)(r - RID_MIN_KREF);
  as->krefk[ref] = k;
  as->cost[r] = REGCOST(ref, ref);
}

#else
#define ra_iskref(ref)		0
#define ra_krefreg(ref)		RID_MIN_GPR
#define ra_krefk(as, ref)	0
#endif
/* Arch-specific field offsets. */
static const uint8_t field_ofs[IRFL__MAX+1] = {
#define FLOFS(name, ofs)	(uint8_t)(ofs),
IRFLDEF(FLOFS)
#undef FLOFS
  0
};
/* -- Target-specific instruction emitter --------------------------------- */

#if LJ_TARGET_X86ORX64
#include "lj_emit_x86.h"
#elif LJ_TARGET_ARM
#include "lj_emit_arm.h"
#elif LJ_TARGET_PPC
#include "lj_emit_ppc.h"
#elif LJ_TARGET_MIPS
#include "lj_emit_mips.h"
#else
#error "Missing instruction emitter for target CPU"
#endif
/* -- Register allocator debugging ---------------------------------------- */

/* #define LUAJIT_DEBUG_RA */

#ifdef LUAJIT_DEBUG_RA

#include <stdio.h>
#include <stdarg.h>

#define RIDNAME(name)	#name,
static const char *const ra_regname[] = {
  GPRDEF(RIDNAME)
  FPRDEF(RIDNAME)
  VRIDDEF(RIDNAME)
  NULL
};
#undef RIDNAME

static char ra_dbg_buf[65536];
static char *ra_dbg_p;
static char *ra_dbg_merge;
static MCode *ra_dbg_mcp;
static void ra_dstart(void)
{
  ra_dbg_p = ra_dbg_buf;
  ra_dbg_merge = NULL;
  ra_dbg_mcp = NULL;
}

static void ra_dflush(void)
{
  fwrite(ra_dbg_buf, 1, (size_t)(ra_dbg_p-ra_dbg_buf), stdout);
  ra_dstart();
}

static void ra_dprintf(ASMState *as, const char *fmt, ...)
{
  char *p;
  va_list argp;
  va_start(argp, fmt);
  p = ra_dbg_mcp == as->mcp ? ra_dbg_merge : ra_dbg_p;
  ra_dbg_mcp = NULL;
  p += sprintf(p, "%08x \e[36m%04d ", (uintptr_t)as->mcp, as->curins-REF_BIAS);
  for (;;) {
    const char *e = strchr(fmt, '$');
    if (e == NULL) break;
    memcpy(p, fmt, (size_t)(e-fmt));
    p += e-fmt;
    if (e[1] == 'r') {  /* Register name. */
      Reg r = va_arg(argp, Reg) & RID_MASK;
      const char *q;
      for (q = ra_regname[r]; *q; q++)
	*p++ = *q >= 'A' && *q <= 'Z' ? *q + 0x20 : *q;
    } else if (e[1] == 'f' || e[1] == 'i') {  /* IR reference. */
      IRRef ref;
      if (e[1] == 'f')
	ref = va_arg(argp, IRRef);
      else
	ref = va_arg(argp, IRIns *) - as->ir;
      if (ref >= REF_BIAS)
	p += sprintf(p, "%04d", ref - REF_BIAS);
      else
	p += sprintf(p, "K%03d", REF_BIAS - ref);
    } else if (e[1] == 's') {  /* Spill slot. */
      uint32_t slot = va_arg(argp, uint32_t);
      p += sprintf(p, "[sp+0x%x]", sps_scale(slot));
    } else if (e[1] == 'x') {  /* Hex value. */
      p += sprintf(p, "%08x", va_arg(argp, int32_t));
    }
    fmt = e+2;
  }
  va_end(argp);
  while (*fmt) *p++ = *fmt++;
  *p++ = '\e'; *p++ = '['; *p++ = 'm'; *p++ = '\n';
  if (p > ra_dbg_buf+sizeof(ra_dbg_buf)-256) {
    fwrite(ra_dbg_buf, 1, (size_t)(p-ra_dbg_buf), stdout);
    p = ra_dbg_buf;
  }
  ra_dbg_p = p;
}

#define RA_DBG_START()	ra_dstart()
#define RA_DBG_FLUSH()	ra_dflush()
#define RA_DBG_REF() \
  do { char *_p = ra_dbg_p; ra_dprintf(as, ""); \
       ra_dbg_merge = _p; ra_dbg_mcp = as->mcp; } while (0)
#define RA_DBGX(x)	ra_dprintf x

#else
#define RA_DBG_START()	((void)0)
#define RA_DBG_FLUSH()	((void)0)
#define RA_DBG_REF()	((void)0)
#define RA_DBGX(x)	((void)0)
#endif
/* -- Register allocator -------------------------------------------------- */

#define ra_free(as, r)		rset_set(as->freeset, (r))
#define ra_modified(as, r)	rset_set(as->modset, (r))
#define ra_weak(as, r)		rset_set(as->weakset, (r))
#define ra_noweak(as, r)	rset_clear(as->weakset, (r))

#define ra_used(ir)		(ra_hasreg((ir)->r) || ra_hasspill((ir)->s))
/* Setup register allocator. */
static void ra_setup(ASMState *as)
{
  Reg r;
  /* Initially all regs (except the stack pointer) are free for use. */
  as->freeset = RSET_INIT;
  as->modset = RSET_EMPTY;
  as->weakset = RSET_EMPTY;
  as->phiset = RSET_EMPTY;
  memset(as->phireg, 0, sizeof(as->phireg));
  for (r = RID_MIN_GPR; r < RID_MAX; r++)
    as->cost[r] = REGCOST(~0u, 0u);
}
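/* Background sketch (assumed encoding, see lj_target.h for the authoritative
** definition): a RegCost blends the owning IR reference and an eviction cost
** into a single 32 bit value, roughly
**
**   rc  = REGCOST(cost, ref);   // ref in the low half, cost in the high half
**   ref = regcost_ref(rc);      // recover the owning reference
**
** so marking every register with REGCOST(~0u, 0u) in ra_setup() simply means
** "no owner yet".
*/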
/* Rematerialize constants. */
static Reg ra_rematk(ASMState *as, IRRef ref)
{
  IRIns *ir;
  Reg r;
  if (ra_iskref(ref)) {
    r = ra_krefreg(ref);
    lua_assert(!rset_test(as->freeset, r));
    ra_free(as, r);
    ra_modified(as, r);
    emit_loadi(as, r, ra_krefk(as, ref));
    return r;
  }
  ir = IR(ref);
  r = ir->r;
  lua_assert(ra_hasreg(r) && !ra_hasspill(ir->s));
  ra_free(as, r);
  ra_modified(as, r);
  ir->r = RID_INIT;  /* Do not keep any hint. */
  RA_DBGX((as, "remat $i $r", ir, r));
  if (ir->o == IR_KNUM) {
    emit_loadn(as, r, ir_knum(ir));
  } else if (emit_canremat(REF_BASE) && ir->o == IR_BASE) {
    ra_sethint(ir->r, RID_BASE);  /* Restore BASE register hint. */
    emit_getgl(as, r, jit_base);
  } else if (emit_canremat(ASMREF_L) && ir->o == IR_KPRI) {
    lua_assert(irt_isnil(ir->t));  /* REF_NIL stores ASMREF_L register. */
    emit_getgl(as, r, jit_L);
  } else if (ir->o == IR_KINT64) {
    emit_loadu64(as, r, ir_kint64(ir)->u64);
  } else {
    lua_assert(ir->o == IR_KINT || ir->o == IR_KGC ||
	       ir->o == IR_KPTR || ir->o == IR_KKPTR || ir->o == IR_KNULL);
    emit_loadi(as, r, ir->i);
  }
  return r;
}
/* Force a spill. Allocate a new spill slot if needed. */
static int32_t ra_spill(ASMState *as, IRIns *ir)
{
  int32_t slot = ir->s;
  if (!ra_hasspill(slot)) {
    if (irt_is64(ir->t)) {
      slot = as->evenspill;
      as->evenspill += 2;
    } else if (as->oddspill) {
      slot = as->oddspill;
      as->oddspill = 0;
    } else {
      slot = as->evenspill;
      as->oddspill = slot+1;
      as->evenspill += 2;
    }
    if (as->evenspill > 256)
      lj_trace_err(as->J, LJ_TRERR_SPILLOV);
    ir->s = (uint8_t)slot;
  }
  return sps_scale(slot);
}
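/* Worked example (illustrative): starting from an even evenspill value, a
** 64 bit spill takes an aligned pair of slots, while a 32 bit spill takes the
** even slot and records the odd one in oddspill so the next 32 bit spill can
** reuse it for free:
**
**   spill(num)  -> slot 4, evenspill = 6            // uses slots 4+5
**   spill(int)  -> slot 6, oddspill = 7, evenspill = 8
**   spill(int)  -> slot 7, oddspill = 0
**
** sps_scale() then turns the slot number into the byte offset used by
** emit_spload()/emit_spstore().
*/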
/* Release the temporarily allocated register in ASMREF_TMP1/ASMREF_TMP2. */
static Reg ra_releasetmp(ASMState *as, IRRef ref)
{
  IRIns *ir = IR(ref);
  Reg r = ir->r;
  lua_assert(ra_hasreg(r) && !ra_hasspill(ir->s));
  ra_free(as, r);
  ra_modified(as, r);
  ir->r = RID_INIT;
  return r;
}

/* Restore a register (marked as free). Rematerialize or force a spill. */
static Reg ra_restore(ASMState *as, IRRef ref)
{
  if (emit_canremat(ref)) {
    return ra_rematk(as, ref);
  } else {
    IRIns *ir = IR(ref);
    int32_t ofs = ra_spill(as, ir);  /* Force a spill slot. */
    Reg r = ir->r;
    lua_assert(ra_hasreg(r));
    ra_sethint(ir->r, r);  /* Keep hint. */
    ra_free(as, r);
    ra_modified(as, r);
    ir->r = RID_INIT;
    if (!rset_test(as->weakset, r)) {  /* Only restore non-weak references. */
      RA_DBGX((as, "restore $i $r", ir, r));
      emit_spload(as, ir, r, ofs);
    }
    return r;
  }
}
/* Save a register to a spill slot. */
static void ra_save(ASMState *as, IRIns *ir, Reg r)
{
  RA_DBGX((as, "save $i $r", ir, r));
  emit_spstore(as, ir, r, sps_scale(ir->s));
}
#define MINCOST(name) \
  if (rset_test(RSET_ALL, RID_##name) && \
      LJ_LIKELY(allow&RID2RSET(RID_##name)) && as->cost[RID_##name] < cost) \
    cost = as->cost[RID_##name];

/* Evict the register with the lowest cost, forcing a restore. */
static Reg ra_evict(ASMState *as, RegSet allow)
{
  IRRef ref;
  RegCost cost = ~(RegCost)0;
  lua_assert(allow != RSET_EMPTY);
  if (RID_NUM_FPR == 0 || allow < RID2RSET(RID_MAX_GPR)) {
    GPRDEF(MINCOST)
  } else {
    FPRDEF(MINCOST)
  }
  ref = regcost_ref(cost);
  lua_assert(ra_iskref(ref) || (ref >= as->T->nk && ref < as->T->nins));
  /* Preferably pick any weak ref instead of a non-weak, non-const ref. */
  if (!irref_isk(ref) && (as->weakset & allow)) {
    IRIns *ir = IR(ref);
    if (!rset_test(as->weakset, ir->r))
      ref = regcost_ref(as->cost[rset_pickbot((as->weakset & allow))]);
  }
  return ra_restore(as, ref);
}
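/* Sketch of the expansion assumed above: GPRDEF(MINCOST)/FPRDEF(MINCOST)
** unroll one guarded comparison per register, e.g. roughly
**
**   if (rset_test(RSET_ALL, RID_EAX) && LJ_LIKELY(allow&RID2RSET(RID_EAX)) &&
**       as->cost[RID_EAX] < cost) cost = as->cost[RID_EAX];
**   ...repeated for every GPR or FPR...
**
** so the eviction victim is simply the allowed register with the smallest
** blended cost, with weak refs preferred afterwards.
*/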
/* Pick any register (marked as free). Evict on-demand. */
static Reg ra_pick(ASMState *as, RegSet allow)
{
  RegSet pick = as->freeset & allow;
  if (!pick)
    return ra_evict(as, allow);
  else
    return rset_picktop(pick);
}

/* Get a scratch register (marked as free). */
static Reg ra_scratch(ASMState *as, RegSet allow)
{
  Reg r = ra_pick(as, allow);
  ra_modified(as, r);
  RA_DBGX((as, "scratch $r", r));
  return r;
}
/* Evict all registers from a set (if not free). */
static void ra_evictset(ASMState *as, RegSet drop)
{
  RegSet work;
  as->modset |= drop;
  work = (drop & ~as->freeset) & RSET_FPR;
  while (work) {
    Reg r = rset_pickbot(work);
    ra_restore(as, regcost_ref(as->cost[r]));
    rset_clear(work, r);
  }
  work = (drop & ~as->freeset) & RSET_GPR;
  while (work) {
    Reg r = rset_pickbot(work);
    ra_restore(as, regcost_ref(as->cost[r]));
    rset_clear(work, r);
  }
}
/* Evict (rematerialize) all registers allocated to constants. */
static void ra_evictk(ASMState *as)
{
  RegSet work;
  work = ~as->freeset & RSET_FPR;
  while (work) {
    Reg r = rset_pickbot(work);
    IRRef ref = regcost_ref(as->cost[r]);
    if (emit_canremat(ref) && irref_isk(ref))
      ra_rematk(as, ref);
    rset_clear(work, r);
  }
  work = ~as->freeset & RSET_GPR;
  while (work) {
    Reg r = rset_pickbot(work);
    IRRef ref = regcost_ref(as->cost[r]);
    if (emit_canremat(ref) && irref_isk(ref))
      ra_rematk(as, ref);
    rset_clear(work, r);
  }
}
/* Allocate a register for a constant. */
static Reg ra_allock(ASMState *as, int32_t k, RegSet allow)
{
  /* First try to find a register which already holds the same constant. */
  RegSet pick, work = ~as->freeset & RSET_GPR;
  Reg r;
  while (work) {
    IRRef ref;
    r = rset_pickbot(work);
    ref = regcost_ref(as->cost[r]);
    if (ref < ASMREF_L &&
	k == (ra_iskref(ref) ? ra_krefk(as, ref) : IR(ref)->i))
      return r;
    rset_clear(work, r);
  }
  pick = as->freeset & allow;
  if (pick) {
    /* Constants should preferably get unmodified registers. */
    if ((pick & ~as->modset))
      pick &= ~as->modset;
    r = rset_pickbot(pick);  /* Reduce conflicts with inverse allocation. */
  } else {
    r = ra_evict(as, allow);
  }
  RA_DBGX((as, "allock $x $r", k, r));
  ra_setkref(as, r, k);
  rset_clear(as->freeset, r);
  ra_noweak(as, r);
  return r;
}
#ifdef RID_NUM_KREF
/* Allocate a specific register for a constant. */
static void ra_allockreg(ASMState *as, int32_t k, Reg r)
{
  Reg kr = ra_allock(as, k, RID2RSET(r));
  if (kr != r) {
    IRIns irdummy;
    irdummy.t.irt = IRT_INT;
    ra_scratch(as, RID2RSET(r));
    emit_movrr(as, &irdummy, r, kr);
  }
}
#else
#define ra_allockreg(as, k, r)	emit_loadi(as, (r), (k))
#endif
/* Allocate a register for ref from the allowed set of registers.
** Note: this function assumes the ref does NOT have a register yet!
** Picks an optimal register, sets the cost and marks the register as non-free.
*/
static Reg ra_allocref(ASMState *as, IRRef ref, RegSet allow)
{
  IRIns *ir = IR(ref);
  RegSet pick = as->freeset & allow;
  Reg r;
  lua_assert(ra_noreg(ir->r));
  if (pick) {
    /* First check register hint from propagation or PHI. */
    if (ra_hashint(ir->r)) {
      r = ra_gethint(ir->r);
      if (rset_test(pick, r))  /* Use hint register if possible. */
	goto found;
      /* Rematerialization is cheaper than missing a hint. */
      if (rset_test(allow, r) && emit_canremat(regcost_ref(as->cost[r]))) {
	ra_rematk(as, regcost_ref(as->cost[r]));
	goto found;
      }
      RA_DBGX((as, "hintmiss $f $r", ref, r));
    }
    /* Invariants should preferably get unmodified registers. */
    if (ref < as->loopref && !irt_isphi(ir->t)) {
      if ((pick & ~as->modset))
	pick &= ~as->modset;
      r = rset_pickbot(pick);  /* Reduce conflicts with inverse allocation. */
    } else {
      /* We've got plenty of regs, so get callee-save regs if possible. */
      if (RID_NUM_GPR > 8 && (pick & ~RSET_SCRATCH))
	pick &= ~RSET_SCRATCH;
      r = rset_picktop(pick);
    }
  } else {
    r = ra_evict(as, allow);
  }
found:
  RA_DBGX((as, "alloc $f $r", ref, r));
  ir->r = (uint8_t)r;
  rset_clear(as->freeset, r);
  ra_noweak(as, r);
  as->cost[r] = REGCOST_REF_T(ref, irt_t(ir->t));
  return r;
}
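/* Summary example (illustrative) of the selection order implemented above:
** hint register if free -> remat the constant occupying the hint -> for
** invariants an unmodified register picked from the bottom of the set -> a
** callee-save register picked from the top -> evict the cheapest register.
** A caller typically just writes
**
**   Reg r = ra_allocref(as, ref, RSET_GPR);
**
** and relies on this policy to keep loop-invariant values out of modset.
*/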
/* Allocate a register on-demand. */
static Reg ra_alloc1(ASMState *as, IRRef ref, RegSet allow)
{
  Reg r = IR(ref)->r;
  /* Note: allow is ignored if the register is already allocated. */
  if (ra_noreg(r)) r = ra_allocref(as, ref, allow);
  ra_noweak(as, r);
  return r;
}
/* Rename register allocation and emit move. */
static void ra_rename(ASMState *as, Reg down, Reg up)
{
  IRRef ren, ref = regcost_ref(as->cost[up] = as->cost[down]);
  IRIns *ir = IR(ref);
  ir->r = (uint8_t)up;
  as->cost[down] = 0;
  lua_assert((down < RID_MAX_GPR) == (up < RID_MAX_GPR));
  lua_assert(!rset_test(as->freeset, down) && rset_test(as->freeset, up));
  ra_free(as, down);  /* 'down' is free ... */
  ra_modified(as, down);
  rset_clear(as->freeset, up);  /* ... and 'up' is now allocated. */
  ra_noweak(as, up);
  RA_DBGX((as, "rename $f $r $r", regcost_ref(as->cost[up]), down, up));
  emit_movrr(as, ir, down, up);  /* Backwards codegen needs inverse move. */
  if (!ra_hasspill(IR(ref)->s)) {  /* Add the rename to the IR. */
    lj_ir_set(as->J, IRT(IR_RENAME, IRT_NIL), ref, as->snapno);
    ren = tref_ref(lj_ir_emit(as->J));
    as->ir = as->T->ir;  /* The IR may have been reallocated. */
    IR(ren)->r = (uint8_t)down;
    IR(ren)->s = SPS_NONE;
  }
}
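/* Illustrative effect (not literal output): the rename is recorded as an
** extra IR_RENAME instruction whose op1 is the renamed ref and op2 the
** current snapshot number, with IR(ren)->r remembering the old register:
**
**   0052 rename 0031 #3
**
** so exit handling can tell on which side of snapshot #3 the value of 0031
** still lives in the old register.
*/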
/* Pick a destination register (marked as free).
** Caveat: allow is ignored if there's already a destination register.
** Use ra_destreg() to get a specific register.
*/
static Reg ra_dest(ASMState *as, IRIns *ir, RegSet allow)
{
  Reg dest = ir->r;
  if (ra_hasreg(dest)) {
    ra_free(as, dest);
    ra_modified(as, dest);
  } else {
    if (ra_hashint(dest) && rset_test((as->freeset&allow), ra_gethint(dest))) {
      dest = ra_gethint(dest);
      ra_modified(as, dest);
      RA_DBGX((as, "dest $r", dest));
    } else {
      dest = ra_scratch(as, allow);
    }
    ir->r = dest;
  }
  if (LJ_UNLIKELY(ra_hasspill(ir->s))) ra_save(as, ir, dest);
  return dest;
}

/* Force a specific destination register (marked as free). */
static void ra_destreg(ASMState *as, IRIns *ir, Reg r)
{
  Reg dest = ra_dest(as, ir, RID2RSET(r));
  if (dest != r) {
    ra_scratch(as, RID2RSET(r));
    emit_movrr(as, ir, dest, r);
  }
}
#if LJ_TARGET_X86ORX64
/* Propagate dest register to left reference. Emit moves as needed.
** This is a required fixup step for all 2-operand machine instructions.
*/
static void ra_left(ASMState *as, Reg dest, IRRef lref)
{
  IRIns *ir = IR(lref);
  Reg left = ir->r;
  if (ra_noreg(left)) {
    if (irref_isk(lref)) {
      if (ir->o == IR_KNUM) {
	cTValue *tv = ir_knum(ir);
	/* FP remat needs a load except for +0. Still better than eviction. */
	if (tvispzero(tv) || !(as->freeset & RSET_FPR)) {
	  emit_loadn(as, dest, tv);
	  return;
	}
      } else if (ir->o == IR_KINT64) {
	emit_loadu64(as, dest, ir_kint64(ir)->u64);
	return;
      } else {
	lua_assert(ir->o == IR_KINT || ir->o == IR_KGC ||
		   ir->o == IR_KPTR || ir->o == IR_KKPTR || ir->o == IR_KNULL);
	emit_loadi(as, dest, ir->i);
	return;
      }
    }
    if (!ra_hashint(left) && !iscrossref(as, lref))
      ra_sethint(ir->r, dest);  /* Propagate register hint. */
    left = ra_allocref(as, lref, dest < RID_MAX_GPR ? RSET_GPR : RSET_FPR);
  }
  ra_noweak(as, left);
  /* Move needed for true 3-operand instruction: y=a+b ==> y=a; y+=b. */
  if (dest != left) {
    /* Use register renaming if dest is the PHI reg. */
    if (irt_isphi(ir->t) && as->phireg[dest] == lref) {
      ra_modified(as, left);
      ra_rename(as, left, dest);
    } else {
      emit_movrr(as, ir, dest, left);
    }
  }
}
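/* Worked example (illustrative, x86 syntax): for y = a + b the backwards
** code generator first emits the 2-operand form and then lets ra_left()
** materialize the left operand into the destination register:
**
**   add eax, ecx      ; y += b   (emitted first, but executes second)
**   mov eax, edx      ; y = a    (fixup emitted afterwards, executes first)
**
** If 'a' has no register yet, ra_left() instead allocates it directly into
** eax and no move is needed.
*/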
/* Similar to ra_left, except we override any hints. */
static void ra_leftov(ASMState *as, Reg dest, IRRef lref)
{
  IRIns *ir = IR(lref);
  Reg left = ir->r;
  if (ra_noreg(left)) {
    ra_sethint(ir->r, dest);  /* Propagate register hint. */
    left = ra_allocref(as, lref,
		       (LJ_SOFTFP || dest < RID_MAX_GPR) ? RSET_GPR : RSET_FPR);
  }
  ra_noweak(as, left);
  if (dest != left) {
    /* Use register renaming if dest is the PHI reg. */
    if (irt_isphi(ir->t) && as->phireg[dest] == lref) {
      ra_modified(as, left);
      ra_rename(as, left, dest);
    } else {
      emit_movrr(as, ir, dest, left);
    }
  }
}

#if !LJ_TARGET_X86ORX64
/* Force a RID_RETLO/RID_RETHI destination register pair (marked as free). */
static void ra_destpair(ASMState *as, IRIns *ir)
{
  Reg destlo = ir->r, desthi = (ir+1)->r;
  /* First spill unrelated refs blocking the destination registers. */
  if (!rset_test(as->freeset, RID_RETLO) &&
      destlo != RID_RETLO && desthi != RID_RETLO)
    ra_restore(as, regcost_ref(as->cost[RID_RETLO]));
  if (!rset_test(as->freeset, RID_RETHI) &&
      destlo != RID_RETHI && desthi != RID_RETHI)
    ra_restore(as, regcost_ref(as->cost[RID_RETHI]));
  /* Next free the destination registers (if any). */
  if (ra_hasreg(destlo)) {
    ra_free(as, destlo);
    ra_modified(as, destlo);
  } else {
    destlo = RID_RETLO;
  }
  if (ra_hasreg(desthi)) {
    ra_free(as, desthi);
    ra_modified(as, desthi);
  } else {
    desthi = RID_RETHI;
  }
  /* Check for conflicts and shuffle the registers as needed. */
  if (destlo == RID_RETHI) {
    if (desthi == RID_RETLO) {
      emit_movrr(as, ir, RID_RETHI, RID_TMP);
      emit_movrr(as, ir, RID_RETLO, RID_RETHI);
      emit_movrr(as, ir, RID_TMP, RID_RETLO);
    } else {
      emit_movrr(as, ir, RID_RETHI, RID_RETLO);
      if (desthi != RID_RETHI) emit_movrr(as, ir, desthi, RID_RETHI);
    }
  } else if (desthi == RID_RETLO) {
    emit_movrr(as, ir, RID_RETLO, RID_RETHI);
    if (destlo != RID_RETLO) emit_movrr(as, ir, destlo, RID_RETLO);
  } else {
    if (desthi != RID_RETHI) emit_movrr(as, ir, desthi, RID_RETHI);
    if (destlo != RID_RETLO) emit_movrr(as, ir, destlo, RID_RETLO);
  }
  /* Restore spill slots (if any). */
  if (ra_hasspill((ir+1)->s)) ra_save(as, ir+1, RID_RETHI);
  if (ra_hasspill(ir->s)) ra_save(as, ir, RID_RETLO);
}
#endif
/* -- Snapshot handling --------------------------------------------------- */

/* Can we rematerialize a KNUM instead of forcing a spill? */
static int asm_snap_canremat(ASMState *as)
{
  Reg r;
  for (r = RID_MIN_FPR; r < RID_MAX_FPR; r++)
    if (irref_isk(regcost_ref(as->cost[r])))
      return 1;
  return 0;
}

/* Allocate register or spill slot for a ref that escapes to a snapshot. */
static void asm_snap_alloc1(ASMState *as, IRRef ref)
{
  IRIns *ir = IR(ref);
  if (!ra_used(ir)) {
    RegSet allow = (!LJ_SOFTFP && irt_isnum(ir->t)) ? RSET_FPR : RSET_GPR;
    /* Get a weak register if we have a free one or can rematerialize. */
    if ((as->freeset & allow) ||
	(allow == RSET_FPR && asm_snap_canremat(as))) {
      Reg r = ra_allocref(as, ref, allow);  /* Allocate a register. */
      if (!irt_isphi(ir->t))
	ra_weak(as, r);  /* But mark it as weakly referenced. */
      RA_DBGX((as, "snapreg $f $r", ref, ir->r));
    } else {
      ra_spill(as, ir);  /* Otherwise force a spill slot. */
      RA_DBGX((as, "snapspill $f $s", ref, ir->s));
    }
  }
}

/* Allocate refs escaping to a snapshot. */
static void asm_snap_alloc(ASMState *as)
{
  SnapShot *snap = &as->T->snap[as->snapno];
  SnapEntry *map = &as->T->snapmap[snap->mapofs];
  MSize n, nent = snap->nent;
  for (n = 0; n < nent; n++) {
    SnapEntry sn = map[n];
    IRRef ref = snap_ref(sn);
    if (!irref_isk(ref)) {
      asm_snap_alloc1(as, ref);
      if (LJ_SOFTFP && (sn & SNAP_SOFTFPNUM)) {
	lua_assert(irt_type(IR(ref+1)->t) == IRT_SOFTFP);
	asm_snap_alloc1(as, ref+1);
      }
    }
  }
}
/* All guards for a snapshot use the same exitno. This is currently the
** same as the snapshot number. Since the exact origin of the exit cannot
** be determined, all guards for the same snapshot must exit with the same
** RegSP mapping.
** A renamed ref which has been used in a prior guard for the same snapshot
** would cause an inconsistency. The easy way out is to force a spill slot.
*/
static int asm_snap_checkrename(ASMState *as, IRRef ren)
{
  SnapShot *snap = &as->T->snap[as->snapno];
  SnapEntry *map = &as->T->snapmap[snap->mapofs];
  MSize n, nent = snap->nent;
  for (n = 0; n < nent; n++) {
    SnapEntry sn = map[n];
    IRRef ref = snap_ref(sn);
    if (ref == ren || (LJ_SOFTFP && (sn & SNAP_SOFTFPNUM) && ++ref == ren)) {
      IRIns *ir = IR(ref);
      ra_spill(as, ir);  /* Register renamed, so force a spill slot. */
      RA_DBGX((as, "snaprensp $f $s", ref, ir->s));
      return 1;  /* Found. */
    }
  }
  return 0;  /* Not found. */
}
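/* Illustrative scenario: two guards G1 and G2 both exit through the current
** snapshot, and ref 0021 gets renamed between them. The shared exitno (and
** thus the shared RegSP map) can only record one location for 0021, so the
** check above forces a spill slot instead, which is valid for both guards.
*/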
/* Prepare snapshot for next guard instruction. */
static void asm_snap_prep(ASMState *as)
{
  if (as->curins < as->snapref) {
    do {
      lua_assert(as->snapno != 0);
      as->snapno--;
      as->snapref = as->T->snap[as->snapno].ref;
    } while (as->curins < as->snapref);
    asm_snap_alloc(as);
    as->snaprename = as->T->nins;
  } else {
    /* Process any renames above the highwater mark. */
    for (; as->snaprename < as->T->nins; as->snaprename++) {
      IRIns *ir = IR(as->snaprename);
      if (asm_snap_checkrename(as, ir->op1))
	ir->op2 = REF_BIAS-1;  /* Kill rename. */
    }
  }
}
/* -- Miscellaneous helpers ----------------------------------------------- */

/* Collect arguments from CALL* and CARG instructions. */
static void asm_collectargs(ASMState *as, IRIns *ir,
			    const CCallInfo *ci, IRRef *args)
{
  uint32_t n = CCI_NARGS(ci);
  lua_assert(n <= CCI_NARGS_MAX);
  if ((ci->flags & CCI_L)) { *args++ = ASMREF_L; n--; }
  while (n-- > 1) {
    ir = IR(ir->op1);
    lua_assert(ir->o == IR_CARG);
    args[n] = ir->op2 == REF_NIL ? 0 : ir->op2;
  }
  args[0] = ir->op1 == REF_NIL ? 0 : ir->op1;
  lua_assert(IR(ir->op1)->o != IR_CARG);
}
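/* Illustrative shape of the CARG chain walked above, for a call f(a, b, c)
** (hypothetical refs):
**
**   0030 CARG  a     b
**   0031 CARG  0030  c
**   0032 CALLN 0031  [f]
**
** asm_collectargs() follows op1 backwards and fills args[] = { a, b, c },
** with ASMREF_L prepended first when the callee takes a lua_State pointer.
*/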
/* Reconstruct CCallInfo flags for CALLX*. */
static uint32_t asm_callx_flags(ASMState *as, IRIns *ir)
{
  uint32_t nargs = 0;
  if (ir->op1 != REF_NIL) {  /* Count number of arguments first. */
    IRIns *ira = IR(ir->op1);
    nargs++;
    while (ira->o == IR_CARG) { nargs++; ira = IR(ira->op1); }
  }
  if (IR(ir->op2)->o == IR_CARG) {  /* Copy calling convention info. */
    CTypeID id = (CTypeID)IR(IR(ir->op2)->op2)->i;
    CType *ct = ctype_get(ctype_ctsG(J2G(as->J)), id);
    nargs |= ((ct->info & CTF_VARARG) ? CCI_VARARG : 0);
    nargs |= (ctype_cconv(ct->info) << CCI_CC_SHIFT);
  }
  return (nargs | (ir->t.irt << CCI_OTSHIFT));
}

/* Calculate stack adjustment. */
static int32_t asm_stack_adjust(ASMState *as)
{
  if (as->evenspill <= SPS_FIXED)
    return 0;
  return sps_scale(sps_align(as->evenspill));
}

/* Must match with hash*() in lj_tab.c. */
static uint32_t ir_khash(IRIns *ir)
{
  uint32_t lo, hi;
  if (irt_isstr(ir->t)) {
    return ir_kstr(ir)->hash;
  } else if (irt_isnum(ir->t)) {
    lo = ir_knum(ir)->u32.lo;
    hi = ir_knum(ir)->u32.hi << 1;
  } else if (irt_ispri(ir->t)) {
    lua_assert(!irt_isnil(ir->t));
    return irt_type(ir->t)-IRT_FALSE;
  } else {
    lua_assert(irt_isgcv(ir->t));
    lo = u32ptr(ir_kgc(ir));
    hi = lo + HASH_BIAS;
  }
  return hashrot(lo, hi);
}
/* -- Allocations --------------------------------------------------------- */

static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args);
static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci);

static void asm_snew(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_new];
  IRRef args[3];
  args[0] = ASMREF_L;  /* lua_State *L    */
  args[1] = ir->op1;   /* const char *str */
  args[2] = ir->op2;   /* size_t len      */
  as->gcsteps++;
  asm_setupresult(as, ir, ci);  /* GCstr * */
  asm_gencall(as, ci, args);
}

static void asm_tnew(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_new1];
  IRRef args[2];
  args[0] = ASMREF_L;     /* lua_State *L    */
  args[1] = ASMREF_TMP1;  /* uint32_t ahsize */
  as->gcsteps++;
  asm_setupresult(as, ir, ci);  /* GCtab * */
  asm_gencall(as, ci, args);
  ra_allockreg(as, ir->op1 | (ir->op2 << 24), ra_releasetmp(as, ASMREF_TMP1));
}

static void asm_tdup(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_dup];
  IRRef args[2];
  args[0] = ASMREF_L;  /* lua_State *L    */
  args[1] = ir->op1;   /* const GCtab *kt */
  as->gcsteps++;
  asm_setupresult(as, ir, ci);  /* GCtab * */
  asm_gencall(as, ci, args);
}
/* -- PHI and loop handling ----------------------------------------------- */

/* Break a PHI cycle by renaming to a free register (evict if needed). */
static void asm_phi_break(ASMState *as, RegSet blocked, RegSet blockedby,
			  RegSet allow)
{
  RegSet candidates = blocked & allow;
  if (candidates) {  /* If this register file has candidates. */
    /* Note: the set for ra_pick cannot be empty, since each register file
    ** has some registers never allocated to PHIs.
    */
    Reg down, up = ra_pick(as, ~blocked & allow);  /* Get a free register. */
    if (candidates & ~blockedby)  /* Optimize shifts, else it's a cycle. */
      candidates = candidates & ~blockedby;
    down = rset_picktop(candidates);  /* Pick candidate PHI register. */
    ra_rename(as, down, up);  /* And rename it to the free register. */
  }
}
/* PHI register shuffling.
**
** The allocator tries hard to preserve PHI register assignments across
** the loop body. Most of the time this loop does nothing, since there
** are no register mismatches.
**
** If a register mismatch is detected and ...
** - the register is currently free: rename it.
** - the register is blocked by an invariant: restore/remat and rename it.
** - Otherwise the register is used by another PHI, so mark it as blocked.
**
** The renames are order-sensitive, so just retry the loop if a register
** is marked as blocked, but has been freed in the meantime. A cycle is
** detected if all of the blocked registers are allocated. To break the
** cycle rename one of them to a free register and retry.
**
** Note that PHI spill slots are kept in sync and don't need to be shuffled.
*/
static void asm_phi_shuffle(ASMState *as)
{
  RegSet work;

  /* Find and resolve PHI register mismatches. */
  for (;;) {
    RegSet blocked = RSET_EMPTY;
    RegSet blockedby = RSET_EMPTY;
    RegSet phiset = as->phiset;
    while (phiset) {  /* Check all left PHI operand registers. */
      Reg r = rset_pickbot(phiset);
      IRIns *irl = IR(as->phireg[r]);
      Reg left = irl->r;
      if (r != left) {  /* Mismatch? */
	if (!rset_test(as->freeset, r)) {  /* PHI register blocked? */
	  IRRef ref = regcost_ref(as->cost[r]);
	  /* Blocked by other PHI (w/reg)? */
	  if (!ra_iskref(ref) && irt_ismarked(IR(ref)->t)) {
	    rset_set(blocked, r);
	    if (ra_hasreg(left))
	      rset_set(blockedby, left);
	    left = RID_NONE;
	  } else {  /* Otherwise grab register from invariant. */
	    ra_restore(as, ref);
	  }
	}
	if (ra_hasreg(left)) {
	  ra_rename(as, left, r);
	}
      }
      rset_clear(phiset, r);
    }
    if (!blocked) break;  /* Finished. */
    if (!(as->freeset & blocked)) {  /* Break cycles if none are free. */
      asm_phi_break(as, blocked, blockedby, RSET_GPR);
      if (!LJ_SOFTFP) asm_phi_break(as, blocked, blockedby, RSET_FPR);
    }  /* Else retry some more renames. */
  }

  /* Restore/remat invariants whose registers are modified inside the loop. */
  work = as->modset & ~(as->freeset | as->phiset);
  while (work) {
    Reg r = rset_pickbot(work);
    ra_restore(as, regcost_ref(as->cost[r]));
    rset_clear(work, r);
  }

  /* Allocate and save all unsaved PHI regs and clear marks. */
  work = as->phiset;
  while (work) {
    Reg r = rset_picktop(work);
    IRRef lref = as->phireg[r];
    IRIns *ir = IR(lref);
    if (ra_hasspill(ir->s)) {  /* Left PHI gained a spill slot? */
      irt_clearmark(ir->t);  /* Handled here, so clear marker now. */
      ra_alloc1(as, lref, RID2RSET(r));
      ra_save(as, ir, r);  /* Save to spill slot inside the loop. */
    }
    rset_clear(work, r);
  }
}
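/* Illustrative cycle (x86-style names): PHI A must end the loop iteration in
** rax but currently sits in rcx, while PHI B must end in rcx but sits in rax.
** Neither rename can be emitted first, so asm_phi_break() renames one PHI to
** a free register, turning the cycle into three plain renames:
**
**   A: rcx -> rdx   (cycle break)
**   B: rax -> rcx
**   A: rdx -> rax
*/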
/* Emit renames for left PHIs which are only spilled outside the loop. */
static void asm_phi_fixup(ASMState *as)
{
  RegSet work = as->phiset;
  while (work) {
    Reg r = rset_picktop(work);
    IRRef lref = as->phireg[r];
    IRIns *ir = IR(lref);
    /* Left PHI gained a spill slot before the loop? */
    if (irt_ismarked(ir->t) && ra_hasspill(ir->s)) {
      IRRef ren;
      lj_ir_set(as->J, IRT(IR_RENAME, IRT_NIL), lref, as->loopsnapno);
      ren = tref_ref(lj_ir_emit(as->J));
      as->ir = as->T->ir;  /* The IR may have been reallocated. */
      IR(ren)->r = (uint8_t)r;
      IR(ren)->s = SPS_NONE;
    }
    irt_clearmark(ir->t);  /* Always clear marker. */
    rset_clear(work, r);
  }
}

/* Setup right PHI reference. */
static void asm_phi(ASMState *as, IRIns *ir)
{
  RegSet allow = ((!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR) &
		 ~as->phiset;
  RegSet afree = (as->freeset & allow);
  IRIns *irl = IR(ir->op1);
  IRIns *irr = IR(ir->op2);
  /* Spill slot shuffling is not implemented yet (but rarely needed). */
  if (ra_hasspill(irl->s) || ra_hasspill(irr->s))
    lj_trace_err(as->J, LJ_TRERR_NYIPHI);
  /* Leave at least one register free for non-PHIs (and PHI cycle breaking). */
  if ((afree & (afree-1))) {  /* Two or more free registers? */
    Reg r;
    if (ra_noreg(irr->r)) {  /* Get a register for the right PHI. */
      r = ra_allocref(as, ir->op2, allow);
    } else {  /* Duplicate right PHI, need a copy (rare). */
      r = ra_scratch(as, allow);
      emit_movrr(as, irr, r, irr->r);
    }
    ir->r = (uint8_t)r;
    rset_set(as->phiset, r);
    as->phireg[r] = (IRRef1)ir->op1;
    irt_setmark(irl->t);  /* Marks left PHIs _with_ register. */
    if (ra_noreg(irl->r))
      ra_sethint(irl->r, r);  /* Set register hint for left PHI. */
  } else {  /* Otherwise allocate a spill slot. */
    /* This is overly restrictive, but it triggers only on synthetic code. */
    if (ra_hasreg(irl->r) || ra_hasreg(irr->r))
      lj_trace_err(as->J, LJ_TRERR_NYIPHI);
    ra_spill(as, ir);
    irl->s = irr->s = ir->s;  /* Sync left/right PHI spill slots. */
  }
}

static void asm_gc_check(ASMState *as);
static void asm_loop_fixup(ASMState *as);

/* Middle part of a loop. */
static void asm_loop(ASMState *as)
{
  /* LOOP is a guard, so the snapno is up to date. */
  as->loopsnapno = as->snapno;
  /* LOOP marks the transition from the variant to the invariant part. */
  as->flagmcp = as->invmcp = NULL;
  as->sectref = 0;
  if (!neverfuse(as)) as->fuseref = 0;
  asm_phi_shuffle(as);
  asm_loop_fixup(as);
  as->mcloop = as->mcp;
  RA_DBGX((as, "===== LOOP ====="));
  if (!as->realign) RA_DBG_FLUSH();
}
/* -- Target-specific assembler ------------------------------------------- */

#if LJ_TARGET_X86ORX64
#include "lj_asm_x86.h"
#elif LJ_TARGET_ARM
#include "lj_asm_arm.h"
#elif LJ_TARGET_PPC
#include "lj_asm_ppc.h"
#elif LJ_TARGET_MIPS
#include "lj_asm_mips.h"
#else
#error "Missing assembler for target CPU"
#endif
/* -- Head of trace ------------------------------------------------------- */

/* Head of a root trace. */
static void asm_head_root(ASMState *as)
{
  int32_t spadj;
  asm_head_root_base(as);
  emit_setvmstate(as, (int32_t)as->T->traceno);
  spadj = asm_stack_adjust(as);
  as->T->spadjust = (uint16_t)spadj;
  emit_spsub(as, spadj);
  /* Root traces assume a checked stack for the starting proto. */
  as->T->topslot = gcref(as->T->startpt)->pt.framesize;
}

/* Get RegSP for parent slot. */
static LJ_AINLINE RegSP asm_head_parentrs(ASMState *as, IRIns *ir)
{
  if (ir->o == IR_HIOP) return as->parentmaphi[(ir-1)->op1];
  return as->parentmap[ir->op1];
}
/* Head of a side trace.
**
** The current simplistic algorithm requires that all slots inherited
** from the parent are live in a register between pass 2 and pass 3. This
** avoids the complexity of stack slot shuffling. But of course this may
** overflow the register set in some cases and cause the dreaded error:
** "NYI: register coalescing too complex". A refined algorithm is needed.
*/
static void asm_head_side(ASMState *as)
{
  IRRef1 sloadins[RID_MAX];
  RegSet allow = RSET_ALL;  /* Inverse of all coalesced registers. */
  RegSet live = RSET_EMPTY;  /* Live parent registers. */
  IRIns *irp = &as->parent->ir[REF_BASE];  /* Parent base. */
  int32_t spadj, spdelta;
  int pass2 = 0;
  int pass3 = 0;
  IRRef i;

  allow = asm_head_side_base(as, irp, allow);
  /* Scan all parent SLOADs and collect register dependencies. */
  for (i = as->stopins; i > REF_BASE; i--) {
    IRIns *ir = IR(i);
    RegSP rs;
    lua_assert((ir->o == IR_SLOAD && (ir->op2 & IRSLOAD_PARENT)) ||
	       (LJ_SOFTFP && ir->o == IR_HIOP));
    rs = asm_head_parentrs(as, ir);
    if (ra_hasreg(ir->r)) {
      rset_clear(allow, ir->r);
      if (ra_hasspill(ir->s))
	ra_save(as, ir, ir->r);
    } else if (ra_hasspill(ir->s)) {
      irt_setmark(ir->t);
      pass2 = 1;
    }
    if (ir->r == rs) {  /* Coalesce matching registers right now. */
      ra_free(as, ir->r);
    } else if (ra_hasspill(regsp_spill(rs))) {
      if (ra_hasreg(ir->r))
	pass3 = 1;
    } else if (ra_used(ir)) {
      sloadins[rs] = (IRRef1)i;
      rset_set(live, rs);  /* Block live parent register. */
    }
  }
  /* Calculate stack frame adjustment. */
  spadj = asm_stack_adjust(as);
  spdelta = spadj - (int32_t)as->parent->spadjust;
  if (spdelta < 0) {  /* Don't shrink the stack frame. */
    spadj = (int32_t)as->parent->spadjust;
    spdelta = 0;
  }
  as->T->spadjust = (uint16_t)spadj;

  /* Reload spilled target registers. */
  if (pass2) {
    for (i = as->stopins; i > REF_BASE; i--) {
      IRIns *ir = IR(i);
      if (irt_ismarked(ir->t)) {
	RegSet mask;
	Reg r;
	RegSP rs;
	irt_clearmark(ir->t);
	rs = asm_head_parentrs(as, ir);
	if (!ra_hasspill(regsp_spill(rs)))
	  ra_sethint(ir->r, rs);  /* Hint may be gone, set it again. */
	else if (sps_scale(regsp_spill(rs))+spdelta == sps_scale(ir->s))
	  continue;  /* Same spill slot, do nothing. */
	mask = ((!LJ_SOFTFP && irt_isnum(ir->t)) ? RSET_FPR : RSET_GPR) & allow;
	if (mask == RSET_EMPTY)
	  lj_trace_err(as->J, LJ_TRERR_NYICOAL);
	r = ra_allocref(as, i, mask);
	ra_save(as, ir, r);
	rset_clear(allow, r);
	if (r == rs) {  /* Coalesce matching registers right now. */
	  ra_free(as, r);
	  rset_clear(live, r);
	} else if (ra_hasspill(regsp_spill(rs))) {
	  pass3 = 1;
	}
      }
    }
  }
  /* Store trace number and adjust stack frame relative to the parent. */
  emit_setvmstate(as, (int32_t)as->T->traceno);
  emit_spsub(as, spdelta);

#if !LJ_TARGET_X86ORX64
  /* Restore BASE register from parent spill slot. */
  if (ra_hasspill(irp->s))
    emit_spload(as, IR(REF_BASE), IR(REF_BASE)->r, sps_scale(irp->s));
#endif

  /* Restore target registers from parent spill slots. */
  if (pass3) {
    RegSet work = ~as->freeset & RSET_ALL;
    while (work) {
      Reg r = rset_pickbot(work);
      IRIns *ir = IR(regcost_ref(as->cost[r]));
      RegSP rs = asm_head_parentrs(as, ir);
      rset_clear(work, r);
      if (ra_hasspill(regsp_spill(rs))) {
	int32_t ofs = sps_scale(regsp_spill(rs));
	ra_free(as, r);
	emit_spload(as, ir, r, ofs);
      }
    }
  }

  /* Shuffle registers to match up target regs with parent regs. */
  for (;;) {
    RegSet work;

    /* Repeatedly coalesce free live registers by moving to their target. */
    while ((work = as->freeset & live) != RSET_EMPTY) {
      Reg rp = rset_pickbot(work);
      IRIns *ir = IR(sloadins[rp]);
      rset_clear(live, rp);
      rset_clear(allow, rp);
      ra_free(as, ir->r);
      emit_movrr(as, ir, ir->r, rp);
    }

    /* We're done if no live registers remain. */
    if (live == RSET_EMPTY)
      break;

    /* Break cycles by renaming one target to a temp. register. */
    if (live & RSET_GPR) {
      RegSet tmpset = as->freeset & ~live & allow & RSET_GPR;
      if (tmpset == RSET_EMPTY)
	lj_trace_err(as->J, LJ_TRERR_NYICOAL);
      ra_rename(as, rset_pickbot(live & RSET_GPR), rset_pickbot(tmpset));
    }
    if (!LJ_SOFTFP && (live & RSET_FPR)) {
      RegSet tmpset = as->freeset & ~live & allow & RSET_FPR;
      if (tmpset == RSET_EMPTY)
	lj_trace_err(as->J, LJ_TRERR_NYICOAL);
      ra_rename(as, rset_pickbot(live & RSET_FPR), rset_pickbot(tmpset));
    }
    /* Continue with coalescing to fix up the broken cycle(s). */
  }
  /* Inherit top stack slot already checked by parent trace. */
  as->T->topslot = as->parent->topslot;
  if (as->topslot > as->T->topslot) {  /* Need to check for higher slot? */
#ifdef EXITSTATE_CHECKEXIT
    /* Highest exit + 1 indicates stack check. */
    ExitNo exitno = as->T->nsnap;
#else
    /* Reuse the parent exit in the context of the parent trace. */
    ExitNo exitno = as->J->exitno;
#endif
    as->T->topslot = (uint8_t)as->topslot;  /* Remember for child traces. */
    asm_stack_check(as, as->topslot, irp, allow & RSET_GPR, exitno);
  }
}
/* -- Tail of trace ------------------------------------------------------- */

/* Get base slot for a snapshot. */
static BCReg asm_baseslot(ASMState *as, SnapShot *snap, int *gotframe)
{
  SnapEntry *map = &as->T->snapmap[snap->mapofs];
  MSize n;
  for (n = snap->nent; n > 0; n--) {
    SnapEntry sn = map[n-1];
    if ((sn & SNAP_FRAME)) {
      *gotframe = 1;
      return snap_slot(sn);
    }
  }
  return 0;
}
/* Link to another trace. */
static void asm_tail_link(ASMState *as)
{
  SnapNo snapno = as->T->nsnap-1;  /* Last snapshot. */
  SnapShot *snap = &as->T->snap[snapno];
  int gotframe = 0;
  BCReg baseslot = asm_baseslot(as, snap, &gotframe);

  as->topslot = snap->topslot;
  ra_allocref(as, REF_BASE, RID2RSET(RID_BASE));

  if (as->T->link == 0) {
    /* Setup fixed registers for exit to interpreter. */
    const BCIns *pc = snap_pc(as->T->snapmap[snap->mapofs + snap->nent]);
    int32_t mres;
    if (bc_op(*pc) == BC_JLOOP) {  /* NYI: find a better way to do this. */
      BCIns *retpc = &traceref(as->J, bc_d(*pc))->startins;
      if (bc_isret(bc_op(*retpc)))
	pc = retpc;
    }
    ra_allockreg(as, i32ptr(J2GG(as->J)->dispatch), RID_DISPATCH);
    ra_allockreg(as, i32ptr(pc), RID_LPC);
    mres = (int32_t)(snap->nslots - baseslot);
    switch (bc_op(*pc)) {
    case BC_CALLM: case BC_CALLMT:
      mres -= (int32_t)(1 + bc_a(*pc) + bc_c(*pc)); break;
    case BC_RETM: mres -= (int32_t)(bc_a(*pc) + bc_d(*pc)); break;
    case BC_TSETM: mres -= (int32_t)bc_a(*pc); break;
    default: if (bc_op(*pc) < BC_FUNCF) mres = 0; break;
    }
    ra_allockreg(as, mres, RID_RET);  /* Return MULTRES or 0. */
  } else if (baseslot) {
    /* Save modified BASE for linking to trace with higher start frame. */
    emit_setgl(as, RID_BASE, jit_base);
  }
  emit_addptr(as, RID_BASE, 8*(int32_t)baseslot);

  /* Sync the interpreter state with the on-trace state. */
  asm_stack_restore(as, snap);

  /* Root traces that add frames need to check the stack at the end. */
  if (!as->parent && gotframe)
    asm_stack_check(as, as->topslot, NULL, as->freeset & RSET_GPR, snapno);
}
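/* Worked example (illustrative): a trace exiting to the interpreter at a
** BC_CALLM with A=1, C=0, a last snapshot with nslots=5 and baseslot=0 gives
**
**   mres = (5 - 0) - (1 + 1 + 0) = 3
**
** so RID_RET is preloaded with 3 and the interpreter resumes with the correct
** MULTRES.
*/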
/* -- Trace setup --------------------------------------------------------- */

/* Clear reg/sp for all instructions and add register hints. */
static void asm_setup_regsp(ASMState *as)
{
  GCtrace *T = as->T;
  IRRef nins = T->nins;
  IRRef i;
  int inloop;
  uint32_t rload = 0xa6402a64;

  /* Clear reg/sp for constants. */
  for (i = T->nk; i < REF_BIAS; i++)
    IR(i)->prev = REGSP_INIT;

  /* REF_BASE is used for implicit references to the BASE register. */
  IR(REF_BASE)->prev = REGSP_HINT(RID_BASE);

  if (IR(nins-1)->o == IR_RENAME) {
    do { nins--; } while (IR(nins-1)->o == IR_RENAME);
    T->nins = nins;  /* Remove any renames left over from ASM restart. */
  }
  as->snaprename = nins;
  as->snapref = nins;
  as->snapno = T->nsnap;

  as->stopins = REF_BASE;
  as->orignins = nins;
  inloop = 0;
  as->evenspill = SPS_FIRST;
  for (i = REF_FIRST; i < nins; i++) {
    IRIns *ir = IR(i);
    switch (ir->o) {
    case IR_LOOP:
      inloop = 1;
      break;
    /* Set hints for slot loads from a parent trace. */
    case IR_SLOAD:
      if ((ir->op2 & IRSLOAD_PARENT)) {
	RegSP rs = as->parentmap[ir->op1];
	lua_assert(regsp_used(rs));
	as->stopins = i;
	if (!ra_hasspill(regsp_spill(rs)) && ra_hasreg(regsp_reg(rs))) {
	  ir->prev = (uint16_t)REGSP_HINT(regsp_reg(rs));
	  continue;
	}
      }
      if ((ir->op2 & IRSLOAD_TYPECHECK) || (ir+1)->o == IR_HIOP) {
	ir->prev = (uint16_t)REGSP_HINT((rload & 15));
	rload = lj_ror(rload, 4);
	continue;
      }
      break;
    case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
      ir->prev = (uint16_t)REGSP_HINT((rload & 15));
      rload = lj_ror(rload, 4);
      break;
    case IR_CALLXS: {
      CCallInfo ci;
      ci.flags = asm_callx_flags(as, ir);
      ir->prev = asm_setup_call_slots(as, ir, &ci);
      if (inloop)
	as->modset |= RSET_SCRATCH;
      continue;
      }
    case IR_CALLN: case IR_CALLL: case IR_CALLS: {
      const CCallInfo *ci = &lj_ir_callinfo[ir->op2];
      ir->prev = asm_setup_call_slots(as, ir, ci);
      if (inloop)
	as->modset |= (ci->flags & CCI_NOFPRCLOBBER) ?
		      (RSET_SCRATCH & ~RSET_FPR) : RSET_SCRATCH;
      continue;
      }
#if LJ_SOFTFP || (LJ_32 && LJ_HASFFI)
    case IR_HIOP:
      switch ((ir-1)->o) {
      case IR_SLOAD:
	if (((ir-1)->op2 & IRSLOAD_PARENT)) {
	  RegSP rs = as->parentmaphi[(ir-1)->op1];
	  lua_assert(regsp_used(rs));
	  as->stopins = i;
	  if (!ra_hasspill(regsp_spill(rs)) && ra_hasreg(regsp_reg(rs))) {
	    ir->prev = (uint16_t)REGSP_HINT(regsp_reg(rs));
	    continue;
	  }
	}
	break;
      case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
	if (ra_hashint((ir-1)->r)) {
	  ir->prev = (ir-1)->prev + 1;
	  continue;
	}
	break;
      case IR_CONV:
	if (irt_isfp((ir-1)->t)) {
	  ir->prev = REGSP_HINT(RID_FPRET);
	  continue;
	}
	/* fallthrough */
      case IR_CALLN: case IR_CALLXS:
      case IR_MIN: case IR_MAX:
	(ir-1)->prev = REGSP_HINT(RID_RETLO);
	ir->prev = REGSP_HINT(RID_RETHI);
	continue;
      default:
	break;
      }
      break;
#endif
    case IR_MIN: case IR_MAX:
      if ((ir+1)->o != IR_HIOP) break;
      /* fallthrough */
    /* C calls evict all scratch regs and return results in RID_RET. */
    case IR_SNEW: case IR_XSNEW: case IR_NEWREF:
      if (REGARG_NUMGPR < 3 && as->evenspill < 3)
	as->evenspill = 3;  /* lj_str_new and lj_tab_newkey need 3 args. */
      /* fallthrough */
    case IR_TNEW: case IR_TDUP: case IR_CNEW: case IR_CNEWI: case IR_TOSTR:
      ir->prev = REGSP_HINT(RID_RET);
      if (inloop)
	as->modset = RSET_SCRATCH;
      continue;
    case IR_STRTO: case IR_OBAR:
      if (inloop)
	as->modset = RSET_SCRATCH;
      break;
#if !LJ_TARGET_X86ORX64 && !LJ_SOFTFP
    case IR_ATAN2: case IR_LDEXP:
#endif
    case IR_POW:
      if (!LJ_SOFTFP && irt_isnum(ir->t)) {
#if LJ_TARGET_X86ORX64
	ir->prev = REGSP_HINT(RID_XMM0);
	if (inloop)
	  as->modset |= RSET_RANGE(RID_XMM0, RID_XMM1+1)|RID2RSET(RID_EAX);
#else
	ir->prev = REGSP_HINT(RID_FPRET);
	if (inloop)
	  as->modset |= RSET_SCRATCH;
#endif
	continue;
      }
      /* fallthrough for integer POW */
    case IR_DIV: case IR_MOD:
      if (!irt_isnum(ir->t)) {
	ir->prev = REGSP_HINT(RID_RET);
	if (inloop)
	  as->modset |= (RSET_SCRATCH & RSET_GPR);
	continue;
      }
      break;
    case IR_FPMATH:
#if LJ_TARGET_X86ORX64
      if (ir->op2 == IRFPM_EXP2) {  /* May be joined to lj_vm_pow_sse. */
	ir->prev = REGSP_HINT(RID_XMM0);
	if (as->evenspill < 4)  /* Leave room for 16 byte scratch area. */
	  as->evenspill = 4;
	if (inloop)
	  as->modset |= RSET_RANGE(RID_XMM0, RID_XMM2+1)|RID2RSET(RID_EAX);
	continue;
      } else if (ir->op2 <= IRFPM_TRUNC && !(as->flags & JIT_F_SSE4_1)) {
	ir->prev = REGSP_HINT(RID_XMM0);
	if (inloop)
	  as->modset |= RSET_RANGE(RID_XMM0, RID_XMM3+1)|RID2RSET(RID_EAX);
	continue;
      }
      break;
#else
      ir->prev = REGSP_HINT(RID_FPRET);
      if (inloop)
	as->modset |= RSET_SCRATCH;
      continue;
#endif
#if LJ_TARGET_X86ORX64
    /* Non-constant shift counts need to be in RID_ECX on x86/x64. */
    case IR_BSHL: case IR_BSHR: case IR_BSAR: case IR_BROL: case IR_BROR:
      if (!irref_isk(ir->op2) && !ra_hashint(IR(ir->op2)->r)) {
	IR(ir->op2)->r = REGSP_HINT(RID_ECX);
	if (inloop)
	  rset_set(as->modset, RID_ECX);
      }
      break;
#endif
    /* Do not propagate hints across type conversions. */
    case IR_CONV:
      if (irt_isfp(ir->t) || (ir->op2 & IRCONV_SRCMASK) == IRT_NUM ||
	  (ir->op2 & IRCONV_SRCMASK) == IRT_FLOAT)
	break;
      /* fallthrough */
    default:
      /* Propagate hints across likely 'op reg, imm' or 'op reg'. */
      if (irref_isk(ir->op2) && !irref_isk(ir->op1)) {
	ir->prev = IR(ir->op1)->prev;
	continue;
      }
      break;
    }
    ir->prev = REGSP_INIT;
  }
  if ((as->evenspill & 1))
    as->oddspill = as->evenspill++;
  else
    as->oddspill = 0;
}
/* -- Assembler core ------------------------------------------------------ */

/* Assemble a trace. */
void lj_asm_trace(jit_State *J, GCtrace *T)
{
  ASMState as_;
  ASMState *as = &as_;
  MCode *origtop;

  /* Ensure an initialized instruction beyond the last one for HIOP checks. */
  J->cur.nins = lj_ir_nextins(J);
  J->cur.ir[J->cur.nins].o = IR_NOP;

  /* Setup initial state. Copy some fields to reduce indirections. */
  as->J = J;
  as->T = T;
  as->ir = T->ir;
  as->flags = J->flags;
  as->loopref = J->loopref;
  as->realign = NULL;
  as->loopinv = 0;
  if (J->parent) {
    as->parent = traceref(J, J->parent);
    lj_snap_regspmap(as->parentmap, as->parent, J->exitno, 0);
    lj_snap_regspmap(as->parentmaphi, as->parent, J->exitno, 1);
  } else {
    as->parent = NULL;
  }
  /* Reserve MCode memory. */
  as->mctop = origtop = lj_mcode_reserve(J, &as->mcbot);
  as->mcp = as->mctop;
  as->mclim = as->mcbot + MCLIM_REDZONE;
  asm_setup_target(as);

  do {
    as->mcp = as->mctop;
    as->curins = T->nins;
    RA_DBG_START();
    RA_DBGX((as, "===== STOP ====="));

    /* General trace setup. Emit tail of trace. */
    as->sectref = as->loopref;
    as->fuseref = (as->flags & JIT_F_OPT_FUSE) ? as->loopref : FUSE_DISABLED;
    asm_setup_regsp(as);
    /* Assemble a trace in linear backwards order. */
    for (as->curins--; as->curins > as->stopins; as->curins--) {
      IRIns *ir = IR(as->curins);
      lua_assert(!(LJ_32 && irt_isint64(ir->t)));  /* Handled by SPLIT. */
      if (!ra_used(ir) && !ir_sideeff(ir) && (as->flags & JIT_F_OPT_DCE))
	continue;  /* Dead-code elimination can be soooo easy. */
      if (irt_isguard(ir->t))
	asm_snap_prep(as);
      checkmclim(as);
      asm_ir(as, ir);
    }
  } while (as->realign);  /* Retry in case the MCode needs to be realigned. */

  /* Emit head of trace. */
  if (as->gcsteps > 0) {
    as->curins = as->T->snap[0].ref;
    asm_snap_prep(as);  /* The GC check is a guard. */
    asm_gc_check(as);
  }
  RA_DBGX((as, "===== START ===="));

  if (as->freeset != RSET_ALL)
    lj_trace_err(as->J, LJ_TRERR_BADRA);  /* Ouch! Should never happen. */

  /* Set trace entry point before fixing up tail to allow link to self. */
  T->mcode = as->mcp;
  T->mcloop = as->mcloop ? (MSize)((char *)as->mcloop - (char *)as->mcp) : 0;
  asm_tail_fixup(as, T->link);  /* Note: this may change as->mctop! */
  T->szmcode = (MSize)((char *)as->mctop - (char *)as->mcp);
  lj_mcode_sync(T->mcode, origtop);
}