/*
** LOOP: Loop Optimizations.
** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
*/

#define lj_opt_loop_c
#define LUA_CORE

#include "lj_obj.h"

#if LJ_HASJIT

#include "lj_err.h"
#include "lj_str.h"
#include "lj_ir.h"
#include "lj_jit.h"
#include "lj_iropt.h"
#include "lj_trace.h"
#include "lj_snap.h"
#include "lj_vm.h"

/* Loop optimization:
**
** Traditional Loop-Invariant Code Motion (LICM) splits the instructions
** of a loop into invariant and variant instructions. The invariant
** instructions are hoisted out of the loop and only the variant
** instructions remain inside the loop body.
**
** Unfortunately LICM is mostly useless for compiling dynamic languages.
** The IR has many guards and most of the subsequent instructions are
** control-dependent on them. The first non-hoistable guard would
** effectively prevent hoisting of all subsequent instructions.
**
** That's why we use a special form of unrolling using copy-substitution,
** combined with redundancy elimination:
**
** The recorded instruction stream is re-emitted to the compiler pipeline
** with substituted operands. The substitution table is filled with the
** refs returned by re-emitting each instruction. This can be done
** on-the-fly, because the IR is in strict SSA form, where every ref is
** defined before its use.
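**
** A minimal sketch of that substitution step (illustrative pseudo-code,
** not the actual pipeline below; 're_emit' is a hypothetical stand-in
** for the emitir/FOLD chain):
**
**   for (ins = REF_FIRST; ins < invar; ins++) {
**     IRIns *ir = IR(ins);
**     IRRef op1 = irref_isk(ir->op1) ? ir->op1 : subst[ir->op1];
**     IRRef op2 = irref_isk(ir->op2) ? ir->op2 : subst[ir->op2];
**     subst[ins] = re_emit(ir->o, op1, op2);
**   }
**
** Operands always point backwards, so each subst[] entry is written
** before it is ever read, and re_emit may CSE to an already emitted ref.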
**
** This approach generates two code sections, separated by the LOOP
** instruction:
**
** 1. The recorded instructions form a kind of pre-roll for the loop. It
**    contains a mix of invariant and variant instructions and performs
**    exactly one loop iteration (but not necessarily the 1st iteration).
**
** 2. The loop body contains only the variant instructions and performs
**    all remaining loop iterations (see the schematic example below).
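**
** Schematically, a hypothetical trace for 'while i < n do i = i + 1 end'
** might end up as (illustrative IR listing, not actual compiler output):
**
**   0001 >  int SLOAD  #1          ; i (variant)
**   0002 >  int SLOAD  #2          ; n (invariant)
**   0003 >  int LT     0001  0002  ; guard i < n
**   0004  + int ADD    0001  +1    ; i = i + 1
**   ------  LOOP -----------------
**   0005 >  int LT     0004  0002  ; only the variant part repeats
**   0006  + int ADD    0004  +1
**   0007    int PHI    0004  0006  ; loop-carried value of i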
**
** On first sight that looks like a waste of space, because the variant
** instructions are present twice. But the key insight is that the
** pre-roll honors the control-dependencies for *both* the pre-roll itself
** *and* the loop body!
**
** It also means one doesn't have to explicitly model control-dependencies
** (which, BTW, wouldn't help LICM much). And it's much easier to
** integrate sparse snapshotting with this approach.
**
** One of the nicest aspects of this approach is that all of the
** optimizations of the compiler pipeline (FOLD, CSE, FWD, etc.) can be
** reused with only minor restrictions (e.g. one should not fold
** instructions across loop-carried dependencies).
**
** But in general all optimizations can be applied which only need to look
** backwards into the generated instruction stream. At any point in time
** during the copy-substitution process this contains both a static loop
** iteration (the pre-roll) and a dynamic one (from the to-be-copied
** instruction up to the end of the partial loop body).
**
** Since control-dependencies are implicitly kept, CSE also applies to all
** kinds of guards. The major advantage is that all invariant guards can
** be hoisted, too.
**
** Load/store forwarding works across loop iterations, too. This is
** important if loop-carried dependencies are kept in upvalues or tables.
** E.g. 'self.idx = self.idx + 1' deep down in some OO-style method may
** become a forwarded loop-recurrence after inlining.
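**
** For the increment above, a schematic (hypothetical) IR fragment:
**
**   0010    int FLOAD  self.idx    ; load in the pre-roll
**   0011  + int ADD    0010  +1
**   0012    nil FSTORE self.idx  0011
**   ------  LOOP -----------------
**   ; FWD forwards the next iteration's FLOAD to 0011, so only the
**   ; ADD/FSTORE and a PHI for the recurrence remain in the loop body.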
**
** Since the IR is in SSA form, loop-carried dependencies have to be
** modeled with PHI instructions. The potential candidates for PHIs are
** collected on-the-fly during copy-substitution. After eliminating the
** redundant ones, PHI instructions are emitted *below* the loop body.
**
** Note that this departure from traditional SSA form doesn't change the
** semantics of the PHI instructions themselves. But it greatly simplifies
** on-the-fly generation of the IR and the machine code.
*/
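
/* Reading a PHI emitted below the loop body (schematic, hypothetical
** refs): PHI(lref, rref) means the value enters the loop as lref (the
** pre-roll ref) and becomes rref (the loop-body ref) on every back-edge.
** The backend can then try to assign lref and rref to the same register,
** avoiding an explicit move at the loop head.
*/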

/* Some local macros to save typing. Undef'd at the end. */
#define IR(ref)			(&J->cur.ir[(ref)])

/* Pass IR on to next optimization in chain (FOLD). */
#define emitir(ot, a, b)	(lj_ir_set(J, (ot), (a), (b)), lj_opt_fold(J))

/* Emit raw IR without passing through optimizations. */
#define emitir_raw(ot, a, b)	(lj_ir_set(J, (ot), (a), (b)), lj_ir_emit(J))
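
/* For example (illustrative only): re-emitting an ADD through emitir()
** may fold into a constant or CSE to an existing ref,
**
**   IRRef ref = tref_ref(emitir(IRTI(IR_ADD), op1, op2));
**
** whereas emitir_raw() appends the instruction verbatim. LOOP and PHI
** are emitted raw below, because they must not be folded or eliminated.
*/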

/* -- PHI elimination ----------------------------------------------------- */

/* Emit or eliminate collected PHIs. */
static void loop_emit_phi(jit_State *J, IRRef1 *subst, IRRef1 *phi, IRRef nphi,
			  SnapNo onsnap)
{
  int passx = 0;
  IRRef i, nslots;
  IRRef invar = J->chain[IR_LOOP];
  /* Pass #1: mark redundant and potentially redundant PHIs. */
  for (i = 0; i < nphi; i++) {
    IRRef lref = phi[i];
    IRRef rref = subst[lref];
    if (lref == rref || rref == REF_DROP) {  /* Invariants are redundant. */
      irt_setmark(IR(lref)->t);
    } else if (!(IR(rref)->op1 == lref || IR(rref)->op2 == lref)) {
      /* Quick check for simple recurrences failed, need pass2. */
      irt_setmark(IR(lref)->t);
      passx = 1;
    }
  }
  /* Pass #2: traverse variant part and clear marks of non-redundant PHIs. */
  if (passx) {
    SnapNo s;
    for (i = J->cur.nins-1; i > invar; i--) {
      IRIns *ir = IR(i);
      if (!irref_isk(ir->op2)) irt_clearmark(IR(ir->op2)->t);
      if (!irref_isk(ir->op1)) {
	irt_clearmark(IR(ir->op1)->t);
	if (ir->op1 < invar &&
	    ir->o >= IR_CALLN && ir->o <= IR_CARG) {  /* ORDER IR */
	  ir = IR(ir->op1);
	  while (ir->o == IR_CARG) {
	    if (!irref_isk(ir->op2)) irt_clearmark(IR(ir->op2)->t);
	    if (irref_isk(ir->op1)) break;
	    ir = IR(ir->op1);
	    irt_clearmark(ir->t);
	  }
	}
      }
    }
    for (s = J->cur.nsnap-1; s >= onsnap; s--) {
      SnapShot *snap = &J->cur.snap[s];
      SnapEntry *map = &J->cur.snapmap[snap->mapofs];
      MSize n, nent = snap->nent;
      for (n = 0; n < nent; n++) {
	IRRef ref = snap_ref(map[n]);
	if (!irref_isk(ref)) irt_clearmark(IR(ref)->t);
      }
    }
  }
  /* Pass #3: add PHIs for variant slots without a corresponding SLOAD. */
  nslots = J->baseslot+J->maxslot;
  for (i = 1; i < nslots; i++) {
    IRRef ref = tref_ref(J->slot[i]);
    while (!irref_isk(ref) && ref != subst[ref]) {
      IRIns *ir = IR(ref);
      irt_clearmark(ir->t);  /* Unmark potential uses, too. */
      if (irt_isphi(ir->t) || irt_ispri(ir->t))
	break;
      irt_setphi(ir->t);
      if (nphi >= LJ_MAX_PHI)
	lj_trace_err(J, LJ_TRERR_PHIOV);
      phi[nphi++] = (IRRef1)ref;
      ref = subst[ref];
      if (ref > invar)
	break;
    }
  }
  /* Pass #4: propagate non-redundant PHIs. */
  while (passx) {
    passx = 0;
    for (i = 0; i < nphi; i++) {
      IRRef lref = phi[i];
      IRIns *ir = IR(lref);
      if (!irt_ismarked(ir->t)) {  /* Propagate only from unmarked PHIs. */
	IRRef rref = subst[lref];
	if (lref == rref) {  /* Mark redundant PHI. */
	  irt_setmark(ir->t);
	} else {
	  IRIns *irr = IR(rref);
	  if (irt_ismarked(irr->t)) {  /* Right ref points to other PHI? */
	    irt_clearmark(irr->t);  /* Mark that PHI as non-redundant. */
	    passx = 1;  /* Retry. */
	  }
	}
      }
    }
  }
  /* Pass #5: emit PHI instructions or eliminate PHIs. */
  for (i = 0; i < nphi; i++) {
    IRRef lref = phi[i];
    IRIns *ir = IR(lref);
    if (!irt_ismarked(ir->t)) {  /* Emit PHI if not marked. */
      IRRef rref = subst[lref];
      if (rref > invar)
	irt_setphi(IR(rref)->t);
      emitir_raw(IRT(IR_PHI, irt_type(ir->t)), lref, rref);
    } else {  /* Otherwise eliminate PHI. */
      irt_clearmark(ir->t);
      irt_clearphi(ir->t);
    }
  }
}
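
/* For intuition (hypothetical refs): a PHI whose right ref equals its
** left ref (subst[lref] == lref) never changes across iterations and is
** eliminated. A PHI referenced only by another redundant PHI is itself
** redundant, which is why pass #4 iterates until a fixed point.
*/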

/* -- Loop unrolling using copy-substitution ------------------------------ */

/* Copy-substitute snapshot. */
static void loop_subst_snap(jit_State *J, SnapShot *osnap,
			    SnapEntry *loopmap, IRRef1 *subst)
{
  SnapEntry *nmap, *omap = &J->cur.snapmap[osnap->mapofs];
  SnapEntry *nextmap = &J->cur.snapmap[snap_nextofs(&J->cur, osnap)];
  MSize nmapofs;
  MSize on, ln, nn, onent = osnap->nent;
  BCReg nslots = osnap->nslots;
  SnapShot *snap = &J->cur.snap[J->cur.nsnap];
  if (irt_isguard(J->guardemit)) {  /* Guard inbetween? */
    nmapofs = J->cur.nsnapmap;
    J->cur.nsnap++;  /* Add new snapshot. */
  } else {  /* Otherwise overwrite previous snapshot. */
    snap--;
    nmapofs = snap->mapofs;
  }
  J->guardemit.irt = 0;
  /* Setup new snapshot. */
  snap->mapofs = (uint16_t)nmapofs;
  snap->ref = (IRRef1)J->cur.nins;
  snap->nslots = nslots;
  snap->topslot = osnap->topslot;
  snap->count = 0;
  nmap = &J->cur.snapmap[nmapofs];
  /* Substitute snapshot slots. */
  on = ln = nn = 0;
  while (on < onent) {
    SnapEntry osn = omap[on], lsn = loopmap[ln];
    if (snap_slot(lsn) < snap_slot(osn)) {  /* Copy slot from loop map. */
      nmap[nn++] = lsn;
      ln++;
    } else {  /* Copy substituted slot from snapshot map. */
      if (snap_slot(lsn) == snap_slot(osn)) ln++;  /* Shadowed loop slot. */
      if (!irref_isk(snap_ref(osn)))
	osn = snap_setref(osn, subst[snap_ref(osn)]);
      nmap[nn++] = osn;
      on++;
    }
  }
  while (snap_slot(loopmap[ln]) < nslots)  /* Copy remaining loop slots. */
    nmap[nn++] = loopmap[ln++];
  snap->nent = (uint8_t)nn;
  omap += onent;
  nmap += nn;
  while (omap < nextmap)  /* Copy PC + frame links. */
    *nmap++ = *omap++;
  J->cur.nsnapmap = (uint16_t)(nmap - J->cur.snapmap);
}
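
/* Example of the merge above (schematic): with loop snapshot slots
** {1,3,5} and old snapshot slots {3,4}, the new snapshot receives slot 1
** from the loop map, slots 3 and 4 (operand-substituted) from the old
** map, and slot 5 from the loop map again, keeping entries sorted by
** slot number.
*/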

/* Unroll loop. */
static void loop_unroll(jit_State *J)
{
  IRRef1 phi[LJ_MAX_PHI];
  uint32_t nphi = 0;
  IRRef1 *subst;
  SnapNo onsnap;
  SnapShot *osnap, *loopsnap;
  SnapEntry *loopmap, *psentinel;
  IRRef ins, invar;

  /* Use temp buffer for substitution table.
  ** Only non-constant refs in [REF_BIAS,invar) are valid indexes.
  ** Caveat: don't call into the VM or run the GC or the buffer may be gone.
  */
  invar = J->cur.nins;
  subst = (IRRef1 *)lj_str_needbuf(J->L, &G(J->L)->tmpbuf,
				   (invar-REF_BIAS)*sizeof(IRRef1)) - REF_BIAS;
  subst[REF_BASE] = REF_BASE;

  /* LOOP separates the pre-roll from the loop body. */
  emitir_raw(IRTG(IR_LOOP, IRT_NIL), 0, 0);

  /* Grow snapshot buffer and map for copy-substituted snapshots.
  ** Need up to twice the number of snapshots minus #0 and loop snapshot.
  ** Need up to twice the number of entries plus fallback substitutions
  ** from the loop snapshot entries for each new snapshot.
  ** Caveat: both calls may reallocate J->cur.snap and J->cur.snapmap!
  */
  onsnap = J->cur.nsnap;
  lj_snap_grow_buf(J, 2*onsnap-2);
  lj_snap_grow_map(J, J->cur.nsnapmap*2+(onsnap-2)*J->cur.snap[onsnap-1].nent);

  /* The loop snapshot is used for fallback substitutions. */
  loopsnap = &J->cur.snap[onsnap-1];
  loopmap = &J->cur.snapmap[loopsnap->mapofs];
  /* The PC of snapshot #0 and the loop snapshot must match. */
  psentinel = &loopmap[loopsnap->nent];
  lua_assert(*psentinel == J->cur.snapmap[J->cur.snap[0].nent]);
  *psentinel = SNAP(255, 0, 0);  /* Replace PC with temporary sentinel. */

  /* Start substitution with snapshot #1 (#0 is empty for root traces). */
  osnap = &J->cur.snap[1];

  /* Copy and substitute all recorded instructions and snapshots. */
  for (ins = REF_FIRST; ins < invar; ins++) {
    IRIns *ir;
    IRRef op1, op2;

    if (ins >= osnap->ref)  /* Instruction belongs to next snapshot? */
      loop_subst_snap(J, osnap++, loopmap, subst);  /* Copy-substitute it. */

    /* Substitute instruction operands. */
    ir = IR(ins);
    op1 = ir->op1;
    if (!irref_isk(op1)) op1 = subst[op1];
    op2 = ir->op2;
    if (!irref_isk(op2)) op2 = subst[op2];
    if (irm_kind(lj_ir_mode[ir->o]) == IRM_N &&
	op1 == ir->op1 && op2 == ir->op2) {  /* Regular invariant ins? */
      subst[ins] = (IRRef1)ins;  /* Shortcut. */
    } else {
      /* Re-emit substituted instruction to the FOLD/CSE/etc. pipeline. */
      IRType1 t = ir->t;  /* Get this first, since emitir may invalidate ir. */
      IRRef ref = tref_ref(emitir(ir->ot & ~IRT_ISPHI, op1, op2));
      subst[ins] = (IRRef1)ref;
      if (ref != ins) {
	IRIns *irr = IR(ref);
	if (ref < invar) {  /* Loop-carried dependency? */
	  /* Potential PHI? */
	  if (!irref_isk(ref) && !irt_isphi(irr->t) && !irt_ispri(irr->t)) {
	    irt_setphi(irr->t);
	    if (nphi >= LJ_MAX_PHI)
	      lj_trace_err(J, LJ_TRERR_PHIOV);
	    phi[nphi++] = (IRRef1)ref;
	  }
	  /* Check all loop-carried dependencies for type instability. */
	  if (!irt_sametype(t, irr->t)) {
	    if (irt_isinteger(t) && irt_isinteger(irr->t))
	      continue;
	    else if (irt_isnum(t) && irt_isinteger(irr->t))  /* Fix int->num. */
	      ref = tref_ref(emitir(IRTN(IR_CONV), ref, IRCONV_NUM_INT));
	    else if (irt_isnum(irr->t) && irt_isinteger(t))  /* Fix num->int. */
	      ref = tref_ref(emitir(IRTGI(IR_CONV), ref,
				    IRCONV_INT_NUM|IRCONV_CHECK));
	    else
	      lj_trace_err(J, LJ_TRERR_TYPEINS);
	    subst[ins] = (IRRef1)ref;
	  }
	} else if (ref != REF_DROP && irr->o == IR_CONV &&
		   ref > invar && irr->op1 < invar) {
	  /* May need an extra PHI for a CONV. */
	  ref = irr->op1;
	  irr = IR(ref);
	  if (ref < invar && !irref_isk(ref) && !irt_isphi(irr->t)) {
	    irt_setphi(irr->t);
	    if (nphi >= LJ_MAX_PHI)
	      lj_trace_err(J, LJ_TRERR_PHIOV);
	    phi[nphi++] = (IRRef1)ref;
	  }
	}
      }
    }
  }
  if (!irt_isguard(J->guardemit))  /* Drop redundant snapshot. */
    J->cur.nsnapmap = (uint16_t)J->cur.snap[--J->cur.nsnap].mapofs;
  lua_assert(J->cur.nsnapmap <= J->sizesnapmap);
  *psentinel = J->cur.snapmap[J->cur.snap[0].nent];  /* Restore PC. */

  loop_emit_phi(J, subst, phi, nphi, onsnap);
}

/* Undo any partial changes made by the loop optimization. */
static void loop_undo(jit_State *J, IRRef ins, SnapNo nsnap, MSize nsnapmap)
{
  ptrdiff_t i;
  SnapShot *snap = &J->cur.snap[nsnap-1];
  SnapEntry *map = J->cur.snapmap;
  map[snap->mapofs + snap->nent] = map[J->cur.snap[0].nent];  /* Restore PC. */
  J->cur.nsnapmap = (uint16_t)nsnapmap;
  J->cur.nsnap = nsnap;
  J->guardemit.irt = 0;
  lj_ir_rollback(J, ins);
  for (i = 0; i < BPROP_SLOTS; i++) {  /* Remove backprop. cache entries. */
    BPropEntry *bp = &J->bpropcache[i];
    if (bp->val >= ins)
      bp->key = 0;
  }
  for (ins--; ins >= REF_FIRST; ins--) {  /* Remove flags. */
    IRIns *ir = IR(ins);
    irt_clearphi(ir->t);
    irt_clearmark(ir->t);
  }
}

/* Protected callback for loop optimization. */
static TValue *cploop_opt(lua_State *L, lua_CFunction dummy, void *ud)
{
  UNUSED(L); UNUSED(dummy);
  loop_unroll((jit_State *)ud);
  return NULL;
}

/* Loop optimization. */
int lj_opt_loop(jit_State *J)
{
  IRRef nins = J->cur.nins;
  SnapNo nsnap = J->cur.nsnap;
  MSize nsnapmap = J->cur.nsnapmap;
  int errcode = lj_vm_cpcall(J->L, NULL, J, cploop_opt);
  if (LJ_UNLIKELY(errcode)) {
    lua_State *L = J->L;
    if (errcode == LUA_ERRRUN && tvisnumber(L->top-1)) {  /* Trace error? */
      int32_t e = numberVint(L->top-1);
      switch ((TraceError)e) {
      case LJ_TRERR_TYPEINS:  /* Type instability. */
      case LJ_TRERR_GFAIL:  /* Guard would always fail. */
	/* Unrolling via recording fixes many cases, e.g. a flipped boolean. */
	if (--J->instunroll < 0)  /* But do not unroll forever. */
	  break;
	L->top--;  /* Remove error object. */
	loop_undo(J, nins, nsnap, nsnapmap);
	return 1;  /* Loop optimization failed, continue recording. */
      default:
	break;
      }
    }
    lj_err_throw(L, errcode);  /* Propagate all other errors. */
  }
  return 0;  /* Loop optimization is ok. */
}

#undef IR
#undef emitir
#undef emitir_raw

#endif