/*
** Memory access optimizations.
** AA: Alias Analysis using high-level semantic disambiguation.
** FWD: Load Forwarding (L2L) + Store Forwarding (S2L).
** DSE: Dead-Store Elimination.
** Copyright (C) 2005-2023 Mike Pall. See Copyright Notice in luajit.h
*/
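
/* Illustrative examples, added for exposition (not part of the original
** header), assuming plain accesses on a local table t:
**   t.f = x; local a = t.f         --> S2L: the load forwards the stored x
**   local a = t.f; local b = t.f   --> L2L: the 2nd load reuses the 1st one
**   t.f = 1; t.f = 2               --> DSE: the first store is eliminated
** All of this is subject to the alias-analysis and guard checks below.
*/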

#define lj_opt_mem_c
#define LUA_CORE

#include "lj_obj.h"

#if LJ_HASJIT

#include "lj_tab.h"
#include "lj_ir.h"
#include "lj_jit.h"
#include "lj_iropt.h"
#include "lj_ircall.h"
#include "lj_dispatch.h"

/* Some local macros to save typing. Undef'd at the end. */
#define IR(ref)		(&J->cur.ir[(ref)])
#define fins		(&J->fold.ins)
#define fleft		(J->fold.left)
#define fright		(J->fold.right)

/*
** Caveat #1: return value is not always a TRef -- only use with tref_ref().
** Caveat #2: FWD relies on active CSE for xREF operands -- see lj_opt_fold().
*/

/* Return values from alias analysis. */
typedef enum {
  ALIAS_NO,	/* The two refs CANNOT alias (exact). */
  ALIAS_MAY,	/* The two refs MAY alias (inexact). */
  ALIAS_MUST	/* The two refs MUST alias (exact). */
} AliasRet;
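
/* Added illustration of these results for the key-based disambiguation
** below: on the same table, t[1] vs. t[2] (different constant keys) gives
** ALIAS_NO, the very same xREF gives ALIAS_MUST, and t[i] vs. t[j] with
** unrelated non-constant keys gives ALIAS_MAY.
*/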

/* -- ALOAD/HLOAD forwarding and ASTORE/HSTORE elimination ---------------- */

/* Simplified escape analysis: check for intervening stores. */
static AliasRet aa_escape(jit_State *J, IRIns *ir, IRIns *stop)
{
  IRRef ref = (IRRef)(ir - J->cur.ir);  /* The ref that might be stored. */
  for (ir++; ir < stop; ir++)
    if (ir->op2 == ref &&
        (ir->o == IR_ASTORE || ir->o == IR_HSTORE ||
         ir->o == IR_USTORE || ir->o == IR_FSTORE))
      return ALIAS_MAY;  /* Reference was stored and might alias. */
  return ALIAS_NO;  /* Reference was not stored. */
}

/* Alias analysis for two different table references. */
static AliasRet aa_table(jit_State *J, IRRef ta, IRRef tb)
{
  IRIns *taba = IR(ta), *tabb = IR(tb);
  int newa, newb;
  lj_assertJ(ta != tb, "bad usage");
  lj_assertJ(irt_istab(taba->t) && irt_istab(tabb->t), "bad usage");
  /* Disambiguate new allocations. */
  newa = (taba->o == IR_TNEW || taba->o == IR_TDUP);
  newb = (tabb->o == IR_TNEW || tabb->o == IR_TDUP);
  if (newa && newb)
    return ALIAS_NO;  /* Two different allocations never alias. */
  if (newb) {  /* At least one allocation? */
    IRIns *tmp = taba; taba = tabb; tabb = tmp;
  } else if (!newa) {
    return ALIAS_MAY;  /* Anything else: we just don't know. */
  }
  return aa_escape(J, taba, tabb);
}

/* Check whether there's no aliasing table.clear. */
static int fwd_aa_tab_clear(jit_State *J, IRRef lim, IRRef ta)
{
  IRRef ref = J->chain[IR_CALLS];
  while (ref > lim) {
    IRIns *calls = IR(ref);
    if (calls->op2 == IRCALL_lj_tab_clear &&
        (ta == calls->op1 || aa_table(J, ta, calls->op1) != ALIAS_NO))
      return 0;  /* Conflict. */
    ref = calls->prev;
  }
  return 1;  /* No conflict. Can safely FOLD/CSE. */
}

/* Check whether there's no aliasing NEWREF/table.clear for the left operand. */
int LJ_FASTCALL lj_opt_fwd_tptr(jit_State *J, IRRef lim)
{
  IRRef ta = fins->op1;
  IRRef ref = J->chain[IR_NEWREF];
  while (ref > lim) {
    IRIns *newref = IR(ref);
    if (ta == newref->op1 || aa_table(J, ta, newref->op1) != ALIAS_NO)
      return 0;  /* Conflict. */
    ref = newref->prev;
  }
  return fwd_aa_tab_clear(J, lim, ta);
}

/* Alias analysis for array and hash access using key-based disambiguation. */
static AliasRet aa_ahref(jit_State *J, IRIns *refa, IRIns *refb)
{
  IRRef ka = refa->op2;
  IRRef kb = refb->op2;
  IRIns *keya, *keyb;
  IRRef ta, tb;
  if (refa == refb)
    return ALIAS_MUST;  /* Shortcut for same refs. */
  keya = IR(ka);
  if (keya->o == IR_KSLOT) { ka = keya->op1; keya = IR(ka); }
  keyb = IR(kb);
  if (keyb->o == IR_KSLOT) { kb = keyb->op1; keyb = IR(kb); }
  ta = (refa->o==IR_HREFK || refa->o==IR_AREF) ? IR(refa->op1)->op1 : refa->op1;
  tb = (refb->o==IR_HREFK || refb->o==IR_AREF) ? IR(refb->op1)->op1 : refb->op1;
  if (ka == kb) {
    /* Same key. Check for same table with different ref (NEWREF vs. HREF). */
    if (ta == tb)
      return ALIAS_MUST;  /* Same key, same table. */
    else
      return aa_table(J, ta, tb);  /* Same key, possibly different table. */
  }
  if (irref_isk(ka) && irref_isk(kb))
    return ALIAS_NO;  /* Different constant keys. */
  if (refa->o == IR_AREF) {
    /* Disambiguate array references based on index arithmetic. */
    int32_t ofsa = 0, ofsb = 0;
    IRRef basea = ka, baseb = kb;
    lj_assertJ(refb->o == IR_AREF, "expected AREF");
    /* Gather base and offset from t[base] or t[base+-ofs]. */
    if (keya->o == IR_ADD && irref_isk(keya->op2)) {
      basea = keya->op1;
      ofsa = IR(keya->op2)->i;
      if (basea == kb && ofsa != 0)
        return ALIAS_NO;  /* t[base+-ofs] vs. t[base]. */
    }
    if (keyb->o == IR_ADD && irref_isk(keyb->op2)) {
      baseb = keyb->op1;
      ofsb = IR(keyb->op2)->i;
      if (ka == baseb && ofsb != 0)
        return ALIAS_NO;  /* t[base] vs. t[base+-ofs]. */
    }
    if (basea == baseb && ofsa != ofsb)
      return ALIAS_NO;  /* t[base+-o1] vs. t[base+-o2] and o1 != o2. */
  } else {
    /* Disambiguate hash references based on the type of their keys. */
    lj_assertJ((refa->o==IR_HREF || refa->o==IR_HREFK || refa->o==IR_NEWREF) &&
               (refb->o==IR_HREF || refb->o==IR_HREFK || refb->o==IR_NEWREF),
               "bad xREF IR op %d or %d", refa->o, refb->o);
    if (!irt_sametype(keya->t, keyb->t))
      return ALIAS_NO;  /* Different key types. */
  }
  if (ta == tb)
    return ALIAS_MAY;  /* Same table, cannot disambiguate keys. */
  else
    return aa_table(J, ta, tb);  /* Try to disambiguate tables. */
}

/* Array and hash load forwarding. */
static TRef fwd_ahload(jit_State *J, IRRef xref)
{
  IRIns *xr = IR(xref);
  IRRef lim = xref;  /* Search limit. */
  IRRef ref;

  /* Search for conflicting stores. */
  ref = J->chain[fins->o+IRDELTA_L2S];
  while (ref > xref) {
    IRIns *store = IR(ref);
    switch (aa_ahref(J, xr, IR(store->op1))) {
    case ALIAS_NO:   break;  /* Continue searching. */
    case ALIAS_MAY:  lim = ref; goto cselim;  /* Limit search for load. */
    case ALIAS_MUST: return store->op2;  /* Store forwarding. */
    }
    ref = store->prev;
  }

  /* No conflicting store (yet): const-fold loads from allocations. */
  {
    IRIns *ir = (xr->o == IR_HREFK || xr->o == IR_AREF) ? IR(xr->op1) : xr;
    IRRef tab = ir->op1;
    ir = IR(tab);
    if ((ir->o == IR_TNEW || (ir->o == IR_TDUP && irref_isk(xr->op2))) &&
        fwd_aa_tab_clear(J, tab, tab)) {
      /* A NEWREF with a number key may end up pointing to the array part.
      ** But it's referenced from HSTORE and not found in the ASTORE chain.
      ** Or a NEWREF may rehash the table and move unrelated number keys.
      ** For now simply consider this a conflict without forwarding anything.
      */
      if (xr->o == IR_AREF) {
        IRRef ref2 = J->chain[IR_NEWREF];
        while (ref2 > tab) {
          IRIns *newref = IR(ref2);
          if (irt_isnum(IR(newref->op2)->t))
            goto cselim;
          ref2 = newref->prev;
        }
      } else {
        IRIns *key = IR(xr->op2);
        if (key->o == IR_KSLOT) key = IR(key->op1);
        if (irt_isnum(key->t) && J->chain[IR_NEWREF] > tab)
          goto cselim;
      }
      /* NEWREF inhibits CSE for HREF, and dependent FLOADs from HREFK/AREF.
      ** But the above search for conflicting stores was limited by xref.
      ** So continue searching, limited by the TNEW/TDUP. Store forwarding
      ** is ok, too. A conflict does NOT limit the search for a matching load.
      */
      while (ref > tab) {
        IRIns *store = IR(ref);
        switch (aa_ahref(J, xr, IR(store->op1))) {
        case ALIAS_NO:   break;  /* Continue searching. */
        case ALIAS_MAY:  goto cselim;  /* Conflicting store. */
        case ALIAS_MUST: return store->op2;  /* Store forwarding. */
        }
        ref = store->prev;
      }
      /* Simplified here: let loop_unroll() figure out any type instability. */
      if (ir->o == IR_TNEW) {
        return TREF_NIL;
      } else {
        TValue keyv;
        cTValue *tv;
        IRIns *key = IR(xr->op2);
        if (key->o == IR_KSLOT) key = IR(key->op1);
        lj_ir_kvalue(J->L, &keyv, key);
        tv = lj_tab_get(J->L, ir_ktab(IR(ir->op1)), &keyv);
        if (tvispri(tv))
          return TREF_PRI(itype2irt(tv));
        else if (tvisnum(tv))
          return lj_ir_knum_u64(J, tv->u64);
        else if (tvisint(tv))
          return lj_ir_kint(J, intV(tv));
        else if (tvisgcv(tv))
          return lj_ir_kstr(J, strV(tv));
      }
      /* Otherwise: don't intern as a constant. */
    }
  }

cselim:
  /* Try to find a matching load. Below the conflicting store, if any. */
  ref = J->chain[fins->o];
  while (ref > lim) {
    IRIns *load = IR(ref);
    if (load->op1 == xref)
      return ref;  /* Load forwarding. */
    ref = load->prev;
  }
  return 0;  /* Conflict or no match. */
}

/* Reassociate ALOAD across PHIs to handle t[i-1] forwarding case. */
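/* Added sketch of the targeted case (an interpretation, not from the
** original source): in a loop like
**   for i=2,n do t[i] = t[i-1]+1 end
** the next unrolled iteration indexes t[(i+1)-1]. Reassociating the key
** (i+1)+(-1) back to i finds the AREF of t[i] from the previous iteration,
** so fwd_ahload() can forward across the PHI.
*/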
static TRef fwd_aload_reassoc(jit_State *J)
{
  IRIns *irx = IR(fins->op1);
  IRIns *key = IR(irx->op2);
  if (key->o == IR_ADD && irref_isk(key->op2)) {
    IRIns *add2 = IR(key->op1);
    if (add2->o == IR_ADD && irref_isk(add2->op2) &&
        IR(key->op2)->i == -IR(add2->op2)->i) {
      IRRef ref = J->chain[IR_AREF];
      IRRef lim = add2->op1;
      if (irx->op1 > lim) lim = irx->op1;
      while (ref > lim) {
        IRIns *ir = IR(ref);
        if (ir->op1 == irx->op1 && ir->op2 == add2->op1)
          return fwd_ahload(J, ref);
        ref = ir->prev;
      }
    }
  }
  return 0;
}

/* ALOAD forwarding. */
TRef LJ_FASTCALL lj_opt_fwd_aload(jit_State *J)
{
  IRRef ref;
  if ((ref = fwd_ahload(J, fins->op1)) ||
      (ref = fwd_aload_reassoc(J)))
    return ref;
  return EMITFOLD;
}

/* HLOAD forwarding. */
TRef LJ_FASTCALL lj_opt_fwd_hload(jit_State *J)
{
  IRRef ref = fwd_ahload(J, fins->op1);
  if (ref)
    return ref;
  return EMITFOLD;
}

/* HREFK forwarding. */
TRef LJ_FASTCALL lj_opt_fwd_hrefk(jit_State *J)
{
  IRRef tab = fleft->op1;
  IRRef ref = J->chain[IR_NEWREF];
  while (ref > tab) {
    IRIns *newref = IR(ref);
    if (tab == newref->op1) {
      if (fright->op1 == newref->op2 && fwd_aa_tab_clear(J, ref, tab))
        return ref;  /* Forward from NEWREF. */
      else
        goto docse;
    } else if (aa_table(J, tab, newref->op1) != ALIAS_NO) {
      goto docse;
    }
    ref = newref->prev;
  }
  /* No conflicting NEWREF: key location unchanged for HREFK of TDUP. */
  if (IR(tab)->o == IR_TDUP && fwd_aa_tab_clear(J, tab, tab))
    fins->t.irt &= ~IRT_GUARD;  /* Drop HREFK guard. */
docse:
  return CSEFOLD;
}

/* Check whether HREF of TNEW/TDUP can be folded to niltv. */
int LJ_FASTCALL lj_opt_fwd_href_nokey(jit_State *J)
{
  IRRef lim = fins->op1;  /* Search limit. */
  IRRef ref;

  /* The key for an ASTORE may end up in the hash part after a NEWREF. */
  if (irt_isnum(fright->t) && J->chain[IR_NEWREF] > lim) {
    ref = J->chain[IR_ASTORE];
    while (ref > lim) {
      if (ref < J->chain[IR_NEWREF])
        return 0;  /* Conflict. */
      ref = IR(ref)->prev;
    }
  }

  /* Search for conflicting stores. */
  ref = J->chain[IR_HSTORE];
  while (ref > lim) {
    IRIns *store = IR(ref);
    if (aa_ahref(J, fins, IR(store->op1)) != ALIAS_NO)
      return 0;  /* Conflict. */
    ref = store->prev;
  }

  return 1;  /* No conflict. Can fold to niltv. */
}

/* ASTORE/HSTORE elimination. */
TRef LJ_FASTCALL lj_opt_dse_ahstore(jit_State *J)
{
  IRRef xref = fins->op1;  /* xREF reference. */
  IRRef val = fins->op2;  /* Stored value reference. */
  IRIns *xr = IR(xref);
  IRRef1 *refp = &J->chain[fins->o];
  IRRef ref = *refp;
  while (ref > xref) {  /* Search for redundant or conflicting stores. */
    IRIns *store = IR(ref);
    switch (aa_ahref(J, xr, IR(store->op1))) {
    case ALIAS_NO:
      break;  /* Continue searching. */
    case ALIAS_MAY:  /* Store to MAYBE the same location. */
      if (store->op2 != val)  /* Conflict if the value is different. */
        goto doemit;
      break;  /* Otherwise continue searching. */
    case ALIAS_MUST:  /* Store to the same location. */
      if (store->op2 == val)  /* Same value: drop the new store. */
        return DROPFOLD;
      /* Different value: try to eliminate the redundant store. */
      if (ref > J->chain[IR_LOOP]) {  /* Quick check to avoid crossing LOOP. */
        IRIns *ir;
        /* Check for any intervening guards (includes conflicting loads).
        ** Note that lj_tab_keyindex and lj_vm_next don't need guards,
        ** since they are followed by at least one guarded VLOAD.
        */
        for (ir = IR(J->cur.nins-1); ir > store; ir--)
          if (irt_isguard(ir->t) || ir->o == IR_ALEN)
            goto doemit;  /* No elimination possible. */
        /* Remove redundant store from chain and replace with NOP. */
        *refp = store->prev;
        lj_ir_nop(store);
        /* Now emit the new store instead. */
      }
      goto doemit;
    }
    ref = *(refp = &store->prev);
  }
doemit:
  return EMITFOLD;  /* Otherwise we have a conflict or simply no match. */
}

/* ALEN forwarding. */
TRef LJ_FASTCALL lj_opt_fwd_alen(jit_State *J)
{
  IRRef tab = fins->op1;  /* Table reference. */
  IRRef lim = tab;  /* Search limit. */
  IRRef ref;

  /* Search for conflicting HSTORE with numeric key. */
  ref = J->chain[IR_HSTORE];
  while (ref > lim) {
    IRIns *store = IR(ref);
    IRIns *href = IR(store->op1);
    IRIns *key = IR(href->op2);
    if (irt_isnum(key->o == IR_KSLOT ? IR(key->op1)->t : key->t)) {
      lim = ref;  /* Conflicting store found, limits search for ALEN. */
      break;
    }
    ref = store->prev;
  }

  /* Try to find a matching ALEN. */
  ref = J->chain[IR_ALEN];
  while (ref > lim) {
    /* CSE for ALEN only depends on the table, not the hint. */
    if (IR(ref)->op1 == tab) {
      IRRef sref;

      /* Search for aliasing table.clear. */
      if (!fwd_aa_tab_clear(J, ref, tab))
        break;

      /* Search for hint-forwarding or conflicting store. */
      sref = J->chain[IR_ASTORE];
      while (sref > ref) {
        IRIns *store = IR(sref);
        IRIns *aref = IR(store->op1);
        IRIns *fref = IR(aref->op1);
        if (tab == fref->op1) {  /* ASTORE to the same table. */
          /* Detect t[#t+1] = x idiom for push. */
          IRIns *idx = IR(aref->op2);
          if (!irt_isnil(store->t) &&
              idx->o == IR_ADD && idx->op1 == ref &&
              IR(idx->op2)->o == IR_KINT && IR(idx->op2)->i == 1) {
            /* Note: this requires an extra PHI check in loop unroll. */
            fins->op2 = aref->op2;  /* Set ALEN hint. */
          }
          goto doemit;  /* Conflicting store, possibly giving a hint. */
        } else if (aa_table(J, tab, fref->op1) != ALIAS_NO) {
          goto doemit;  /* Conflicting store. */
        }
        sref = store->prev;
      }

      return ref;  /* Plain ALEN forwarding. */
    }
    ref = IR(ref)->prev;
  }
doemit:
  return EMITFOLD;
}

/* -- ULOAD forwarding ---------------------------------------------------- */

/* The current alias analysis for upvalues is very simplistic. It only
** disambiguates between the unique upvalues of the same function.
** This is good enough for now, since most upvalues are read-only.
**
** A more precise analysis would be feasible with the help of the parser:
** generate a unique key for every upvalue, even across all prototypes.
** Lacking a realistic use-case, it's unclear whether this is beneficial.
*/
static AliasRet aa_uref(IRIns *refa, IRIns *refb)
{
  if (refa->op1 == refb->op1) {  /* Same function. */
    if (refa->op2 == refb->op2)
      return ALIAS_MUST;  /* Same function, same upvalue idx. */
    else
      return ALIAS_NO;  /* Same function, different upvalue idx. */
  } else {  /* Different functions, check disambiguation hash values. */
    if (((refa->op2 ^ refb->op2) & 0xff)) {
      return ALIAS_NO;  /* Upvalues with different hash values cannot alias. */
    } else if (refa->o != refb->o) {
      /* Different UREFx type, but need to confirm the UREFO really is open. */
      if (irt_type(refa->t) == IRT_IGC) refa->t.irt += IRT_PGC-IRT_IGC;
      else if (irt_type(refb->t) == IRT_IGC) refb->t.irt += IRT_PGC-IRT_IGC;
      return ALIAS_NO;
    } else {
      /* No conclusion can be drawn for same hash value and same UREFx type. */
      return ALIAS_MAY;
    }
  }
}

/* ULOAD forwarding. */
TRef LJ_FASTCALL lj_opt_fwd_uload(jit_State *J)
{
  IRRef uref = fins->op1;
  IRRef lim = REF_BASE;  /* Search limit. */
  IRIns *xr = IR(uref);
  IRRef ref;

  /* Search for conflicting stores. */
  ref = J->chain[IR_USTORE];
  while (ref > lim) {
    IRIns *store = IR(ref);
    switch (aa_uref(xr, IR(store->op1))) {
    case ALIAS_NO:   break;  /* Continue searching. */
    case ALIAS_MAY:  lim = ref; goto cselim;  /* Limit search for load. */
    case ALIAS_MUST: return store->op2;  /* Store forwarding. */
    }
    ref = store->prev;
  }

cselim:
  /* Try to find a matching load. Below the conflicting store, if any. */
  ref = J->chain[IR_ULOAD];
  while (ref > lim) {
    IRIns *ir = IR(ref);
    if (ir->op1 == uref ||
        (IR(ir->op1)->op12 == IR(uref)->op12 && IR(ir->op1)->o == IR(uref)->o))
      return ref;  /* Match for identical or equal UREFx (non-CSEable UREFO). */
    ref = ir->prev;
  }
  return lj_ir_emit(J);
}

/* USTORE elimination. */
TRef LJ_FASTCALL lj_opt_dse_ustore(jit_State *J)
{
  IRRef xref = fins->op1;  /* xREF reference. */
  IRRef val = fins->op2;  /* Stored value reference. */
  IRIns *xr = IR(xref);
  IRRef1 *refp = &J->chain[IR_USTORE];
  IRRef ref = *refp;
  while (ref > xref) {  /* Search for redundant or conflicting stores. */
    IRIns *store = IR(ref);
    switch (aa_uref(xr, IR(store->op1))) {
    case ALIAS_NO:
      break;  /* Continue searching. */
    case ALIAS_MAY:  /* Store to MAYBE the same location. */
      if (store->op2 != val)  /* Conflict if the value is different. */
        goto doemit;
      break;  /* Otherwise continue searching. */
    case ALIAS_MUST:  /* Store to the same location. */
      if (store->op2 == val)  /* Same value: drop the new store. */
        return DROPFOLD;
      /* Different value: try to eliminate the redundant store. */
      if (ref > J->chain[IR_LOOP]) {  /* Quick check to avoid crossing LOOP. */
        IRIns *ir;
        /* Check for any intervening guards (includes conflicting loads). */
        for (ir = IR(J->cur.nins-1); ir > store; ir--)
          if (irt_isguard(ir->t))
            goto doemit;  /* No elimination possible. */
        /* Remove redundant store from chain and replace with NOP. */
        *refp = store->prev;
        lj_ir_nop(store);
        if (ref+1 < J->cur.nins &&
            store[1].o == IR_OBAR && store[1].op1 == xref) {
          IRRef1 *bp = &J->chain[IR_OBAR];
          IRIns *obar;
          for (obar = IR(*bp); *bp > ref+1; obar = IR(*bp))
            bp = &obar->prev;
          /* Remove OBAR, too. */
          *bp = obar->prev;
          lj_ir_nop(obar);
        }
        /* Now emit the new store instead. */
      }
      goto doemit;
    }
    ref = *(refp = &store->prev);
  }
doemit:
  return EMITFOLD;  /* Otherwise we have a conflict or simply no match. */
}

/* -- FLOAD forwarding and FSTORE elimination ----------------------------- */

/* Alias analysis for field access.
** Field loads are cheap and field stores are rare.
** Simple disambiguation based on field types is good enough.
*/
static AliasRet aa_fref(jit_State *J, IRIns *refa, IRIns *refb)
{
  if (refa->op2 != refb->op2)
    return ALIAS_NO;  /* Different fields. */
  if (refa->op1 == refb->op1)
    return ALIAS_MUST;  /* Same field, same object. */
  else if (refa->op2 >= IRFL_TAB_META && refa->op2 <= IRFL_TAB_NOMM)
    return aa_table(J, refa->op1, refb->op1);  /* Disambiguate tables. */
  else
    return ALIAS_MAY;  /* Same field, possibly different object. */
}

/* Only the loads for mutable fields end up here (see FOLD). */
TRef LJ_FASTCALL lj_opt_fwd_fload(jit_State *J)
{
  IRRef oref = fins->op1;  /* Object reference. */
  IRRef fid = fins->op2;  /* Field ID. */
  IRRef lim = oref;  /* Search limit. */
  IRRef ref;

  /* Search for conflicting stores. */
  ref = J->chain[IR_FSTORE];
  while (ref > oref) {
    IRIns *store = IR(ref);
    switch (aa_fref(J, fins, IR(store->op1))) {
    case ALIAS_NO:   break;  /* Continue searching. */
    case ALIAS_MAY:  lim = ref; goto cselim;  /* Limit search for load. */
    case ALIAS_MUST: return store->op2;  /* Store forwarding. */
    }
    ref = store->prev;
  }

  /* No conflicting store: const-fold field loads from allocations. */
  if (fid == IRFL_TAB_META) {
    IRIns *ir = IR(oref);
    if (ir->o == IR_TNEW || ir->o == IR_TDUP)
      return lj_ir_knull(J, IRT_TAB);
  }

cselim:
  /* Try to find a matching load. Below the conflicting store, if any. */
  return lj_opt_cselim(J, lim);
}
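
/* Added illustration: for local t = {} (TNEW/TDUP), a later getmetatable(t)
** loads IRFL_TAB_META, which the const-fold above turns into NULL, provided
** no conflicting FSTORE (e.g. from setmetatable) was found in the chain.
*/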

/* FSTORE elimination. */
TRef LJ_FASTCALL lj_opt_dse_fstore(jit_State *J)
{
  IRRef fref = fins->op1;  /* FREF reference. */
  IRRef val = fins->op2;  /* Stored value reference. */
  IRIns *xr = IR(fref);
  IRRef1 *refp = &J->chain[IR_FSTORE];
  IRRef ref = *refp;
  while (ref > fref) {  /* Search for redundant or conflicting stores. */
    IRIns *store = IR(ref);
    switch (aa_fref(J, xr, IR(store->op1))) {
    case ALIAS_NO:
      break;  /* Continue searching. */
    case ALIAS_MAY:
      if (store->op2 != val)  /* Conflict if the value is different. */
        goto doemit;
      break;  /* Otherwise continue searching. */
    case ALIAS_MUST:
      if (store->op2 == val &&
          !(xr->op2 >= IRFL_SBUF_W && xr->op2 <= IRFL_SBUF_R))
        return DROPFOLD;  /* Same value: drop the new store. */
      /* Different value: try to eliminate the redundant store. */
      if (ref > J->chain[IR_LOOP]) {  /* Quick check to avoid crossing LOOP. */
        IRIns *ir;
        /* Check for any intervening guards or conflicting loads. */
        for (ir = IR(J->cur.nins-1); ir > store; ir--)
          if (irt_isguard(ir->t) || (ir->o == IR_FLOAD && ir->op2 == xr->op2))
            goto doemit;  /* No elimination possible. */
        /* Remove redundant store from chain and replace with NOP. */
        *refp = store->prev;
        lj_ir_nop(store);
        /* Now emit the new store instead. */
      }
      goto doemit;
    }
    ref = *(refp = &store->prev);
  }
doemit:
  return EMITFOLD;  /* Otherwise we have a conflict or simply no match. */
}

/* Check whether there's no aliasing buffer op between IRFL_SBUF_*. */
int LJ_FASTCALL lj_opt_fwd_sbuf(jit_State *J, IRRef lim)
{
  IRRef ref;
  if (J->chain[IR_BUFPUT] > lim)
    return 0;  /* Conflict. */
  ref = J->chain[IR_CALLS];
  while (ref > lim) {
    IRIns *ir = IR(ref);
    if (ir->op2 >= IRCALL_lj_strfmt_putint && ir->op2 < IRCALL_lj_buf_tostr)
      return 0;  /* Conflict. */
    ref = ir->prev;
  }
  ref = J->chain[IR_CALLL];
  while (ref > lim) {
    IRIns *ir = IR(ref);
    if (ir->op2 >= IRCALL_lj_strfmt_putint && ir->op2 < IRCALL_lj_buf_tostr)
      return 0;  /* Conflict. */
    ref = ir->prev;
  }
  return 1;  /* No conflict. Can safely FOLD/CSE. */
}

/* -- XLOAD forwarding and XSTORE elimination ----------------------------- */

/* Find cdata allocation for a reference (if any). */
static IRIns *aa_findcnew(jit_State *J, IRIns *ir)
{
  while (ir->o == IR_ADD) {
    if (!irref_isk(ir->op1)) {
      IRIns *ir1 = aa_findcnew(J, IR(ir->op1));  /* Left-recursion. */
      if (ir1) return ir1;
    }
    if (irref_isk(ir->op2)) return NULL;
    ir = IR(ir->op2);  /* Flatten right-recursion. */
  }
  return ir->o == IR_CNEW ? ir : NULL;
}

/* Alias analysis for two cdata allocations. */
static AliasRet aa_cnew(jit_State *J, IRIns *refa, IRIns *refb)
{
  IRIns *cnewa = aa_findcnew(J, refa);
  IRIns *cnewb = aa_findcnew(J, refb);
  if (cnewa == cnewb)
    return ALIAS_MAY;  /* Same allocation or neither is an allocation. */
  if (cnewa && cnewb)
    return ALIAS_NO;  /* Two different allocations never alias. */
  if (cnewb) { cnewa = cnewb; refb = refa; }
  return aa_escape(J, cnewa, refb);
}

/* Alias analysis for XLOAD/XSTORE. */
static AliasRet aa_xref(jit_State *J, IRIns *refa, IRIns *xa, IRIns *xb)
{
  ptrdiff_t ofsa = 0, ofsb = 0;
  IRIns *refb = IR(xb->op1);
  IRIns *basea = refa, *baseb = refb;
  if (refa == refb && irt_sametype(xa->t, xb->t))
    return ALIAS_MUST;  /* Shortcut for same refs with identical type. */
  /* Offset-based disambiguation. */
  if (refa->o == IR_ADD && irref_isk(refa->op2)) {
    IRIns *irk = IR(refa->op2);
    basea = IR(refa->op1);
    ofsa = (LJ_64 && irk->o == IR_KINT64) ? (ptrdiff_t)ir_k64(irk)->u64 :
                                            (ptrdiff_t)irk->i;
  }
  if (refb->o == IR_ADD && irref_isk(refb->op2)) {
    IRIns *irk = IR(refb->op2);
    baseb = IR(refb->op1);
    ofsb = (LJ_64 && irk->o == IR_KINT64) ? (ptrdiff_t)ir_k64(irk)->u64 :
                                            (ptrdiff_t)irk->i;
  }
  /* Treat constified pointers like base vs. base+offset. */
  if (basea->o == IR_KPTR && baseb->o == IR_KPTR) {
    ofsb += (char *)ir_kptr(baseb) - (char *)ir_kptr(basea);
    baseb = basea;
  }
  /* This implements (very) strict aliasing rules.
  ** Different types do NOT alias, except for differences in signedness.
  ** Type punning through unions is allowed (but forces a reload).
  */
  if (basea == baseb) {
    ptrdiff_t sza = irt_size(xa->t), szb = irt_size(xb->t);
    if (ofsa == ofsb) {
      if (sza == szb && irt_isfp(xa->t) == irt_isfp(xb->t))
        return ALIAS_MUST;  /* Same-sized, same-kind. May need to convert. */
    } else if (ofsa + sza <= ofsb || ofsb + szb <= ofsa) {
      return ALIAS_NO;  /* Non-overlapping base+-o1 vs. base+-o2. */
    }
    /* NYI: extract, extend or reinterpret bits (int <-> fp). */
    return ALIAS_MAY;  /* Overlapping or type punning: force reload. */
  }
  if (!irt_sametype(xa->t, xb->t) &&
      !(irt_typerange(xa->t, IRT_I8, IRT_U64) &&
        ((xa->t.irt - IRT_I8) ^ (xb->t.irt - IRT_I8)) == 1))
    return ALIAS_NO;
  /* NYI: structural disambiguation. */
  return aa_cnew(J, basea, baseb);  /* Try to disambiguate allocations. */
}
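
/* Added example of the offset rule above: with q = p+4, a 4-byte XLOAD at p
** and a 4-byte XSTORE at q do not overlap (0+4 <= 4), hence ALIAS_NO.
** At the same offset, int32_t vs. uint32_t differ only in signedness and
** still give ALIAS_MUST (the forwarded value may need a conversion).
*/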

/* Return CSEd reference or 0. Caveat: swaps lower ref to the right! */
static IRRef reassoc_trycse(jit_State *J, IROp op, IRRef op1, IRRef op2)
{
  IRRef ref = J->chain[op];
  IRRef lim = op1;
  if (op2 > lim) { lim = op2; op2 = op1; op1 = lim; }
  while (ref > lim) {
    IRIns *ir = IR(ref);
    if (ir->op1 == op1 && ir->op2 == op2)
      return ref;
    ref = ir->prev;
  }
  return 0;
}

/* Reassociate index references. */
static IRRef reassoc_xref(jit_State *J, IRIns *ir)
{
  ptrdiff_t ofs = 0;
  if (ir->o == IR_ADD && irref_isk(ir->op2)) {  /* Get constant offset. */
    IRIns *irk = IR(ir->op2);
    ofs = (LJ_64 && irk->o == IR_KINT64) ? (ptrdiff_t)ir_k64(irk)->u64 :
                                           (ptrdiff_t)irk->i;
    ir = IR(ir->op1);
  }
  if (ir->o == IR_ADD) {  /* Add of base + index. */
    /* Index ref > base ref for loop-carried dependences. Only check op1. */
    IRIns *ir2, *ir1 = IR(ir->op1);
    int32_t shift = 0;
    IRRef idxref;
    /* Determine index shifts. Don't bother with IR_MUL here. */
    if (ir1->o == IR_BSHL && irref_isk(ir1->op2))
      shift = IR(ir1->op2)->i;
    else if (ir1->o == IR_ADD && ir1->op1 == ir1->op2)
      shift = 1;
    else
      ir1 = ir;
    ir2 = IR(ir1->op1);
    /* A non-reassociated add. Must be a loop-carried dependence. */
    if (ir2->o == IR_ADD && irt_isint(ir2->t) && irref_isk(ir2->op2))
      ofs += (ptrdiff_t)IR(ir2->op2)->i << shift;
    else
      return 0;
    idxref = ir2->op1;
    /* Try to CSE the reassociated chain. Give up if not found. */
    if (ir1 != ir &&
        !(idxref = reassoc_trycse(J, ir1->o, idxref,
                                  ir1->o == IR_BSHL ? ir1->op2 : idxref)))
      return 0;
    if (!(idxref = reassoc_trycse(J, IR_ADD, idxref, ir->op2)))
      return 0;
    if (ofs != 0) {
      IRRef refk = tref_ref(lj_ir_kintp(J, ofs));
      if (!(idxref = reassoc_trycse(J, IR_ADD, idxref, refk)))
        return 0;
    }
    return idxref;  /* Success, found a reassociated index reference. Phew. */
  }
  return 0;  /* Failure. */
}

/* XLOAD forwarding. */
TRef LJ_FASTCALL lj_opt_fwd_xload(jit_State *J)
{
  IRRef xref = fins->op1;
  IRIns *xr = IR(xref);
  IRRef lim = xref;  /* Search limit. */
  IRRef ref;

  if ((fins->op2 & IRXLOAD_READONLY))
    goto cselim;
  if ((fins->op2 & IRXLOAD_VOLATILE))
    goto doemit;

  /* Search for conflicting stores. */
  ref = J->chain[IR_XSTORE];
retry:
  if (J->chain[IR_CALLXS] > lim) lim = J->chain[IR_CALLXS];
  if (J->chain[IR_XBAR] > lim) lim = J->chain[IR_XBAR];
  while (ref > lim) {
    IRIns *store = IR(ref);
    switch (aa_xref(J, xr, fins, store)) {
    case ALIAS_NO:   break;  /* Continue searching. */
    case ALIAS_MAY:  lim = ref; goto cselim;  /* Limit search for load. */
    case ALIAS_MUST:
      /* Emit conversion if the loaded type doesn't match the forwarded type. */
      if (!irt_sametype(fins->t, IR(store->op2)->t)) {
        IRType dt = irt_type(fins->t), st = irt_type(IR(store->op2)->t);
        if (dt == IRT_I8 || dt == IRT_I16) {  /* Trunc + sign-extend. */
          st = dt | IRCONV_SEXT;
          dt = IRT_INT;
        } else if (dt == IRT_U8 || dt == IRT_U16) {  /* Trunc + zero-extend. */
          st = dt;
          dt = IRT_INT;
        }
        fins->ot = IRT(IR_CONV, dt);
        fins->op1 = store->op2;
        fins->op2 = (dt<<5)|st;
        return RETRYFOLD;
      }
      return store->op2;  /* Store forwarding. */
    }
    ref = store->prev;
  }

cselim:
  /* Try to find a matching load. Below the conflicting store, if any. */
  ref = J->chain[IR_XLOAD];
  while (ref > lim) {
    /* CSE for XLOAD depends on the type, but not on the IRXLOAD_* flags. */
    if (IR(ref)->op1 == xref && irt_sametype(IR(ref)->t, fins->t))
      return ref;
    ref = IR(ref)->prev;
  }

  /* Reassociate XLOAD across PHIs to handle a[i-1] forwarding case. */
  if (!(fins->op2 & IRXLOAD_READONLY) && J->chain[IR_LOOP] &&
      xref == fins->op1 && (xref = reassoc_xref(J, xr)) != 0) {
    ref = J->chain[IR_XSTORE];
    while (ref > lim)  /* Skip stores that have already been checked. */
      ref = IR(ref)->prev;
    lim = xref;
    xr = IR(xref);
    goto retry;  /* Retry with the reassociated reference. */
  }
doemit:
  return EMITFOLD;
}

/* XSTORE elimination. */
TRef LJ_FASTCALL lj_opt_dse_xstore(jit_State *J)
{
  IRRef xref = fins->op1;
  IRIns *xr = IR(xref);
  IRRef lim = xref;  /* Search limit. */
  IRRef val = fins->op2;  /* Stored value reference. */
  IRRef1 *refp = &J->chain[IR_XSTORE];
  IRRef ref = *refp;
  if (J->chain[IR_CALLXS] > lim) lim = J->chain[IR_CALLXS];
  if (J->chain[IR_XBAR] > lim) lim = J->chain[IR_XBAR];
  if (J->chain[IR_XSNEW] > lim) lim = J->chain[IR_XSNEW];
  while (ref > lim) {  /* Search for redundant or conflicting stores. */
    IRIns *store = IR(ref);
    switch (aa_xref(J, xr, fins, store)) {
    case ALIAS_NO:
      break;  /* Continue searching. */
    case ALIAS_MAY:
      if (store->op2 != val)  /* Conflict if the value is different. */
        goto doemit;
      break;  /* Otherwise continue searching. */
    case ALIAS_MUST:
      if (store->op2 == val)  /* Same value: drop the new store. */
        return DROPFOLD;
      /* Different value: try to eliminate the redundant store. */
      if (ref > J->chain[IR_LOOP]) {  /* Quick check to avoid crossing LOOP. */
        IRIns *ir;
        /* Check for any intervening guards or any XLOADs (no AA performed). */
        for (ir = IR(J->cur.nins-1); ir > store; ir--)
          if (irt_isguard(ir->t) || ir->o == IR_XLOAD)
            goto doemit;  /* No elimination possible. */
        /* Remove redundant store from chain and replace with NOP. */
        *refp = store->prev;
        lj_ir_nop(store);
        /* Now emit the new store instead. */
      }
      goto doemit;
    }
    ref = *(refp = &store->prev);
  }
doemit:
  return EMITFOLD;  /* Otherwise we have a conflict or simply no match. */
}

/* -- ASTORE/HSTORE previous type analysis -------------------------------- */

/* Check whether the previous value for a table store is non-nil.
** This can be derived either from a previous store or from a previous
** load (because all loads from tables perform a type check).
**
** The result of the analysis can be used to avoid the metatable check
** and the guard against HREF returning niltv. Both of these are cheap,
** so let's not spend too much effort on the analysis.
**
** A result of 1 is exact: previous value CANNOT be nil.
** A result of 0 is inexact: previous value MAY be nil.
*/
int lj_opt_fwd_wasnonnil(jit_State *J, IROpT loadop, IRRef xref)
{
  /* First check stores. */
  IRRef ref = J->chain[loadop+IRDELTA_L2S];
  while (ref > xref) {
    IRIns *store = IR(ref);
    if (store->op1 == xref) {  /* Same xREF. */
      /* A nil store MAY alias, but a non-nil store MUST alias. */
      return !irt_isnil(store->t);
    } else if (irt_isnil(store->t)) {  /* Must check any nil store. */
      IRRef skref = IR(store->op1)->op2;
      IRRef xkref = IR(xref)->op2;
      /* Same key type MAY alias. Need ALOAD check due to multiple int types. */
      if (loadop == IR_ALOAD || irt_sametype(IR(skref)->t, IR(xkref)->t)) {
        if (skref == xkref || !irref_isk(skref) || !irref_isk(xkref))
          return 0;  /* A nil store with same const key or var key MAY alias. */
        /* Different const keys CANNOT alias. */
      } else if (irt_isp32(IR(skref)->t) != irt_isp32(IR(xkref)->t)) {
        return 0;  /* HREF and HREFK MAY alias. */
      }  /* Different key types CANNOT alias. */
    }  /* Other non-nil stores MAY alias. */
    ref = store->prev;
  }

  /* Check loads since nothing could be derived from stores. */
  ref = J->chain[loadop];
  while (ref > xref) {
    IRIns *load = IR(ref);
    if (load->op1 == xref) {  /* Same xREF. */
      /* A nil load MAY alias, but a non-nil load MUST alias. */
      return !irt_isnil(load->t);
    }  /* Other non-nil loads MAY alias. */
    ref = load->prev;
  }
  return 0;  /* Nothing derived at all, previous value MAY be nil. */
}
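
/* Added illustration: for t.x = 1 followed later by t.x = 2 on the same
** xREF, the store chain shows a previous non-nil store, so this returns 1
** and the second store may skip the __newindex metatable check and the
** guard against HREF returning niltv.
*/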

/* ------------------------------------------------------------------------ */

#undef IR
#undef fins
#undef fleft
#undef fright

#endif