/*
** Garbage collector.
** Copyright (C) 2005-2023 Mike Pall. See Copyright Notice in luajit.h
**
** Major portions taken verbatim or adapted from the Lua interpreter.
** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
*/

#define lj_gc_c
#define LUA_CORE

#include "lj_obj.h"
#include "lj_gc.h"
#include "lj_err.h"
#include "lj_buf.h"
#include "lj_str.h"
#include "lj_tab.h"
#include "lj_func.h"
#include "lj_udata.h"
#include "lj_meta.h"
#include "lj_state.h"
#include "lj_frame.h"
#if LJ_HASFFI
#include "lj_ctype.h"
#include "lj_cdata.h"
#endif
#include "lj_trace.h"
#include "lj_dispatch.h"
#include "lj_vm.h"
#include "lj_vmevent.h"

#define GCSTEPSIZE	1024u
#define GCSWEEPMAX	40
#define GCSWEEPCOST	10
#define GCFINALIZECOST	100

/* Macros to set GCobj colors and flags. */
#define white2gray(x)	((x)->gch.marked &= (uint8_t)~LJ_GC_WHITES)
#define gray2black(x)	((x)->gch.marked |= LJ_GC_BLACK)
#define isfinalized(u)	((u)->marked & LJ_GC_FINALIZED)
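
/*
** Color-bit sketch (bits per lj_gc.h): an object is white if either
** LJ_GC_WHITE0 or LJ_GC_WHITE1 is set, black if LJ_GC_BLACK is set, and
** gray if none of the three is set. The two alternating whites let the
** sweep distinguish objects that survived the last mark phase (other
** white, dead) from objects allocated during the current cycle
** (current white, alive).
*/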

/* -- Mark phase ---------------------------------------------------------- */

/* Mark a TValue (if needed). */
#define gc_marktv(g, tv) \
  { lj_assertG(!tvisgcv(tv) || (~itype(tv) == gcval(tv)->gch.gct), \
	       "TValue and GC type mismatch"); \
    if (tviswhite(tv)) gc_mark(g, gcV(tv)); }

/* Mark a GCobj (if needed). */
#define gc_markobj(g, o) \
  { if (iswhite(obj2gco(o))) gc_mark(g, obj2gco(o)); }

/* Mark a string object. */
#define gc_mark_str(s)	((s)->marked &= (uint8_t)~LJ_GC_WHITES)

/* Mark a white GCobj. */
static void gc_mark(global_State *g, GCobj *o)
{
  int gct = o->gch.gct;
  lj_assertG(iswhite(o), "mark of non-white object");
  lj_assertG(!isdead(g, o), "mark of dead object");
  white2gray(o);
  if (LJ_UNLIKELY(gct == ~LJ_TUDATA)) {
    GCtab *mt = tabref(gco2ud(o)->metatable);
    gray2black(o);  /* Userdata are never gray. */
    if (mt) gc_markobj(g, mt);
    gc_markobj(g, tabref(gco2ud(o)->env));
    if (LJ_HASBUFFER && gco2ud(o)->udtype == UDTYPE_BUFFER) {
      SBufExt *sbx = (SBufExt *)uddata(gco2ud(o));
      if (sbufiscow(sbx) && gcref(sbx->cowref))
	gc_markobj(g, gcref(sbx->cowref));
      if (gcref(sbx->dict_str))
	gc_markobj(g, gcref(sbx->dict_str));
      if (gcref(sbx->dict_mt))
	gc_markobj(g, gcref(sbx->dict_mt));
    }
  } else if (LJ_UNLIKELY(gct == ~LJ_TUPVAL)) {
    GCupval *uv = gco2uv(o);
    gc_marktv(g, uvval(uv));
    if (uv->closed)
      gray2black(o);  /* Closed upvalues are never gray. */
  } else if (gct != ~LJ_TSTR && gct != ~LJ_TCDATA) {
    lj_assertG(gct == ~LJ_TFUNC || gct == ~LJ_TTAB ||
	       gct == ~LJ_TTHREAD || gct == ~LJ_TPROTO || gct == ~LJ_TTRACE,
	       "bad GC type %d", gct);
    setgcrefr(o->gch.gclist, g->gc.gray);
    setgcref(g->gc.gray, o);
  }
}
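
/*
** Overview: strings and cdata are leaf objects, so clearing their white
** bits is all the marking they need. Userdata and closed upvalues are
** finished eagerly and turned black. Everything else (functions, tables,
** threads, prototypes, traces) is pushed onto g->gc.gray and traversed
** later by propagatemark().
*/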

/* Mark GC roots. */
static void gc_mark_gcroot(global_State *g)
{
  ptrdiff_t i;
  for (i = 0; i < GCROOT_MAX; i++)
    if (gcref(g->gcroot[i]) != NULL)
      gc_markobj(g, gcref(g->gcroot[i]));
}

/* Start a GC cycle and mark the root set. */
static void gc_mark_start(global_State *g)
{
  setgcrefnull(g->gc.gray);
  setgcrefnull(g->gc.grayagain);
  setgcrefnull(g->gc.weak);
  gc_markobj(g, mainthread(g));
  gc_markobj(g, tabref(mainthread(g)->env));
  gc_marktv(g, &g->registrytv);
  gc_mark_gcroot(g);
#if LJ_HASFFI
  if (ctype_ctsG(g)) gc_markobj(g, ctype_ctsG(g)->finalizer);
#endif
  g->gc.state = GCSpropagate;
}

/* Mark open upvalues. */
static void gc_mark_uv(global_State *g)
{
  GCupval *uv;
  for (uv = uvnext(&g->uvhead); uv != &g->uvhead; uv = uvnext(uv)) {
    lj_assertG(uvprev(uvnext(uv)) == uv && uvnext(uvprev(uv)) == uv,
	       "broken upvalue chain");
    if (isgray(obj2gco(uv)))
      gc_marktv(g, uvval(uv));
  }
}

/* Mark userdata in mmudata list. */
static void gc_mark_mmudata(global_State *g)
{
  GCobj *root = gcref(g->gc.mmudata);
  GCobj *u = root;
  if (u) {
    do {
      u = gcnext(u);
      makewhite(g, u);  /* Could be from previous GC. */
      gc_mark(g, u);
    } while (u != root);
  }
}

/* Separate userdata objects to be finalized to mmudata list. */
size_t lj_gc_separateudata(global_State *g, int all)
{
  size_t m = 0;
  GCRef *p = &mainthread(g)->nextgc;
  GCobj *o;
  while ((o = gcref(*p)) != NULL) {
    if (!(iswhite(o) || all) || isfinalized(gco2ud(o))) {
      p = &o->gch.nextgc;  /* Nothing to do. */
    } else if (!lj_meta_fastg(g, tabref(gco2ud(o)->metatable), MM_gc)) {
      markfinalized(o);  /* Done, as there's no __gc metamethod. */
      p = &o->gch.nextgc;
    } else {  /* Otherwise move userdata to be finalized to mmudata list. */
      m += sizeudata(gco2ud(o));
      markfinalized(o);
      *p = o->gch.nextgc;
      if (gcref(g->gc.mmudata)) {  /* Link to end of mmudata list. */
	GCobj *root = gcref(g->gc.mmudata);
	setgcrefr(o->gch.nextgc, root->gch.nextgc);
	setgcref(root->gch.nextgc, o);
	setgcref(g->gc.mmudata, o);
      } else {  /* Create circular list. */
	setgcref(o->gch.nextgc, o);
	setgcref(g->gc.mmudata, o);
      }
    }
  }
  return m;
}
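
/*
** Layout sketch of the mmudata list built above: the list is circular
** and g->gc.mmudata points at the *last* element, so with objects A, B,
** C inserted in that order the links are C->A->B->C and gc.mmudata == C.
** This makes both "append at end" (here) and "pop from front" (in
** gc_finalize) O(1) operations.
*/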

/* -- Propagation phase --------------------------------------------------- */

/* Traverse a table. */
static int gc_traverse_tab(global_State *g, GCtab *t)
{
  int weak = 0;
  cTValue *mode;
  GCtab *mt = tabref(t->metatable);
  if (mt)
    gc_markobj(g, mt);
  mode = lj_meta_fastg(g, mt, MM_mode);
  if (mode && tvisstr(mode)) {  /* Valid __mode field? */
    const char *modestr = strVdata(mode);
    int c;
    while ((c = *modestr++)) {
      if (c == 'k') weak |= LJ_GC_WEAKKEY;
      else if (c == 'v') weak |= LJ_GC_WEAKVAL;
    }
    if (weak) {  /* Weak tables are cleared in the atomic phase. */
#if LJ_HASFFI
      CTState *cts = ctype_ctsG(g);
      if (cts && cts->finalizer == t) {
	weak = (int)(~0u & ~LJ_GC_WEAKVAL);
      } else
#endif
      {
	t->marked = (uint8_t)((t->marked & ~LJ_GC_WEAK) | weak);
	setgcrefr(t->gclist, g->gc.weak);
	setgcref(g->gc.weak, obj2gco(t));
      }
    }
  }
  if (weak == LJ_GC_WEAK)  /* Nothing to mark if both keys/values are weak. */
    return 1;
  if (!(weak & LJ_GC_WEAKVAL)) {  /* Mark array part. */
    MSize i, asize = t->asize;
    for (i = 0; i < asize; i++)
      gc_marktv(g, arrayslot(t, i));
  }
  if (t->hmask > 0) {  /* Mark hash part. */
    Node *node = noderef(t->node);
    MSize i, hmask = t->hmask;
    for (i = 0; i <= hmask; i++) {
      Node *n = &node[i];
      if (!tvisnil(&n->val)) {  /* Mark non-empty slot. */
	lj_assertG(!tvisnil(&n->key), "mark of nil key in non-empty slot");
	if (!(weak & LJ_GC_WEAKKEY)) gc_marktv(g, &n->key);
	if (!(weak & LJ_GC_WEAKVAL)) gc_marktv(g, &n->val);
      }
    }
  }
  return weak;
}
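
/*
** Lua-level view of the weak flags computed above (illustrative):
**
**   local t = setmetatable({}, { __mode = "kv" })
**
** sets weak == LJ_GC_WEAKKEY|LJ_GC_WEAKVAL == LJ_GC_WEAK, so the
** traversal marks neither keys nor values and returns 1. With "k" or
** "v" alone, only the strong half of each slot is marked and the table
** is queued on g->gc.weak for clearing in the atomic phase.
*/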

/* Traverse a function. */
static void gc_traverse_func(global_State *g, GCfunc *fn)
{
  gc_markobj(g, tabref(fn->c.env));
  if (isluafunc(fn)) {
    uint32_t i;
    lj_assertG(fn->l.nupvalues <= funcproto(fn)->sizeuv,
	       "function upvalues out of range");
    gc_markobj(g, funcproto(fn));
    for (i = 0; i < fn->l.nupvalues; i++)  /* Mark Lua function upvalues. */
      gc_markobj(g, &gcref(fn->l.uvptr[i])->uv);
  } else {
    uint32_t i;
    for (i = 0; i < fn->c.nupvalues; i++)  /* Mark C function upvalues. */
      gc_marktv(g, &fn->c.upvalue[i]);
  }
}

#if LJ_HASJIT
/* Mark a trace. */
static void gc_marktrace(global_State *g, TraceNo traceno)
{
  GCobj *o = obj2gco(traceref(G2J(g), traceno));
  lj_assertG(traceno != G2J(g)->cur.traceno, "active trace escaped");
  if (iswhite(o)) {
    white2gray(o);
    setgcrefr(o->gch.gclist, g->gc.gray);
    setgcref(g->gc.gray, o);
  }
}

/* Traverse a trace. */
static void gc_traverse_trace(global_State *g, GCtrace *T)
{
  IRRef ref;
  if (T->traceno == 0) return;
  for (ref = T->nk; ref < REF_TRUE; ref++) {
    IRIns *ir = &T->ir[ref];
    if (ir->o == IR_KGC)
      gc_markobj(g, ir_kgc(ir));
    if (irt_is64(ir->t) && ir->o != IR_KNULL)
      ref++;
  }
  if (T->link) gc_marktrace(g, T->link);
  if (T->nextroot) gc_marktrace(g, T->nextroot);
  if (T->nextside) gc_marktrace(g, T->nextside);
  gc_markobj(g, gcref(T->startpt));
}

/* The current trace is a GC root while not anchored in the prototype (yet). */
#define gc_traverse_curtrace(g)	gc_traverse_trace(g, &G2J(g)->cur)
#else
#define gc_traverse_curtrace(g)	UNUSED(g)
#endif

/* Traverse a prototype. */
static void gc_traverse_proto(global_State *g, GCproto *pt)
{
  ptrdiff_t i;
  gc_mark_str(proto_chunkname(pt));
  for (i = -(ptrdiff_t)pt->sizekgc; i < 0; i++)  /* Mark collectable consts. */
    gc_markobj(g, proto_kgc(pt, i));
#if LJ_HASJIT
  if (pt->trace) gc_marktrace(g, pt->trace);
#endif
}

/* Traverse the frame structure of a stack. */
static MSize gc_traverse_frames(global_State *g, lua_State *th)
{
  TValue *frame, *top = th->top-1, *bot = tvref(th->stack);
  /* Note: extra vararg frame not skipped, marks function twice (harmless). */
  for (frame = th->base-1; frame > bot+LJ_FR2; frame = frame_prev(frame)) {
    GCfunc *fn = frame_func(frame);
    TValue *ftop = frame;
    if (isluafunc(fn)) ftop += funcproto(fn)->framesize;
    if (ftop > top) top = ftop;
    if (!LJ_FR2) gc_markobj(g, fn);  /* Need to mark hidden function (or L). */
  }
  top++;  /* Correct bias of -1 (frame == base-1). */
  if (top > tvref(th->maxstack)) top = tvref(th->maxstack);
  return (MSize)(top - bot);  /* Return minimum needed stack size. */
}

/* Traverse a thread object. */
static void gc_traverse_thread(global_State *g, lua_State *th)
{
  TValue *o, *top = th->top;
  for (o = tvref(th->stack)+1+LJ_FR2; o < top; o++)
    gc_marktv(g, o);
  if (g->gc.state == GCSatomic) {
    top = tvref(th->stack) + th->stacksize;
    for (; o < top; o++)  /* Clear unmarked slots. */
      setnilV(o);
  }
  gc_markobj(g, tabref(th->env));
  lj_state_shrinkstack(th, gc_traverse_frames(g, th));
}
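
/*
** Note on the atomic-phase clearing above: slots between th->top and
** the end of the allocated stack may still hold stale TValues left
** behind by deeper calls. Setting them to nil keeps such leftovers
** from coming back into view if the stack top is raised later, and
** lets lj_state_shrinkstack() trim the stack to the minimum size
** computed by gc_traverse_frames().
*/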

/* Propagate one gray object. Traverse it and turn it black. */
static size_t propagatemark(global_State *g)
{
  GCobj *o = gcref(g->gc.gray);
  int gct = o->gch.gct;
  lj_assertG(isgray(o), "propagation of non-gray object");
  gray2black(o);
  setgcrefr(g->gc.gray, o->gch.gclist);  /* Remove from gray list. */
  if (LJ_LIKELY(gct == ~LJ_TTAB)) {
    GCtab *t = gco2tab(o);
    if (gc_traverse_tab(g, t) > 0)
      black2gray(o);  /* Keep weak tables gray. */
    return sizeof(GCtab) + sizeof(TValue) * t->asize +
	   (t->hmask ? sizeof(Node) * (t->hmask + 1) : 0);
  } else if (LJ_LIKELY(gct == ~LJ_TFUNC)) {
    GCfunc *fn = gco2func(o);
    gc_traverse_func(g, fn);
    return isluafunc(fn) ? sizeLfunc((MSize)fn->l.nupvalues) :
			   sizeCfunc((MSize)fn->c.nupvalues);
  } else if (LJ_LIKELY(gct == ~LJ_TPROTO)) {
    GCproto *pt = gco2pt(o);
    gc_traverse_proto(g, pt);
    return pt->sizept;
  } else if (LJ_LIKELY(gct == ~LJ_TTHREAD)) {
    lua_State *th = gco2th(o);
    setgcrefr(th->gclist, g->gc.grayagain);
    setgcref(g->gc.grayagain, o);
    black2gray(o);  /* Threads are never black. */
    gc_traverse_thread(g, th);
    return sizeof(lua_State) + sizeof(TValue) * th->stacksize;
  } else {
#if LJ_HASJIT
    GCtrace *T = gco2trace(o);
    gc_traverse_trace(g, T);
    return ((sizeof(GCtrace)+7)&~7) + (T->nins-T->nk)*sizeof(IRIns) +
	   T->nsnap*sizeof(SnapShot) + T->nsnapmap*sizeof(SnapEntry);
#else
    lj_assertG(0, "bad GC type %d", gct);
    return 0;
#endif
  }
}
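
/*
** The byte counts returned above are rough traversal costs, not exact
** allocation sizes. They are summed by gc_propagate_gray() and debited
** against the step budget in lj_gc_step(), so a step that traverses a
** large table or stack consumes correspondingly more of its quota.
*/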

/* Propagate all gray objects. */
static size_t gc_propagate_gray(global_State *g)
{
  size_t m = 0;
  while (gcref(g->gc.gray) != NULL)
    m += propagatemark(g);
  return m;
}

/* -- Sweep phase --------------------------------------------------------- */

/* Type of GC free functions. */
typedef void (LJ_FASTCALL *GCFreeFunc)(global_State *g, GCobj *o);

/* GC free functions for LJ_TSTR .. LJ_TUDATA. ORDER LJ_T */
static const GCFreeFunc gc_freefunc[] = {
  (GCFreeFunc)lj_str_free,
  (GCFreeFunc)lj_func_freeuv,
  (GCFreeFunc)lj_state_free,
  (GCFreeFunc)lj_func_freeproto,
  (GCFreeFunc)lj_func_free,
#if LJ_HASJIT
  (GCFreeFunc)lj_trace_free,
#else
  (GCFreeFunc)0,
#endif
#if LJ_HASFFI
  (GCFreeFunc)lj_cdata_free,
#else
  (GCFreeFunc)0,
#endif
  (GCFreeFunc)lj_tab_free,
  (GCFreeFunc)lj_udata_free
};

/* Full sweep of a GC list. */
#define gc_fullsweep(g, p)	gc_sweep(g, (p), ~(uint32_t)0)

/* Partial sweep of a GC list. */
static GCRef *gc_sweep(global_State *g, GCRef *p, uint32_t lim)
{
  /* Mask with other white and LJ_GC_FIXED. Or LJ_GC_SFIXED on shutdown. */
  int ow = otherwhite(g);
  GCobj *o;
  while ((o = gcref(*p)) != NULL && lim-- > 0) {
    if (o->gch.gct == ~LJ_TTHREAD)  /* Need to sweep open upvalues, too. */
      gc_fullsweep(g, &gco2th(o)->openupval);
    if (((o->gch.marked ^ LJ_GC_WHITES) & ow)) {  /* Black or current white? */
      lj_assertG(!isdead(g, o) || (o->gch.marked & LJ_GC_FIXED),
		 "sweep of undead object");
      makewhite(g, o);  /* Value is alive, change to the current white. */
      p = &o->gch.nextgc;
    } else {  /* Otherwise value is dead, free it. */
      lj_assertG(isdead(g, o) || ow == LJ_GC_SFIXED,
		 "sweep of unlive object");
      setgcrefr(*p, o->gch.nextgc);
      if (o == gcref(g->gc.root))
	setgcrefr(g->gc.root, o->gch.nextgc);  /* Adjust list anchor. */
      gc_freefunc[o->gch.gct - ~LJ_TSTR](g, o);
    }
  }
  return p;
}
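
/*
** Sketch of the liveness test above, with the bit values from lj_gc.h
** (LJ_GC_WHITE0 = 0x01, LJ_GC_WHITE1 = 0x02): if the current white is
** WHITE0, then ow == WHITE1|LJ_GC_FIXED. XOR-ing marked with
** LJ_GC_WHITES flips the white bits, so a current-white or black object
** yields a nonzero result under the mask (alive), while an other-white
** object yields zero (dead). Fixed objects always pass via LJ_GC_FIXED.
*/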

/* Sweep one string interning table chain. Preserves hashalg bit. */
static void gc_sweepstr(global_State *g, GCRef *chain)
{
  /* Mask with other white and LJ_GC_FIXED. Or LJ_GC_SFIXED on shutdown. */
  int ow = otherwhite(g);
  uintptr_t u = gcrefu(*chain);
  GCRef q;
  GCRef *p = &q;
  GCobj *o;
  setgcrefp(q, (u & ~(uintptr_t)1));
  while ((o = gcref(*p)) != NULL) {
    if (((o->gch.marked ^ LJ_GC_WHITES) & ow)) {  /* Black or current white? */
      lj_assertG(!isdead(g, o) || (o->gch.marked & LJ_GC_FIXED),
		 "sweep of undead string");
      makewhite(g, o);  /* String is alive, change to the current white. */
      p = &o->gch.nextgc;
    } else {  /* Otherwise string is dead, free it. */
      lj_assertG(isdead(g, o) || ow == LJ_GC_SFIXED,
		 "sweep of unlive string");
      setgcrefr(*p, o->gch.nextgc);
      lj_str_free(g, gco2str(o));
    }
  }
  setgcrefp(*chain, (gcrefu(q) | (u & 1)));
}

/* Check whether we can clear a key or a value slot from a table. */
static int gc_mayclear(cTValue *o, int val)
{
  if (tvisgcv(o)) {  /* Only collectable objects can be weak references. */
    if (tvisstr(o)) {  /* But strings cannot be used as weak references. */
      gc_mark_str(strV(o));  /* And need to be marked. */
      return 0;
    }
    if (iswhite(gcV(o)))
      return 1;  /* Object is about to be collected. */
    if (tvisudata(o) && val && isfinalized(udataV(o)))
      return 1;  /* Finalized userdata is dropped only from values. */
  }
  return 0;  /* Cannot clear. */
}
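
/*
** Lua-level consequence (illustrative): strings never act as weak
** references, so
**
**   local t = setmetatable({}, { __mode = "k" })
**   t["key"] = 1
**
** keeps its entry forever, whereas a table or userdata key would be
** cleared once it becomes otherwise unreachable.
*/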

/* Clear collected entries from weak tables. */
static void gc_clearweak(global_State *g, GCobj *o)
{
  UNUSED(g);
  while (o) {
    GCtab *t = gco2tab(o);
    lj_assertG((t->marked & LJ_GC_WEAK), "clear of non-weak table");
    if ((t->marked & LJ_GC_WEAKVAL)) {
      MSize i, asize = t->asize;
      for (i = 0; i < asize; i++) {
	/* Clear array slot when value is about to be collected. */
	TValue *tv = arrayslot(t, i);
	if (gc_mayclear(tv, 1))
	  setnilV(tv);
      }
    }
    if (t->hmask > 0) {
      Node *node = noderef(t->node);
      MSize i, hmask = t->hmask;
      for (i = 0; i <= hmask; i++) {
	Node *n = &node[i];
	/* Clear hash slot when key or value is about to be collected. */
	if (!tvisnil(&n->val) && (gc_mayclear(&n->key, 0) ||
				  gc_mayclear(&n->val, 1)))
	  setnilV(&n->val);
      }
    }
    o = gcref(t->gclist);
  }
}

/* Call a userdata or cdata finalizer. */
static void gc_call_finalizer(global_State *g, lua_State *L,
			      cTValue *mo, GCobj *o)
{
  /* Save and restore lots of state around the __gc callback. */
  uint8_t oldh = hook_save(g);
  GCSize oldt = g->gc.threshold;
  int errcode;
  TValue *top;
  lj_trace_abort(g);
  hook_entergc(g);  /* Disable hooks and new traces during __gc. */
  if (LJ_HASPROFILE && (oldh & HOOK_PROFILE)) lj_dispatch_update(g);
  g->gc.threshold = LJ_MAX_MEM;  /* Prevent GC steps. */
  top = L->top;
  copyTV(L, top++, mo);
  if (LJ_FR2) setnilV(top++);
  setgcV(L, top, o, ~o->gch.gct);
  L->top = top+1;
  errcode = lj_vm_pcall(L, top, 1+0, -1);  /* Stack: |mo|o| -> | */
  hook_restore(g, oldh);
  if (LJ_HASPROFILE && (oldh & HOOK_PROFILE)) lj_dispatch_update(g);
  g->gc.threshold = oldt;  /* Restore GC threshold. */
  if (errcode) {
    ptrdiff_t errobj = savestack(L, L->top-1);  /* Stack may be resized. */
    lj_vmevent_send(L, ERRFIN,
      copyTV(L, L->top++, restorestack(L, errobj));
    );
    L->top--;
  }
}
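
/*
** Lua-level trigger for this path (illustrative, Lua 5.1 API):
**
**   local u = newproxy(true)
**   getmetatable(u).__gc = function(obj) --[[ cleanup ]] end
**
** An error thrown by the callback does not propagate into the mutator;
** it is only reported through the ERRFIN VM event above.
*/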

/* Finalize one userdata or cdata object from the mmudata list. */
static void gc_finalize(lua_State *L)
{
  global_State *g = G(L);
  GCobj *o = gcnext(gcref(g->gc.mmudata));
  cTValue *mo;
  lj_assertG(tvref(g->jit_base) == NULL, "finalizer called on trace");
  /* Unchain from list of userdata to be finalized. */
  if (o == gcref(g->gc.mmudata))
    setgcrefnull(g->gc.mmudata);
  else
    setgcrefr(gcref(g->gc.mmudata)->gch.nextgc, o->gch.nextgc);
#if LJ_HASFFI
  if (o->gch.gct == ~LJ_TCDATA) {
    TValue tmp, *tv;
    /* Add cdata back to the GC list and make it white. */
    setgcrefr(o->gch.nextgc, g->gc.root);
    setgcref(g->gc.root, o);
    makewhite(g, o);
    o->gch.marked &= (uint8_t)~LJ_GC_CDATA_FIN;
    /* Resolve finalizer. */
    setcdataV(L, &tmp, gco2cd(o));
    tv = lj_tab_set(L, ctype_ctsG(g)->finalizer, &tmp);
    if (!tvisnil(tv)) {
      g->gc.nocdatafin = 0;
      copyTV(L, &tmp, tv);
      setnilV(tv);  /* Clear entry in finalizer table. */
      gc_call_finalizer(g, L, &tmp, o);
    }
    return;
  }
#endif
  /* Add userdata back to the main userdata list and make it white. */
  setgcrefr(o->gch.nextgc, mainthread(g)->nextgc);
  setgcref(mainthread(g)->nextgc, o);
  makewhite(g, o);
  /* Resolve the __gc metamethod. */
  mo = lj_meta_fastg(g, tabref(gco2ud(o)->metatable), MM_gc);
  if (mo)
    gc_call_finalizer(g, L, mo, o);
}

/* Finalize all userdata objects from mmudata list. */
void lj_gc_finalize_udata(lua_State *L)
{
  while (gcref(G(L)->gc.mmudata) != NULL)
    gc_finalize(L);
}

#if LJ_HASFFI
/* Finalize all cdata objects from finalizer table. */
void lj_gc_finalize_cdata(lua_State *L)
{
  global_State *g = G(L);
  CTState *cts = ctype_ctsG(g);
  if (cts) {
    GCtab *t = cts->finalizer;
    Node *node = noderef(t->node);
    ptrdiff_t i;
    setgcrefnull(t->metatable);  /* Mark finalizer table as disabled. */
    for (i = (ptrdiff_t)t->hmask; i >= 0; i--)
      if (!tvisnil(&node[i].val) && tviscdata(&node[i].key)) {
	GCobj *o = gcV(&node[i].key);
	TValue tmp;
	makewhite(g, o);
	o->gch.marked &= (uint8_t)~LJ_GC_CDATA_FIN;
	copyTV(L, &tmp, &node[i].val);
	setnilV(&node[i].val);
	gc_call_finalizer(g, L, &tmp, o);
      }
  }
}
#endif

/* Free all remaining GC objects. */
void lj_gc_freeall(global_State *g)
{
  MSize i, strmask;
  /* Free everything, except super-fixed objects (the main thread). */
  g->gc.currentwhite = LJ_GC_WHITES | LJ_GC_SFIXED;
  gc_fullsweep(g, &g->gc.root);
  strmask = g->str.mask;
  for (i = 0; i <= strmask; i++)  /* Free all string hash chains. */
    gc_sweepstr(g, &g->str.tab[i]);
}

/* -- Collector ----------------------------------------------------------- */

/* Atomic part of the GC cycle, transitioning from mark to sweep phase. */
static void atomic(global_State *g, lua_State *L)
{
  size_t udsize;

  gc_mark_uv(g);  /* Need to remark open upvalues (the thread may be dead). */
  gc_propagate_gray(g);  /* Propagate any left-overs. */

  setgcrefr(g->gc.gray, g->gc.weak);  /* Empty the list of weak tables. */
  setgcrefnull(g->gc.weak);
  lj_assertG(!iswhite(obj2gco(mainthread(g))), "main thread turned white");
  gc_markobj(g, L);  /* Mark running thread. */
  gc_traverse_curtrace(g);  /* Traverse current trace. */
  gc_mark_gcroot(g);  /* Mark GC roots (again). */
  gc_propagate_gray(g);  /* Propagate all of the above. */

  setgcrefr(g->gc.gray, g->gc.grayagain);  /* Empty the 2nd chance list. */
  setgcrefnull(g->gc.grayagain);
  gc_propagate_gray(g);  /* Propagate it. */

  udsize = lj_gc_separateudata(g, 0);  /* Separate userdata to be finalized. */
  gc_mark_mmudata(g);  /* Mark them. */
  udsize += gc_propagate_gray(g);  /* And propagate the marks. */

  /* All marking done, clear weak tables. */
  gc_clearweak(g, gcref(g->gc.weak));

  lj_buf_shrink(L, &g->tmpbuf);  /* Shrink temp buffer. */

  /* Prepare for sweep phase. */
  g->gc.currentwhite = (uint8_t)otherwhite(g);  /* Flip current white. */
  g->strempty.marked = g->gc.currentwhite;
  setmref(g->gc.sweep, &g->gc.root);
  g->gc.estimate = g->gc.total - (GCSize)udsize;  /* Initial estimate. */
}
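
/*
** State progression for one full cycle (see gc_onestep() below):
** GCSpause -> GCSpropagate -> GCSatomic -> GCSsweepstring -> GCSsweep
** -> GCSfinalize (only if the mmudata list is non-empty) -> GCSpause.
*/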

/* GC state machine. Returns a cost estimate for each step performed. */
static size_t gc_onestep(lua_State *L)
{
  global_State *g = G(L);
  switch (g->gc.state) {
  case GCSpause:
    gc_mark_start(g);  /* Start a new GC cycle by marking all GC roots. */
    return 0;
  case GCSpropagate:
    if (gcref(g->gc.gray) != NULL)
      return propagatemark(g);  /* Propagate one gray object. */
    g->gc.state = GCSatomic;  /* End of mark phase. */
    return 0;
  case GCSatomic:
    if (tvref(g->jit_base))  /* Don't run atomic phase on trace. */
      return LJ_MAX_MEM;
    atomic(g, L);
    g->gc.state = GCSsweepstring;  /* Start of sweep phase. */
    g->gc.sweepstr = 0;
    return 0;
  case GCSsweepstring: {
    GCSize old = g->gc.total;
    gc_sweepstr(g, &g->str.tab[g->gc.sweepstr++]);  /* Sweep one chain. */
    if (g->gc.sweepstr > g->str.mask)
      g->gc.state = GCSsweep;  /* All string hash chains swept. */
    lj_assertG(old >= g->gc.total, "sweep increased memory");
    g->gc.estimate -= old - g->gc.total;
    return GCSWEEPCOST;
    }
  case GCSsweep: {
    GCSize old = g->gc.total;
    setmref(g->gc.sweep, gc_sweep(g, mref(g->gc.sweep, GCRef), GCSWEEPMAX));
    lj_assertG(old >= g->gc.total, "sweep increased memory");
    g->gc.estimate -= old - g->gc.total;
    if (gcref(*mref(g->gc.sweep, GCRef)) == NULL) {
      if (g->str.num <= (g->str.mask >> 2) && g->str.mask > LJ_MIN_STRTAB*2-1)
	lj_str_resize(L, g->str.mask >> 1);  /* Shrink string table. */
      if (gcref(g->gc.mmudata)) {  /* Need any finalizations? */
	g->gc.state = GCSfinalize;
#if LJ_HASFFI
	g->gc.nocdatafin = 1;
#endif
      } else {  /* Otherwise skip this phase to help the JIT. */
	g->gc.state = GCSpause;  /* End of GC cycle. */
	g->gc.debt = 0;
      }
    }
    return GCSWEEPMAX*GCSWEEPCOST;
    }
  case GCSfinalize:
    if (gcref(g->gc.mmudata) != NULL) {
      GCSize old = g->gc.total;
      if (tvref(g->jit_base))  /* Don't call finalizers on trace. */
	return LJ_MAX_MEM;
      gc_finalize(L);  /* Finalize one userdata object. */
      if (old >= g->gc.total && g->gc.estimate > old - g->gc.total)
	g->gc.estimate -= old - g->gc.total;
      if (g->gc.estimate > GCFINALIZECOST)
	g->gc.estimate -= GCFINALIZECOST;
      return GCFINALIZECOST;
    }
#if LJ_HASFFI
    if (!g->gc.nocdatafin) lj_tab_rehash(L, ctype_ctsG(g)->finalizer);
#endif
    g->gc.state = GCSpause;  /* End of GC cycle. */
    g->gc.debt = 0;
    return 0;
  default:
    lj_assertG(0, "bad GC state");
    return 0;
  }
}

/* Perform a limited amount of incremental GC steps. */
int LJ_FASTCALL lj_gc_step(lua_State *L)
{
  global_State *g = G(L);
  GCSize lim;
  int32_t ostate = g->vmstate;
  setvmstate(g, GC);
  lim = (GCSTEPSIZE/100) * g->gc.stepmul;
  if (lim == 0)
    lim = LJ_MAX_MEM;
  if (g->gc.total > g->gc.threshold)
    g->gc.debt += g->gc.total - g->gc.threshold;
  do {
    lim -= (GCSize)gc_onestep(L);
    if (g->gc.state == GCSpause) {
      g->gc.threshold = (g->gc.estimate/100) * g->gc.pause;
      g->vmstate = ostate;
      return 1;  /* Finished a GC cycle. */
    }
  } while (sizeof(lim) == 8 ? ((int64_t)lim > 0) : ((int32_t)lim > 0));
  if (g->gc.debt < GCSTEPSIZE) {
    g->gc.threshold = g->gc.total + GCSTEPSIZE;
    g->vmstate = ostate;
    return -1;
  } else {
    g->gc.debt -= GCSTEPSIZE;
    g->gc.threshold = g->gc.total;
    g->vmstate = ostate;
    return 0;
  }
}
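
/*
** Budget arithmetic with the default settings (stepmul = 200,
** pause = 200): lim = (GCSTEPSIZE/100)*200 = 2000 cost units per call,
** and after a finished cycle the next threshold is (estimate/100)*200,
** i.e. the collector restarts once memory use roughly doubles the
** live-data estimate.
*/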

/* Ditto, but fix the stack top first. */
void LJ_FASTCALL lj_gc_step_fixtop(lua_State *L)
{
  if (curr_funcisL(L)) L->top = curr_topL(L);
  lj_gc_step(L);
}

#if LJ_HASJIT
/* Perform multiple GC steps. Called from JIT-compiled code. */
int LJ_FASTCALL lj_gc_step_jit(global_State *g, MSize steps)
{
  lua_State *L = gco2th(gcref(g->cur_L));
  L->base = tvref(G(L)->jit_base);
  L->top = curr_topL(L);
  while (steps-- > 0 && lj_gc_step(L) == 0)
    ;
  /* Return 1 to force a trace exit. */
  return (G(L)->gc.state == GCSatomic || G(L)->gc.state == GCSfinalize);
}
#endif

/* Perform a full GC cycle. */
void lj_gc_fullgc(lua_State *L)
{
  global_State *g = G(L);
  int32_t ostate = g->vmstate;
  setvmstate(g, GC);
  if (g->gc.state <= GCSatomic) {  /* Caught somewhere in the middle. */
    setmref(g->gc.sweep, &g->gc.root);  /* Sweep everything (preserving it). */
    setgcrefnull(g->gc.gray);  /* Reset lists from partial propagation. */
    setgcrefnull(g->gc.grayagain);
    setgcrefnull(g->gc.weak);
    g->gc.state = GCSsweepstring;  /* Fast forward to the sweep phase. */
    g->gc.sweepstr = 0;
  }
  while (g->gc.state == GCSsweepstring || g->gc.state == GCSsweep)
    gc_onestep(L);  /* Finish sweep. */
  lj_assertG(g->gc.state == GCSfinalize || g->gc.state == GCSpause,
	     "bad GC state");
  /* Now perform a full GC. */
  g->gc.state = GCSpause;
  do { gc_onestep(L); } while (g->gc.state != GCSpause);
  g->gc.threshold = (g->gc.estimate/100) * g->gc.pause;
  g->vmstate = ostate;
}

/* -- Write barriers ------------------------------------------------------ */

/* Move the GC propagation frontier forward. */
void lj_gc_barrierf(global_State *g, GCobj *o, GCobj *v)
{
  lj_assertG(isblack(o) && iswhite(v) && !isdead(g, v) && !isdead(g, o),
	     "bad object states for forward barrier");
  lj_assertG(g->gc.state != GCSfinalize && g->gc.state != GCSpause,
	     "bad GC state");
  lj_assertG(o->gch.gct != ~LJ_TTAB, "barrier object is not a table");
  /* Preserve invariant during propagation. Otherwise it doesn't matter. */
  if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic)
    gc_mark(g, v);  /* Move frontier forward. */
  else
    makewhite(g, o);  /* Make it white to avoid the following barrier. */
}
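
/*
** Invariant sketch: the incremental collector must never leave a black
** object pointing at a white one. When a black non-table object o gains
** a reference to a white v during marking, v is marked right away (the
** frontier moves forward). Tables instead use a backward barrier that
** re-grays the table itself (see lj_gc_barrierback in lj_gc.h), which
** is why a table reaching this function would be a bug.
*/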

/* Specialized barrier for closed upvalue. Pass &uv->tv. */
void LJ_FASTCALL lj_gc_barrieruv(global_State *g, TValue *tv)
{
#define TV2MARKED(x) \
  (*((uint8_t *)(x) - offsetof(GCupval, tv) + offsetof(GCupval, marked)))
  if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic)
    gc_mark(g, gcV(tv));
  else
    TV2MARKED(tv) = (TV2MARKED(tv) & (uint8_t)~LJ_GC_COLORS) | curwhite(g);
#undef TV2MARKED
}

/* Close upvalue. Also needs a write barrier. */
void lj_gc_closeuv(global_State *g, GCupval *uv)
{
  GCobj *o = obj2gco(uv);
  /* Copy stack slot to upvalue itself and point to the copy. */
  copyTV(mainthread(g), &uv->tv, uvval(uv));
  setmref(uv->v, &uv->tv);
  uv->closed = 1;
  setgcrefr(o->gch.nextgc, g->gc.root);
  setgcref(g->gc.root, o);
  if (isgray(o)) {  /* A closed upvalue is never gray, so fix this. */
    if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic) {
      gray2black(o);  /* Make it black and preserve invariant. */
      if (tviswhite(&uv->tv))
	lj_gc_barrierf(g, o, gcV(&uv->tv));
    } else {
      makewhite(g, o);  /* Make it white, i.e. sweep the upvalue. */
      lj_assertG(g->gc.state != GCSfinalize && g->gc.state != GCSpause,
		 "bad GC state");
    }
  }
}

#if LJ_HASJIT
/* Mark a trace if it's saved during the propagation phase. */
void lj_gc_barriertrace(global_State *g, uint32_t traceno)
{
  if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic)
    gc_marktrace(g, traceno);
}
#endif

/* -- Allocator ----------------------------------------------------------- */

/* Call pluggable memory allocator to allocate or resize a fragment. */
void *lj_mem_realloc(lua_State *L, void *p, GCSize osz, GCSize nsz)
{
  global_State *g = G(L);
  lj_assertG((osz == 0) == (p == NULL), "realloc API violation");
  p = g->allocf(g->allocd, p, osz, nsz);
  if (p == NULL && nsz > 0)
    lj_err_mem(L);
  lj_assertG((nsz == 0) == (p == NULL), "allocf API violation");
  lj_assertG(checkptrGC(p),
	     "allocated memory address %p outside required range", p);
  g->gc.total = (g->gc.total - osz) + nsz;
  return p;
}

/* Allocate new GC object and link it to the root set. */
void * LJ_FASTCALL lj_mem_newgco(lua_State *L, GCSize size)
{
  global_State *g = G(L);
  GCobj *o = (GCobj *)g->allocf(g->allocd, NULL, 0, size);
  if (o == NULL)
    lj_err_mem(L);
  lj_assertG(checkptrGC(o),
	     "allocated memory address %p outside required range", o);
  g->gc.total += size;
  setgcrefr(o->gch.nextgc, g->gc.root);
  setgcref(g->gc.root, o);
  newwhite(g, o);
  return o;
}

/* Resize growable vector. */
void *lj_mem_grow(lua_State *L, void *p, MSize *szp, MSize lim, MSize esz)
{
  MSize sz = (*szp) << 1;
  if (sz < LJ_MIN_VECSZ)
    sz = LJ_MIN_VECSZ;
  if (sz > lim)
    sz = lim;
  p = lj_mem_realloc(L, p, (*szp)*esz, sz*esz);
  *szp = sz;
  return p;
}
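
/*
** Growth policy note: the size doubles per call (clamped to at least
** LJ_MIN_VECSZ and at most lim), so growing a vector to n elements
** costs O(n) amortized reallocation work.
*/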