/*
** lj_gc.c — from luatex.git: source/libs/luajit/LuaJIT-src/src/lj_gc.c
** (snapshot beta-0.89.2, blob 99d664aa2a17357c392d60f7094dacce7d847a1c)
*/
/*
** Garbage collector.
** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
**
** Major portions taken verbatim or adapted from the Lua interpreter.
** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
*/
9 #define lj_gc_c
10 #define LUA_CORE
12 #include "lj_obj.h"
13 #include "lj_gc.h"
14 #include "lj_err.h"
15 #include "lj_buf.h"
16 #include "lj_str.h"
17 #include "lj_tab.h"
18 #include "lj_func.h"
19 #include "lj_udata.h"
20 #include "lj_meta.h"
21 #include "lj_state.h"
22 #include "lj_frame.h"
23 #if LJ_HASFFI
24 #include "lj_ctype.h"
25 #include "lj_cdata.h"
26 #endif
27 #include "lj_trace.h"
28 #include "lj_vm.h"
30 #define GCSTEPSIZE 1024u
31 #define GCSWEEPMAX 40
32 #define GCSWEEPCOST 10
33 #define GCFINALIZECOST 100
/* Macros to set GCobj colors and flags. */
#define white2gray(x)		((x)->gch.marked &= (uint8_t)~LJ_GC_WHITES)
#define gray2black(x)		((x)->gch.marked |= LJ_GC_BLACK)
#define isfinalized(u)		((u)->marked & LJ_GC_FINALIZED)

/* -- Mark phase ---------------------------------------------------------- */

/* Mark a TValue (if needed). */
#define gc_marktv(g, tv) \
  { lua_assert(!tvisgcv(tv) || (~itype(tv) == gcval(tv)->gch.gct)); \
    if (tviswhite(tv)) gc_mark(g, gcV(tv)); }

/* Mark a GCobj (if needed). */
#define gc_markobj(g, o) \
  { if (iswhite(obj2gco(o))) gc_mark(g, obj2gco(o)); }

/* Mark a string object. Strings are never gray, so this suffices. */
#define gc_mark_str(s)		((s)->marked &= (uint8_t)~LJ_GC_WHITES)
54 /* Mark a white GCobj. */
55 static void gc_mark(global_State *g, GCobj *o)
57 int gct = o->gch.gct;
58 lua_assert(iswhite(o) && !isdead(g, o));
59 white2gray(o);
60 if (LJ_UNLIKELY(gct == ~LJ_TUDATA)) {
61 GCtab *mt = tabref(gco2ud(o)->metatable);
62 gray2black(o); /* Userdata are never gray. */
63 if (mt) gc_markobj(g, mt);
64 gc_markobj(g, tabref(gco2ud(o)->env));
65 } else if (LJ_UNLIKELY(gct == ~LJ_TUPVAL)) {
66 GCupval *uv = gco2uv(o);
67 gc_marktv(g, uvval(uv));
68 if (uv->closed)
69 gray2black(o); /* Closed upvalues are never gray. */
70 } else if (gct != ~LJ_TSTR && gct != ~LJ_TCDATA) {
71 lua_assert(gct == ~LJ_TFUNC || gct == ~LJ_TTAB ||
72 gct == ~LJ_TTHREAD || gct == ~LJ_TPROTO);
73 setgcrefr(o->gch.gclist, g->gc.gray);
74 setgcref(g->gc.gray, o);
78 /* Mark GC roots. */
79 static void gc_mark_gcroot(global_State *g)
81 ptrdiff_t i;
82 for (i = 0; i < GCROOT_MAX; i++)
83 if (gcref(g->gcroot[i]) != NULL)
84 gc_markobj(g, gcref(g->gcroot[i]));
87 /* Start a GC cycle and mark the root set. */
88 static void gc_mark_start(global_State *g)
90 setgcrefnull(g->gc.gray);
91 setgcrefnull(g->gc.grayagain);
92 setgcrefnull(g->gc.weak);
93 gc_markobj(g, mainthread(g));
94 gc_markobj(g, tabref(mainthread(g)->env));
95 gc_marktv(g, &g->registrytv);
96 gc_mark_gcroot(g);
97 g->gc.state = GCSpropagate;
100 /* Mark open upvalues. */
101 static void gc_mark_uv(global_State *g)
103 GCupval *uv;
104 for (uv = uvnext(&g->uvhead); uv != &g->uvhead; uv = uvnext(uv)) {
105 lua_assert(uvprev(uvnext(uv)) == uv && uvnext(uvprev(uv)) == uv);
106 if (isgray(obj2gco(uv)))
107 gc_marktv(g, uvval(uv));
111 /* Mark userdata in mmudata list. */
112 static void gc_mark_mmudata(global_State *g)
114 GCobj *root = gcref(g->gc.mmudata);
115 GCobj *u = root;
116 if (u) {
117 do {
118 u = gcnext(u);
119 makewhite(g, u); /* Could be from previous GC. */
120 gc_mark(g, u);
121 } while (u != root);
125 /* Separate userdata objects to be finalized to mmudata list. */
126 size_t lj_gc_separateudata(global_State *g, int all)
128 size_t m = 0;
129 GCRef *p = &mainthread(g)->nextgc;
130 GCobj *o;
131 while ((o = gcref(*p)) != NULL) {
132 if (!(iswhite(o) || all) || isfinalized(gco2ud(o))) {
133 p = &o->gch.nextgc; /* Nothing to do. */
134 } else if (!lj_meta_fastg(g, tabref(gco2ud(o)->metatable), MM_gc)) {
135 markfinalized(o); /* Done, as there's no __gc metamethod. */
136 p = &o->gch.nextgc;
137 } else { /* Otherwise move userdata to be finalized to mmudata list. */
138 m += sizeudata(gco2ud(o));
139 markfinalized(o);
140 *p = o->gch.nextgc;
141 if (gcref(g->gc.mmudata)) { /* Link to end of mmudata list. */
142 GCobj *root = gcref(g->gc.mmudata);
143 setgcrefr(o->gch.nextgc, root->gch.nextgc);
144 setgcref(root->gch.nextgc, o);
145 setgcref(g->gc.mmudata, o);
146 } else { /* Create circular list. */
147 setgcref(o->gch.nextgc, o);
148 setgcref(g->gc.mmudata, o);
152 return m;
155 /* -- Propagation phase --------------------------------------------------- */
157 /* Traverse a table. */
158 static int gc_traverse_tab(global_State *g, GCtab *t)
160 int weak = 0;
161 cTValue *mode;
162 GCtab *mt = tabref(t->metatable);
163 if (mt)
164 gc_markobj(g, mt);
165 mode = lj_meta_fastg(g, mt, MM_mode);
166 if (mode && tvisstr(mode)) { /* Valid __mode field? */
167 const char *modestr = strVdata(mode);
168 int c;
169 while ((c = *modestr++)) {
170 if (c == 'k') weak |= LJ_GC_WEAKKEY;
171 else if (c == 'v') weak |= LJ_GC_WEAKVAL;
172 else if (c == 'K') weak = (int)(~0u & ~LJ_GC_WEAKVAL);
174 if (weak > 0) { /* Weak tables are cleared in the atomic phase. */
175 t->marked = (uint8_t)((t->marked & ~LJ_GC_WEAK) | weak);
176 setgcrefr(t->gclist, g->gc.weak);
177 setgcref(g->gc.weak, obj2gco(t));
180 if (weak == LJ_GC_WEAK) /* Nothing to mark if both keys/values are weak. */
181 return 1;
182 if (!(weak & LJ_GC_WEAKVAL)) { /* Mark array part. */
183 MSize i, asize = t->asize;
184 for (i = 0; i < asize; i++)
185 gc_marktv(g, arrayslot(t, i));
187 if (t->hmask > 0) { /* Mark hash part. */
188 Node *node = noderef(t->node);
189 MSize i, hmask = t->hmask;
190 for (i = 0; i <= hmask; i++) {
191 Node *n = &node[i];
192 if (!tvisnil(&n->val)) { /* Mark non-empty slot. */
193 lua_assert(!tvisnil(&n->key));
194 if (!(weak & LJ_GC_WEAKKEY)) gc_marktv(g, &n->key);
195 if (!(weak & LJ_GC_WEAKVAL)) gc_marktv(g, &n->val);
199 return weak;
202 /* Traverse a function. */
203 static void gc_traverse_func(global_State *g, GCfunc *fn)
205 gc_markobj(g, tabref(fn->c.env));
206 if (isluafunc(fn)) {
207 uint32_t i;
208 lua_assert(fn->l.nupvalues <= funcproto(fn)->sizeuv);
209 gc_markobj(g, funcproto(fn));
210 for (i = 0; i < fn->l.nupvalues; i++) /* Mark Lua function upvalues. */
211 gc_markobj(g, &gcref(fn->l.uvptr[i])->uv);
212 } else {
213 uint32_t i;
214 for (i = 0; i < fn->c.nupvalues; i++) /* Mark C function upvalues. */
215 gc_marktv(g, &fn->c.upvalue[i]);
#if LJ_HASJIT
/* Mark a trace: turn it gray and queue it for traversal. */
static void gc_marktrace(global_State *g, TraceNo traceno)
{
  GCobj *o = obj2gco(traceref(G2J(g), traceno));
  lua_assert(traceno != G2J(g)->cur.traceno);
  if (iswhite(o)) {
    white2gray(o);
    setgcrefr(o->gch.gclist, g->gc.gray);
    setgcref(g->gc.gray, o);
  }
}

/* Traverse a trace: GC constants in the IR plus linked traces/prototype. */
static void gc_traverse_trace(global_State *g, GCtrace *T)
{
  IRRef ref;
  if (T->traceno == 0) return;
  for (ref = T->nk; ref < REF_TRUE; ref++) {
    IRIns *ir = &T->ir[ref];
    if (ir->o == IR_KGC)
      gc_markobj(g, ir_kgc(ir));
  }
  if (T->link) gc_marktrace(g, T->link);
  if (T->nextroot) gc_marktrace(g, T->nextroot);
  if (T->nextside) gc_marktrace(g, T->nextside);
  gc_markobj(g, gcref(T->startpt));
}

/* The current trace is a GC root while not anchored in the prototype (yet). */
#define gc_traverse_curtrace(g)	gc_traverse_trace(g, &G2J(g)->cur)
#else
#define gc_traverse_curtrace(g)	UNUSED(g)
#endif
254 /* Traverse a prototype. */
255 static void gc_traverse_proto(global_State *g, GCproto *pt)
257 ptrdiff_t i;
258 gc_mark_str(proto_chunkname(pt));
259 for (i = -(ptrdiff_t)pt->sizekgc; i < 0; i++) /* Mark collectable consts. */
260 gc_markobj(g, proto_kgc(pt, i));
261 #if LJ_HASJIT
262 if (pt->trace) gc_marktrace(g, pt->trace);
263 #endif
266 /* Traverse the frame structure of a stack. */
267 static MSize gc_traverse_frames(global_State *g, lua_State *th)
269 TValue *frame, *top = th->top-1, *bot = tvref(th->stack);
270 /* Note: extra vararg frame not skipped, marks function twice (harmless). */
271 for (frame = th->base-1; frame > bot+LJ_FR2; frame = frame_prev(frame)) {
272 GCfunc *fn = frame_func(frame);
273 TValue *ftop = frame;
274 if (isluafunc(fn)) ftop += funcproto(fn)->framesize;
275 if (ftop > top) top = ftop;
276 if (!LJ_FR2) gc_markobj(g, fn); /* Need to mark hidden function (or L). */
278 top++; /* Correct bias of -1 (frame == base-1). */
279 if (top > tvref(th->maxstack)) top = tvref(th->maxstack);
280 return (MSize)(top - bot); /* Return minimum needed stack size. */
283 /* Traverse a thread object. */
284 static void gc_traverse_thread(global_State *g, lua_State *th)
286 TValue *o, *top = th->top;
287 for (o = tvref(th->stack)+1+LJ_FR2; o < top; o++)
288 gc_marktv(g, o);
289 if (g->gc.state == GCSatomic) {
290 top = tvref(th->stack) + th->stacksize;
291 for (; o < top; o++) /* Clear unmarked slots. */
292 setnilV(o);
294 gc_markobj(g, tabref(th->env));
295 lj_state_shrinkstack(th, gc_traverse_frames(g, th));
298 /* Propagate one gray object. Traverse it and turn it black. */
299 static size_t propagatemark(global_State *g)
301 GCobj *o = gcref(g->gc.gray);
302 int gct = o->gch.gct;
303 lua_assert(isgray(o));
304 gray2black(o);
305 setgcrefr(g->gc.gray, o->gch.gclist); /* Remove from gray list. */
306 if (LJ_LIKELY(gct == ~LJ_TTAB)) {
307 GCtab *t = gco2tab(o);
308 if (gc_traverse_tab(g, t) > 0)
309 black2gray(o); /* Keep weak tables gray. */
310 return sizeof(GCtab) + sizeof(TValue) * t->asize +
311 sizeof(Node) * (t->hmask + 1);
312 } else if (LJ_LIKELY(gct == ~LJ_TFUNC)) {
313 GCfunc *fn = gco2func(o);
314 gc_traverse_func(g, fn);
315 return isluafunc(fn) ? sizeLfunc((MSize)fn->l.nupvalues) :
316 sizeCfunc((MSize)fn->c.nupvalues);
317 } else if (LJ_LIKELY(gct == ~LJ_TPROTO)) {
318 GCproto *pt = gco2pt(o);
319 gc_traverse_proto(g, pt);
320 return pt->sizept;
321 } else if (LJ_LIKELY(gct == ~LJ_TTHREAD)) {
322 lua_State *th = gco2th(o);
323 setgcrefr(th->gclist, g->gc.grayagain);
324 setgcref(g->gc.grayagain, o);
325 black2gray(o); /* Threads are never black. */
326 gc_traverse_thread(g, th);
327 return sizeof(lua_State) + sizeof(TValue) * th->stacksize;
328 } else {
329 #if LJ_HASJIT
330 GCtrace *T = gco2trace(o);
331 gc_traverse_trace(g, T);
332 return ((sizeof(GCtrace)+7)&~7) + (T->nins-T->nk)*sizeof(IRIns) +
333 T->nsnap*sizeof(SnapShot) + T->nsnapmap*sizeof(SnapEntry);
334 #else
335 lua_assert(0);
336 return 0;
337 #endif
341 /* Propagate all gray objects. */
342 static size_t gc_propagate_gray(global_State *g)
344 size_t m = 0;
345 while (gcref(g->gc.gray) != NULL)
346 m += propagatemark(g);
347 return m;
350 /* -- Sweep phase --------------------------------------------------------- */
352 /* Type of GC free functions. */
353 typedef void (LJ_FASTCALL *GCFreeFunc)(global_State *g, GCobj *o);
355 /* GC free functions for LJ_TSTR .. LJ_TUDATA. ORDER LJ_T */
356 static const GCFreeFunc gc_freefunc[] = {
357 (GCFreeFunc)lj_str_free,
358 (GCFreeFunc)lj_func_freeuv,
359 (GCFreeFunc)lj_state_free,
360 (GCFreeFunc)lj_func_freeproto,
361 (GCFreeFunc)lj_func_free,
362 #if LJ_HASJIT
363 (GCFreeFunc)lj_trace_free,
364 #else
365 (GCFreeFunc)0,
366 #endif
367 #if LJ_HASFFI
368 (GCFreeFunc)lj_cdata_free,
369 #else
370 (GCFreeFunc)0,
371 #endif
372 (GCFreeFunc)lj_tab_free,
373 (GCFreeFunc)lj_udata_free
376 /* Full sweep of a GC list. */
377 #define gc_fullsweep(g, p) gc_sweep(g, (p), ~(uint32_t)0)
379 /* Partial sweep of a GC list. */
380 static GCRef *gc_sweep(global_State *g, GCRef *p, uint32_t lim)
382 /* Mask with other white and LJ_GC_FIXED. Or LJ_GC_SFIXED on shutdown. */
383 int ow = otherwhite(g);
384 GCobj *o;
385 while ((o = gcref(*p)) != NULL && lim-- > 0) {
386 if (o->gch.gct == ~LJ_TTHREAD) /* Need to sweep open upvalues, too. */
387 gc_fullsweep(g, &gco2th(o)->openupval);
388 if (((o->gch.marked ^ LJ_GC_WHITES) & ow)) { /* Black or current white? */
389 lua_assert(!isdead(g, o) || (o->gch.marked & LJ_GC_FIXED));
390 makewhite(g, o); /* Value is alive, change to the current white. */
391 p = &o->gch.nextgc;
392 } else { /* Otherwise value is dead, free it. */
393 lua_assert(isdead(g, o) || ow == LJ_GC_SFIXED);
394 setgcrefr(*p, o->gch.nextgc);
395 if (o == gcref(g->gc.root))
396 setgcrefr(g->gc.root, o->gch.nextgc); /* Adjust list anchor. */
397 gc_freefunc[o->gch.gct - ~LJ_TSTR](g, o);
400 return p;
403 /* Check whether we can clear a key or a value slot from a table. */
404 static int gc_mayclear(cTValue *o, int val)
406 if (tvisgcv(o)) { /* Only collectable objects can be weak references. */
407 if (tvisstr(o)) { /* But strings cannot be used as weak references. */
408 gc_mark_str(strV(o)); /* And need to be marked. */
409 return 0;
411 if (iswhite(gcV(o)))
412 return 1; /* Object is about to be collected. */
413 if (tvisudata(o) && val && isfinalized(udataV(o)))
414 return 1; /* Finalized userdata is dropped only from values. */
416 return 0; /* Cannot clear. */
419 /* Clear collected entries from weak tables. */
420 static void gc_clearweak(GCobj *o)
422 while (o) {
423 GCtab *t = gco2tab(o);
424 lua_assert((t->marked & LJ_GC_WEAK));
425 if ((t->marked & LJ_GC_WEAKVAL)) {
426 MSize i, asize = t->asize;
427 for (i = 0; i < asize; i++) {
428 /* Clear array slot when value is about to be collected. */
429 TValue *tv = arrayslot(t, i);
430 if (gc_mayclear(tv, 1))
431 setnilV(tv);
434 if (t->hmask > 0) {
435 Node *node = noderef(t->node);
436 MSize i, hmask = t->hmask;
437 for (i = 0; i <= hmask; i++) {
438 Node *n = &node[i];
439 /* Clear hash slot when key or value is about to be collected. */
440 if (!tvisnil(&n->val) && (gc_mayclear(&n->key, 0) ||
441 gc_mayclear(&n->val, 1)))
442 setnilV(&n->val);
445 o = gcref(t->gclist);
449 /* Call a userdata or cdata finalizer. */
450 static void gc_call_finalizer(global_State *g, lua_State *L,
451 cTValue *mo, GCobj *o)
453 /* Save and restore lots of state around the __gc callback. */
454 uint8_t oldh = hook_save(g);
455 GCSize oldt = g->gc.threshold;
456 int errcode;
457 TValue *top;
458 lj_trace_abort(g);
459 hook_entergc(g); /* Disable hooks and new traces during __gc. */
460 g->gc.threshold = LJ_MAX_MEM; /* Prevent GC steps. */
461 top = L->top;
462 copyTV(L, top++, mo);
463 if (LJ_FR2) setnilV(top++);
464 setgcV(L, top, o, ~o->gch.gct);
465 L->top = top+1;
466 errcode = lj_vm_pcall(L, top, 1+0, -1); /* Stack: |mo|o| -> | */
467 hook_restore(g, oldh);
468 g->gc.threshold = oldt; /* Restore GC threshold. */
469 if (errcode)
470 lj_err_throw(L, errcode); /* Propagate errors. */
473 /* Finalize one userdata or cdata object from the mmudata list. */
474 static void gc_finalize(lua_State *L)
476 global_State *g = G(L);
477 GCobj *o = gcnext(gcref(g->gc.mmudata));
478 cTValue *mo;
479 lua_assert(tvref(g->jit_base) == NULL); /* Must not be called on trace. */
480 /* Unchain from list of userdata to be finalized. */
481 if (o == gcref(g->gc.mmudata))
482 setgcrefnull(g->gc.mmudata);
483 else
484 setgcrefr(gcref(g->gc.mmudata)->gch.nextgc, o->gch.nextgc);
485 #if LJ_HASFFI
486 if (o->gch.gct == ~LJ_TCDATA) {
487 TValue tmp, *tv;
488 /* Add cdata back to the GC list and make it white. */
489 setgcrefr(o->gch.nextgc, g->gc.root);
490 setgcref(g->gc.root, o);
491 makewhite(g, o);
492 o->gch.marked &= (uint8_t)~LJ_GC_CDATA_FIN;
493 /* Resolve finalizer. */
494 setcdataV(L, &tmp, gco2cd(o));
495 tv = lj_tab_set(L, ctype_ctsG(g)->finalizer, &tmp);
496 if (!tvisnil(tv)) {
497 g->gc.nocdatafin = 0;
498 copyTV(L, &tmp, tv);
499 setnilV(tv); /* Clear entry in finalizer table. */
500 gc_call_finalizer(g, L, &tmp, o);
502 return;
504 #endif
505 /* Add userdata back to the main userdata list and make it white. */
506 setgcrefr(o->gch.nextgc, mainthread(g)->nextgc);
507 setgcref(mainthread(g)->nextgc, o);
508 makewhite(g, o);
509 /* Resolve the __gc metamethod. */
510 mo = lj_meta_fastg(g, tabref(gco2ud(o)->metatable), MM_gc);
511 if (mo)
512 gc_call_finalizer(g, L, mo, o);
515 /* Finalize all userdata objects from mmudata list. */
516 void lj_gc_finalize_udata(lua_State *L)
518 while (gcref(G(L)->gc.mmudata) != NULL)
519 gc_finalize(L);
#if LJ_HASFFI
/* Finalize all cdata objects from finalizer table (on state close). */
void lj_gc_finalize_cdata(lua_State *L)
{
  global_State *g = G(L);
  CTState *cts = ctype_ctsG(g);
  if (cts) {
    GCtab *t = cts->finalizer;
    Node *node = noderef(t->node);
    ptrdiff_t i;
    setgcrefnull(t->metatable);  /* Mark finalizer table as disabled. */
    for (i = (ptrdiff_t)t->hmask; i >= 0; i--)
      if (!tvisnil(&node[i].val) && tviscdata(&node[i].key)) {
	GCobj *o = gcV(&node[i].key);
	TValue tmp;
	makewhite(g, o);
	o->gch.marked &= (uint8_t)~LJ_GC_CDATA_FIN;
	copyTV(L, &tmp, &node[i].val);
	setnilV(&node[i].val);
	gc_call_finalizer(g, L, &tmp, o);
      }
  }
}
#endif
547 /* Free all remaining GC objects. */
548 void lj_gc_freeall(global_State *g)
550 MSize i, strmask;
551 /* Free everything, except super-fixed objects (the main thread). */
552 g->gc.currentwhite = LJ_GC_WHITES | LJ_GC_SFIXED;
553 gc_fullsweep(g, &g->gc.root);
554 strmask = g->strmask;
555 for (i = 0; i <= strmask; i++) /* Free all string hash chains. */
556 gc_fullsweep(g, &g->strhash[i]);
559 /* -- Collector ----------------------------------------------------------- */
561 /* Atomic part of the GC cycle, transitioning from mark to sweep phase. */
562 static void atomic(global_State *g, lua_State *L)
564 size_t udsize;
566 gc_mark_uv(g); /* Need to remark open upvalues (the thread may be dead). */
567 gc_propagate_gray(g); /* Propagate any left-overs. */
569 setgcrefr(g->gc.gray, g->gc.weak); /* Empty the list of weak tables. */
570 setgcrefnull(g->gc.weak);
571 lua_assert(!iswhite(obj2gco(mainthread(g))));
572 gc_markobj(g, L); /* Mark running thread. */
573 gc_traverse_curtrace(g); /* Traverse current trace. */
574 gc_mark_gcroot(g); /* Mark GC roots (again). */
575 gc_propagate_gray(g); /* Propagate all of the above. */
577 setgcrefr(g->gc.gray, g->gc.grayagain); /* Empty the 2nd chance list. */
578 setgcrefnull(g->gc.grayagain);
579 gc_propagate_gray(g); /* Propagate it. */
581 udsize = lj_gc_separateudata(g, 0); /* Separate userdata to be finalized. */
582 gc_mark_mmudata(g); /* Mark them. */
583 udsize += gc_propagate_gray(g); /* And propagate the marks. */
585 /* All marking done, clear weak tables. */
586 gc_clearweak(gcref(g->gc.weak));
588 lj_buf_shrink(L, &g->tmpbuf); /* Shrink temp buffer. */
590 /* Prepare for sweep phase. */
591 g->gc.currentwhite = (uint8_t)otherwhite(g); /* Flip current white. */
592 g->strempty.marked = g->gc.currentwhite;
593 setmref(g->gc.sweep, &g->gc.root);
594 g->gc.estimate = g->gc.total - (GCSize)udsize; /* Initial estimate. */
597 /* GC state machine. Returns a cost estimate for each step performed. */
598 static size_t gc_onestep(lua_State *L)
600 global_State *g = G(L);
601 switch (g->gc.state) {
602 case GCSpause:
603 gc_mark_start(g); /* Start a new GC cycle by marking all GC roots. */
604 return 0;
605 case GCSpropagate:
606 if (gcref(g->gc.gray) != NULL)
607 return propagatemark(g); /* Propagate one gray object. */
608 g->gc.state = GCSatomic; /* End of mark phase. */
609 return 0;
610 case GCSatomic:
611 if (tvref(g->jit_base)) /* Don't run atomic phase on trace. */
612 return LJ_MAX_MEM;
613 atomic(g, L);
614 g->gc.state = GCSsweepstring; /* Start of sweep phase. */
615 g->gc.sweepstr = 0;
616 return 0;
617 case GCSsweepstring: {
618 GCSize old = g->gc.total;
619 gc_fullsweep(g, &g->strhash[g->gc.sweepstr++]); /* Sweep one chain. */
620 if (g->gc.sweepstr > g->strmask)
621 g->gc.state = GCSsweep; /* All string hash chains sweeped. */
622 lua_assert(old >= g->gc.total);
623 g->gc.estimate -= old - g->gc.total;
624 return GCSWEEPCOST;
626 case GCSsweep: {
627 GCSize old = g->gc.total;
628 setmref(g->gc.sweep, gc_sweep(g, mref(g->gc.sweep, GCRef), GCSWEEPMAX));
629 lua_assert(old >= g->gc.total);
630 g->gc.estimate -= old - g->gc.total;
631 if (gcref(*mref(g->gc.sweep, GCRef)) == NULL) {
632 if (g->strnum <= (g->strmask >> 2) && g->strmask > LJ_MIN_STRTAB*2-1)
633 lj_str_resize(L, g->strmask >> 1); /* Shrink string table. */
634 if (gcref(g->gc.mmudata)) { /* Need any finalizations? */
635 g->gc.state = GCSfinalize;
636 #if LJ_HASFFI
637 g->gc.nocdatafin = 1;
638 #endif
639 } else { /* Otherwise skip this phase to help the JIT. */
640 g->gc.state = GCSpause; /* End of GC cycle. */
641 g->gc.debt = 0;
644 return GCSWEEPMAX*GCSWEEPCOST;
646 case GCSfinalize:
647 if (gcref(g->gc.mmudata) != NULL) {
648 if (tvref(g->jit_base)) /* Don't call finalizers on trace. */
649 return LJ_MAX_MEM;
650 gc_finalize(L); /* Finalize one userdata object. */
651 if (g->gc.estimate > GCFINALIZECOST)
652 g->gc.estimate -= GCFINALIZECOST;
653 return GCFINALIZECOST;
655 #if LJ_HASFFI
656 if (!g->gc.nocdatafin) lj_tab_rehash(L, ctype_ctsG(g)->finalizer);
657 #endif
658 g->gc.state = GCSpause; /* End of GC cycle. */
659 g->gc.debt = 0;
660 return 0;
661 default:
662 lua_assert(0);
663 return 0;
667 /* Perform a limited amount of incremental GC steps. */
668 int LJ_FASTCALL lj_gc_step(lua_State *L)
670 global_State *g = G(L);
671 GCSize lim;
672 int32_t ostate = g->vmstate;
673 setvmstate(g, GC);
674 lim = (GCSTEPSIZE/100) * g->gc.stepmul;
675 if (lim == 0)
676 lim = LJ_MAX_MEM;
677 if (g->gc.total > g->gc.threshold)
678 g->gc.debt += g->gc.total - g->gc.threshold;
679 do {
680 lim -= (GCSize)gc_onestep(L);
681 if (g->gc.state == GCSpause) {
682 g->gc.threshold = (g->gc.estimate/100) * g->gc.pause;
683 g->vmstate = ostate;
684 return 1; /* Finished a GC cycle. */
686 } while (sizeof(lim) == 8 ? ((int64_t)lim > 0) : ((int32_t)lim > 0));
687 if (g->gc.debt < GCSTEPSIZE) {
688 g->gc.threshold = g->gc.total + GCSTEPSIZE;
689 g->vmstate = ostate;
690 return -1;
691 } else {
692 g->gc.debt -= GCSTEPSIZE;
693 g->gc.threshold = g->gc.total;
694 g->vmstate = ostate;
695 return 0;
699 /* Ditto, but fix the stack top first. */
700 void LJ_FASTCALL lj_gc_step_fixtop(lua_State *L)
702 if (curr_funcisL(L)) L->top = curr_topL(L);
703 lj_gc_step(L);
#if LJ_HASJIT
/* Perform multiple GC steps. Called from JIT-compiled code. */
int LJ_FASTCALL lj_gc_step_jit(global_State *g, MSize steps)
{
  lua_State *L = gco2th(gcref(g->cur_L));
  L->base = tvref(G(L)->jit_base);
  L->top = curr_topL(L);
  while (steps-- > 0 && lj_gc_step(L) == 0)
    ;
  /* Return 1 to force a trace exit. */
  return (G(L)->gc.state == GCSatomic || G(L)->gc.state == GCSfinalize);
}
#endif
720 /* Perform a full GC cycle. */
721 void lj_gc_fullgc(lua_State *L)
723 global_State *g = G(L);
724 int32_t ostate = g->vmstate;
725 setvmstate(g, GC);
726 if (g->gc.state <= GCSatomic) { /* Caught somewhere in the middle. */
727 setmref(g->gc.sweep, &g->gc.root); /* Sweep everything (preserving it). */
728 setgcrefnull(g->gc.gray); /* Reset lists from partial propagation. */
729 setgcrefnull(g->gc.grayagain);
730 setgcrefnull(g->gc.weak);
731 g->gc.state = GCSsweepstring; /* Fast forward to the sweep phase. */
732 g->gc.sweepstr = 0;
734 while (g->gc.state == GCSsweepstring || g->gc.state == GCSsweep)
735 gc_onestep(L); /* Finish sweep. */
736 lua_assert(g->gc.state == GCSfinalize || g->gc.state == GCSpause);
737 /* Now perform a full GC. */
738 g->gc.state = GCSpause;
739 do { gc_onestep(L); } while (g->gc.state != GCSpause);
740 g->gc.threshold = (g->gc.estimate/100) * g->gc.pause;
741 g->vmstate = ostate;
744 /* -- Write barriers ------------------------------------------------------ */
746 /* Move the GC propagation frontier forward. */
747 void lj_gc_barrierf(global_State *g, GCobj *o, GCobj *v)
749 lua_assert(isblack(o) && iswhite(v) && !isdead(g, v) && !isdead(g, o));
750 lua_assert(g->gc.state != GCSfinalize && g->gc.state != GCSpause);
751 lua_assert(o->gch.gct != ~LJ_TTAB);
752 /* Preserve invariant during propagation. Otherwise it doesn't matter. */
753 if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic)
754 gc_mark(g, v); /* Move frontier forward. */
755 else
756 makewhite(g, o); /* Make it white to avoid the following barrier. */
759 /* Specialized barrier for closed upvalue. Pass &uv->tv. */
760 void LJ_FASTCALL lj_gc_barrieruv(global_State *g, TValue *tv)
762 #define TV2MARKED(x) \
763 (*((uint8_t *)(x) - offsetof(GCupval, tv) + offsetof(GCupval, marked)))
764 if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic)
765 gc_mark(g, gcV(tv));
766 else
767 TV2MARKED(tv) = (TV2MARKED(tv) & (uint8_t)~LJ_GC_COLORS) | curwhite(g);
768 #undef TV2MARKED
771 /* Close upvalue. Also needs a write barrier. */
772 void lj_gc_closeuv(global_State *g, GCupval *uv)
774 GCobj *o = obj2gco(uv);
775 /* Copy stack slot to upvalue itself and point to the copy. */
776 copyTV(mainthread(g), &uv->tv, uvval(uv));
777 setmref(uv->v, &uv->tv);
778 uv->closed = 1;
779 setgcrefr(o->gch.nextgc, g->gc.root);
780 setgcref(g->gc.root, o);
781 if (isgray(o)) { /* A closed upvalue is never gray, so fix this. */
782 if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic) {
783 gray2black(o); /* Make it black and preserve invariant. */
784 if (tviswhite(&uv->tv))
785 lj_gc_barrierf(g, o, gcV(&uv->tv));
786 } else {
787 makewhite(g, o); /* Make it white, i.e. sweep the upvalue. */
788 lua_assert(g->gc.state != GCSfinalize && g->gc.state != GCSpause);
#if LJ_HASJIT
/* Mark a trace if it's saved during the propagation phase. */
void lj_gc_barriertrace(global_State *g, uint32_t traceno)
{
  if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic)
    gc_marktrace(g, traceno);
}
#endif
802 /* -- Allocator ----------------------------------------------------------- */
804 /* Call pluggable memory allocator to allocate or resize a fragment. */
805 void *lj_mem_realloc(lua_State *L, void *p, GCSize osz, GCSize nsz)
807 global_State *g = G(L);
808 lua_assert((osz == 0) == (p == NULL));
809 p = g->allocf(g->allocd, p, osz, nsz);
810 if (p == NULL && nsz > 0)
811 lj_err_mem(L);
812 lua_assert((nsz == 0) == (p == NULL));
813 lua_assert(checkptrGC(p));
814 g->gc.total = (g->gc.total - osz) + nsz;
815 return p;
818 /* Allocate new GC object and link it to the root set. */
819 void * LJ_FASTCALL lj_mem_newgco(lua_State *L, GCSize size)
821 global_State *g = G(L);
822 GCobj *o = (GCobj *)g->allocf(g->allocd, NULL, 0, size);
823 if (o == NULL)
824 lj_err_mem(L);
825 lua_assert(checkptrGC(o));
826 g->gc.total += size;
827 setgcrefr(o->gch.nextgc, g->gc.root);
828 setgcref(g->gc.root, o);
829 newwhite(g, o);
830 return o;
833 /* Resize growable vector. */
834 void *lj_mem_grow(lua_State *L, void *p, MSize *szp, MSize lim, MSize esz)
836 MSize sz = (*szp) << 1;
837 if (sz < LJ_MIN_VECSZ)
838 sz = LJ_MIN_VECSZ;
839 if (sz > lim)
840 sz = lim;
841 p = lj_mem_realloc(L, p, (*szp)*esz, sz*esz);
842 *szp = sz;
843 return p;