3 ** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
5 ** Major portions taken verbatim or adapted from the Lua interpreter.
6 ** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
/* Incremental GC: baseline amount of work (in bytes) per GC step. */
#define GCSTEPSIZE	1024u
/* Cost estimate charged per object swept. */
#define GCSWEEPCOST	10
/* Cost estimate charged per finalizer invocation. */
/* NOTE(review): GCSWEEPMAX (objects swept per GCSsweep step) is referenced
** later but its definition is not visible in this chunk — confirm upstream. */
#define GCFINALIZECOST	100
/* Macros to set GCobj colors and flags. */
/* Clear both white bits: object leaves the "unmarked" state. */
#define white2gray(x)		((x)->gch.marked &= (uint8_t)~LJ_GC_WHITES)
/* Set the black bit: object and all its references have been traversed. */
#define gray2black(x)		((x)->gch.marked |= LJ_GC_BLACK)
/* Test whether a userdata has already had its finalizer run/scheduled. */
#define isfinalized(u)		((u)->marked & LJ_GC_FINALIZED)
40 /* -- Mark phase ---------------------------------------------------------- */
/* Mark a TValue (if needed). Asserts the tag matches the GC object type. */
#define gc_marktv(g, tv) \
  { lua_assert(!tvisgcv(tv) || (~itype(tv) == gcval(tv)->gch.gct)); \
    if (tviswhite(tv)) gc_mark(g, gcV(tv)); }

/* Mark a GCobj (if needed), i.e. only if it is still white. */
#define gc_markobj(g, o) \
  { if (iswhite(obj2gco(o))) gc_mark(g, obj2gco(o)); }

/* Mark a string object. Strings are never gray: clearing the white bits
** is sufficient, they need no traversal. */
#define gc_mark_str(s)		((s)->marked &= (uint8_t)~LJ_GC_WHITES)
54 /* Mark a white GCobj. */
55 static void gc_mark(global_State
*g
, GCobj
*o
)
58 lua_assert(iswhite(o
) && !isdead(g
, o
));
60 if (LJ_UNLIKELY(gct
== ~LJ_TUDATA
)) {
61 GCtab
*mt
= tabref(gco2ud(o
)->metatable
);
62 gray2black(o
); /* Userdata are never gray. */
63 if (mt
) gc_markobj(g
, mt
);
64 gc_markobj(g
, tabref(gco2ud(o
)->env
));
65 } else if (LJ_UNLIKELY(gct
== ~LJ_TUPVAL
)) {
66 GCupval
*uv
= gco2uv(o
);
67 gc_marktv(g
, uvval(uv
));
69 gray2black(o
); /* Closed upvalues are never gray. */
70 } else if (gct
!= ~LJ_TSTR
&& gct
!= ~LJ_TCDATA
) {
71 lua_assert(gct
== ~LJ_TFUNC
|| gct
== ~LJ_TTAB
||
72 gct
== ~LJ_TTHREAD
|| gct
== ~LJ_TPROTO
);
73 setgcrefr(o
->gch
.gclist
, g
->gc
.gray
);
74 setgcref(g
->gc
.gray
, o
);
79 static void gc_mark_gcroot(global_State
*g
)
82 for (i
= 0; i
< GCROOT_MAX
; i
++)
83 if (gcref(g
->gcroot
[i
]) != NULL
)
84 gc_markobj(g
, gcref(g
->gcroot
[i
]));
87 /* Start a GC cycle and mark the root set. */
88 static void gc_mark_start(global_State
*g
)
90 setgcrefnull(g
->gc
.gray
);
91 setgcrefnull(g
->gc
.grayagain
);
92 setgcrefnull(g
->gc
.weak
);
93 gc_markobj(g
, mainthread(g
));
94 gc_markobj(g
, tabref(mainthread(g
)->env
));
95 gc_marktv(g
, &g
->registrytv
);
97 g
->gc
.state
= GCSpropagate
;
100 /* Mark open upvalues. */
101 static void gc_mark_uv(global_State
*g
)
104 for (uv
= uvnext(&g
->uvhead
); uv
!= &g
->uvhead
; uv
= uvnext(uv
)) {
105 lua_assert(uvprev(uvnext(uv
)) == uv
&& uvnext(uvprev(uv
)) == uv
);
106 if (isgray(obj2gco(uv
)))
107 gc_marktv(g
, uvval(uv
));
111 /* Mark userdata in mmudata list. */
112 static void gc_mark_mmudata(global_State
*g
)
114 GCobj
*root
= gcref(g
->gc
.mmudata
);
119 makewhite(g
, u
); /* Could be from previous GC. */
125 /* Separate userdata objects to be finalized to mmudata list. */
126 size_t lj_gc_separateudata(global_State
*g
, int all
)
129 GCRef
*p
= &mainthread(g
)->nextgc
;
131 while ((o
= gcref(*p
)) != NULL
) {
132 if (!(iswhite(o
) || all
) || isfinalized(gco2ud(o
))) {
133 p
= &o
->gch
.nextgc
; /* Nothing to do. */
134 } else if (!lj_meta_fastg(g
, tabref(gco2ud(o
)->metatable
), MM_gc
)) {
135 markfinalized(o
); /* Done, as there's no __gc metamethod. */
137 } else { /* Otherwise move userdata to be finalized to mmudata list. */
138 m
+= sizeudata(gco2ud(o
));
141 if (gcref(g
->gc
.mmudata
)) { /* Link to end of mmudata list. */
142 GCobj
*root
= gcref(g
->gc
.mmudata
);
143 setgcrefr(o
->gch
.nextgc
, root
->gch
.nextgc
);
144 setgcref(root
->gch
.nextgc
, o
);
145 setgcref(g
->gc
.mmudata
, o
);
146 } else { /* Create circular list. */
147 setgcref(o
->gch
.nextgc
, o
);
148 setgcref(g
->gc
.mmudata
, o
);
155 /* -- Propagation phase --------------------------------------------------- */
157 /* Traverse a table. */
158 static int gc_traverse_tab(global_State
*g
, GCtab
*t
)
162 GCtab
*mt
= tabref(t
->metatable
);
165 mode
= lj_meta_fastg(g
, mt
, MM_mode
);
166 if (mode
&& tvisstr(mode
)) { /* Valid __mode field? */
167 const char *modestr
= strVdata(mode
);
169 while ((c
= *modestr
++)) {
170 if (c
== 'k') weak
|= LJ_GC_WEAKKEY
;
171 else if (c
== 'v') weak
|= LJ_GC_WEAKVAL
;
172 else if (c
== 'K') weak
= (int)(~0u & ~LJ_GC_WEAKVAL
);
174 if (weak
> 0) { /* Weak tables are cleared in the atomic phase. */
175 t
->marked
= (uint8_t)((t
->marked
& ~LJ_GC_WEAK
) | weak
);
176 setgcrefr(t
->gclist
, g
->gc
.weak
);
177 setgcref(g
->gc
.weak
, obj2gco(t
));
180 if (weak
== LJ_GC_WEAK
) /* Nothing to mark if both keys/values are weak. */
182 if (!(weak
& LJ_GC_WEAKVAL
)) { /* Mark array part. */
183 MSize i
, asize
= t
->asize
;
184 for (i
= 0; i
< asize
; i
++)
185 gc_marktv(g
, arrayslot(t
, i
));
187 if (t
->hmask
> 0) { /* Mark hash part. */
188 Node
*node
= noderef(t
->node
);
189 MSize i
, hmask
= t
->hmask
;
190 for (i
= 0; i
<= hmask
; i
++) {
192 if (!tvisnil(&n
->val
)) { /* Mark non-empty slot. */
193 lua_assert(!tvisnil(&n
->key
));
194 if (!(weak
& LJ_GC_WEAKKEY
)) gc_marktv(g
, &n
->key
);
195 if (!(weak
& LJ_GC_WEAKVAL
)) gc_marktv(g
, &n
->val
);
202 /* Traverse a function. */
203 static void gc_traverse_func(global_State
*g
, GCfunc
*fn
)
205 gc_markobj(g
, tabref(fn
->c
.env
));
208 lua_assert(fn
->l
.nupvalues
<= funcproto(fn
)->sizeuv
);
209 gc_markobj(g
, funcproto(fn
));
210 for (i
= 0; i
< fn
->l
.nupvalues
; i
++) /* Mark Lua function upvalues. */
211 gc_markobj(g
, &gcref(fn
->l
.uvptr
[i
])->uv
);
214 for (i
= 0; i
< fn
->c
.nupvalues
; i
++) /* Mark C function upvalues. */
215 gc_marktv(g
, &fn
->c
.upvalue
[i
]);
221 static void gc_marktrace(global_State
*g
, TraceNo traceno
)
223 GCobj
*o
= obj2gco(traceref(G2J(g
), traceno
));
224 lua_assert(traceno
!= G2J(g
)->cur
.traceno
);
227 setgcrefr(o
->gch
.gclist
, g
->gc
.gray
);
228 setgcref(g
->gc
.gray
, o
);
232 /* Traverse a trace. */
233 static void gc_traverse_trace(global_State
*g
, GCtrace
*T
)
236 if (T
->traceno
== 0) return;
237 for (ref
= T
->nk
; ref
< REF_TRUE
; ref
++) {
238 IRIns
*ir
= &T
->ir
[ref
];
240 gc_markobj(g
, ir_kgc(ir
));
242 if (T
->link
) gc_marktrace(g
, T
->link
);
243 if (T
->nextroot
) gc_marktrace(g
, T
->nextroot
);
244 if (T
->nextside
) gc_marktrace(g
, T
->nextside
);
245 gc_markobj(g
, gcref(T
->startpt
));
/* The current trace is a GC root while not anchored in the prototype (yet). */
/* NOTE(review): these two definitions are mutually exclusive alternatives;
** the selecting #if LJ_HASJIT / #else / #endif lines are not visible in
** this chunk — confirm against upstream before compiling. */
#define gc_traverse_curtrace(g)	gc_traverse_trace(g, &G2J(g)->cur)
#define gc_traverse_curtrace(g)	UNUSED(g)
254 /* Traverse a prototype. */
255 static void gc_traverse_proto(global_State
*g
, GCproto
*pt
)
258 gc_mark_str(proto_chunkname(pt
));
259 for (i
= -(ptrdiff_t)pt
->sizekgc
; i
< 0; i
++) /* Mark collectable consts. */
260 gc_markobj(g
, proto_kgc(pt
, i
));
262 if (pt
->trace
) gc_marktrace(g
, pt
->trace
);
266 /* Traverse the frame structure of a stack. */
267 static MSize
gc_traverse_frames(global_State
*g
, lua_State
*th
)
269 TValue
*frame
, *top
= th
->top
-1, *bot
= tvref(th
->stack
);
270 /* Note: extra vararg frame not skipped, marks function twice (harmless). */
271 for (frame
= th
->base
-1; frame
> bot
+LJ_FR2
; frame
= frame_prev(frame
)) {
272 GCfunc
*fn
= frame_func(frame
);
273 TValue
*ftop
= frame
;
274 if (isluafunc(fn
)) ftop
+= funcproto(fn
)->framesize
;
275 if (ftop
> top
) top
= ftop
;
276 if (!LJ_FR2
) gc_markobj(g
, fn
); /* Need to mark hidden function (or L). */
278 top
++; /* Correct bias of -1 (frame == base-1). */
279 if (top
> tvref(th
->maxstack
)) top
= tvref(th
->maxstack
);
280 return (MSize
)(top
- bot
); /* Return minimum needed stack size. */
283 /* Traverse a thread object. */
284 static void gc_traverse_thread(global_State
*g
, lua_State
*th
)
286 TValue
*o
, *top
= th
->top
;
287 for (o
= tvref(th
->stack
)+1+LJ_FR2
; o
< top
; o
++)
289 if (g
->gc
.state
== GCSatomic
) {
290 top
= tvref(th
->stack
) + th
->stacksize
;
291 for (; o
< top
; o
++) /* Clear unmarked slots. */
294 gc_markobj(g
, tabref(th
->env
));
295 lj_state_shrinkstack(th
, gc_traverse_frames(g
, th
));
298 /* Propagate one gray object. Traverse it and turn it black. */
299 static size_t propagatemark(global_State
*g
)
301 GCobj
*o
= gcref(g
->gc
.gray
);
302 int gct
= o
->gch
.gct
;
303 lua_assert(isgray(o
));
305 setgcrefr(g
->gc
.gray
, o
->gch
.gclist
); /* Remove from gray list. */
306 if (LJ_LIKELY(gct
== ~LJ_TTAB
)) {
307 GCtab
*t
= gco2tab(o
);
308 if (gc_traverse_tab(g
, t
) > 0)
309 black2gray(o
); /* Keep weak tables gray. */
310 return sizeof(GCtab
) + sizeof(TValue
) * t
->asize
+
311 sizeof(Node
) * (t
->hmask
+ 1);
312 } else if (LJ_LIKELY(gct
== ~LJ_TFUNC
)) {
313 GCfunc
*fn
= gco2func(o
);
314 gc_traverse_func(g
, fn
);
315 return isluafunc(fn
) ? sizeLfunc((MSize
)fn
->l
.nupvalues
) :
316 sizeCfunc((MSize
)fn
->c
.nupvalues
);
317 } else if (LJ_LIKELY(gct
== ~LJ_TPROTO
)) {
318 GCproto
*pt
= gco2pt(o
);
319 gc_traverse_proto(g
, pt
);
321 } else if (LJ_LIKELY(gct
== ~LJ_TTHREAD
)) {
322 lua_State
*th
= gco2th(o
);
323 setgcrefr(th
->gclist
, g
->gc
.grayagain
);
324 setgcref(g
->gc
.grayagain
, o
);
325 black2gray(o
); /* Threads are never black. */
326 gc_traverse_thread(g
, th
);
327 return sizeof(lua_State
) + sizeof(TValue
) * th
->stacksize
;
330 GCtrace
*T
= gco2trace(o
);
331 gc_traverse_trace(g
, T
);
332 return ((sizeof(GCtrace
)+7)&~7) + (T
->nins
-T
->nk
)*sizeof(IRIns
) +
333 T
->nsnap
*sizeof(SnapShot
) + T
->nsnapmap
*sizeof(SnapEntry
);
341 /* Propagate all gray objects. */
342 static size_t gc_propagate_gray(global_State
*g
)
345 while (gcref(g
->gc
.gray
) != NULL
)
346 m
+= propagatemark(g
);
350 /* -- Sweep phase --------------------------------------------------------- */
/* Type of GC free functions: releases all memory owned by one GCobj. */
typedef void (LJ_FASTCALL *GCFreeFunc)(global_State *g, GCobj *o);
355 /* GC free functions for LJ_TSTR .. LJ_TUDATA. ORDER LJ_T */
356 static const GCFreeFunc gc_freefunc
[] = {
357 (GCFreeFunc
)lj_str_free
,
358 (GCFreeFunc
)lj_func_freeuv
,
359 (GCFreeFunc
)lj_state_free
,
360 (GCFreeFunc
)lj_func_freeproto
,
361 (GCFreeFunc
)lj_func_free
,
363 (GCFreeFunc
)lj_trace_free
,
368 (GCFreeFunc
)lj_cdata_free
,
372 (GCFreeFunc
)lj_tab_free
,
373 (GCFreeFunc
)lj_udata_free
/* Full sweep of a GC list: no object-count limit. */
#define gc_fullsweep(g, p)	gc_sweep(g, (p), ~(uint32_t)0)
379 /* Partial sweep of a GC list. */
380 static GCRef
*gc_sweep(global_State
*g
, GCRef
*p
, uint32_t lim
)
382 /* Mask with other white and LJ_GC_FIXED. Or LJ_GC_SFIXED on shutdown. */
383 int ow
= otherwhite(g
);
385 while ((o
= gcref(*p
)) != NULL
&& lim
-- > 0) {
386 if (o
->gch
.gct
== ~LJ_TTHREAD
) /* Need to sweep open upvalues, too. */
387 gc_fullsweep(g
, &gco2th(o
)->openupval
);
388 if (((o
->gch
.marked
^ LJ_GC_WHITES
) & ow
)) { /* Black or current white? */
389 lua_assert(!isdead(g
, o
) || (o
->gch
.marked
& LJ_GC_FIXED
));
390 makewhite(g
, o
); /* Value is alive, change to the current white. */
392 } else { /* Otherwise value is dead, free it. */
393 lua_assert(isdead(g
, o
) || ow
== LJ_GC_SFIXED
);
394 setgcrefr(*p
, o
->gch
.nextgc
);
395 if (o
== gcref(g
->gc
.root
))
396 setgcrefr(g
->gc
.root
, o
->gch
.nextgc
); /* Adjust list anchor. */
397 gc_freefunc
[o
->gch
.gct
- ~LJ_TSTR
](g
, o
);
403 /* Check whether we can clear a key or a value slot from a table. */
404 static int gc_mayclear(cTValue
*o
, int val
)
406 if (tvisgcv(o
)) { /* Only collectable objects can be weak references. */
407 if (tvisstr(o
)) { /* But strings cannot be used as weak references. */
408 gc_mark_str(strV(o
)); /* And need to be marked. */
412 return 1; /* Object is about to be collected. */
413 if (tvisudata(o
) && val
&& isfinalized(udataV(o
)))
414 return 1; /* Finalized userdata is dropped only from values. */
416 return 0; /* Cannot clear. */
419 /* Clear collected entries from weak tables. */
420 static void gc_clearweak(GCobj
*o
)
423 GCtab
*t
= gco2tab(o
);
424 lua_assert((t
->marked
& LJ_GC_WEAK
));
425 if ((t
->marked
& LJ_GC_WEAKVAL
)) {
426 MSize i
, asize
= t
->asize
;
427 for (i
= 0; i
< asize
; i
++) {
428 /* Clear array slot when value is about to be collected. */
429 TValue
*tv
= arrayslot(t
, i
);
430 if (gc_mayclear(tv
, 1))
435 Node
*node
= noderef(t
->node
);
436 MSize i
, hmask
= t
->hmask
;
437 for (i
= 0; i
<= hmask
; i
++) {
439 /* Clear hash slot when key or value is about to be collected. */
440 if (!tvisnil(&n
->val
) && (gc_mayclear(&n
->key
, 0) ||
441 gc_mayclear(&n
->val
, 1)))
445 o
= gcref(t
->gclist
);
449 /* Call a userdata or cdata finalizer. */
450 static void gc_call_finalizer(global_State
*g
, lua_State
*L
,
451 cTValue
*mo
, GCobj
*o
)
453 /* Save and restore lots of state around the __gc callback. */
454 uint8_t oldh
= hook_save(g
);
455 GCSize oldt
= g
->gc
.threshold
;
459 hook_entergc(g
); /* Disable hooks and new traces during __gc. */
460 g
->gc
.threshold
= LJ_MAX_MEM
; /* Prevent GC steps. */
462 copyTV(L
, top
++, mo
);
463 if (LJ_FR2
) setnilV(top
++);
464 setgcV(L
, top
, o
, ~o
->gch
.gct
);
466 errcode
= lj_vm_pcall(L
, top
, 1+0, -1); /* Stack: |mo|o| -> | */
467 hook_restore(g
, oldh
);
468 g
->gc
.threshold
= oldt
; /* Restore GC threshold. */
470 lj_err_throw(L
, errcode
); /* Propagate errors. */
473 /* Finalize one userdata or cdata object from the mmudata list. */
474 static void gc_finalize(lua_State
*L
)
476 global_State
*g
= G(L
);
477 GCobj
*o
= gcnext(gcref(g
->gc
.mmudata
));
479 lua_assert(tvref(g
->jit_base
) == NULL
); /* Must not be called on trace. */
480 /* Unchain from list of userdata to be finalized. */
481 if (o
== gcref(g
->gc
.mmudata
))
482 setgcrefnull(g
->gc
.mmudata
);
484 setgcrefr(gcref(g
->gc
.mmudata
)->gch
.nextgc
, o
->gch
.nextgc
);
486 if (o
->gch
.gct
== ~LJ_TCDATA
) {
488 /* Add cdata back to the GC list and make it white. */
489 setgcrefr(o
->gch
.nextgc
, g
->gc
.root
);
490 setgcref(g
->gc
.root
, o
);
492 o
->gch
.marked
&= (uint8_t)~LJ_GC_CDATA_FIN
;
493 /* Resolve finalizer. */
494 setcdataV(L
, &tmp
, gco2cd(o
));
495 tv
= lj_tab_set(L
, ctype_ctsG(g
)->finalizer
, &tmp
);
497 g
->gc
.nocdatafin
= 0;
499 setnilV(tv
); /* Clear entry in finalizer table. */
500 gc_call_finalizer(g
, L
, &tmp
, o
);
505 /* Add userdata back to the main userdata list and make it white. */
506 setgcrefr(o
->gch
.nextgc
, mainthread(g
)->nextgc
);
507 setgcref(mainthread(g
)->nextgc
, o
);
509 /* Resolve the __gc metamethod. */
510 mo
= lj_meta_fastg(g
, tabref(gco2ud(o
)->metatable
), MM_gc
);
512 gc_call_finalizer(g
, L
, mo
, o
);
515 /* Finalize all userdata objects from mmudata list. */
516 void lj_gc_finalize_udata(lua_State
*L
)
518 while (gcref(G(L
)->gc
.mmudata
) != NULL
)
523 /* Finalize all cdata objects from finalizer table. */
524 void lj_gc_finalize_cdata(lua_State
*L
)
526 global_State
*g
= G(L
);
527 CTState
*cts
= ctype_ctsG(g
);
529 GCtab
*t
= cts
->finalizer
;
530 Node
*node
= noderef(t
->node
);
532 setgcrefnull(t
->metatable
); /* Mark finalizer table as disabled. */
533 for (i
= (ptrdiff_t)t
->hmask
; i
>= 0; i
--)
534 if (!tvisnil(&node
[i
].val
) && tviscdata(&node
[i
].key
)) {
535 GCobj
*o
= gcV(&node
[i
].key
);
538 o
->gch
.marked
&= (uint8_t)~LJ_GC_CDATA_FIN
;
539 copyTV(L
, &tmp
, &node
[i
].val
);
540 setnilV(&node
[i
].val
);
541 gc_call_finalizer(g
, L
, &tmp
, o
);
547 /* Free all remaining GC objects. */
548 void lj_gc_freeall(global_State
*g
)
551 /* Free everything, except super-fixed objects (the main thread). */
552 g
->gc
.currentwhite
= LJ_GC_WHITES
| LJ_GC_SFIXED
;
553 gc_fullsweep(g
, &g
->gc
.root
);
554 strmask
= g
->strmask
;
555 for (i
= 0; i
<= strmask
; i
++) /* Free all string hash chains. */
556 gc_fullsweep(g
, &g
->strhash
[i
]);
559 /* -- Collector ----------------------------------------------------------- */
561 /* Atomic part of the GC cycle, transitioning from mark to sweep phase. */
562 static void atomic(global_State
*g
, lua_State
*L
)
566 gc_mark_uv(g
); /* Need to remark open upvalues (the thread may be dead). */
567 gc_propagate_gray(g
); /* Propagate any left-overs. */
569 setgcrefr(g
->gc
.gray
, g
->gc
.weak
); /* Empty the list of weak tables. */
570 setgcrefnull(g
->gc
.weak
);
571 lua_assert(!iswhite(obj2gco(mainthread(g
))));
572 gc_markobj(g
, L
); /* Mark running thread. */
573 gc_traverse_curtrace(g
); /* Traverse current trace. */
574 gc_mark_gcroot(g
); /* Mark GC roots (again). */
575 gc_propagate_gray(g
); /* Propagate all of the above. */
577 setgcrefr(g
->gc
.gray
, g
->gc
.grayagain
); /* Empty the 2nd chance list. */
578 setgcrefnull(g
->gc
.grayagain
);
579 gc_propagate_gray(g
); /* Propagate it. */
581 udsize
= lj_gc_separateudata(g
, 0); /* Separate userdata to be finalized. */
582 gc_mark_mmudata(g
); /* Mark them. */
583 udsize
+= gc_propagate_gray(g
); /* And propagate the marks. */
585 /* All marking done, clear weak tables. */
586 gc_clearweak(gcref(g
->gc
.weak
));
588 lj_buf_shrink(L
, &g
->tmpbuf
); /* Shrink temp buffer. */
590 /* Prepare for sweep phase. */
591 g
->gc
.currentwhite
= (uint8_t)otherwhite(g
); /* Flip current white. */
592 g
->strempty
.marked
= g
->gc
.currentwhite
;
593 setmref(g
->gc
.sweep
, &g
->gc
.root
);
594 g
->gc
.estimate
= g
->gc
.total
- (GCSize
)udsize
; /* Initial estimate. */
597 /* GC state machine. Returns a cost estimate for each step performed. */
598 static size_t gc_onestep(lua_State
*L
)
600 global_State
*g
= G(L
);
601 switch (g
->gc
.state
) {
603 gc_mark_start(g
); /* Start a new GC cycle by marking all GC roots. */
606 if (gcref(g
->gc
.gray
) != NULL
)
607 return propagatemark(g
); /* Propagate one gray object. */
608 g
->gc
.state
= GCSatomic
; /* End of mark phase. */
611 if (tvref(g
->jit_base
)) /* Don't run atomic phase on trace. */
614 g
->gc
.state
= GCSsweepstring
; /* Start of sweep phase. */
617 case GCSsweepstring
: {
618 GCSize old
= g
->gc
.total
;
619 gc_fullsweep(g
, &g
->strhash
[g
->gc
.sweepstr
++]); /* Sweep one chain. */
620 if (g
->gc
.sweepstr
> g
->strmask
)
621 g
->gc
.state
= GCSsweep
; /* All string hash chains sweeped. */
622 lua_assert(old
>= g
->gc
.total
);
623 g
->gc
.estimate
-= old
- g
->gc
.total
;
627 GCSize old
= g
->gc
.total
;
628 setmref(g
->gc
.sweep
, gc_sweep(g
, mref(g
->gc
.sweep
, GCRef
), GCSWEEPMAX
));
629 lua_assert(old
>= g
->gc
.total
);
630 g
->gc
.estimate
-= old
- g
->gc
.total
;
631 if (gcref(*mref(g
->gc
.sweep
, GCRef
)) == NULL
) {
632 if (g
->strnum
<= (g
->strmask
>> 2) && g
->strmask
> LJ_MIN_STRTAB
*2-1)
633 lj_str_resize(L
, g
->strmask
>> 1); /* Shrink string table. */
634 if (gcref(g
->gc
.mmudata
)) { /* Need any finalizations? */
635 g
->gc
.state
= GCSfinalize
;
637 g
->gc
.nocdatafin
= 1;
639 } else { /* Otherwise skip this phase to help the JIT. */
640 g
->gc
.state
= GCSpause
; /* End of GC cycle. */
644 return GCSWEEPMAX
*GCSWEEPCOST
;
647 if (gcref(g
->gc
.mmudata
) != NULL
) {
648 if (tvref(g
->jit_base
)) /* Don't call finalizers on trace. */
650 gc_finalize(L
); /* Finalize one userdata object. */
651 if (g
->gc
.estimate
> GCFINALIZECOST
)
652 g
->gc
.estimate
-= GCFINALIZECOST
;
653 return GCFINALIZECOST
;
656 if (!g
->gc
.nocdatafin
) lj_tab_rehash(L
, ctype_ctsG(g
)->finalizer
);
658 g
->gc
.state
= GCSpause
; /* End of GC cycle. */
667 /* Perform a limited amount of incremental GC steps. */
668 int LJ_FASTCALL
lj_gc_step(lua_State
*L
)
670 global_State
*g
= G(L
);
672 int32_t ostate
= g
->vmstate
;
674 lim
= (GCSTEPSIZE
/100) * g
->gc
.stepmul
;
677 if (g
->gc
.total
> g
->gc
.threshold
)
678 g
->gc
.debt
+= g
->gc
.total
- g
->gc
.threshold
;
680 lim
-= (GCSize
)gc_onestep(L
);
681 if (g
->gc
.state
== GCSpause
) {
682 g
->gc
.threshold
= (g
->gc
.estimate
/100) * g
->gc
.pause
;
684 return 1; /* Finished a GC cycle. */
686 } while (sizeof(lim
) == 8 ? ((int64_t)lim
> 0) : ((int32_t)lim
> 0));
687 if (g
->gc
.debt
< GCSTEPSIZE
) {
688 g
->gc
.threshold
= g
->gc
.total
+ GCSTEPSIZE
;
692 g
->gc
.debt
-= GCSTEPSIZE
;
693 g
->gc
.threshold
= g
->gc
.total
;
699 /* Ditto, but fix the stack top first. */
700 void LJ_FASTCALL
lj_gc_step_fixtop(lua_State
*L
)
702 if (curr_funcisL(L
)) L
->top
= curr_topL(L
);
707 /* Perform multiple GC steps. Called from JIT-compiled code. */
708 int LJ_FASTCALL
lj_gc_step_jit(global_State
*g
, MSize steps
)
710 lua_State
*L
= gco2th(gcref(g
->cur_L
));
711 L
->base
= tvref(G(L
)->jit_base
);
712 L
->top
= curr_topL(L
);
713 while (steps
-- > 0 && lj_gc_step(L
) == 0)
715 /* Return 1 to force a trace exit. */
716 return (G(L
)->gc
.state
== GCSatomic
|| G(L
)->gc
.state
== GCSfinalize
);
720 /* Perform a full GC cycle. */
721 void lj_gc_fullgc(lua_State
*L
)
723 global_State
*g
= G(L
);
724 int32_t ostate
= g
->vmstate
;
726 if (g
->gc
.state
<= GCSatomic
) { /* Caught somewhere in the middle. */
727 setmref(g
->gc
.sweep
, &g
->gc
.root
); /* Sweep everything (preserving it). */
728 setgcrefnull(g
->gc
.gray
); /* Reset lists from partial propagation. */
729 setgcrefnull(g
->gc
.grayagain
);
730 setgcrefnull(g
->gc
.weak
);
731 g
->gc
.state
= GCSsweepstring
; /* Fast forward to the sweep phase. */
734 while (g
->gc
.state
== GCSsweepstring
|| g
->gc
.state
== GCSsweep
)
735 gc_onestep(L
); /* Finish sweep. */
736 lua_assert(g
->gc
.state
== GCSfinalize
|| g
->gc
.state
== GCSpause
);
737 /* Now perform a full GC. */
738 g
->gc
.state
= GCSpause
;
739 do { gc_onestep(L
); } while (g
->gc
.state
!= GCSpause
);
740 g
->gc
.threshold
= (g
->gc
.estimate
/100) * g
->gc
.pause
;
744 /* -- Write barriers ------------------------------------------------------ */
746 /* Move the GC propagation frontier forward. */
747 void lj_gc_barrierf(global_State
*g
, GCobj
*o
, GCobj
*v
)
749 lua_assert(isblack(o
) && iswhite(v
) && !isdead(g
, v
) && !isdead(g
, o
));
750 lua_assert(g
->gc
.state
!= GCSfinalize
&& g
->gc
.state
!= GCSpause
);
751 lua_assert(o
->gch
.gct
!= ~LJ_TTAB
);
752 /* Preserve invariant during propagation. Otherwise it doesn't matter. */
753 if (g
->gc
.state
== GCSpropagate
|| g
->gc
.state
== GCSatomic
)
754 gc_mark(g
, v
); /* Move frontier forward. */
756 makewhite(g
, o
); /* Make it white to avoid the following barrier. */
759 /* Specialized barrier for closed upvalue. Pass &uv->tv. */
760 void LJ_FASTCALL
lj_gc_barrieruv(global_State
*g
, TValue
*tv
)
762 #define TV2MARKED(x) \
763 (*((uint8_t *)(x) - offsetof(GCupval, tv) + offsetof(GCupval, marked)))
764 if (g
->gc
.state
== GCSpropagate
|| g
->gc
.state
== GCSatomic
)
767 TV2MARKED(tv
) = (TV2MARKED(tv
) & (uint8_t)~LJ_GC_COLORS
) | curwhite(g
);
771 /* Close upvalue. Also needs a write barrier. */
772 void lj_gc_closeuv(global_State
*g
, GCupval
*uv
)
774 GCobj
*o
= obj2gco(uv
);
775 /* Copy stack slot to upvalue itself and point to the copy. */
776 copyTV(mainthread(g
), &uv
->tv
, uvval(uv
));
777 setmref(uv
->v
, &uv
->tv
);
779 setgcrefr(o
->gch
.nextgc
, g
->gc
.root
);
780 setgcref(g
->gc
.root
, o
);
781 if (isgray(o
)) { /* A closed upvalue is never gray, so fix this. */
782 if (g
->gc
.state
== GCSpropagate
|| g
->gc
.state
== GCSatomic
) {
783 gray2black(o
); /* Make it black and preserve invariant. */
784 if (tviswhite(&uv
->tv
))
785 lj_gc_barrierf(g
, o
, gcV(&uv
->tv
));
787 makewhite(g
, o
); /* Make it white, i.e. sweep the upvalue. */
788 lua_assert(g
->gc
.state
!= GCSfinalize
&& g
->gc
.state
!= GCSpause
);
794 /* Mark a trace if it's saved during the propagation phase. */
795 void lj_gc_barriertrace(global_State
*g
, uint32_t traceno
)
797 if (g
->gc
.state
== GCSpropagate
|| g
->gc
.state
== GCSatomic
)
798 gc_marktrace(g
, traceno
);
802 /* -- Allocator ----------------------------------------------------------- */
804 /* Call pluggable memory allocator to allocate or resize a fragment. */
805 void *lj_mem_realloc(lua_State
*L
, void *p
, GCSize osz
, GCSize nsz
)
807 global_State
*g
= G(L
);
808 lua_assert((osz
== 0) == (p
== NULL
));
809 p
= g
->allocf(g
->allocd
, p
, osz
, nsz
);
810 if (p
== NULL
&& nsz
> 0)
812 lua_assert((nsz
== 0) == (p
== NULL
));
813 lua_assert(checkptrGC(p
));
814 g
->gc
.total
= (g
->gc
.total
- osz
) + nsz
;
818 /* Allocate new GC object and link it to the root set. */
819 void * LJ_FASTCALL
lj_mem_newgco(lua_State
*L
, GCSize size
)
821 global_State
*g
= G(L
);
822 GCobj
*o
= (GCobj
*)g
->allocf(g
->allocd
, NULL
, 0, size
);
825 lua_assert(checkptrGC(o
));
827 setgcrefr(o
->gch
.nextgc
, g
->gc
.root
);
828 setgcref(g
->gc
.root
, o
);
833 /* Resize growable vector. */
834 void *lj_mem_grow(lua_State
*L
, void *p
, MSize
*szp
, MSize lim
, MSize esz
)
836 MSize sz
= (*szp
) << 1;
837 if (sz
< LJ_MIN_VECSZ
)
841 p
= lj_mem_realloc(L
, p
, (*szp
)*esz
, sz
*esz
);