/*
** Garbage collector.
** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
**
** Major portions taken verbatim or adapted from the Lua interpreter.
** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
*/

#define GCSTEPSIZE	1024u
#define GCSWEEPMAX	40
#define GCSWEEPCOST	10
#define GCFINALIZECOST	100

/* Macros to set GCobj colors and flags. */
#define white2gray(x)		((x)->gch.marked &= (uint8_t)~LJ_GC_WHITES)
#define gray2black(x)		((x)->gch.marked |= LJ_GC_BLACK)
#define isfinalized(u)		((u)->marked & LJ_GC_FINALIZED)

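/*
** Color scheme (incremental tri-color mark & sweep):
** white - not yet found to be reachable in the current cycle. Two whites are
**         used; objects created during a cycle get the current white and are
**         not swept until the next cycle.
** gray  - reachable, but its references have not been traversed yet.
** black - reachable and fully traversed. A black object must never point to
**         a white one; the write barriers below preserve this invariant.
*/
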
/* -- Mark phase ---------------------------------------------------------- */

/* Mark a TValue (if needed). */
#define gc_marktv(g, tv) \
  { lua_assert(!tvisgcv(tv) || (~itype(tv) == gcval(tv)->gch.gct)); \
    if (tviswhite(tv)) gc_mark(g, gcV(tv)); }

/* Mark a GCobj (if needed). */
#define gc_markobj(g, o) \
  { if (iswhite(obj2gco(o))) gc_mark(g, obj2gco(o)); }

/* Mark a string object. */
#define gc_mark_str(s)		((s)->marked &= (uint8_t)~LJ_GC_WHITES)

/* Mark a white GCobj. */
static void gc_mark(global_State *g, GCobj *o)
{
  int gct = o->gch.gct;
  lua_assert(iswhite(o) && !isdead(g, o));
  white2gray(o);
  if (LJ_UNLIKELY(gct == ~LJ_TUDATA)) {
    GCtab *mt = tabref(gco2ud(o)->metatable);
    gray2black(o);  /* Userdata are never gray. */
    if (mt) gc_markobj(g, mt);
    gc_markobj(g, tabref(gco2ud(o)->env));
  } else if (LJ_UNLIKELY(gct == ~LJ_TUPVAL)) {
    GCupval *uv = gco2uv(o);
    gc_marktv(g, uvval(uv));
    if (uv->closed)
      gray2black(o);  /* Closed upvalues are never gray. */
  } else if (gct != ~LJ_TSTR && gct != ~LJ_TCDATA) {
    lua_assert(gct == ~LJ_TFUNC || gct == ~LJ_TTAB ||
               gct == ~LJ_TTHREAD || gct == ~LJ_TPROTO);
    setgcrefr(o->gch.gclist, g->gc.gray);
    setgcref(g->gc.gray, o);
  }
}

/* Mark GC roots. */
static void gc_mark_gcroot(global_State *g)
{
  ptrdiff_t i;
  for (i = 0; i < GCROOT_MAX; i++)
    if (gcref(g->gcroot[i]) != NULL)
      gc_markobj(g, gcref(g->gcroot[i]));
}

/* Start a GC cycle and mark the root set. */
static void gc_mark_start(global_State *g)
{
  setgcrefnull(g->gc.gray);
  setgcrefnull(g->gc.grayagain);
  setgcrefnull(g->gc.weak);
  gc_markobj(g, mainthread(g));
  gc_markobj(g, tabref(mainthread(g)->env));
  gc_marktv(g, &g->registrytv);
  gc_mark_gcroot(g);
  g->gc.state = GCSpropagate;
}

/* Mark open upvalues. */
static void gc_mark_uv(global_State *g)
{
  GCupval *uv;
  for (uv = uvnext(&g->uvhead); uv != &g->uvhead; uv = uvnext(uv)) {
    lua_assert(uvprev(uvnext(uv)) == uv && uvnext(uvprev(uv)) == uv);
    if (isgray(obj2gco(uv)))
      gc_marktv(g, uvval(uv));
  }
}

/* Mark userdata in mmudata list. */
static void gc_mark_mmudata(global_State *g)
{
  GCobj *root = gcref(g->gc.mmudata);
  GCobj *u = root;
  if (u) {
    do {
      u = gcnext(u);
      makewhite(g, u);  /* Could be from previous GC. */
      gc_mark(g, u);
    } while (u != root);
  }
}

/* Separate userdata objects to be finalized to mmudata list. */
size_t lj_gc_separateudata(global_State *g, int all)
{
  size_t m = 0;
  GCRef *p = &mainthread(g)->nextgc;
  GCobj *o;
  while ((o = gcref(*p)) != NULL) {
    if (!(iswhite(o) || all) || isfinalized(gco2ud(o))) {
      p = &o->gch.nextgc;  /* Nothing to do. */
    } else if (!lj_meta_fastg(g, tabref(gco2ud(o)->metatable), MM_gc)) {
      markfinalized(o);  /* Done, as there's no __gc metamethod. */
      p = &o->gch.nextgc;
    } else {  /* Otherwise move userdata to be finalized to mmudata list. */
      m += sizeudata(gco2ud(o));
      markfinalized(o);
      *p = o->gch.nextgc;
      if (gcref(g->gc.mmudata)) {  /* Link to end of mmudata list. */
        GCobj *root = gcref(g->gc.mmudata);
        setgcrefr(o->gch.nextgc, root->gch.nextgc);
        setgcref(root->gch.nextgc, o);
        setgcref(g->gc.mmudata, o);
      } else {  /* Create circular list. */
        setgcref(o->gch.nextgc, o);
        setgcref(g->gc.mmudata, o);
      }
    }
  }
  return m;
}

/* -- Propagation phase --------------------------------------------------- */

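/*
** Note on weak tables: gc_traverse_tab() only records the weakness flags in
** t->marked and links the table into the g->gc.weak list. Dead keys/values
** are not removed here; that is deferred to gc_clearweak(), which runs in
** the atomic phase once the set of reachable objects is final.
*/
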
/* Traverse a table. */
static int gc_traverse_tab(global_State *g, GCtab *t)
{
  int weak = 0;
  cTValue *mode;
  GCtab *mt = tabref(t->metatable);
  if (mt)
    gc_markobj(g, mt);
  mode = lj_meta_fastg(g, mt, MM_mode);
  if (mode && tvisstr(mode)) {  /* Valid __mode field? */
    const char *modestr = strVdata(mode);
    int c;
    while ((c = *modestr++)) {
      if (c == 'k') weak |= LJ_GC_WEAKKEY;
      else if (c == 'v') weak |= LJ_GC_WEAKVAL;
      else if (c == 'K') weak = (int)(~0u & ~LJ_GC_WEAKVAL);
    }
    if (weak > 0) {  /* Weak tables are cleared in the atomic phase. */
      t->marked = (uint8_t)((t->marked & ~LJ_GC_WEAK) | weak);
      setgcrefr(t->gclist, g->gc.weak);
      setgcref(g->gc.weak, obj2gco(t));
    }
  }
  if (weak == LJ_GC_WEAK)  /* Nothing to mark if both keys/values are weak. */
    return 1;
  if (!(weak & LJ_GC_WEAKVAL)) {  /* Mark array part. */
    MSize i, asize = t->asize;
    for (i = 0; i < asize; i++)
      gc_marktv(g, arrayslot(t, i));
  }
  if (t->hmask > 0) {  /* Mark hash part. */
    Node *node = noderef(t->node);
    MSize i, hmask = t->hmask;
    for (i = 0; i <= hmask; i++) {
      Node *n = &node[i];
      if (!tvisnil(&n->val)) {  /* Mark non-empty slot. */
        lua_assert(!tvisnil(&n->key));
        if (!(weak & LJ_GC_WEAKKEY)) gc_marktv(g, &n->key);
        if (!(weak & LJ_GC_WEAKVAL)) gc_marktv(g, &n->val);
      }
    }
  }
  return weak;
}

/* Traverse a function. */
static void gc_traverse_func(global_State *g, GCfunc *fn)
{
  gc_markobj(g, tabref(fn->c.env));
  if (isluafunc(fn)) {
    uint32_t i;
    lua_assert(fn->l.nupvalues <= funcproto(fn)->sizeuv);
    gc_markobj(g, funcproto(fn));
    for (i = 0; i < fn->l.nupvalues; i++)  /* Mark Lua function upvalues. */
      gc_markobj(g, &gcref(fn->l.uvptr[i])->uv);
  } else {
    uint32_t i;
    for (i = 0; i < fn->c.nupvalues; i++)  /* Mark C function upvalues. */
      gc_marktv(g, &fn->c.upvalue[i]);
  }
}

#if LJ_HASJIT
/* Mark a trace. */
static void gc_marktrace(global_State *g, TraceNo traceno)
{
  GCobj *o = obj2gco(traceref(G2J(g), traceno));
  lua_assert(traceno != G2J(g)->cur.traceno);
  if (iswhite(o)) {
    white2gray(o);
    setgcrefr(o->gch.gclist, g->gc.gray);
    setgcref(g->gc.gray, o);
  }
}

/* Traverse a trace. */
static void gc_traverse_trace(global_State *g, GCtrace *T)
{
  IRRef ref;
  if (T->traceno == 0) return;
  for (ref = T->nk; ref < REF_TRUE; ref++) {
    IRIns *ir = &T->ir[ref];
    if (ir->o == IR_KGC)
      gc_markobj(g, ir_kgc(ir));
  }
  if (T->link) gc_marktrace(g, T->link);
  if (T->nextroot) gc_marktrace(g, T->nextroot);
  if (T->nextside) gc_marktrace(g, T->nextside);
  gc_markobj(g, gcref(T->startpt));
}

/* The current trace is a GC root while not anchored in the prototype (yet). */
#define gc_traverse_curtrace(g)	gc_traverse_trace(g, &G2J(g)->cur)
#else
#define gc_traverse_curtrace(g)	UNUSED(g)
#endif

/* Traverse a prototype. */
static void gc_traverse_proto(global_State *g, GCproto *pt)
{
  ptrdiff_t i;
  gc_mark_str(proto_chunkname(pt));
  for (i = -(ptrdiff_t)pt->sizekgc; i < 0; i++)  /* Mark collectable consts. */
    gc_markobj(g, proto_kgc(pt, i));
#if LJ_HASJIT
  if (pt->trace) gc_marktrace(g, pt->trace);
#endif
}

/* Traverse the frame structure of a stack. */
static MSize gc_traverse_frames(global_State *g, lua_State *th)
{
  TValue *frame, *top = th->top-1, *bot = tvref(th->stack);
  /* Note: extra vararg frame not skipped, marks function twice (harmless). */
  for (frame = th->base-1; frame > bot; frame = frame_prev(frame)) {
    GCfunc *fn = frame_func(frame);
    TValue *ftop = frame;
    if (isluafunc(fn)) ftop += funcproto(fn)->framesize;
    if (ftop > top) top = ftop;
    gc_markobj(g, fn);  /* Need to mark hidden function (or L). */
  }
  top++;  /* Correct bias of -1 (frame == base-1). */
  if (top > tvref(th->maxstack)) top = tvref(th->maxstack);
  return (MSize)(top - bot);  /* Return minimum needed stack size. */
}

/* Traverse a thread object. */
static void gc_traverse_thread(global_State *g, lua_State *th)
{
  TValue *o, *top = th->top;
  for (o = tvref(th->stack)+1; o < top; o++)
    gc_marktv(g, o);
  if (g->gc.state == GCSatomic) {
    top = tvref(th->stack) + th->stacksize;
    for (; o < top; o++)  /* Clear unmarked slots. */
      setnilV(o);
  }
  gc_markobj(g, tabref(th->env));
  lj_state_shrinkstack(th, gc_traverse_frames(g, th));
}

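/*
** propagatemark() returns an approximate number of bytes traversed for the
** object it blackened. The incremental stepper (lj_gc_step) subtracts this
** from its per-step budget, so large tables, stacks and traces consume
** proportionally more of a GC step.
*/
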
/* Propagate one gray object. Traverse it and turn it black. */
static size_t propagatemark(global_State *g)
{
  GCobj *o = gcref(g->gc.gray);
  int gct = o->gch.gct;
  lua_assert(isgray(o));
  gray2black(o);
  setgcrefr(g->gc.gray, o->gch.gclist);  /* Remove from gray list. */
  if (LJ_LIKELY(gct == ~LJ_TTAB)) {
    GCtab *t = gco2tab(o);
    if (gc_traverse_tab(g, t) > 0)
      black2gray(o);  /* Keep weak tables gray. */
    return sizeof(GCtab) + sizeof(TValue) * t->asize +
           sizeof(Node) * (t->hmask + 1);
  } else if (LJ_LIKELY(gct == ~LJ_TFUNC)) {
    GCfunc *fn = gco2func(o);
    gc_traverse_func(g, fn);
    return isluafunc(fn) ? sizeLfunc((MSize)fn->l.nupvalues) :
                           sizeCfunc((MSize)fn->c.nupvalues);
  } else if (LJ_LIKELY(gct == ~LJ_TPROTO)) {
    GCproto *pt = gco2pt(o);
    gc_traverse_proto(g, pt);
    return pt->sizept;
  } else if (LJ_LIKELY(gct == ~LJ_TTHREAD)) {
    lua_State *th = gco2th(o);
    setgcrefr(th->gclist, g->gc.grayagain);
    setgcref(g->gc.grayagain, o);
    black2gray(o);  /* Threads are never black. */
    gc_traverse_thread(g, th);
    return sizeof(lua_State) + sizeof(TValue) * th->stacksize;
  } else {
#if LJ_HASJIT
    GCtrace *T = gco2trace(o);
    gc_traverse_trace(g, T);
    return ((sizeof(GCtrace)+7)&~7) + (T->nins-T->nk)*sizeof(IRIns) +
           T->nsnap*sizeof(SnapShot) + T->nsnapmap*sizeof(SnapEntry);
#else
    lua_assert(0);
    return 0;
#endif
  }
}

/* Propagate all gray objects. */
static size_t gc_propagate_gray(global_State *g)
{
  size_t m = 0;
  while (gcref(g->gc.gray) != NULL)
    m += propagatemark(g);
  return m;
}

/* -- Sweep phase --------------------------------------------------------- */

/* Try to shrink some common data structures. */
static void gc_shrink(global_State *g, lua_State *L)
{
  if (g->strnum <= (g->strmask >> 2) && g->strmask > LJ_MIN_STRTAB*2-1)
    lj_str_resize(L, g->strmask >> 1);  /* Shrink string table. */
  if (g->tmpbuf.sz > LJ_MIN_SBUF*2)
    lj_str_resizebuf(L, &g->tmpbuf, g->tmpbuf.sz >> 1);  /* Shrink temp buf. */
}

/* Type of GC free functions. */
typedef void (LJ_FASTCALL *GCFreeFunc)(global_State *g, GCobj *o);

/* GC free functions for LJ_TSTR .. LJ_TUDATA. ORDER LJ_T */
static const GCFreeFunc gc_freefunc[] = {
  (GCFreeFunc)lj_str_free,
  (GCFreeFunc)lj_func_freeuv,
  (GCFreeFunc)lj_state_free,
  (GCFreeFunc)lj_func_freeproto,
  (GCFreeFunc)lj_func_free,
#if LJ_HASJIT
  (GCFreeFunc)lj_trace_free,
#else
  (GCFreeFunc)0,
#endif
#if LJ_HASFFI
  (GCFreeFunc)lj_cdata_free,
#else
  (GCFreeFunc)0,
#endif
  (GCFreeFunc)lj_tab_free,
  (GCFreeFunc)lj_udata_free
};

/* Full sweep of a GC list. */
#define gc_fullsweep(g, p)	gc_sweep(g, (p), LJ_MAX_MEM)

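/*
** Sweeping relies on the two-white scheme: atomic() flips the current white,
** so any object still carrying the previous ("other") white was not reached
** by the mark phase and is freed via gc_freefunc[]. Survivors are repainted
** with the current white for the next cycle. LJ_GC_FIXED objects are always
** treated as live; only the shutdown sweep (lj_gc_freeall) releases them.
*/
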
/* Partial sweep of a GC list. */
static GCRef *gc_sweep(global_State *g, GCRef *p, uint32_t lim)
{
  /* Mask with other white and LJ_GC_FIXED. Or LJ_GC_SFIXED on shutdown. */
  int ow = otherwhite(g);
  GCobj *o;
  while ((o = gcref(*p)) != NULL && lim-- > 0) {
    if (o->gch.gct == ~LJ_TTHREAD)  /* Need to sweep open upvalues, too. */
      gc_fullsweep(g, &gco2th(o)->openupval);
    if (((o->gch.marked ^ LJ_GC_WHITES) & ow)) {  /* Black or current white? */
      lua_assert(!isdead(g, o) || (o->gch.marked & LJ_GC_FIXED));
      makewhite(g, o);  /* Value is alive, change to the current white. */
      p = &o->gch.nextgc;
    } else {  /* Otherwise value is dead, free it. */
      lua_assert(isdead(g, o) || ow == LJ_GC_SFIXED);
      setgcrefr(*p, o->gch.nextgc);
      if (o == gcref(g->gc.root))
        setgcrefr(g->gc.root, o->gch.nextgc);  /* Adjust list anchor. */
      gc_freefunc[o->gch.gct - ~LJ_TSTR](g, o);
    }
  }
  return p;
}

/* Check whether we can clear a key or a value slot from a table. */
static int gc_mayclear(cTValue *o, int val)
{
  if (tvisgcv(o)) {  /* Only collectable objects can be weak references. */
    if (tvisstr(o)) {  /* But strings cannot be used as weak references. */
      gc_mark_str(strV(o));  /* And need to be marked. */
      return 0;
    }
    if (iswhite(gcV(o)))
      return 1;  /* Object is about to be collected. */
    if (tvisudata(o) && val && isfinalized(udataV(o)))
      return 1;  /* Finalized userdata is dropped only from values. */
  }
  return 0;  /* Cannot clear. */
}

/* Clear collected entries from weak tables. */
static void gc_clearweak(GCobj *o)
{
  while (o) {
    GCtab *t = gco2tab(o);
    lua_assert((t->marked & LJ_GC_WEAK));
    if ((t->marked & LJ_GC_WEAKVAL)) {
      MSize i, asize = t->asize;
      for (i = 0; i < asize; i++) {
        /* Clear array slot when value is about to be collected. */
        TValue *tv = arrayslot(t, i);
        if (gc_mayclear(tv, 1))
          setnilV(tv);
      }
    }
    if (t->hmask > 0) {
      Node *node = noderef(t->node);
      MSize i, hmask = t->hmask;
      for (i = 0; i <= hmask; i++) {
        Node *n = &node[i];
        /* Clear hash slot when key or value is about to be collected. */
        if (!tvisnil(&n->val) && (gc_mayclear(&n->key, 0) ||
                                  gc_mayclear(&n->val, 1)))
          setnilV(&n->val);
      }
    }
    o = gcref(t->gclist);
  }
}

/* Call a userdata or cdata finalizer. */
static void gc_call_finalizer(global_State *g, lua_State *L,
                              cTValue *mo, GCobj *o)
{
  /* Save and restore lots of state around the __gc callback. */
  uint8_t oldh = hook_save(g);
  MSize oldt = g->gc.threshold;
  int errcode;
  TValue *top;
  lj_trace_abort(g);
  top = L->top;
  L->top = top+2;
  hook_entergc(g);  /* Disable hooks and new traces during __gc. */
  g->gc.threshold = LJ_MAX_MEM;  /* Prevent GC steps. */
  copyTV(L, top, mo);
  setgcV(L, top+1, o, ~o->gch.gct);
  errcode = lj_vm_pcall(L, top+1, 1+0, -1);  /* Stack: |mo|o| -> | */
  hook_restore(g, oldh);
  g->gc.threshold = oldt;  /* Restore GC threshold. */
  if (errcode)
    lj_err_throw(L, errcode);  /* Propagate errors. */
}

/* Finalize one userdata or cdata object from the mmudata list. */
static void gc_finalize(lua_State *L)
{
  global_State *g = G(L);
  GCobj *o = gcnext(gcref(g->gc.mmudata));
  cTValue *mo;
  lua_assert(gcref(g->jit_L) == NULL);  /* Must not be called on trace. */
  /* Unchain from list of userdata to be finalized. */
  if (o == gcref(g->gc.mmudata))
    setgcrefnull(g->gc.mmudata);
  else
    setgcrefr(gcref(g->gc.mmudata)->gch.nextgc, o->gch.nextgc);
#if LJ_HASFFI
  if (o->gch.gct == ~LJ_TCDATA) {
    TValue tmp, *tv;
    /* Add cdata back to the GC list and make it white. */
    setgcrefr(o->gch.nextgc, g->gc.root);
    setgcref(g->gc.root, o);
    makewhite(g, o);
    o->gch.marked &= (uint8_t)~LJ_GC_CDATA_FIN;
    /* Resolve finalizer. */
    setcdataV(L, &tmp, gco2cd(o));
    tv = lj_tab_set(L, ctype_ctsG(g)->finalizer, &tmp);
    if (!tvisnil(tv)) {
      g->gc.nocdatafin = 0;
      copyTV(L, &tmp, tv);
      setnilV(tv);  /* Clear entry in finalizer table. */
      gc_call_finalizer(g, L, &tmp, o);
    }
    return;
  }
#endif
  /* Add userdata back to the main userdata list and make it white. */
  setgcrefr(o->gch.nextgc, mainthread(g)->nextgc);
  setgcref(mainthread(g)->nextgc, o);
  makewhite(g, o);
  /* Resolve the __gc metamethod. */
  mo = lj_meta_fastg(g, tabref(gco2ud(o)->metatable), MM_gc);
  if (mo)
    gc_call_finalizer(g, L, mo, o);
}

/* Finalize all userdata objects from mmudata list. */
void lj_gc_finalize_udata(lua_State *L)
{
  while (gcref(G(L)->gc.mmudata) != NULL)
    gc_finalize(L);
}

#if LJ_HASFFI
/* Finalize all cdata objects from finalizer table. */
void lj_gc_finalize_cdata(lua_State *L)
{
  global_State *g = G(L);
  CTState *cts = ctype_ctsG(g);
  if (cts) {
    GCtab *t = cts->finalizer;
    Node *node = noderef(t->node);
    ptrdiff_t i;
    setgcrefnull(t->metatable);  /* Mark finalizer table as disabled. */
    for (i = (ptrdiff_t)t->hmask; i >= 0; i--)
      if (!tvisnil(&node[i].val) && tviscdata(&node[i].key)) {
        GCobj *o = gcV(&node[i].key);
        TValue tmp;
        makewhite(g, o);
        o->gch.marked &= (uint8_t)~LJ_GC_CDATA_FIN;
        copyTV(L, &tmp, &node[i].val);
        setnilV(&node[i].val);
        gc_call_finalizer(g, L, &tmp, o);
      }
  }
}
#endif

/* Free all remaining GC objects. */
void lj_gc_freeall(global_State *g)
{
  MSize i, strmask;
  /* Free everything, except super-fixed objects (the main thread). */
  g->gc.currentwhite = LJ_GC_WHITES | LJ_GC_SFIXED;
  gc_fullsweep(g, &g->gc.root);
  strmask = g->strmask;
  for (i = 0; i <= strmask; i++)  /* Free all string hash chains. */
    gc_fullsweep(g, &g->strhash[i]);
}

/* -- Collector ----------------------------------------------------------- */

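/*
** The transition below must run to completion in one piece: it clears weak
** tables and flips the current white, and doing that incrementally would let
** the mutator observe a half-updated state. gc_onestep() therefore refuses
** to enter this phase while JIT-compiled code is running (jit_L set) and
** retries on a later step.
*/
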
/* Atomic part of the GC cycle, transitioning from mark to sweep phase. */
static void atomic(global_State *g, lua_State *L)
{
  size_t udsize;

  gc_mark_uv(g);  /* Need to remark open upvalues (the thread may be dead). */
  gc_propagate_gray(g);  /* Propagate any left-overs. */

  setgcrefr(g->gc.gray, g->gc.weak);  /* Empty the list of weak tables. */
  setgcrefnull(g->gc.weak);
  lua_assert(!iswhite(obj2gco(mainthread(g))));
  gc_markobj(g, L);  /* Mark running thread. */
  gc_traverse_curtrace(g);  /* Traverse current trace. */
  gc_mark_gcroot(g);  /* Mark GC roots (again). */
  gc_propagate_gray(g);  /* Propagate all of the above. */

  setgcrefr(g->gc.gray, g->gc.grayagain);  /* Empty the 2nd chance list. */
  setgcrefnull(g->gc.grayagain);
  gc_propagate_gray(g);  /* Propagate it. */

  udsize = lj_gc_separateudata(g, 0);  /* Separate userdata to be finalized. */
  gc_mark_mmudata(g);  /* Mark them. */
  udsize += gc_propagate_gray(g);  /* And propagate the marks. */

  /* All marking done, clear weak tables. */
  gc_clearweak(gcref(g->gc.weak));

  /* Prepare for sweep phase. */
  g->gc.currentwhite = (uint8_t)otherwhite(g);  /* Flip current white. */
  g->strempty.marked = g->gc.currentwhite;
  setmref(g->gc.sweep, &g->gc.root);
  g->gc.estimate = g->gc.total - (MSize)udsize;  /* Initial estimate. */
}

/* GC state machine. Returns a cost estimate for each step performed. */
static size_t gc_onestep(lua_State *L)
{
  global_State *g = G(L);
  switch (g->gc.state) {
  case GCSpause:
    gc_mark_start(g);  /* Start a new GC cycle by marking all GC roots. */
    return 0;
  case GCSpropagate:
    if (gcref(g->gc.gray) != NULL)
      return propagatemark(g);  /* Propagate one gray object. */
    g->gc.state = GCSatomic;  /* End of mark phase. */
    return 0;
  case GCSatomic:
    if (gcref(g->jit_L))  /* Don't run atomic phase on trace. */
      return LJ_MAX_MEM;
    atomic(g, L);
    g->gc.state = GCSsweepstring;  /* Start of sweep phase. */
    g->gc.sweepstr = 0;
    return 0;
  case GCSsweepstring: {
    MSize old = g->gc.total;
    gc_fullsweep(g, &g->strhash[g->gc.sweepstr++]);  /* Sweep one chain. */
    if (g->gc.sweepstr > g->strmask)
      g->gc.state = GCSsweep;  /* All string hash chains sweeped. */
    lua_assert(old >= g->gc.total);
    g->gc.estimate -= old - g->gc.total;
    return GCSWEEPCOST;
    }
  case GCSsweep: {
    MSize old = g->gc.total;
    setmref(g->gc.sweep, gc_sweep(g, mref(g->gc.sweep, GCRef), GCSWEEPMAX));
    if (gcref(*mref(g->gc.sweep, GCRef)) == NULL) {
      gc_shrink(g, L);
      if (gcref(g->gc.mmudata)) {  /* Need any finalizations? */
        g->gc.state = GCSfinalize;
#if LJ_HASFFI
        g->gc.nocdatafin = 1;
#endif
      } else {  /* Otherwise skip this phase to help the JIT. */
        g->gc.state = GCSpause;  /* End of GC cycle. */
        g->gc.debt = 0;
      }
    }
    lua_assert(old >= g->gc.total);
    g->gc.estimate -= old - g->gc.total;
    return GCSWEEPMAX*GCSWEEPCOST;
    }
  case GCSfinalize:
    if (gcref(g->gc.mmudata) != NULL) {
      if (gcref(g->jit_L))  /* Don't call finalizers on trace. */
        return LJ_MAX_MEM;
      gc_finalize(L);  /* Finalize one userdata object. */
      if (g->gc.estimate > GCFINALIZECOST)
        g->gc.estimate -= GCFINALIZECOST;
      return GCFINALIZECOST;
    }
#if LJ_HASFFI
    if (!g->gc.nocdatafin) lj_tab_rehash(L, ctype_ctsG(g)->finalizer);
#endif
    g->gc.state = GCSpause;  /* End of GC cycle. */
    g->gc.debt = 0;
    return 0;
  default:
    lua_assert(0);
    return 0;
  }
}

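/*
** Budget arithmetic for one invocation: lim = (GCSTEPSIZE/100)*stepmul cost
** units, e.g. (1024/100)*200 = 2000 with the default stepmul of 200. Each
** unit roughly corresponds to one byte traversed during propagation or to
** GCSWEEPCOST per object swept. g->gc.debt accumulates allocation that ran
** ahead of the collector; while it stays >= GCSTEPSIZE the threshold is left
** at g->gc.total, so the very next allocation check triggers another step.
*/
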
/* Perform a limited amount of incremental GC steps. */
int LJ_FASTCALL lj_gc_step(lua_State *L)
{
  global_State *g = G(L);
  MSize lim;
  int32_t ostate = g->vmstate;
  setvmstate(g, GC);
  lim = (GCSTEPSIZE/100) * g->gc.stepmul;
  if (lim == 0)
    lim = LJ_MAX_MEM;
  g->gc.debt += g->gc.total - g->gc.threshold;
  do {
    lim -= (MSize)gc_onestep(L);
    if (g->gc.state == GCSpause) {
      g->gc.threshold = (g->gc.estimate/100) * g->gc.pause;
      g->vmstate = ostate;
      return 1;  /* Finished a GC cycle. */
    }
  } while ((int32_t)lim > 0);
  if (g->gc.debt < GCSTEPSIZE) {
    g->gc.threshold = g->gc.total + GCSTEPSIZE;
    g->vmstate = ostate;
    return -1;
  } else {
    g->gc.debt -= GCSTEPSIZE;
    g->gc.threshold = g->gc.total;
    g->vmstate = ostate;
    return 0;
  }
}

/* Ditto, but fix the stack top first. */
void LJ_FASTCALL lj_gc_step_fixtop(lua_State *L)
{
  if (curr_funcisL(L)) L->top = curr_topL(L);
  lj_gc_step(L);
}

#if LJ_HASJIT
/* Perform multiple GC steps. Called from JIT-compiled code. */
int LJ_FASTCALL lj_gc_step_jit(global_State *g, MSize steps)
{
  lua_State *L = gco2th(gcref(g->jit_L));
  L->base = mref(G(L)->jit_base, TValue);
  L->top = curr_topL(L);
  while (steps-- > 0 && lj_gc_step(L) == 0)
    ;
  /* Return 1 to force a trace exit. */
  return (G(L)->gc.state == GCSatomic || G(L)->gc.state == GCSfinalize);
}
#endif

/* Perform a full GC cycle. */
void lj_gc_fullgc(lua_State *L)
{
  global_State *g = G(L);
  int32_t ostate = g->vmstate;
  setvmstate(g, GC);
  if (g->gc.state <= GCSatomic) {  /* Caught somewhere in the middle. */
    setmref(g->gc.sweep, &g->gc.root);  /* Sweep everything (preserving it). */
    setgcrefnull(g->gc.gray);  /* Reset lists from partial propagation. */
    setgcrefnull(g->gc.grayagain);
    setgcrefnull(g->gc.weak);
    g->gc.state = GCSsweepstring;  /* Fast forward to the sweep phase. */
    g->gc.sweepstr = 0;
  }
  while (g->gc.state == GCSsweepstring || g->gc.state == GCSsweep)
    gc_onestep(L);  /* Finish sweep. */
  lua_assert(g->gc.state == GCSfinalize || g->gc.state == GCSpause);
  /* Now perform a full GC. */
  g->gc.state = GCSpause;
  do { gc_onestep(L); } while (g->gc.state != GCSpause);
  g->gc.threshold = (g->gc.estimate/100) * g->gc.pause;
  g->vmstate = ostate;
}

/* -- Write barriers ------------------------------------------------------ */

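/*
** The barriers keep the invariant that no black object points to a white
** one. lj_gc_barrierf() is the forward barrier: the newly referenced white
** object is marked immediately (or the black object is simply made white
** again outside the propagation phases). Table stores take a separate
** backward barrier path elsewhere, which is why the assertion below rules
** out ~LJ_TTAB objects here.
*/
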
/* Move the GC propagation frontier forward. */
void lj_gc_barrierf(global_State *g, GCobj *o, GCobj *v)
{
  lua_assert(isblack(o) && iswhite(v) && !isdead(g, v) && !isdead(g, o));
  lua_assert(g->gc.state != GCSfinalize && g->gc.state != GCSpause);
  lua_assert(o->gch.gct != ~LJ_TTAB);
  /* Preserve invariant during propagation. Otherwise it doesn't matter. */
  if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic)
    gc_mark(g, v);  /* Move frontier forward. */
  else
    makewhite(g, o);  /* Make it white to avoid the following barrier. */
}

/* Specialized barrier for closed upvalue. Pass &uv->tv. */
void LJ_FASTCALL lj_gc_barrieruv(global_State *g, TValue *tv)
{
#define TV2MARKED(x) \
  (*((uint8_t *)(x) - offsetof(GCupval, tv) + offsetof(GCupval, marked)))
  if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic)
    gc_mark(g, gcV(tv));
  else
    TV2MARKED(tv) = (TV2MARKED(tv) & (uint8_t)~LJ_GC_COLORS) | curwhite(g);
#undef TV2MARKED
}

/* Close upvalue. Also needs a write barrier. */
void lj_gc_closeuv(global_State *g, GCupval *uv)
{
  GCobj *o = obj2gco(uv);
  /* Copy stack slot to upvalue itself and point to the copy. */
  copyTV(mainthread(g), &uv->tv, uvval(uv));
  setmref(uv->v, &uv->tv);
  uv->closed = 1;
  setgcrefr(o->gch.nextgc, g->gc.root);
  setgcref(g->gc.root, o);
  if (isgray(o)) {  /* A closed upvalue is never gray, so fix this. */
    if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic) {
      gray2black(o);  /* Make it black and preserve invariant. */
      if (tviswhite(&uv->tv))
        lj_gc_barrierf(g, o, gcV(&uv->tv));
    } else {
      makewhite(g, o);  /* Make it white, i.e. sweep the upvalue. */
      lua_assert(g->gc.state != GCSfinalize && g->gc.state != GCSpause);
    }
  }
}

#if LJ_HASJIT
/* Mark a trace if it's saved during the propagation phase. */
void lj_gc_barriertrace(global_State *g, uint32_t traceno)
{
  if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic)
    gc_marktrace(g, traceno);
}
#endif

/* -- Allocator ----------------------------------------------------------- */

/* Call pluggable memory allocator to allocate or resize a fragment. */
void *lj_mem_realloc(lua_State *L, void *p, MSize osz, MSize nsz)
{
  global_State *g = G(L);
  lua_assert((osz == 0) == (p == NULL));
  p = g->allocf(g->allocd, p, osz, nsz);
  if (p == NULL && nsz > 0)
    lj_err_mem(L);
  lua_assert((nsz == 0) == (p == NULL));
  lua_assert(checkptr32(p));
  g->gc.total = (g->gc.total - osz) + nsz;
  return p;
}

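/*
** GC-managed memory is accounted in g->gc.total: lj_mem_realloc() above
** adjusts it by (nsz - osz) and lj_mem_newgco() below adds the object size.
** The threshold and debt arithmetic in lj_gc_step() relies on this count
** staying exact.
*/
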
/* Allocate new GC object and link it to the root set. */
void * LJ_FASTCALL lj_mem_newgco(lua_State *L, MSize size)
{
  global_State *g = G(L);
  GCobj *o = (GCobj *)g->allocf(g->allocd, NULL, 0, size);
  if (o == NULL)
    lj_err_mem(L);
  lua_assert(checkptr32(o));
  g->gc.total += size;
  setgcrefr(o->gch.nextgc, g->gc.root);
  setgcref(g->gc.root, o);
  newwhite(g, o);  /* New objects are born with the current white. */
  return o;
}

/* Resize growable vector. */
void *lj_mem_grow(lua_State *L, void *p, MSize *szp, MSize lim, MSize esz)
{
  MSize sz = (*szp) << 1;
  if (sz < LJ_MIN_VECSZ)
    sz = LJ_MIN_VECSZ;
  if (sz > lim)
    sz = lim;
  p = lj_mem_realloc(L, p, (*szp)*esz, sz*esz);
  *szp = sz;
  return p;
}