[sgen] Logging for objects kept alive from ephemerons (#12650)
[mono-project.git] / mono / metadata / sgen-mono.c
blob 4f078be119d1e7282050eebec862d62f27685cfb
/**
 * \file
 * SGen features specific to Mono.
 *
 * Copyright (C) 2014 Xamarin Inc
 *
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
 */

#include "config.h"
#ifdef HAVE_SGEN_GC

#include "sgen/sgen-gc.h"
#include "sgen/sgen-protocol.h"
#include "metadata/monitor.h"
#include "sgen/sgen-layout-stats.h"
#include "sgen/sgen-client.h"
#include "sgen/sgen-cardtable.h"
#include "sgen/sgen-pinning.h"
#include "sgen/sgen-workers.h"
#include "metadata/class-init.h"
#include "metadata/marshal.h"
#include "metadata/method-builder.h"
#include "metadata/abi-details.h"
#include "metadata/class-abi-details.h"
#include "metadata/mono-gc.h"
#include "metadata/runtime.h"
#include "metadata/sgen-bridge-internals.h"
#include "metadata/sgen-mono.h"
#include "metadata/sgen-mono-ilgen.h"
#include "metadata/gc-internals.h"
#include "metadata/handle.h"
33 #include "metadata/abi-details.h"
34 #include "utils/mono-memory-model.h"
35 #include "utils/mono-logger-internals.h"
36 #include "utils/mono-threads-coop.h"
37 #include "utils/mono-threads.h"
38 #include "metadata/w32handle.h"
#ifdef HEAVY_STATISTICS
static guint64 stat_wbarrier_set_arrayref = 0;
static guint64 stat_wbarrier_value_copy = 0;
static guint64 stat_wbarrier_object_copy = 0;

static guint64 los_marked_cards;
static guint64 los_array_cards;
static guint64 los_array_remsets;
#endif
/* If set, mark stacks conservatively, even if precise marking is possible */
static gboolean conservative_stack_mark = FALSE;
/* If set, check that there are no references to the domain left at domain unload */
gboolean sgen_mono_xdomain_checks = FALSE;

/* Functions supplied by the runtime to be called by the GC */
static MonoGCCallbacks gc_callbacks;
#define OPDEF(a,b,c,d,e,f,g,h,i,j) \
	a = i,

enum {
#include "mono/cil/opcode.def"
	CEE_LAST
};

#undef OPDEF
/*
 * Write barriers
 */

static gboolean
ptr_on_stack (void *ptr)
{
	gpointer stack_start = &stack_start;
	SgenThreadInfo *info = mono_thread_info_current ();

	if (ptr >= stack_start && ptr < (gpointer)info->client_info.info.stack_end)
		return TRUE;
	return FALSE;
}
#ifdef SGEN_HEAVY_BINARY_PROTOCOL
#undef HANDLE_PTR
#define HANDLE_PTR(ptr,obj) do {					\
		gpointer o = *(gpointer*)(ptr);				\
		if ((o)) {						\
			gpointer d = ((char*)dest) + ((char*)(ptr) - (char*)(obj)); \
			sgen_binary_protocol_wbarrier (d, o, (gpointer) SGEN_LOAD_VTABLE (o)); \
		}							\
	} while (0)

static void
scan_object_for_binary_protocol_copy_wbarrier (gpointer dest, char *start, mword desc)
{
#define SCAN_OBJECT_NOVTABLE
#include "sgen/sgen-scan-object.h"
}
#endif
void
mono_gc_wbarrier_value_copy_internal (gpointer dest, gpointer src, int count, MonoClass *klass)
{
	HEAVY_STAT (++stat_wbarrier_value_copy);
	g_assert (m_class_is_valuetype (klass));

	SGEN_LOG (8, "Adding value remset at %p, count %d, descr %p for class %s (%p)", dest, count, (gpointer)(uintptr_t)m_class_get_gc_descr (klass), m_class_get_name (klass), klass);

	if (sgen_ptr_in_nursery (dest) || ptr_on_stack (dest) || !sgen_gc_descr_has_references ((mword)m_class_get_gc_descr (klass))) {
		size_t element_size = mono_class_value_size (klass, NULL);
		size_t size = count * element_size;
		mono_gc_memmove_atomic (dest, src, size);
		return;
	}

#ifdef SGEN_HEAVY_BINARY_PROTOCOL
	if (sgen_binary_protocol_is_heavy_enabled ()) {
		size_t element_size = mono_class_value_size (klass, NULL);
		int i;
		for (i = 0; i < count; ++i) {
			scan_object_for_binary_protocol_copy_wbarrier ((char*)dest + i * element_size,
					(char*)src + i * element_size - MONO_ABI_SIZEOF (MonoObject),
					(mword) klass->gc_descr);
		}
	}
#endif

	sgen_get_remset ()->wbarrier_value_copy (dest, src, count, mono_class_value_size (klass, NULL));
}
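/*
 * Illustrative note (not part of the original file): any runtime copy of
 * value-type instances whose type may embed references has to go through
 * this barrier rather than a plain memmove, e.g.:
 *
 *	// copy `count' instances of valuetype `klass' from src to dest
 *	mono_gc_wbarrier_value_copy_internal (dest, src, count, klass);
 *
 * Skipping the barrier for a non-nursery destination would lose the
 * remembered-set entries, and a later nursery collection could miss
 * old-to-young references stored through `dest'.
 */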
/**
 * mono_gc_wbarrier_object_copy_internal:
 *
 * Write barrier to call when \p obj is the result of a clone or copy of an object.
 */
void
mono_gc_wbarrier_object_copy_internal (MonoObject* obj, MonoObject *src)
{
	int size;

	HEAVY_STAT (++stat_wbarrier_object_copy);

	SGEN_ASSERT (6, !ptr_on_stack (obj), "Why is this called for a non-reference type?");
	if (sgen_ptr_in_nursery (obj) || !SGEN_OBJECT_HAS_REFERENCES (src)) {
		size = m_class_get_instance_size (mono_object_class (obj));
		mono_gc_memmove_aligned ((char*)obj + MONO_ABI_SIZEOF (MonoObject), (char*)src + MONO_ABI_SIZEOF (MonoObject),
				size - MONO_ABI_SIZEOF (MonoObject));
		return;
	}

#ifdef SGEN_HEAVY_BINARY_PROTOCOL
	if (sgen_binary_protocol_is_heavy_enabled ())
		scan_object_for_binary_protocol_copy_wbarrier (obj, (char*)src, (mword) src->vtable->gc_descr);
#endif

	sgen_get_remset ()->wbarrier_object_copy (obj, src);
}
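/*
 * Illustrative sketch (not part of the original file; the clone sequence is
 * hypothetical): the barrier above performs the payload copy itself, so a
 * clone can be written as allocate-then-barrier:
 *
 *	MonoObject *clone = mono_gc_alloc_obj (src->vtable,
 *		m_class_get_instance_size (mono_object_class (src)));
 *	if (clone)
 *		mono_gc_wbarrier_object_copy_internal (clone, src);
 */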
/**
 * mono_gc_wbarrier_set_arrayref_internal:
 */
void
mono_gc_wbarrier_set_arrayref_internal (MonoArray *arr, gpointer slot_ptr, MonoObject* value)
{
	HEAVY_STAT (++stat_wbarrier_set_arrayref);
	if (sgen_ptr_in_nursery (slot_ptr)) {
		*(void**)slot_ptr = value;
		return;
	}
	SGEN_LOG (8, "Adding remset at %p", slot_ptr);
	if (value)
		sgen_binary_protocol_wbarrier (slot_ptr, value, value->vtable);

	sgen_get_remset ()->wbarrier_set_field ((GCObject*)arr, slot_ptr, value);
}

/**
 * mono_gc_wbarrier_set_field_internal:
 */
void
mono_gc_wbarrier_set_field_internal (MonoObject *obj, gpointer field_ptr, MonoObject* value)
{
	mono_gc_wbarrier_set_arrayref_internal ((MonoArray*)obj, field_ptr, value);
}

void
mono_gc_wbarrier_range_copy (gpointer _dest, gconstpointer _src, int size)
{
	sgen_wbarrier_range_copy (_dest, _src, size);
}

MonoRangeCopyFunction
mono_gc_get_range_copy_func (void)
{
	return sgen_get_remset ()->wbarrier_range_copy;
}
int
mono_gc_get_suspend_signal (void)
{
	return mono_threads_suspend_get_suspend_signal ();
}

int
mono_gc_get_restart_signal (void)
{
	return mono_threads_suspend_get_restart_signal ();
}
static MonoMethod *write_barrier_conc_method;
static MonoMethod *write_barrier_noconc_method;

gboolean
sgen_is_critical_method (MonoMethod *method)
{
	return sgen_is_managed_allocator (method);
}

gboolean
sgen_has_critical_method (void)
{
	return sgen_has_managed_allocator ();
}
gboolean
mono_gc_is_critical_method (MonoMethod *method)
{
#ifdef HOST_WASM
	// methods can't be critical under WASM because it is single-threaded
	return FALSE;
#else
	return sgen_is_critical_method (method);
#endif
}
static void
emit_nursery_check_noilgen (MonoMethodBuilder *mb, gboolean is_concurrent)
{
}

static MonoSgenMonoCallbacks sgenmono_cb;
static gboolean cb_inited = FALSE;

void
mono_install_sgen_mono_callbacks (MonoSgenMonoCallbacks *cb)
{
	g_assert (!cb_inited);
	g_assert (cb->version == MONO_SGEN_MONO_CALLBACKS_VERSION);
	memcpy (&sgenmono_cb, cb, sizeof (MonoSgenMonoCallbacks));
	cb_inited = TRUE;
}

static void
emit_managed_allocater_noilgen (MonoMethodBuilder *mb, gboolean slowpath, gboolean profiler, int atype)
{
}

static void
install_noilgen (void)
{
	MonoSgenMonoCallbacks cb;
	cb.version = MONO_SGEN_MONO_CALLBACKS_VERSION;
	cb.emit_nursery_check = emit_nursery_check_noilgen;
	cb.emit_managed_allocater = emit_managed_allocater_noilgen;
	mono_install_sgen_mono_callbacks (&cb);
}

static MonoSgenMonoCallbacks *
get_sgen_mono_cb (void)
{
	if (G_UNLIKELY (!cb_inited)) {
#ifdef ENABLE_ILGEN
		mono_sgen_mono_ilgen_init ();
#else
		install_noilgen ();
#endif
	}
	return &sgenmono_cb;
}
MonoMethod*
mono_gc_get_specific_write_barrier (gboolean is_concurrent)
{
	MonoMethod *res;
	MonoMethodBuilder *mb;
	MonoMethodSignature *sig;
	MonoMethod **write_barrier_method_addr;
	WrapperInfo *info;
	// FIXME: Maybe create a separate version for ctors (the branch would be
	// correctly predicted more times)
	if (is_concurrent)
		write_barrier_method_addr = &write_barrier_conc_method;
	else
		write_barrier_method_addr = &write_barrier_noconc_method;

	if (*write_barrier_method_addr)
		return *write_barrier_method_addr;

	/* Create the IL version of mono_gc_barrier_generic_store () */
	sig = mono_metadata_signature_alloc (mono_defaults.corlib, 1);
	sig->ret = mono_get_void_type ();
	sig->params [0] = mono_get_int_type ();

	if (is_concurrent)
		mb = mono_mb_new (mono_defaults.object_class, "wbarrier_conc", MONO_WRAPPER_WRITE_BARRIER);
	else
		mb = mono_mb_new (mono_defaults.object_class, "wbarrier_noconc", MONO_WRAPPER_WRITE_BARRIER);

	get_sgen_mono_cb ()->emit_nursery_check (mb, is_concurrent);

	res = mono_mb_create_method (mb, sig, 16);
	info = mono_wrapper_info_create (mb, WRAPPER_SUBTYPE_NONE);
	mono_marshal_set_wrapper_info (res, info);
	mono_mb_free (mb);

	LOCK_GC;
	if (*write_barrier_method_addr) {
		/* Already created */
		mono_free_method (res);
	} else {
		/* double-checked locking */
		mono_memory_barrier ();
		*write_barrier_method_addr = res;
	}
	UNLOCK_GC;

	return *write_barrier_method_addr;
}
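/*
 * Illustrative sketch (not part of the original file): the function above is
 * classic double-checked creation. The unlocked fast path races with the
 * store, so the built method is published only after a memory barrier:
 *
 *	if (*slot)			// fast path, no lock
 *		return *slot;
 *	res = create ();		// several threads may get here
 *	LOCK_GC;
 *	if (*slot)
 *		mono_free_method (res);	// another thread won the race
 *	else {
 *		mono_memory_barrier ();	// make `res' fully visible first
 *		*slot = res;
 *	}
 *	UNLOCK_GC;
 */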
MonoMethod*
mono_gc_get_write_barrier (void)
{
	return mono_gc_get_specific_write_barrier (sgen_major_collector.is_concurrent);
}

/*
 * Dummy filler objects
 */

/* Vtable of the objects used to fill out nursery fragments before a collection */
static GCVTable array_fill_vtable;

static GCVTable
get_array_fill_vtable (void)
{
	if (!array_fill_vtable) {
		static char _vtable[sizeof(MonoVTable)+8];
		MonoVTable* vtable = (MonoVTable*) ALIGN_TO((mword)_vtable, 8);
		gsize bmap;

		MonoClass *klass = mono_class_create_array_fill_type ();
		MonoDomain *domain = mono_get_root_domain ();
		g_assert (domain);

		vtable->klass = klass;
		bmap = 0;
		vtable->gc_descr = mono_gc_make_descr_for_array (TRUE, &bmap, 0, 8);
		vtable->rank = 1;

		array_fill_vtable = vtable;
	}
	return array_fill_vtable;
}
gboolean
sgen_client_array_fill_range (char *start, size_t size)
{
	MonoArray *o;

	if (size < MONO_SIZEOF_MONO_ARRAY) {
		memset (start, 0, size);
		return FALSE;
	}

	o = (MonoArray*)start;
	o->obj.vtable = (MonoVTable*)get_array_fill_vtable ();
	/* Mark this as not a real object */
	o->obj.synchronisation = (MonoThreadsSync *)GINT_TO_POINTER (-1);
	o->bounds = NULL;
	/* We use an array of int64 elements */
	g_assert ((size - MONO_SIZEOF_MONO_ARRAY) % 8 == 0);
	o->max_length = (mono_array_size_t)((size - MONO_SIZEOF_MONO_ARRAY) / 8);

	return TRUE;
}
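/*
 * Illustrative arithmetic (not part of the original file; the header size is
 * an assumption): with, say, a 24-byte MONO_SIZEOF_MONO_ARRAY header, a
 * 64-byte nursery hole becomes a fake int64[] with
 * max_length = (64 - 24) / 8 = 5, so heap walkers can step over the hole
 * exactly as if it were a real 5-element array.
 */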
void
sgen_client_zero_array_fill_header (void *p, size_t size)
{
	if (size >= MONO_SIZEOF_MONO_ARRAY) {
		memset (p, 0, MONO_SIZEOF_MONO_ARRAY);
	} else {
		static guint8 zeros [MONO_SIZEOF_MONO_ARRAY];

		SGEN_ASSERT (0, !memcmp (p, zeros, size), "TLAB segment must be zeroed out.");
	}
}

MonoVTable *
mono_gc_get_vtable (MonoObject *obj)
{
	// See sgen/sgen-tagged-pointer.h.
	return SGEN_LOAD_VTABLE (obj);
}
/*
 * Finalization
 */

static MonoGCFinalizerCallbacks fin_callbacks;

guint
mono_gc_get_vtable_bits (MonoClass *klass)
{
	guint res = 0;
	/* FIXME move this to the bridge code */
	if (sgen_need_bridge_processing ()) {
		switch (sgen_bridge_class_kind (klass)) {
		case GC_BRIDGE_TRANSPARENT_BRIDGE_CLASS:
		case GC_BRIDGE_OPAQUE_BRIDGE_CLASS:
			res = SGEN_GC_BIT_BRIDGE_OBJECT;
			break;
		case GC_BRIDGE_OPAQUE_CLASS:
			res = SGEN_GC_BIT_BRIDGE_OPAQUE_OBJECT;
			break;
		case GC_BRIDGE_TRANSPARENT_CLASS:
			break;
		}
	}
	if (fin_callbacks.is_class_finalization_aware) {
		if (fin_callbacks.is_class_finalization_aware (klass))
			res |= SGEN_GC_BIT_FINALIZER_AWARE;
	}
	return res;
}

static gboolean
is_finalization_aware (MonoObject *obj)
{
	MonoVTable *vt = SGEN_LOAD_VTABLE (obj);
	return (vt->gc_bits & SGEN_GC_BIT_FINALIZER_AWARE) == SGEN_GC_BIT_FINALIZER_AWARE;
}

void
sgen_client_object_queued_for_finalization (GCObject *obj)
{
	if (fin_callbacks.object_queued_for_finalization && is_finalization_aware (obj))
		fin_callbacks.object_queued_for_finalization (obj);

#ifdef ENABLE_DTRACE
	if (G_UNLIKELY (MONO_GC_FINALIZE_ENQUEUE_ENABLED ())) {
		int gen = sgen_ptr_in_nursery (obj) ? GENERATION_NURSERY : GENERATION_OLD;
		GCVTable vt = SGEN_LOAD_VTABLE (obj);
		MONO_GC_FINALIZE_ENQUEUE ((mword)obj, sgen_safe_object_get_size (obj),
				sgen_client_vtable_get_namespace (vt), sgen_client_vtable_get_name (vt), gen,
				sgen_client_object_has_critical_finalizer (obj));
	}
#endif
}

void
mono_gc_register_finalizer_callbacks (MonoGCFinalizerCallbacks *callbacks)
{
	if (callbacks->version != MONO_GC_FINALIZER_EXTENSION_VERSION)
		g_error ("Invalid finalizer callback version. Expected %d but got %d\n", MONO_GC_FINALIZER_EXTENSION_VERSION, callbacks->version);

	fin_callbacks = *callbacks;
}

void
sgen_client_run_finalize (MonoObject *obj)
{
	mono_gc_run_finalize (obj, NULL);
}
/**
 * mono_gc_invoke_finalizers:
 */
int
mono_gc_invoke_finalizers (void)
{
	return sgen_gc_invoke_finalizers ();
}
/**
 * mono_gc_pending_finalizers:
 */
MonoBoolean
mono_gc_pending_finalizers (void)
{
	return sgen_have_pending_finalizers ();
}

void
sgen_client_finalize_notify (void)
{
	mono_gc_finalize_notify ();
}

void
mono_gc_register_for_finalization (MonoObject *obj, MonoFinalizationProc user_data)
{
	sgen_object_register_for_finalization (obj, user_data);
}

static gboolean
object_in_domain_predicate (MonoObject *obj, void *user_data)
{
	MonoDomain *domain = (MonoDomain *)user_data;
	if (mono_object_domain (obj) == domain) {
		SGEN_LOG (5, "Unregistering finalizer for object: %p (%s)", obj, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (obj)));
		return TRUE;
	}
	return FALSE;
}
/**
 * mono_gc_finalize_domain:
 * \param domain the unloading appdomain
 * Enqueue for finalization all objects that belong to the unloading appdomain \p domain.
 */
void
mono_gc_finalize_domain (MonoDomain *domain)
{
	sgen_finalize_if (object_in_domain_predicate, domain);
}
void
mono_gc_suspend_finalizers (void)
{
	sgen_set_suspend_finalizers ();
}

/*
 * Ephemerons
 */

typedef struct _EphemeronLinkNode EphemeronLinkNode;

struct _EphemeronLinkNode {
	EphemeronLinkNode *next;
	MonoArray *array;
};

typedef struct {
	GCObject *key;
	GCObject *value;
} Ephemeron;

static EphemeronLinkNode *ephemeron_list;
/* LOCKING: requires that the GC lock is held */
static MONO_PERMIT (need (sgen_gc_locked)) void
null_ephemerons_for_domain (MonoDomain *domain)
{
	EphemeronLinkNode *current = ephemeron_list, *prev = NULL;

	while (current) {
		MonoObject *object = (MonoObject*)current->array;

		if (object)
			SGEN_ASSERT (0, object->vtable, "Can't have objects without vtables.");

		if (object && object->vtable->domain == domain) {
			EphemeronLinkNode *tmp = current;

			if (prev)
				prev->next = current->next;
			else
				ephemeron_list = current->next;

			current = current->next;
			sgen_free_internal (tmp, INTERNAL_MEM_EPHEMERON_LINK);
		} else {
			prev = current;
			current = current->next;
		}
	}
}
/* LOCKING: requires that the GC lock is held */
void
sgen_client_clear_unreachable_ephemerons (ScanCopyContext ctx)
{
	CopyOrMarkObjectFunc copy_func = ctx.ops->copy_or_mark_object;
	SgenGrayQueue *queue = ctx.queue;
	EphemeronLinkNode *current = ephemeron_list, *prev = NULL;
	Ephemeron *cur, *array_end;
	GCObject *tombstone;

	while (current) {
		MonoArray *array = current->array;

		if (!sgen_is_object_alive_for_current_gen ((GCObject*)array)) {
			EphemeronLinkNode *tmp = current;

			SGEN_LOG (5, "Dead Ephemeron array at %p", array);

			if (prev)
				prev->next = current->next;
			else
				ephemeron_list = current->next;

			current = current->next;
			sgen_free_internal (tmp, INTERNAL_MEM_EPHEMERON_LINK);

			continue;
		}

		copy_func ((GCObject**)&array, queue);
		current->array = array;

		SGEN_LOG (5, "Clearing unreachable entries for ephemeron array at %p", array);

		cur = mono_array_addr_internal (array, Ephemeron, 0);
		array_end = cur + mono_array_length_internal (array);
		tombstone = SGEN_LOAD_VTABLE ((GCObject*)array)->domain->ephemeron_tombstone;

		for (; cur < array_end; ++cur) {
			GCObject *key = cur->key;

			if (!key || key == tombstone)
				continue;

			SGEN_LOG (5, "[%zd] key %p (%s) value %p (%s)", cur - mono_array_addr_internal (array, Ephemeron, 0),
				key, sgen_is_object_alive_for_current_gen (key) ? "reachable" : "unreachable",
				cur->value, cur->value && sgen_is_object_alive_for_current_gen (cur->value) ? "reachable" : "unreachable");

			if (!sgen_is_object_alive_for_current_gen (key)) {
				cur->key = tombstone;
				cur->value = NULL;
				continue;
			}
		}
		prev = current;
		current = current->next;
	}
}
/*
 * LOCKING: requires that the GC lock is held
 *
 * Limitations: We scan all ephemerons on every collection since the current design doesn't allow for a simple nursery/mature split.
 */
gboolean
sgen_client_mark_ephemerons (ScanCopyContext ctx)
{
	CopyOrMarkObjectFunc copy_func = ctx.ops->copy_or_mark_object;
	SgenGrayQueue *queue = ctx.queue;
	gboolean nothing_marked = TRUE;
	EphemeronLinkNode *current = ephemeron_list;
	Ephemeron *cur, *array_end;
	GCObject *tombstone;

	for (current = ephemeron_list; current; current = current->next) {
		MonoArray *array = current->array;
		SGEN_LOG (5, "Ephemeron array at %p", array);

		/* It has to be alive */
		if (!sgen_is_object_alive_for_current_gen ((GCObject*)array)) {
			SGEN_LOG (5, "\tnot reachable");
			continue;
		}

		copy_func ((GCObject**)&array, queue);

		cur = mono_array_addr_internal (array, Ephemeron, 0);
		array_end = cur + mono_array_length_internal (array);
		tombstone = SGEN_LOAD_VTABLE ((GCObject*)array)->domain->ephemeron_tombstone;

		for (; cur < array_end; ++cur) {
			GCObject *key = cur->key;

			if (!key || key == tombstone)
				continue;

			SGEN_LOG (5, "[%zd] key %p (%s) value %p (%s)", cur - mono_array_addr_internal (array, Ephemeron, 0),
				key, sgen_is_object_alive_for_current_gen (key) ? "reachable" : "unreachable",
				cur->value, cur->value && sgen_is_object_alive_for_current_gen (cur->value) ? "reachable" : "unreachable");

			if (sgen_is_object_alive_for_current_gen (key)) {
				GCObject *value = cur->value;

				copy_func (&cur->key, queue);
				if (value) {
					if (!sgen_is_object_alive_for_current_gen (value)) {
						nothing_marked = FALSE;
						sgen_binary_protocol_ephemeron_ref (current, key, value);
					}
					copy_func (&cur->value, queue);
				}
			}
		}
	}

	SGEN_LOG (5, "Ephemeron run finished. Is it done %d", nothing_marked);
	return nothing_marked;
}
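/*
 * Illustrative sketch (not part of the original file): the collector drives
 * this function to a fixpoint, since copying a value can make another
 * ephemeron's key reachable. Roughly:
 *
 *	do {
 *		done = sgen_client_mark_ephemerons (ctx);
 *		// drain the gray queue so newly copied values get scanned
 *	} while (!done);
 *
 * Returning TRUE ("nothing_marked") means this pass found no live key whose
 * value was still unmarked, so no further passes are needed.
 */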
gboolean
mono_gc_ephemeron_array_add (MonoObject *obj)
{
	EphemeronLinkNode *node;

	LOCK_GC;

	node = (EphemeronLinkNode *)sgen_alloc_internal (INTERNAL_MEM_EPHEMERON_LINK);
	if (!node) {
		UNLOCK_GC;
		return FALSE;
	}
	node->array = (MonoArray*)obj;
	node->next = ephemeron_list;
	ephemeron_list = node;

	SGEN_LOG (5, "Registered ephemeron array %p", obj);

	UNLOCK_GC;
	return TRUE;
}
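/*
 * Usage note (not part of the original file): the managed
 * System.Runtime.CompilerServices.ConditionalWeakTable<K,V> registers its
 * backing Ephemeron[] through this entry point; from then on the key/value
 * pairs get the special marking treatment above instead of ordinary tracing.
 */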
/*
 * Appdomain handling
 */

static gboolean
need_remove_object_for_domain (GCObject *start, MonoDomain *domain)
{
	if (mono_object_domain (start) == domain) {
		SGEN_LOG (4, "Need to cleanup object %p", start);
		sgen_binary_protocol_cleanup (start, (gpointer)SGEN_LOAD_VTABLE (start), sgen_safe_object_get_size ((GCObject*)start));
		return TRUE;
	}
	return FALSE;
}

static void
process_object_for_domain_clearing (GCObject *start, MonoDomain *domain)
{
	MonoVTable *vt = SGEN_LOAD_VTABLE (start);
	if (vt->klass == mono_defaults.internal_thread_class)
		g_assert (mono_object_domain (start) == mono_get_root_domain ());
	/* The object could be a proxy for an object in the domain
	   we're deleting. */
#ifndef DISABLE_REMOTING
	if (m_class_get_supertypes (mono_defaults.real_proxy_class) && mono_class_has_parent_fast (vt->klass, mono_defaults.real_proxy_class)) {
		MonoObject *server = ((MonoRealProxy*)start)->unwrapped_server;

		/* The server could already have been zeroed out, so
		   we need to check for that, too. */
		if (server && (!SGEN_LOAD_VTABLE (server) || mono_object_domain (server) == domain)) {
			SGEN_LOG (4, "Cleaning up remote pointer in %p to object %p", start, server);
			((MonoRealProxy*)start)->unwrapped_server = NULL;
		}
	}
#endif
}

static gboolean
clear_domain_process_object (GCObject *obj, MonoDomain *domain)
{
	gboolean remove;

	process_object_for_domain_clearing (obj, domain);
	remove = need_remove_object_for_domain (obj, domain);

	if (remove && obj->synchronisation) {
		guint32 dislink = mono_monitor_get_object_monitor_gchandle (obj);
		if (dislink)
			mono_gchandle_free_internal (dislink);
	}

	return remove;
}

static void
clear_domain_process_minor_object_callback (GCObject *obj, size_t size, MonoDomain *domain)
{
	if (clear_domain_process_object (obj, domain)) {
		CANARIFY_SIZE (size);
		memset (obj, 0, size);
	}
}

static void
clear_domain_process_major_object_callback (GCObject *obj, size_t size, MonoDomain *domain)
{
	clear_domain_process_object (obj, domain);
}

static void
clear_domain_free_major_non_pinned_object_callback (GCObject *obj, size_t size, MonoDomain *domain)
{
	if (need_remove_object_for_domain (obj, domain))
		sgen_major_collector.free_non_pinned_object (obj, size);
}

static void
clear_domain_free_major_pinned_object_callback (GCObject *obj, size_t size, MonoDomain *domain)
{
	if (need_remove_object_for_domain (obj, domain))
		sgen_major_collector.free_pinned_object (obj, size);
}

static void
sgen_finish_concurrent_work (const char *reason, gboolean stw)
{
	if (sgen_get_concurrent_collection_in_progress ())
		sgen_perform_collection (0, GENERATION_OLD, reason, TRUE, stw);
	SGEN_ASSERT (0, !sgen_get_concurrent_collection_in_progress (), "We just ordered a synchronous collection. Why are we collecting concurrently?");

	sgen_major_collector.finish_sweeping ();
}
/*
 * When appdomains are unloaded we can easily remove objects that have finalizers,
 * but all the others could still be present in random places on the heap.
 * We need a sweep to get rid of them even though it's going to be costly
 * with big heaps.
 * The reason we need to remove them is that we access the vtable and class
 * structures to know the object size and the reference bitmap: once the domain is
 * unloaded they point to random memory.
 */
void
mono_gc_clear_domain (MonoDomain * domain)
{
	LOSObject *bigobj, *prev;
	int i;

	LOCK_GC;

	sgen_binary_protocol_domain_unload_begin (domain);

	sgen_stop_world (0, FALSE);

	sgen_finish_concurrent_work ("clear domain", FALSE);

	sgen_process_fin_stage_entries ();

	sgen_clear_nursery_fragments ();

	FOREACH_THREAD_ALL (info) {
		mono_handle_stack_free_domain (info->client_info.info.handle_stack, domain);
	} FOREACH_THREAD_END

	if (sgen_mono_xdomain_checks && domain != mono_get_root_domain ()) {
		sgen_scan_for_registered_roots_in_domain (domain, ROOT_TYPE_NORMAL);
		sgen_scan_for_registered_roots_in_domain (domain, ROOT_TYPE_WBARRIER);
		sgen_check_for_xdomain_refs ();
	}

	/* Ephemerons and dislinks must be processed before LOS since they might end up pointing
	   to memory returned to the OS. */
	null_ephemerons_for_domain (domain);
	sgen_null_links_for_domain (domain);

	for (i = GENERATION_NURSERY; i < GENERATION_MAX; ++i)
		sgen_remove_finalizers_if (object_in_domain_predicate, domain, i);

	sgen_scan_area_with_callback (sgen_nursery_section->data, sgen_nursery_section->end_data,
		(IterateObjectCallbackFunc)clear_domain_process_minor_object_callback, domain, FALSE, TRUE);

	/* We need two passes over major and large objects because
	   freeing such objects might give their memory back to the OS
	   (in the case of large objects) or obliterate its vtable
	   (pinned objects with major-copying or pinned and non-pinned
	   objects with major-mark&sweep), but we might need to
	   dereference a pointer from an object to another object if
	   the first object is a proxy. */
	sgen_major_collector.iterate_objects (ITERATE_OBJECTS_SWEEP_ALL, (IterateObjectCallbackFunc)clear_domain_process_major_object_callback, domain);
	for (bigobj = sgen_los_object_list; bigobj; bigobj = bigobj->next)
		clear_domain_process_object ((GCObject*)bigobj->data, domain);

	prev = NULL;
	for (bigobj = sgen_los_object_list; bigobj;) {
		if (need_remove_object_for_domain ((GCObject*)bigobj->data, domain)) {
			LOSObject *to_free = bigobj;
			if (prev)
				prev->next = bigobj->next;
			else
				sgen_los_object_list = bigobj->next;
			bigobj = bigobj->next;
			SGEN_LOG (4, "Freeing large object %p", to_free->data);
			sgen_los_free_object (to_free);
			continue;
		}
		prev = bigobj;
		bigobj = bigobj->next;
	}
	sgen_major_collector.iterate_objects (ITERATE_OBJECTS_SWEEP_NON_PINNED, (IterateObjectCallbackFunc)clear_domain_free_major_non_pinned_object_callback, domain);
	sgen_major_collector.iterate_objects (ITERATE_OBJECTS_SWEEP_PINNED, (IterateObjectCallbackFunc)clear_domain_free_major_pinned_object_callback, domain);

	if (domain == mono_get_root_domain ()) {
		sgen_pin_stats_report ();
		sgen_object_layout_dump (stdout);
	}

	sgen_restart_world (0, FALSE);

	sgen_binary_protocol_domain_unload_end (domain);
	sgen_binary_protocol_flush_buffers (FALSE);

	UNLOCK_GC;
}
/*
 * Allocation
 */

MonoObject*
mono_gc_alloc_obj (MonoVTable *vtable, size_t size)
{
	MonoObject *obj = sgen_alloc_obj (vtable, size);

	if (G_UNLIKELY (mono_profiler_allocations_enabled ()) && obj)
		MONO_PROFILER_RAISE (gc_allocation, (obj));

	return obj;
}

MonoObject*
mono_gc_alloc_pinned_obj (MonoVTable *vtable, size_t size)
{
	MonoObject *obj = sgen_alloc_obj_pinned (vtable, size);

	if (G_UNLIKELY (mono_profiler_allocations_enabled ()) && obj)
		MONO_PROFILER_RAISE (gc_allocation, (obj));

	return obj;
}

MonoObject*
mono_gc_alloc_mature (MonoVTable *vtable, size_t size)
{
	MonoObject *obj = sgen_alloc_obj_mature (vtable, size);

	if (G_UNLIKELY (mono_profiler_allocations_enabled ()) && obj)
		MONO_PROFILER_RAISE (gc_allocation, (obj));

	return obj;
}
/**
 * mono_gc_alloc_fixed:
 */
MonoObject*
mono_gc_alloc_fixed (size_t size, MonoGCDescriptor descr, MonoGCRootSource source, void *key, const char *msg)
{
	/* FIXME: do a single allocation */
	void *res = g_calloc (1, size);
	if (!res)
		return NULL;
	if (!mono_gc_register_root ((char *)res, size, descr, source, key, msg)) {
		g_free (res);
		res = NULL;
	}
	return (MonoObject*)res;
}

MonoObject*
mono_gc_alloc_fixed_no_descriptor (size_t size, MonoGCRootSource source, void *key, const char *msg)
{
	return mono_gc_alloc_fixed (size, 0, source, key, msg);
}

/**
 * mono_gc_free_fixed:
 */
void
mono_gc_free_fixed (void* addr)
{
	mono_gc_deregister_root ((char *)addr);
	g_free (addr);
}
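/*
 * Illustrative sketch (not part of the original file; the root-source value
 * and buffer use are hypothetical): fixed allocations are malloc-backed GC
 * roots, so every mono_gc_alloc_fixed needs a matching mono_gc_free_fixed:
 *
 *	MonoObject **slots = (MonoObject **) mono_gc_alloc_fixed (
 *		16 * sizeof (MonoObject*), 0, MONO_ROOT_SOURCE_EXTERNAL,
 *		NULL, "scratch object slots");
 *	// ... store object references into slots [0..15] ...
 *	mono_gc_free_fixed (slots);
 */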
/*
 * Managed allocator
 */

static MonoMethod* alloc_method_cache [ATYPE_NUM];
static MonoMethod* slowpath_alloc_method_cache [ATYPE_NUM];
static MonoMethod* profiler_alloc_method_cache [ATYPE_NUM];
static gboolean use_managed_allocator = TRUE;

#ifdef MANAGED_ALLOCATION
/* FIXME: Do this in the JIT, where specialized allocation sequences can be created
 * for each class. This is currently not easy to do, as it is hard to generate basic
 * blocks + branches, but it is easy with the linear IL codebase.
 *
 * For this to work we'd need to solve the TLAB race, first. Now we
 * require the allocator to be in a few known methods to make sure
 * that they are executed atomically via the restart mechanism.
 */
static MonoMethod*
create_allocator (int atype, ManagedAllocatorVariant variant)
{
	gboolean slowpath = variant == MANAGED_ALLOCATOR_SLOW_PATH;
	gboolean profiler = variant == MANAGED_ALLOCATOR_PROFILER;
	MonoMethodBuilder *mb;
	MonoMethod *res;
	MonoMethodSignature *csig;
	const char *name = NULL;
	WrapperInfo *info;
	int num_params, i;

	if (atype == ATYPE_SMALL) {
		name = slowpath ? "SlowAllocSmall" : (profiler ? "ProfilerAllocSmall" : "AllocSmall");
	} else if (atype == ATYPE_NORMAL) {
		name = slowpath ? "SlowAlloc" : (profiler ? "ProfilerAlloc" : "Alloc");
	} else if (atype == ATYPE_VECTOR) {
		name = slowpath ? "SlowAllocVector" : (profiler ? "ProfilerAllocVector" : "AllocVector");
	} else if (atype == ATYPE_STRING) {
		name = slowpath ? "SlowAllocString" : (profiler ? "ProfilerAllocString" : "AllocString");
	} else {
		g_assert_not_reached ();
	}

	if (atype == ATYPE_NORMAL)
		num_params = 1;
	else
		num_params = 2;

	MonoType *int_type = mono_get_int_type ();
	csig = mono_metadata_signature_alloc (mono_defaults.corlib, num_params);
	if (atype == ATYPE_STRING) {
		csig->ret = m_class_get_byval_arg (mono_defaults.string_class);
		csig->params [0] = int_type;
		csig->params [1] = mono_get_int32_type ();
	} else {
		csig->ret = mono_get_object_type ();
		for (i = 0; i < num_params; i++)
			csig->params [i] = int_type;
	}

	mb = mono_mb_new (mono_defaults.object_class, name, MONO_WRAPPER_ALLOC);

	get_sgen_mono_cb ()->emit_managed_allocater (mb, slowpath, profiler, atype);

	info = mono_wrapper_info_create (mb, WRAPPER_SUBTYPE_NONE);
	info->d.alloc.gc_name = "sgen";
	info->d.alloc.alloc_type = atype;

	res = mono_mb_create (mb, csig, 8, info);
	mono_mb_free (mb);

	return res;
}
#endif
int
mono_gc_get_aligned_size_for_allocator (int size)
{
	return SGEN_ALIGN_UP (size);
}
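/*
 * Illustrative arithmetic (not part of the original file; the alignment
 * value is an assumption): SGEN_ALIGN_UP rounds up to the allocation
 * alignment, typically 8 bytes, so e.g. 13 -> 16, 24 -> 24 and 25 -> 32.
 * The managed allocator fast path relies on receiving the already-aligned
 * size.
 */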
/*
 * Generate an allocator method implementing the fast path of mono_gc_alloc_obj ().
 * The signature of the called method is:
 *	object allocate (MonoVTable *vtable)
 */
MonoMethod*
mono_gc_get_managed_allocator (MonoClass *klass, gboolean for_box, gboolean known_instance_size)
{
#ifdef MANAGED_ALLOCATION
	ManagedAllocatorVariant variant = mono_profiler_allocations_enabled () ?
		MANAGED_ALLOCATOR_PROFILER : MANAGED_ALLOCATOR_REGULAR;

	if (sgen_collect_before_allocs)
		return NULL;
	if (m_class_get_instance_size (klass) > sgen_tlab_size)
		return NULL;
	if (known_instance_size && ALIGN_TO (m_class_get_instance_size (klass), SGEN_ALLOC_ALIGN) >= SGEN_MAX_SMALL_OBJ_SIZE)
		return NULL;
	if (mono_class_has_finalizer (klass) || mono_class_is_marshalbyref (klass) || m_class_has_weak_fields (klass))
		return NULL;
	if (m_class_get_rank (klass))
		return NULL;
	if (m_class_get_byval_arg (klass)->type == MONO_TYPE_STRING)
		return mono_gc_get_managed_allocator_by_type (ATYPE_STRING, variant);
	/* Generic classes have dynamic field sizes and can go above MAX_SMALL_OBJ_SIZE. */
	if (known_instance_size)
		return mono_gc_get_managed_allocator_by_type (ATYPE_SMALL, variant);
	else
		return mono_gc_get_managed_allocator_by_type (ATYPE_NORMAL, variant);
#else
	return NULL;
#endif
}
MonoMethod*
mono_gc_get_managed_array_allocator (MonoClass *klass)
{
#ifdef MANAGED_ALLOCATION
	if (m_class_get_rank (klass) != 1)
		return NULL;
	if (sgen_has_per_allocation_action)
		return NULL;
	g_assert (!mono_class_has_finalizer (klass) && !mono_class_is_marshalbyref (klass));

	return mono_gc_get_managed_allocator_by_type (ATYPE_VECTOR, mono_profiler_allocations_enabled () ?
		MANAGED_ALLOCATOR_PROFILER : MANAGED_ALLOCATOR_REGULAR);
#else
	return NULL;
#endif
}

void
sgen_set_use_managed_allocator (gboolean flag)
{
	use_managed_allocator = flag;
}
MonoMethod*
mono_gc_get_managed_allocator_by_type (int atype, ManagedAllocatorVariant variant)
{
#ifdef MANAGED_ALLOCATION
	MonoMethod *res;
	MonoMethod **cache;

	if (variant != MANAGED_ALLOCATOR_SLOW_PATH && !use_managed_allocator)
		return NULL;

	switch (variant) {
	case MANAGED_ALLOCATOR_REGULAR: cache = alloc_method_cache; break;
	case MANAGED_ALLOCATOR_SLOW_PATH: cache = slowpath_alloc_method_cache; break;
	case MANAGED_ALLOCATOR_PROFILER: cache = profiler_alloc_method_cache; break;
	default: g_assert_not_reached (); break;
	}

	res = cache [atype];
	if (res)
		return res;

	res = create_allocator (atype, variant);
	LOCK_GC;
	if (cache [atype]) {
		mono_free_method (res);
		res = cache [atype];
	} else {
		mono_memory_barrier ();
		cache [atype] = res;
	}
	UNLOCK_GC;

	return res;
#else
	return NULL;
#endif
}

guint32
mono_gc_get_managed_allocator_types (void)
{
	return ATYPE_NUM;
}
gboolean
sgen_is_managed_allocator (MonoMethod *method)
{
	int i;

	for (i = 0; i < ATYPE_NUM; ++i)
		if (method == alloc_method_cache [i] || method == slowpath_alloc_method_cache [i] || method == profiler_alloc_method_cache [i])
			return TRUE;
	return FALSE;
}

gboolean
sgen_has_managed_allocator (void)
{
	int i;

	for (i = 0; i < ATYPE_NUM; ++i)
		if (alloc_method_cache [i] || slowpath_alloc_method_cache [i] || profiler_alloc_method_cache [i])
			return TRUE;
	return FALSE;
}
#define ARRAY_OBJ_INDEX(ptr,array,elem_size) (((char*)(ptr) - ((char*)(array) + G_STRUCT_OFFSET (MonoArray, vector))) / (elem_size))
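/*
 * Illustrative arithmetic (not part of the original file; the offsets are
 * made up): for an array object at 0x1000 whose vector starts at 0x1020,
 * with 8-byte elements, an address at 0x1078 maps to element index
 * (0x1078 - 0x1020) / 8 = 11. Card scanning below uses this to find the
 * first element covered by a dirty card.
 */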
gboolean
sgen_client_cardtable_scan_object (GCObject *obj, guint8 *cards, ScanCopyContext ctx)
{
	MonoVTable *vt = SGEN_LOAD_VTABLE (obj);
	MonoClass *klass = vt->klass;

	SGEN_ASSERT (0, SGEN_VTABLE_HAS_REFERENCES (vt), "Why would we ever call this on reference-free objects?");

	if (vt->rank) {
		MonoArray *arr = (MonoArray*)obj;
		guint8 *card_data, *card_base;
		guint8 *card_data_end;
		char *obj_start = (char *)sgen_card_table_align_pointer (obj);
		mword bounds_size;
		mword obj_size = sgen_mono_array_size (vt, arr, &bounds_size, sgen_vtable_get_descriptor (vt));
		/* We don't want to scan the bounds entries at the end of multidimensional arrays */
		char *obj_end = (char*)obj + obj_size - bounds_size;
		size_t card_count;
		size_t extra_idx = 0;

		mword desc = (mword)m_class_get_gc_descr (m_class_get_element_class (klass));
		int elem_size = mono_array_element_size (klass);

#ifdef SGEN_OBJECT_LAYOUT_STATISTICS
		if (m_class_is_valuetype (m_class_get_element_class (klass)))
			sgen_object_layout_scanned_vtype_array ();
		else
			sgen_object_layout_scanned_ref_array ();
#endif

		if (cards)
			card_data = cards;
		else
			card_data = sgen_card_table_get_card_scan_address ((mword)obj);

		card_base = card_data;
		card_count = sgen_card_table_number_of_cards_in_range ((mword)obj, obj_size);

#ifdef SGEN_HAVE_OVERLAPPING_CARDS
LOOP_HEAD:
		card_data_end = card_base + card_count;

		/*
		 * Check for overflow and if so, scan only until the end of the shadow
		 * card table, leaving the rest for next iterations.
		 */
		if (!cards && card_data_end >= SGEN_SHADOW_CARDTABLE_END) {
			card_data_end = SGEN_SHADOW_CARDTABLE_END;
		}
		card_count -= (card_data_end - card_base);
#else
		card_data_end = card_data + card_count;
#endif

		card_data = sgen_find_next_card (card_data, card_data_end);
		for (; card_data < card_data_end; card_data = sgen_find_next_card (card_data + 1, card_data_end)) {
			size_t index;
			size_t idx = (card_data - card_base) + extra_idx;
			char *start = (char*)(obj_start + idx * CARD_SIZE_IN_BYTES);
			char *card_end = start + CARD_SIZE_IN_BYTES;
			char *first_elem, *elem;

			HEAVY_STAT (++los_marked_cards);

			if (!cards)
				sgen_card_table_prepare_card_for_scanning (card_data);

			card_end = MIN (card_end, obj_end);

			if (start <= (char*)arr->vector)
				index = 0;
			else
				index = ARRAY_OBJ_INDEX (start, obj, elem_size);

			elem = first_elem = (char*)mono_array_addr_with_size_fast ((MonoArray*)obj, elem_size, index);
			if (m_class_is_valuetype (m_class_get_element_class (klass))) {
				ScanVTypeFunc scan_vtype_func = ctx.ops->scan_vtype;

				for (; elem < card_end; elem += elem_size)
					scan_vtype_func (obj, elem, desc, ctx.queue BINARY_PROTOCOL_ARG (elem_size));
			} else {
				ScanPtrFieldFunc scan_ptr_field_func = ctx.ops->scan_ptr_field;

				HEAVY_STAT (++los_array_cards);
				for (; elem < card_end; elem += SIZEOF_VOID_P)
					scan_ptr_field_func (obj, (GCObject**)elem, ctx.queue);
			}

			sgen_binary_protocol_card_scan (first_elem, elem - first_elem);
		}

#ifdef SGEN_HAVE_OVERLAPPING_CARDS
		if (card_count > 0) {
			SGEN_ASSERT (0, card_data == SGEN_SHADOW_CARDTABLE_END, "Why didn't we stop at the shadow cardtable end?");
			extra_idx += card_data - card_base;
			card_base = card_data = sgen_shadow_cardtable;
			goto LOOP_HEAD;
		}
#endif

		return TRUE;
	}

	return FALSE;
}
/*
 * Array and string allocation
 */

MonoArray*
mono_gc_alloc_vector (MonoVTable *vtable, size_t size, uintptr_t max_length)
{
	MonoArray *arr;
	TLAB_ACCESS_INIT;

	if (!SGEN_CAN_ALIGN_UP (size))
		return NULL;

#ifndef DISABLE_CRITICAL_REGION
	ENTER_CRITICAL_REGION;
	arr = (MonoArray*)sgen_try_alloc_obj_nolock (vtable, size);
	if (arr) {
		/* This doesn't require fencing since EXIT_CRITICAL_REGION already does it for us */
		arr->max_length = (mono_array_size_t)max_length;
		EXIT_CRITICAL_REGION;
		goto done;
	}
	EXIT_CRITICAL_REGION;
#endif

	LOCK_GC;

	arr = (MonoArray*)sgen_alloc_obj_nolock (vtable, size);
	if (G_UNLIKELY (!arr)) {
		UNLOCK_GC;
		return NULL;
	}

	arr->max_length = (mono_array_size_t)max_length;

	UNLOCK_GC;

 done:
	if (G_UNLIKELY (mono_profiler_allocations_enabled ()))
		MONO_PROFILER_RAISE (gc_allocation, (&arr->obj));

	SGEN_ASSERT (6, SGEN_ALIGN_UP (size) == SGEN_ALIGN_UP (sgen_client_par_object_get_size (vtable, (GCObject*)arr)), "Vector has incorrect size.");
	return arr;
}

MonoArray*
mono_gc_alloc_array (MonoVTable *vtable, size_t size, uintptr_t max_length, uintptr_t bounds_size)
{
	MonoArray *arr;
	MonoArrayBounds *bounds;
	TLAB_ACCESS_INIT;

	if (!SGEN_CAN_ALIGN_UP (size))
		return NULL;

#ifndef DISABLE_CRITICAL_REGION
	ENTER_CRITICAL_REGION;
	arr = (MonoArray*)sgen_try_alloc_obj_nolock (vtable, size);
	if (arr) {
		/* This doesn't require fencing since EXIT_CRITICAL_REGION already does it for us */
		arr->max_length = (mono_array_size_t)max_length;

		bounds = (MonoArrayBounds*)((char*)arr + size - bounds_size);
		arr->bounds = bounds;
		EXIT_CRITICAL_REGION;
		goto done;
	}
	EXIT_CRITICAL_REGION;
#endif

	LOCK_GC;

	arr = (MonoArray*)sgen_alloc_obj_nolock (vtable, size);
	if (G_UNLIKELY (!arr)) {
		UNLOCK_GC;
		return NULL;
	}

	arr->max_length = (mono_array_size_t)max_length;

	bounds = (MonoArrayBounds*)((char*)arr + size - bounds_size);
	arr->bounds = bounds;

	UNLOCK_GC;

 done:
	if (G_UNLIKELY (mono_profiler_allocations_enabled ()))
		MONO_PROFILER_RAISE (gc_allocation, (&arr->obj));

	SGEN_ASSERT (6, SGEN_ALIGN_UP (size) == SGEN_ALIGN_UP (sgen_client_par_object_get_size (vtable, (GCObject*)arr)), "Array has incorrect size.");
	return arr;
}

MonoString*
mono_gc_alloc_string (MonoVTable *vtable, size_t size, gint32 len)
{
	MonoString *str;
	TLAB_ACCESS_INIT;

	if (!SGEN_CAN_ALIGN_UP (size))
		return NULL;

#ifndef DISABLE_CRITICAL_REGION
	ENTER_CRITICAL_REGION;
	str = (MonoString*)sgen_try_alloc_obj_nolock (vtable, size);
	if (str) {
		/* This doesn't require fencing since EXIT_CRITICAL_REGION already does it for us */
		str->length = len;
		EXIT_CRITICAL_REGION;
		goto done;
	}
	EXIT_CRITICAL_REGION;
#endif

	LOCK_GC;

	str = (MonoString*)sgen_alloc_obj_nolock (vtable, size);
	if (G_UNLIKELY (!str)) {
		UNLOCK_GC;
		return NULL;
	}

	str->length = len;

	UNLOCK_GC;

 done:
	if (G_UNLIKELY (mono_profiler_allocations_enabled ()))
		MONO_PROFILER_RAISE (gc_allocation, (&str->object));

	return str;
}
/*
 * Strings
 */

void
mono_gc_set_string_length (MonoString *str, gint32 new_length)
{
	mono_unichar2 *new_end = str->chars + new_length;

	/* zero the discarded string. This null-delimits the string and allows
	 * the space to be reclaimed by SGen. */

	if (sgen_nursery_canaries_enabled () && sgen_ptr_in_nursery (str)) {
		CHECK_CANARY_FOR_OBJECT ((GCObject*)str, TRUE);
		memset (new_end, 0, (str->length - new_length + 1) * sizeof (mono_unichar2) + CANARY_SIZE);
		memcpy (new_end + 1, CANARY_STRING, CANARY_SIZE);
	} else {
		memset (new_end, 0, (str->length - new_length + 1) * sizeof (mono_unichar2));
	}

	str->length = new_length;
}
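/*
 * Illustrative arithmetic (not part of the original file): truncating a
 * 10-char string to 6 zeroes (10 - 6 + 1) = 5 UTF-16 units, i.e. the four
 * discarded characters plus the old terminator slot, which leaves the new
 * terminator in place at chars [6].
 */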
/*
 * Profiling
 */

#define GC_ROOT_NUM 32
#define SPECIAL_ADDRESS_FIN_QUEUE ((mono_byte*)1)
#define SPECIAL_ADDRESS_CRIT_FIN_QUEUE ((mono_byte*)2)
#define SPECIAL_ADDRESS_EPHEMERON ((mono_byte*)3)

typedef struct {
	int count;		/* must be the first field */
	void *addresses [GC_ROOT_NUM];
	void *objects [GC_ROOT_NUM];
} GCRootReport;

static void
notify_gc_roots (GCRootReport *report)
{
	if (!report->count)
		return;
	MONO_PROFILER_RAISE (gc_roots, (report->count, (const mono_byte *const *)report->addresses, (MonoObject *const *) report->objects));
	report->count = 0;
}

static void
report_gc_root (GCRootReport *report, void *address, void *object)
{
	if (report->count == GC_ROOT_NUM)
		notify_gc_roots (report);
	report->addresses [report->count] = address;
	report->objects [report->count] = object;
	report->count++;
}

static void
single_arg_report_root (MonoObject **obj, void *gc_data)
{
	GCRootReport *report = (GCRootReport*)gc_data;
	if (*obj)
		report_gc_root (report, obj, *obj);
}

static void
two_args_report_root (void *address, MonoObject *obj, void *gc_data)
{
	GCRootReport *report = (GCRootReport*)gc_data;
	if (obj)
		report_gc_root (report, address, obj);
}

static void
precisely_report_roots_from (GCRootReport *report, void** start_root, void** end_root, mword desc)
{
	switch (desc & ROOT_DESC_TYPE_MASK) {
	case ROOT_DESC_BITMAP:
		desc >>= ROOT_DESC_TYPE_SHIFT;
		while (desc) {
			if ((desc & 1) && *start_root)
				report_gc_root (report, start_root, *start_root);
			desc >>= 1;
			start_root++;
		}
		return;
	case ROOT_DESC_COMPLEX: {
		gsize *bitmap_data = (gsize *)sgen_get_complex_descriptor_bitmap (desc);
		gsize bwords = (*bitmap_data) - 1;
		void **start_run = start_root;
		bitmap_data++;
		while (bwords-- > 0) {
			gsize bmap = *bitmap_data++;
			void **objptr = start_run;
			while (bmap) {
				if ((bmap & 1) && *objptr)
					report_gc_root (report, objptr, *objptr);
				bmap >>= 1;
				++objptr;
			}
			start_run += GC_BITS_PER_WORD;
		}
		break;
	}
	case ROOT_DESC_VECTOR: {
		void **p;

		for (p = start_root; p < end_root; p++) {
			if (*p)
				report_gc_root (report, p, *p);
		}
		break;
	}
	case ROOT_DESC_USER: {
		MonoGCRootMarkFunc marker = (MonoGCRootMarkFunc)sgen_get_user_descriptor_func (desc);

		if ((void*)marker == (void*)sgen_mark_normal_gc_handles)
			sgen_gc_handles_report_roots (two_args_report_root, report);
		else
			marker ((MonoObject**)start_root, single_arg_report_root, report);
		break;
	}
	case ROOT_DESC_RUN_LEN:
		g_assert_not_reached ();
	default:
		g_assert_not_reached ();
	}
}

static void
report_pinning_roots (GCRootReport *report, void **start, void **end)
{
	while (start < end) {
		mword addr = (mword)*start;
		addr &= ~(SGEN_ALLOC_ALIGN - 1);
		if (addr)
			report_gc_root (report, start, (void*)addr);

		start++;
	}
}

static SgenPointerQueue pinned_objects = SGEN_POINTER_QUEUE_INIT (INTERNAL_MEM_MOVED_OBJECT);
static mword lower_bound, upper_bound;

static GCObject*
find_pinned_obj (char *addr)
{
	size_t idx = sgen_pointer_queue_search (&pinned_objects, addr);

	if (idx != pinned_objects.next_slot) {
		if (pinned_objects.data [idx] == addr)
			return (GCObject*)pinned_objects.data [idx];
		if (idx == 0)
			return NULL;
	}

	GCObject *obj = (GCObject*)pinned_objects.data [idx - 1];
	if (addr > (char*)obj && addr < ((char*)obj + sgen_safe_object_get_size (obj)))
		return obj;
	return NULL;
}
/*
 * We pass @root_report_address so registers are properly accounted towards their thread
 */
static void
report_conservative_roots (GCRootReport *report, void *root_report_address, void **start, void **end)
{
	while (start < end) {
		mword addr = (mword)*start;
		addr &= ~(SGEN_ALLOC_ALIGN - 1);

		if (addr < lower_bound || addr > upper_bound) {
			++start;
			continue;
		}

		GCObject *obj = find_pinned_obj ((char*)addr);
		if (obj)
			report_gc_root (report, root_report_address, obj);
		start++;
	}
}

typedef struct {
	gboolean precise;
	GCRootReport *report;
	SgenThreadInfo *info;
} ReportHandleStackRoot;

static void
report_handle_stack_root (gpointer *ptr, gpointer user_data)
{
	ReportHandleStackRoot *ud = (ReportHandleStackRoot*)user_data;
	GCRootReport *report = ud->report;
	gpointer addr = ud->info->client_info.info.handle_stack;

	// Note: We know that *ptr != NULL.
	if (ud->precise)
		report_gc_root (report, addr, *ptr);
	else
		report_conservative_roots (report, addr, ptr, ptr + 1);
}

static void
report_handle_stack_roots (GCRootReport *report, SgenThreadInfo *info, gboolean precise)
{
	ReportHandleStackRoot ud;
	memset (&ud, 0, sizeof (ud));
	ud.precise = precise;
	ud.report = report;
	ud.info = info;

	mono_handle_stack_scan (info->client_info.info.handle_stack, report_handle_stack_root, &ud, ud.precise, FALSE);
}

static void
report_stack_roots (void)
{
	GCRootReport report = {0};
	FOREACH_THREAD_EXCLUDE (info, MONO_THREAD_INFO_FLAGS_NO_GC) {
		void *aligned_stack_start;

		if (info->client_info.skip) {
			continue;
		} else if (!mono_thread_info_is_live (info)) {
			continue;
		} else if (!info->client_info.stack_start) {
			continue;
		}

		g_assert (info->client_info.stack_start);
		g_assert (info->client_info.info.stack_end);

		aligned_stack_start = (void*)(mword) ALIGN_TO ((mword)info->client_info.stack_start, SIZEOF_VOID_P);
#ifdef HOST_WIN32
		/* Windows uses a guard page before the committed stack memory pages to detect when the
		   stack needs to be grown. If we suspend a thread just after a function prolog has
		   decremented the stack pointer to point into the guard page but before the thread has
		   been able to read or write to that page, starting the stack scan at aligned_stack_start
		   will raise a STATUS_GUARD_PAGE_VIOLATION and the process will crash. This code uses
		   VirtualQuery() to determine whether stack_start points into the guard page and then
		   updates aligned_stack_start to point at the next non-guard page. */
		MEMORY_BASIC_INFORMATION mem_info;
		SIZE_T result = VirtualQuery (info->client_info.stack_start, &mem_info, sizeof(mem_info));
		g_assert (result != 0);
		if (mem_info.Protect & PAGE_GUARD) {
			aligned_stack_start = ((char*) mem_info.BaseAddress) + mem_info.RegionSize;
		}
#endif

		g_assert (info->client_info.suspend_done);

		report_conservative_roots (&report, aligned_stack_start, (void **)aligned_stack_start, (void **)info->client_info.info.stack_end);
		report_conservative_roots (&report, aligned_stack_start, (void**)&info->client_info.ctx, (void**)(&info->client_info.ctx + 1));

		report_handle_stack_roots (&report, info, FALSE);
		report_handle_stack_roots (&report, info, TRUE);
	} FOREACH_THREAD_END

	notify_gc_roots (&report);
}
static void
report_pin_queue (void)
{
	lower_bound = SIZE_MAX;
	upper_bound = 0;

	//sort the addresses
	sgen_pointer_queue_sort_uniq (&pinned_objects);

	for (int i = 0; i < pinned_objects.next_slot; ++i) {
		GCObject *obj = (GCObject*)pinned_objects.data [i];
		ssize_t size = sgen_safe_object_get_size (obj);

		ssize_t addr = (ssize_t)obj;
		lower_bound = MIN (lower_bound, addr);
		upper_bound = MAX (upper_bound, addr + size);
	}

	report_stack_roots ();
	sgen_pointer_queue_clear (&pinned_objects);
}

static void
report_finalizer_roots_from_queue (SgenPointerQueue *queue, void* queue_address)
{
	GCRootReport report;
	size_t i;

	report.count = 0;
	for (i = 0; i < queue->next_slot; ++i) {
		void *obj = queue->data [i];
		if (!obj)
			continue;
		report_gc_root (&report, queue_address, obj);
	}
	notify_gc_roots (&report);
}

static void
report_registered_roots_by_type (int root_type)
{
	GCRootReport report = { 0 };
	void **start_root;
	RootRecord *root;
	report.count = 0;
	SGEN_HASH_TABLE_FOREACH (&sgen_roots_hash [root_type], void **, start_root, RootRecord *, root) {
		SGEN_LOG (6, "Profiler root scan %p-%p (desc: %p)", start_root, root->end_root, (void*)(intptr_t)root->root_desc);
		if (root_type == ROOT_TYPE_PINNED)
			report_pinning_roots (&report, start_root, (void**)root->end_root);
		else
			precisely_report_roots_from (&report, start_root, (void**)root->end_root, root->root_desc);
	} SGEN_HASH_TABLE_FOREACH_END;
	notify_gc_roots (&report);
}

static void
report_registered_roots (void)
{
	for (int i = 0; i < ROOT_TYPE_NUM; ++i)
		report_registered_roots_by_type (i);
}

static void
report_ephemeron_roots (void)
{
	EphemeronLinkNode *current = ephemeron_list;
	Ephemeron *cur, *array_end;
	GCObject *tombstone;
	GCRootReport report = { 0 };

	for (current = ephemeron_list; current; current = current->next) {
		MonoArray *array = current->array;

		if (!sgen_is_object_alive_for_current_gen ((GCObject*)array))
			continue;

		cur = mono_array_addr_internal (array, Ephemeron, 0);
		array_end = cur + mono_array_length_internal (array);
		tombstone = SGEN_LOAD_VTABLE ((GCObject*)array)->domain->ephemeron_tombstone;

		for (; cur < array_end; ++cur) {
			GCObject *key = cur->key;

			if (!key || key == tombstone)
				continue;

			if (cur->value && sgen_is_object_alive_for_current_gen (key))
				report_gc_root (&report, SPECIAL_ADDRESS_EPHEMERON, cur->value);
		}
	}

	notify_gc_roots (&report);
}
static void
sgen_report_all_roots (SgenPointerQueue *fin_ready_queue, SgenPointerQueue *critical_fin_queue)
{
	if (!MONO_PROFILER_ENABLED (gc_roots))
		return;

	report_registered_roots ();
	report_ephemeron_roots ();
	report_pin_queue ();
	report_finalizer_roots_from_queue (fin_ready_queue, SPECIAL_ADDRESS_FIN_QUEUE);
	report_finalizer_roots_from_queue (critical_fin_queue, SPECIAL_ADDRESS_CRIT_FIN_QUEUE);
}

void
sgen_client_pinning_start (void)
{
	if (!MONO_PROFILER_ENABLED (gc_roots))
		return;

	sgen_pointer_queue_clear (&pinned_objects);
}

void
sgen_client_pinning_end (void)
{
	if (!MONO_PROFILER_ENABLED (gc_roots))
		return;
}

void
sgen_client_nursery_objects_pinned (void **definitely_pinned, int count)
{
	if (!MONO_PROFILER_ENABLED (gc_roots))
		return;

	for (int i = 0; i < count; ++i)
		sgen_pointer_queue_add (&pinned_objects, definitely_pinned [i]);
}

void
sgen_client_pinned_los_object (GCObject *obj)
{
	if (!MONO_PROFILER_ENABLED (gc_roots))
		return;

	sgen_pointer_queue_add (&pinned_objects, obj);
}

void
sgen_client_pinned_cemented_object (GCObject *obj)
{
	if (!MONO_PROFILER_ENABLED (gc_roots))
		return;

	// TODO: How do we report this in a way that makes sense?
}

void
sgen_client_pinned_major_heap_object (GCObject *obj)
{
	if (!MONO_PROFILER_ENABLED (gc_roots))
		return;

	sgen_pointer_queue_add (&pinned_objects, obj);
}

void
sgen_client_collecting_minor_report_roots (SgenPointerQueue *fin_ready_queue, SgenPointerQueue *critical_fin_queue)
{
	sgen_report_all_roots (fin_ready_queue, critical_fin_queue);
}

void
sgen_client_collecting_major_report_roots (SgenPointerQueue *fin_ready_queue, SgenPointerQueue *critical_fin_queue)
{
	sgen_report_all_roots (fin_ready_queue, critical_fin_queue);
}

#define MOVED_OBJECTS_NUM 64
static void *moved_objects [MOVED_OBJECTS_NUM];
static int moved_objects_idx = 0;

static SgenPointerQueue moved_objects_queue = SGEN_POINTER_QUEUE_INIT (INTERNAL_MEM_MOVED_OBJECT);
void
mono_sgen_register_moved_object (void *obj, void *destination)
{
	/*
	 * This function can be called from SGen's worker threads. We want to try
	 * and avoid exposing those threads to the profiler API, so queue up move
	 * events and send them later when the main GC thread calls
	 * mono_sgen_gc_event_moves ().
	 *
	 * TODO: Once SGen has multiple worker threads, we need to switch to a
	 * lock-free data structure for the queue as multiple threads will be
	 * adding to it at the same time.
	 */
	if (sgen_workers_is_worker_thread (mono_native_thread_id_get ())) {
		sgen_pointer_queue_add (&moved_objects_queue, obj);
		sgen_pointer_queue_add (&moved_objects_queue, destination);
	} else {
		if (moved_objects_idx == MOVED_OBJECTS_NUM) {
			MONO_PROFILER_RAISE (gc_moves, ((MonoObject **) moved_objects, moved_objects_idx));
			moved_objects_idx = 0;
		}

		moved_objects [moved_objects_idx++] = obj;
		moved_objects [moved_objects_idx++] = destination;
	}
}

void
mono_sgen_gc_event_moves (void)
{
	while (!sgen_pointer_queue_is_empty (&moved_objects_queue)) {
		void *dst = sgen_pointer_queue_pop (&moved_objects_queue);
		void *src = sgen_pointer_queue_pop (&moved_objects_queue);

		mono_sgen_register_moved_object (src, dst);
	}

	if (moved_objects_idx) {
		MONO_PROFILER_RAISE (gc_moves, ((MonoObject **) moved_objects, moved_objects_idx));
		moved_objects_idx = 0;
	}
}
/*
 * Heap walking
 */

#define REFS_SIZE 128
typedef struct {
	void *data;
	MonoGCReferences callback;
	int flags;
	int count;
	int called;
	MonoObject *refs [REFS_SIZE];
	uintptr_t offsets [REFS_SIZE];
} HeapWalkInfo;

#undef HANDLE_PTR
#define HANDLE_PTR(ptr,obj) do {					\
		if (*(ptr)) {						\
			if (hwi->count == REFS_SIZE) {			\
				hwi->callback ((MonoObject*)start, mono_object_class (start), hwi->called? 0: size, hwi->count, hwi->refs, hwi->offsets, hwi->data); \
				hwi->count = 0;				\
				hwi->called = 1;			\
			}						\
			hwi->offsets [hwi->count] = (char*)(ptr)-(char*)start; \
			hwi->refs [hwi->count++] = *(ptr);		\
		}							\
	} while (0)

static void
collect_references (HeapWalkInfo *hwi, GCObject *obj, size_t size)
{
	char *start = (char*)obj;
	mword desc = sgen_obj_get_descriptor (obj);

#include "sgen/sgen-scan-object.h"
}

static void
walk_references (GCObject *start, size_t size, void *data)
{
	HeapWalkInfo *hwi = (HeapWalkInfo *)data;
	hwi->called = 0;
	hwi->count = 0;
	collect_references (hwi, start, size);
	if (hwi->count || !hwi->called)
		hwi->callback (start, mono_object_class (start), hwi->called? 0: size, hwi->count, hwi->refs, hwi->offsets, hwi->data);
}
1958 * mono_gc_walk_heap:
1959 * \param flags flags for future use
1960 * \param callback a function pointer called for each object in the heap
1961 * \param data a user data pointer that is passed to callback
1962 * This function can be used to iterate over all the live objects in the heap;
1963 * for each object, \p callback is invoked, providing info about the object's
1964 * location in memory, its class, its size and the objects it references.
1965 * For each referenced object its offset from the object address is
1966 * reported in the offsets array.
1967 * The object references may be buffered, so the callback may be invoked
1968 * multiple times for the same object: in all but the first call, the size
1969 * argument will be zero.
1970 * Note that this function can be only called in the \c MONO_GC_EVENT_PRE_START_WORLD
1971 * profiler event handler.
1972 * \returns a non-zero value if the GC doesn't support heap walking
int
mono_gc_walk_heap (int flags, MonoGCReferences callback, void *data)
{
	HeapWalkInfo hwi;

	hwi.flags = flags;
	hwi.callback = callback;
	hwi.data = data;

	sgen_clear_nursery_fragments ();
	sgen_scan_area_with_callback (sgen_nursery_section->data, sgen_nursery_section->end_data, walk_references, &hwi, FALSE, TRUE);

	sgen_major_collector.iterate_objects (ITERATE_OBJECTS_SWEEP_ALL, walk_references, &hwi);
	sgen_los_iterate_objects (walk_references, &hwi);

	return 0;
}
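
/*
 * Usage sketch (hypothetical caller, not part of this file), assuming the
 * MonoGCReferences callback signature from mono-gc.h; count_refs and total
 * are made-up names. From a MONO_GC_EVENT_PRE_START_WORLD profiler handler:
 *
 *   static int
 *   count_refs (MonoObject *obj, MonoClass *klass, uintptr_t size,
 *               uintptr_t num, MonoObject **refs, uintptr_t *offsets, void *data)
 *   {
 *       // size is non-zero only on the first invocation for obj.
 *       *(size_t *)data += num;
 *       return 0;
 *   }
 *
 *   size_t total = 0;
 *   mono_gc_walk_heap (0, count_refs, &total);
 */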

/*
 * Threads
 */

void
mono_gc_set_gc_callbacks (MonoGCCallbacks *callbacks)
{
	gc_callbacks = *callbacks;
}

MonoGCCallbacks *
mono_gc_get_gc_callbacks (void)
{
	return &gc_callbacks;
}

gpointer
mono_gc_thread_attach (SgenThreadInfo *info)
{
	return sgen_thread_attach (info);
}

void
sgen_client_thread_attach (SgenThreadInfo* info)
{
	mono_tls_set_sgen_thread_info (info);

	info->client_info.skip = FALSE;

	info->client_info.stack_start = NULL;

#ifdef SGEN_POSIX_STW
	info->client_info.stop_count = -1;
	info->client_info.signal = 0;
#endif

	memset (&info->client_info.ctx, 0, sizeof (MonoContext));

	if (mono_gc_get_gc_callbacks ()->thread_attach_func)
		info->client_info.runtime_data = mono_gc_get_gc_callbacks ()->thread_attach_func ();

	sgen_binary_protocol_thread_register ((gpointer)mono_thread_info_get_tid (info));

	SGEN_LOG (3, "registered thread %p (%p) stack end %p", info, (gpointer)mono_thread_info_get_tid (info), info->client_info.info.stack_end);

	info->client_info.info.handle_stack = mono_handle_stack_alloc ();
}

void
mono_gc_thread_detach_with_lock (SgenThreadInfo *info)
{
	sgen_thread_detach_with_lock (info);
}

void
sgen_client_thread_detach_with_lock (SgenThreadInfo *p)
{
	MonoNativeThreadId tid;

	mono_tls_set_sgen_thread_info (NULL);

	tid = mono_thread_info_get_tid (p);

	mono_threads_add_joinable_runtime_thread (&p->client_info.info);

	if (mono_gc_get_gc_callbacks ()->thread_detach_func) {
		mono_gc_get_gc_callbacks ()->thread_detach_func (p->client_info.runtime_data);
		p->client_info.runtime_data = NULL;
	}

	sgen_binary_protocol_thread_unregister ((gpointer)tid);
	SGEN_LOG (3, "unregister thread %p (%p)", p, (gpointer)tid);

	HandleStack *handles = p->client_info.info.handle_stack;
	p->client_info.info.handle_stack = NULL;
	mono_handle_stack_free (handles);
}

void
mono_gc_skip_thread_changing (gboolean skip)
{
	/*
	 * SGen's STW will respect the thread info flags, but we do need to take
	 * the GC lock when changing them. If we don't do this, SGen might end up
	 * trying to resume a thread that wasn't suspended because it had
	 * MONO_THREAD_INFO_FLAGS_NO_GC set when STW began.
	 */
	LOCK_GC;

	if (skip) {
		/*
		 * If we skip scanning a thread with a non-empty handle stack, we may move an
		 * object but fail to update the reference in the handle.
		 */
		HandleStack *stack = mono_thread_info_current ()->client_info.info.handle_stack;
		g_assert (stack == NULL || mono_handle_stack_is_empty (stack));
	}
}

void
mono_gc_skip_thread_changed (gboolean skip)
{
	UNLOCK_GC;
}
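
/*
 * Note: mono_gc_skip_thread_changing and mono_gc_skip_thread_changed above
 * bracket the caller's update of MONO_THREAD_INFO_FLAGS_NO_GC: the first
 * takes the GC lock, the caller flips the flag, and the second releases the
 * lock, so a stop-the-world never observes the flag mid-change.
 */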

gboolean
mono_gc_thread_in_critical_region (SgenThreadInfo *info)
{
	return info->client_info.in_critical_region;
}

/**
 * mono_gc_is_gc_thread:
 */
gboolean
mono_gc_is_gc_thread (void)
{
	gboolean result;
	LOCK_GC;
	result = mono_thread_info_current () != NULL;
	UNLOCK_GC;
	return result;
}

void
sgen_client_thread_register_worker (void)
{
	mono_thread_info_register_small_id ();
	mono_native_thread_set_name (mono_native_thread_id_get (), "SGen worker");
}

/* Variables holding the start/end of the nursery so they don't have to be passed with every call */
static void *scan_area_arg_start, *scan_area_arg_end;

void
mono_gc_conservatively_scan_area (void *start, void *end)
{
	sgen_conservatively_pin_objects_from ((void **)start, (void **)end, scan_area_arg_start, scan_area_arg_end, PIN_TYPE_STACK);
}

void*
mono_gc_scan_object (void *obj, void *gc_data)
{
	ScanCopyContext *ctx = (ScanCopyContext *)gc_data;
	ctx->ops->copy_or_mark_object ((GCObject**)&obj, ctx->queue);
	return obj;
}
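
/*
 * Note: mono_gc_scan_object above is the hook the runtime's precise
 * stack-mark code calls back into while thread_mark_func walks a frame (see
 * sgen_client_scan_thread_data below, which passes &ctx as gc_data): the
 * reference is copied or marked through the current ScanCopyContext and the
 * possibly-moved pointer is returned.
 */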

typedef struct {
	void **start_nursery;
	void **end_nursery;
} PinHandleStackInteriorPtrData;

/* Called when we're scanning the handle stack imprecisely and we encounter a pointer into the
   middle of an object.
 */
static void
pin_handle_stack_interior_ptrs (void **ptr_slot, void *user_data)
{
	PinHandleStackInteriorPtrData *ud = (PinHandleStackInteriorPtrData *)user_data;
	sgen_conservatively_pin_objects_from (ptr_slot, ptr_slot+1, ud->start_nursery, ud->end_nursery, PIN_TYPE_STACK);
}

/*
 * Mark from thread stacks and registers.
 */
void
sgen_client_scan_thread_data (void *start_nursery, void *end_nursery, gboolean precise, ScanCopyContext ctx)
{
	scan_area_arg_start = start_nursery;
	scan_area_arg_end = end_nursery;
#ifdef HOST_WASM
	//Under WASM we don't scan thread stacks and we can't trust the values we find there either.
	return;
#endif

	FOREACH_THREAD_EXCLUDE (info, MONO_THREAD_INFO_FLAGS_NO_GC) {
		int skip_reason = 0;
		void *aligned_stack_start;

		if (info->client_info.skip) {
			SGEN_LOG (3, "Skipping dead thread %p, range: %p-%p, size: %zd", info, info->client_info.stack_start, info->client_info.info.stack_end, (char*)info->client_info.info.stack_end - (char*)info->client_info.stack_start);
			skip_reason = 1;
		} else if (!mono_thread_info_is_live (info)) {
			SGEN_LOG (3, "Skipping non-running thread %p, range: %p-%p, size: %zd (state %x)", info, info->client_info.stack_start, info->client_info.info.stack_end, (char*)info->client_info.info.stack_end - (char*)info->client_info.stack_start, info->client_info.info.thread_state);
			skip_reason = 3;
		} else if (!info->client_info.stack_start) {
			SGEN_LOG (3, "Skipping starting or detaching thread %p", info);
			skip_reason = 4;
		}

		sgen_binary_protocol_scan_stack ((gpointer)mono_thread_info_get_tid (info), info->client_info.stack_start, info->client_info.info.stack_end, skip_reason);

		if (skip_reason) {
			if (precise) {
				/* If we skip a thread with a non-empty handle stack and then it
				 * resumes running we may potentially move an object but fail to
				 * update the reference in the handle.
				 */
				HandleStack *stack = info->client_info.info.handle_stack;
				g_assert (stack == NULL || mono_handle_stack_is_empty (stack));
			}
			continue;
		}

		g_assert (info->client_info.stack_start);
		g_assert (info->client_info.info.stack_end);

		aligned_stack_start = (void*)(mword) ALIGN_TO ((mword)info->client_info.stack_start, SIZEOF_VOID_P);
#ifdef HOST_WIN32
		/* Windows uses a guard page before the committed stack memory pages to detect when the
		   stack needs to be grown. If we suspend a thread just after a function prolog has
		   decremented the stack pointer to point into the guard page but before the thread has
		   been able to read or write to that page, starting the stack scan at aligned_stack_start
		   will raise a STATUS_GUARD_PAGE_VIOLATION and the process will crash. This code uses
		   VirtualQuery() to determine whether stack_start points into the guard page and then
		   updates aligned_stack_start to point at the next non-guard page. */
		MEMORY_BASIC_INFORMATION mem_info;
		SIZE_T result = VirtualQuery (info->client_info.stack_start, &mem_info, sizeof (mem_info));
		g_assert (result != 0);
		if (mem_info.Protect & PAGE_GUARD) {
			aligned_stack_start = ((char*) mem_info.BaseAddress) + mem_info.RegionSize;
		}
#endif

		g_assert (info->client_info.suspend_done);
		SGEN_LOG (3, "Scanning thread %p, range: %p-%p, size: %zd, pinned=%zd", info, info->client_info.stack_start, info->client_info.info.stack_end, (char*)info->client_info.info.stack_end - (char*)info->client_info.stack_start, sgen_get_pinned_count ());
		if (mono_gc_get_gc_callbacks ()->thread_mark_func && !conservative_stack_mark) {
			mono_gc_get_gc_callbacks ()->thread_mark_func (info->client_info.runtime_data, (guint8 *)aligned_stack_start, (guint8 *)info->client_info.info.stack_end, precise, &ctx);
		} else if (!precise) {
			if (!conservative_stack_mark) {
				fprintf (stderr, "Precise stack mark not supported - disabling.\n");
				conservative_stack_mark = TRUE;
			}
			//FIXME we should eventually use the new stack_mark from coop
			sgen_conservatively_pin_objects_from ((void **)aligned_stack_start, (void **)info->client_info.info.stack_end, start_nursery, end_nursery, PIN_TYPE_STACK);
		}

		if (!precise) {
			sgen_conservatively_pin_objects_from ((void**)&info->client_info.ctx, (void**)(&info->client_info.ctx + 1),
				start_nursery, end_nursery, PIN_TYPE_STACK);

			// This is used on Coop GC for platforms where we cannot get the data for individual registers.
			// We force a spill of all registers into the stack and pass a chunk of data into sgen.
			//FIXME under coop, for now, what we need to ensure is that we scan any extra memory from info->client_info.info.stack_end to stack_mark
			MonoThreadUnwindState *state = &info->client_info.info.thread_saved_state [SELF_SUSPEND_STATE_INDEX];
			if (state && state->gc_stackdata) {
				sgen_conservatively_pin_objects_from ((void **)state->gc_stackdata, (void**)((char*)state->gc_stackdata + state->gc_stackdata_size),
					start_nursery, end_nursery, PIN_TYPE_STACK);
			}
		}
		if (info->client_info.info.handle_stack) {
			/*
			  Make two passes over the handle stack. On the imprecise pass, pin all
			  objects where the handle points into the interior of the object. On the
			  precise pass, copy or mark all the objects that have handles to the
			  beginning of the object.
			*/
			if (precise)
				mono_handle_stack_scan (info->client_info.info.handle_stack, (GcScanFunc)ctx.ops->copy_or_mark_object, ctx.queue, precise, TRUE);
			else {
				PinHandleStackInteriorPtrData ud;
				memset (&ud, 0, sizeof (ud));
				ud.start_nursery = (void**)start_nursery;
				ud.end_nursery = (void**)end_nursery;
				mono_handle_stack_scan (info->client_info.info.handle_stack, pin_handle_stack_interior_ptrs, &ud, precise, FALSE);
			}
		}
	} FOREACH_THREAD_END
}

/*
 * mono_gc_set_stack_end:
 *
 * Set the end of the current thread's stack to STACK_END. The stack space between
 * STACK_END and the real end of the thread's stack will not be scanned during collections.
 */
void
mono_gc_set_stack_end (void *stack_end)
{
	SgenThreadInfo *info;

	LOCK_GC;
	info = mono_thread_info_current ();
	if (info) {
		SGEN_ASSERT (0, stack_end < info->client_info.info.stack_end, "Can only lower stack end");
		info->client_info.info.stack_end = stack_end;
	}
	UNLOCK_GC;
}

/*
 * Roots
 */

int
mono_gc_register_root (char *start, size_t size, MonoGCDescriptor descr, MonoGCRootSource source, void *key, const char *msg)
{
	return sgen_register_root (start, size, descr, descr ? ROOT_TYPE_NORMAL : ROOT_TYPE_PINNED, source, key, msg);
}

int
mono_gc_register_root_wbarrier (char *start, size_t size, MonoGCDescriptor descr, MonoGCRootSource source, void *key, const char *msg)
{
	return sgen_register_root (start, size, descr, ROOT_TYPE_WBARRIER, source, key, msg);
}

void
mono_gc_deregister_root (char* addr)
{
	sgen_deregister_root (addr);
}

/*
 * PThreads
 */

#ifndef HOST_WIN32
int
mono_gc_pthread_create (pthread_t *new_thread, const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg)
{
	int res;

	MONO_ENTER_GC_SAFE;
	mono_threads_join_lock ();
	res = pthread_create (new_thread, attr, start_routine, arg);
	mono_threads_join_unlock ();
	MONO_EXIT_GC_SAFE;

	return res;
}
#endif
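
/*
 * Note on the wrapper above: the join lock serializes thread creation
 * against the runtime joining exited threads, and the GC-safe bracket keeps
 * this (potentially blocking) call from delaying a stop-the-world pause for
 * the creating thread.
 */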

/*
 * Miscellaneous
 */

static size_t last_heap_size = -1;
static size_t worker_heap_size;

void
sgen_client_total_allocated_heap_changed (size_t allocated_heap)
{
	mono_runtime_resource_check_limit (MONO_RESOURCE_GC_HEAP, allocated_heap);

	/*
	 * This function can be called from SGen's worker threads. We want to try
	 * and avoid exposing those threads to the profiler API, so save the heap
	 * size value and report it later when the main GC thread calls
	 * mono_sgen_gc_event_resize ().
	 */
	worker_heap_size = allocated_heap;
}

void
mono_sgen_gc_event_resize (void)
{
	if (worker_heap_size != last_heap_size) {
		last_heap_size = worker_heap_size;
		MONO_PROFILER_RAISE (gc_resize, (last_heap_size));
	}
}

gboolean
mono_gc_user_markers_supported (void)
{
	return TRUE;
}

gboolean
mono_object_is_alive (MonoObject* o)
{
	return TRUE;
}

int
mono_gc_get_generation (MonoObject *obj)
{
	if (sgen_ptr_in_nursery (obj))
		return 0;
	return 1;
}

const char *
mono_gc_get_gc_name (void)
{
	return "sgen";
}

char*
mono_gc_get_description (void)
{
#ifdef HAVE_CONC_GC_AS_DEFAULT
	return g_strdup ("sgen (concurrent by default)");
#else
	return g_strdup ("sgen");
#endif
}

void
mono_gc_set_desktop_mode (void)
{
}

gboolean
mono_gc_is_moving (void)
{
	return TRUE;
}

gboolean
mono_gc_is_disabled (void)
{
	return FALSE;
}

#ifdef HOST_WIN32
BOOL APIENTRY mono_gc_dllmain (HMODULE module_handle, DWORD reason, LPVOID reserved)
{
	return TRUE;
}
#endif

int
mono_gc_max_generation (void)
{
	return 1;
}

gboolean
mono_gc_precise_stack_mark_enabled (void)
{
	return !conservative_stack_mark;
}

void
mono_gc_collect (int generation)
{
	MONO_ENTER_GC_UNSAFE;
	sgen_gc_collect (generation);
	MONO_EXIT_GC_UNSAFE;
}

int
mono_gc_collection_count (int generation)
{
	return sgen_gc_collection_count (generation);
}

int64_t
mono_gc_get_used_size (void)
{
	return (int64_t)sgen_gc_get_used_size ();
}

int64_t
mono_gc_get_heap_size (void)
{
	return (int64_t)sgen_gc_get_total_heap_allocation ();
}

MonoGCDescriptor
mono_gc_make_root_descr_user (MonoGCRootMarkFunc marker)
{
	return sgen_make_user_root_descriptor (marker);
}

MonoGCDescriptor
mono_gc_make_descr_for_string (gsize *bitmap, int numbits)
{
	return SGEN_DESC_STRING;
}

void
mono_gc_register_obj_with_weak_fields (void *obj)
{
	sgen_register_obj_with_weak_fields ((MonoObject*)obj);
}

void*
mono_gc_get_nursery (int *shift_bits, size_t *size)
{
	*size = sgen_nursery_size;
	*shift_bits = sgen_nursery_bits;
	return sgen_get_nursery_start ();
}

int
mono_gc_get_los_limit (void)
{
	return SGEN_MAX_SMALL_OBJ_SIZE;
}
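
/*
 * Note: objects larger than this limit are not allocated in the nursery or
 * the major heap's small-object space; they go directly to the Large Object
 * Space (LOS).
 */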

gpointer
sgen_client_default_metadata (void)
{
	return mono_domain_get ();
}

gpointer
sgen_client_metadata_for_object (GCObject *obj)
{
	return mono_object_domain (obj);
}

/**
 * mono_gchandle_new_internal:
 * \param obj managed object to get a handle for
 * \param pinned whether the object should be pinned
 * This returns a handle that wraps the object; it is used to keep a
 * reference to a managed object from the unmanaged world and to prevent the
 * object from being disposed.
 *
 * If \p pinned is false the address of the object cannot be obtained; if it is
 * true the address of the object can be obtained. This also pins the
 * object so a moving garbage collector will not move it.
 *
 * \returns a handle that can be used to access the object from unmanaged code.
 */
guint32
mono_gchandle_new_internal (MonoObject *obj, gboolean pinned)
{
	return sgen_gchandle_new (obj, pinned);
}
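
/*
 * Usage sketch (hypothetical embedding code, not part of this file): keep a
 * managed object alive across unmanaged calls, re-fetching it through the
 * handle since a moving collector may relocate a non-pinned target.
 *
 *   guint32 h = mono_gchandle_new_internal (obj, FALSE);
 *   ...
 *   obj = mono_gchandle_get_target_internal (h);
 *   mono_gchandle_free_internal (h);
 */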

/**
 * mono_gchandle_new_weakref_internal:
 * \param obj managed object to get a handle for
 * \param track_resurrection Determines how long to track the object: if TRUE, the object is tracked after finalization; if FALSE, it is only tracked up until the point of finalization.
 *
 * This returns a weak handle that wraps the object; it is used to
 * keep a reference to a managed object from the unmanaged world.
 * Unlike \c mono_gchandle_new_internal, the object can be reclaimed by the
 * garbage collector. In this case the value of the GCHandle will be
 * set to zero.
 *
 * If \p track_resurrection is TRUE the object will be tracked through
 * finalization and if the object is resurrected during the execution
 * of the finalizer, then the returned weakref will continue to hold
 * a reference to the object. If \p track_resurrection is FALSE, then
 * the weak reference's target will become NULL as soon as the object
 * is passed on to the finalizer.
 *
 * \returns a handle that can be used to access the object from
 * unmanaged code.
 */
guint32
mono_gchandle_new_weakref_internal (GCObject *obj, gboolean track_resurrection)
{
	return sgen_gchandle_new_weakref (obj, track_resurrection);
}

/**
 * mono_gchandle_is_in_domain:
 * \param gchandle a GCHandle's handle.
 * \param domain An application domain.
 * \returns TRUE if the object wrapped by the \p gchandle belongs to the specific \p domain.
 */
gboolean
mono_gchandle_is_in_domain (guint32 gchandle, MonoDomain *domain)
{
	MonoDomain *gchandle_domain = (MonoDomain *)sgen_gchandle_get_metadata (gchandle);
	return domain->domain_id == gchandle_domain->domain_id;
}

/**
 * mono_gchandle_free_internal:
 * \param gchandle a GCHandle's handle.
 *
 * Frees the \p gchandle handle. If there are no outstanding
 * references, the garbage collector can reclaim the memory of the
 * object wrapped.
 */
void
mono_gchandle_free_internal (guint32 gchandle)
{
	sgen_gchandle_free (gchandle);
}

/**
 * mono_gchandle_free_domain:
 * \param unloading domain that is unloading
 *
 * Function used internally to clean up any GC handles for objects belonging
 * to the specified domain during appdomain unload.
 */
void
mono_gchandle_free_domain (MonoDomain *unloading)
{
}

/**
 * mono_gchandle_get_target_internal:
 * \param gchandle a GCHandle's handle.
 *
 * The handle was previously created by calling \c mono_gchandle_new_internal or
 * \c mono_gchandle_new_weakref.
 *
 * \returns a pointer to the \c MonoObject* represented by the handle or
 * NULL for a collected object if using a weakref handle.
 */
MonoObject*
mono_gchandle_get_target_internal (guint32 gchandle)
{
	return sgen_gchandle_get_target (gchandle);
}

static gpointer
null_link_if_in_domain (gpointer hidden, GCHandleType handle_type, int max_generation, gpointer user)
{
	MonoDomain *unloading_domain = (MonoDomain *)user;
	MonoDomain *obj_domain;
	gboolean is_weak = MONO_GC_HANDLE_TYPE_IS_WEAK (handle_type);
	if (MONO_GC_HANDLE_IS_OBJECT_POINTER (hidden)) {
		MonoObject *obj = (MonoObject *)MONO_GC_REVEAL_POINTER (hidden, is_weak);
		obj_domain = mono_object_domain (obj);
	} else {
		obj_domain = (MonoDomain *)MONO_GC_REVEAL_POINTER (hidden, is_weak);
	}
	if (unloading_domain->domain_id == obj_domain->domain_id)
		return NULL;
	return hidden;
}
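
/*
 * Note on the `hidden` argument above: a handle slot holds either the target
 * object itself or, when no object is set, per-handle metadata, which for
 * Mono is the owning MonoDomain (see sgen_client_default_metadata above).
 * The MONO_GC_HANDLE_IS_OBJECT_POINTER tag check distinguishes the two
 * cases, so both populated and empty handles can be attributed to a domain.
 */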

void
sgen_null_links_for_domain (MonoDomain *domain)
{
	guint type;
	for (type = HANDLE_TYPE_MIN; type < HANDLE_TYPE_MAX; ++type)
		sgen_gchandle_iterate ((GCHandleType)type, GENERATION_OLD, null_link_if_in_domain, domain);
}

void
mono_gchandle_set_target (guint32 gchandle, MonoObject *obj)
{
	sgen_gchandle_set_target (gchandle, obj);
}

void
sgen_client_gchandle_created (int handle_type, GCObject *obj, guint32 handle)
{
#ifndef DISABLE_PERFCOUNTERS
	mono_atomic_inc_i32 (&mono_perfcounters->gc_num_handles);
#endif

	MONO_PROFILER_RAISE (gc_handle_created, (handle, (MonoGCHandleType)handle_type, obj));
}

void
sgen_client_gchandle_destroyed (int handle_type, guint32 handle)
{
#ifndef DISABLE_PERFCOUNTERS
	mono_atomic_dec_i32 (&mono_perfcounters->gc_num_handles);
#endif

	MONO_PROFILER_RAISE (gc_handle_deleted, (handle, (MonoGCHandleType)handle_type));
}

void
sgen_client_ensure_weak_gchandles_accessible (void)
{
	/*
	 * During the second bridge processing step the world is
	 * running again. That step processes all weak links once
	 * more to null those that refer to dead objects. Before that
	 * is completed, those links must not be followed, so we
	 * conservatively wait for bridge processing when any weak
	 * link is dereferenced.
	 */
	/* FIXME: A GC can occur after this check fails, in which case we
	 * should wait for bridge processing but would fail to do so.
	 */
	if (G_UNLIKELY (mono_bridge_processing_in_progress))
		mono_gc_wait_for_bridge_processing ();
}

void*
mono_gc_invoke_with_gc_lock (MonoGCLockedCallbackFunc func, void *data)
{
	void *result;
	LOCK_INTERRUPTION;
	result = func (data);
	UNLOCK_INTERRUPTION;
	return result;
}

void
mono_gc_register_altstack (gpointer stack, gint32 stack_size, gpointer altstack, gint32 altstack_size)
{
	// FIXME:
}

guint8*
mono_gc_get_card_table (int *shift_bits, gpointer *mask)
{
	return sgen_get_card_table_configuration (shift_bits, mask);
}

guint8*
mono_gc_get_target_card_table (int *shift_bits, target_mgreg_t *mask)
{
	return sgen_get_target_card_table_configuration (shift_bits, mask);
}

gboolean
mono_gc_card_table_nursery_check (void)
{
	return !sgen_get_major_collector ()->is_concurrent;
}

/* A negative value removes previously added pressure. */
void
mono_gc_add_memory_pressure (gint64 value)
{
	/* FIXME: Implement at some point? */
}

/*
 * Logging
 */

void
sgen_client_degraded_allocation (void)
{
	static gint32 last_major_gc_warned = -1;
	static gint32 num_degraded = 0;

	gint32 major_gc_count = mono_atomic_load_i32 (&mono_gc_stats.major_gc_count);

	//The WASM target always triggers degraded allocation before collecting, so there is no point in printing the warning as it would just confuse users.
#if !defined (TARGET_WASM)
	if (mono_atomic_load_i32 (&last_major_gc_warned) < major_gc_count) {
		gint32 num = mono_atomic_inc_i32 (&num_degraded);
		if (num == 1 || num == 3)
			mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_GC, "Warning: Degraded allocation. Consider increasing nursery-size if the warning persists.");
		else if (num == 10)
			mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_GC, "Warning: Repeated degraded allocation. Consider increasing nursery-size.");

		mono_atomic_store_i32 (&last_major_gc_warned, major_gc_count);
	}
#endif
}

/*
 * Debugging
 */

const char*
sgen_client_description_for_internal_mem_type (int type)
{
	switch (type) {
	case INTERNAL_MEM_EPHEMERON_LINK: return "ephemeron-link";
	case INTERNAL_MEM_MOVED_OBJECT: return "moved-object";
	default:
		return NULL;
	}
}

void
sgen_client_pre_collection_checks (void)
{
	if (sgen_mono_xdomain_checks) {
		sgen_clear_nursery_fragments ();
		sgen_check_for_xdomain_refs ();
	}
}

gboolean
sgen_client_vtable_is_inited (MonoVTable *vt)
{
	return m_class_is_inited (vt->klass);
}

const char*
sgen_client_vtable_get_namespace (MonoVTable *vt)
{
	return m_class_get_name_space (vt->klass);
}

const char*
sgen_client_vtable_get_name (MonoVTable *vt)
{
	return m_class_get_name (vt->klass);
}

/*
 * Initialization
 */

void
sgen_client_init (void)
{
	mono_thread_callbacks_init ();
	mono_thread_info_init (sizeof (SgenThreadInfo));

	/* Keep this the default for now */
	/* Precise marking is broken on all supported targets. Disable until fixed. */
	conservative_stack_mark = TRUE;

	sgen_register_fixed_internal_mem_type (INTERNAL_MEM_EPHEMERON_LINK, sizeof (EphemeronLinkNode));

	mono_sgen_init_stw ();

	mono_tls_init_gc_keys ();

	mono_thread_info_attach ();
}

void
mono_gc_init_icalls (void)
{
	mono_register_jit_icall (mono_gc_alloc_obj, "mono_gc_alloc_obj", mono_create_icall_signature ("object ptr int"), FALSE);
	mono_register_jit_icall (mono_gc_alloc_vector, "mono_gc_alloc_vector", mono_create_icall_signature ("object ptr int int"), FALSE);
	mono_register_jit_icall (mono_gc_alloc_string, "mono_gc_alloc_string", mono_create_icall_signature ("object ptr int int32"), FALSE);
	mono_register_jit_icall (mono_profiler_raise_gc_allocation, "mono_profiler_raise_gc_allocation", mono_create_icall_signature ("void object"), FALSE);
}

gboolean
sgen_client_handle_gc_param (const char *opt)
{
	if (g_str_has_prefix (opt, "stack-mark=")) {
		opt = strchr (opt, '=') + 1;
		if (!strcmp (opt, "precise")) {
			conservative_stack_mark = FALSE;
		} else if (!strcmp (opt, "conservative")) {
			conservative_stack_mark = TRUE;
		} else {
			sgen_env_var_error (MONO_GC_PARAMS_NAME, conservative_stack_mark ? "Using `conservative`." : "Using `precise`.",
					"Invalid value `%s` for `stack-mark` option, possible values are: `precise`, `conservative`.", opt);
		}
	} else if (g_str_has_prefix (opt, "bridge-implementation=")) {
		opt = strchr (opt, '=') + 1;
		sgen_set_bridge_implementation (opt);
	} else if (g_str_has_prefix (opt, "toggleref-test")) {
		/* FIXME: This should probably be in MONO_GC_DEBUG. */
		sgen_register_test_toggleref_callback ();
	} else if (!sgen_bridge_handle_gc_param (opt)) {
		return FALSE;
	}
	return TRUE;
}
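
/*
 * Example (sketch): these options arrive as a comma-separated list in the
 * MONO_GC_PARAMS environment variable, e.g. (hypothetical command line):
 *
 *   MONO_GC_PARAMS=stack-mark=precise,bridge-implementation=tarjan mono app.exe
 */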

void
sgen_client_print_gc_params_usage (void)
{
	fprintf (stderr, " stack-mark=MARK-METHOD (where MARK-METHOD is 'precise' or 'conservative')\n");
}

gboolean
sgen_client_handle_gc_debug (const char *opt)
{
	if (!strcmp (opt, "xdomain-checks")) {
		sgen_mono_xdomain_checks = TRUE;
	} else if (!strcmp (opt, "do-not-finalize")) {
		mono_do_not_finalize = TRUE;
	} else if (g_str_has_prefix (opt, "do-not-finalize=")) {
		opt = strchr (opt, '=') + 1;
		mono_do_not_finalize = TRUE;
		mono_do_not_finalize_class_names = g_strsplit (opt, ",", 0);
	} else if (!strcmp (opt, "log-finalizers")) {
		mono_log_finalizers = TRUE;
	} else if (!strcmp (opt, "no-managed-allocator")) {
		sgen_set_use_managed_allocator (FALSE);
	} else if (!sgen_bridge_handle_gc_debug (opt)) {
		return FALSE;
	}
	return TRUE;
}
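
/*
 * Example (sketch): likewise, these flags come from the MONO_GC_DEBUG
 * environment variable; MyNs.MyClass below is a hypothetical class name:
 *
 *   MONO_GC_DEBUG=xdomain-checks,do-not-finalize=MyNs.MyClass mono app.exe
 */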

void
sgen_client_print_gc_debug_usage (void)
{
	fprintf (stderr, " xdomain-checks\n");
	fprintf (stderr, " do-not-finalize\n");
	fprintf (stderr, " log-finalizers\n");
	fprintf (stderr, " no-managed-allocator\n");
	sgen_bridge_print_gc_debug_usage ();
}

gpointer
sgen_client_get_provenance (void)
{
#ifdef SGEN_OBJECT_PROVENANCE
	MonoGCCallbacks *cb = mono_gc_get_gc_callbacks ();
	gpointer (*get_provenance_func) (void);
	if (!cb)
		return NULL;
	get_provenance_func = cb->get_provenance_func;
	if (get_provenance_func)
		return get_provenance_func ();
	return NULL;
#else
	return NULL;
#endif
}

void
sgen_client_describe_invalid_pointer (GCObject *ptr)
{
	sgen_bridge_describe_pointer (ptr);
}

static gboolean gc_inited;

/**
 * mono_gc_base_init:
 */
void
mono_gc_base_init (void)
{
	if (gc_inited)
		return;

	mono_counters_init ();

#ifndef HOST_WIN32
	mono_w32handle_init ();
#endif

#ifdef HEAVY_STATISTICS
	mono_counters_register ("los marked cards", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &los_marked_cards);
	mono_counters_register ("los array cards scanned", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &los_array_cards);
	mono_counters_register ("los array remsets", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &los_array_remsets);

	mono_counters_register ("WBarrier set arrayref", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_wbarrier_set_arrayref);
	mono_counters_register ("WBarrier value copy", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_wbarrier_value_copy);
	mono_counters_register ("WBarrier object copy", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_wbarrier_object_copy);
#endif

	sgen_gc_init ();

	gc_inited = TRUE;
}

void
mono_gc_base_cleanup (void)
{
	/*
	 * Note we don't fully clean up the GC here, mainly just the threads.
	 *
	 * We need to finish any work on the sgen threads before shutting down
	 * the sgen threadpool. After this point we can still trigger GCs as
	 * part of domain free, but they should all be forced and not use the
	 * threadpool.
	 */
	sgen_finish_concurrent_work ("cleanup", TRUE);
	sgen_thread_pool_shutdown ();

	// We should have consumed any outstanding moves.
	g_assert (sgen_pointer_queue_is_empty (&moved_objects_queue));
}

gboolean
mono_gc_is_null (void)
{
	return FALSE;
}

gsize *
sgen_client_get_weak_bitmap (MonoVTable *vt, int *nbits)
{
	MonoClass *klass = vt->klass;

	return mono_class_get_weak_bitmap (klass, nbits);
}

void
sgen_client_binary_protocol_collection_begin (int minor_gc_count, int generation)
{
	static gboolean pseudo_roots_registered;

	MONO_GC_BEGIN (generation);

	MONO_PROFILER_RAISE (gc_event, (MONO_GC_EVENT_START, generation, generation == GENERATION_OLD && sgen_concurrent_collection_in_progress));

	if (!pseudo_roots_registered) {
		pseudo_roots_registered = TRUE;
		MONO_PROFILER_RAISE (gc_root_register, (SPECIAL_ADDRESS_FIN_QUEUE, 1, MONO_ROOT_SOURCE_FINALIZER_QUEUE, NULL, "Finalizer Queue"));
		MONO_PROFILER_RAISE (gc_root_register, (SPECIAL_ADDRESS_CRIT_FIN_QUEUE, 1, MONO_ROOT_SOURCE_FINALIZER_QUEUE, NULL, "Finalizer Queue (Critical)"));
		MONO_PROFILER_RAISE (gc_root_register, (SPECIAL_ADDRESS_EPHEMERON, 1, MONO_ROOT_SOURCE_EPHEMERON, NULL, "Ephemerons"));
	}

#ifndef DISABLE_PERFCOUNTERS
	if (generation == GENERATION_NURSERY)
		mono_atomic_inc_i32 (&mono_perfcounters->gc_collections0);
	else
		mono_atomic_inc_i32 (&mono_perfcounters->gc_collections1);
#endif
}

void
sgen_client_binary_protocol_collection_end (int minor_gc_count, int generation, long long num_objects_scanned, long long num_unique_objects_scanned)
{
	MONO_GC_END (generation);

	MONO_PROFILER_RAISE (gc_event, (MONO_GC_EVENT_END, generation, generation == GENERATION_OLD && sgen_concurrent_collection_in_progress));
}

#ifdef HOST_WASM
void
sgen_client_schedule_background_job (void (*cb)(void))
{
	mono_threads_schedule_background_job (cb);
}
#endif

#endif /* HAVE_SGEN_GC */