/**
 * \file
 * SGen features specific to Mono.
 *
 * Copyright (C) 2014 Xamarin Inc
 *
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
 */

#include "config.h"
#ifdef HAVE_SGEN_GC

#include "sgen/sgen-gc.h"
#include "sgen/sgen-protocol.h"
#include "metadata/monitor.h"
#include "sgen/sgen-layout-stats.h"
#include "sgen/sgen-client.h"
#include "sgen/sgen-cardtable.h"
#include "sgen/sgen-pinning.h"
#include "sgen/sgen-workers.h"
#include "metadata/class-init.h"
#include "metadata/marshal.h"
#include "metadata/method-builder.h"
#include "metadata/abi-details.h"
#include "metadata/class-abi-details.h"
#include "metadata/mono-gc.h"
#include "metadata/runtime.h"
#include "metadata/sgen-bridge-internals.h"
#include "metadata/sgen-mono.h"
#include "metadata/sgen-mono-ilgen.h"
#include "metadata/gc-internals.h"
#include "metadata/handle.h"
33 #include "metadata/abi-details.h"
34 #include "utils/mono-memory-model.h"
35 #include "utils/mono-logger-internals.h"
36 #include "utils/mono-threads-coop.h"
37 #include "utils/mono-threads.h"
38 #include "metadata/w32handle.h"
39 #include "icall-signatures.h"
40 #include "mono/utils/mono-tls-inline.h"

#if _MSC_VER
#pragma warning(disable:4312) // FIXME pointer cast to different size
#endif

#ifdef HEAVY_STATISTICS
static guint64 stat_wbarrier_set_arrayref = 0;
static guint64 stat_wbarrier_value_copy = 0;
static guint64 stat_wbarrier_object_copy = 0;

static guint64 los_marked_cards;
static guint64 los_array_cards;
static guint64 los_array_remsets;
#endif

/* If set, mark stacks conservatively, even if precise marking is possible */
static gboolean conservative_stack_mark = FALSE;
/* If set, check that there are no references to the domain left at domain unload */
gboolean sgen_mono_xdomain_checks = FALSE;

/* Functions supplied by the runtime to be called by the GC */
static MonoGCCallbacks gc_callbacks;

/* Used for GetGCMemoryInfo */
SgenGCInfo sgen_gc_info;

#define OPDEF(a,b,c,d,e,f,g,h,i,j) \
	a = i,

enum {
#include "mono/cil/opcode.def"
	CEE_LAST
};

#undef OPDEF
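
/*
 * A note on the X-macro above: opcode.def invokes OPDEF once per CIL opcode,
 * so including it while OPDEF expands to "a = i," turns the whole opcode
 * table into enum members. A minimal sketch of the same technique, using a
 * hypothetical COLOR table that is not part of this file:
 *
 *   #define COLOR(name, value) name = value,
 *   enum { COLOR (RED, 1) COLOR (GREEN, 2) };  // expands to RED = 1, GREEN = 2,
 *   #undef COLOR
 */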

/*
 * Write barriers
 */

static gboolean
ptr_on_stack (void *ptr)
{
	gpointer stack_start = &stack_start;
	SgenThreadInfo *info = mono_thread_info_current ();

	if (ptr >= stack_start && ptr < (gpointer)info->client_info.info.stack_end)
		return TRUE;
	return FALSE;
}

#ifdef SGEN_HEAVY_BINARY_PROTOCOL
#undef HANDLE_PTR
#define HANDLE_PTR(ptr,obj) do {					\
		gpointer o = *(gpointer*)(ptr);				\
		if ((o)) {						\
			gpointer d = ((char*)dest) + ((char*)(ptr) - (char*)(obj)); \
			sgen_binary_protocol_wbarrier (d, o, (gpointer) SGEN_LOAD_VTABLE (o)); \
		}							\
	} while (0)

static void
scan_object_for_binary_protocol_copy_wbarrier (gpointer dest, char *start, mword desc)
{
#define SCAN_OBJECT_NOVTABLE
#include "sgen/sgen-scan-object.h"
}
#endif

void
mono_gc_wbarrier_value_copy_internal (gpointer dest, gconstpointer src, int count, MonoClass *klass)
{
	HEAVY_STAT (++stat_wbarrier_value_copy);
	g_assert (m_class_is_valuetype (klass));

	SGEN_LOG (8, "Adding value remset at %p, count %d, descr %p for class %s (%p)", dest, count, (gpointer)(uintptr_t)m_class_get_gc_descr (klass), m_class_get_name (klass), klass);

	if (sgen_ptr_in_nursery (dest) || ptr_on_stack (dest) || !sgen_gc_descr_has_references ((mword)m_class_get_gc_descr (klass))) {
		size_t element_size = mono_class_value_size (klass, NULL);
		size_t size = count * element_size;
		mono_gc_memmove_atomic (dest, src, size);
		return;
	}

#ifdef SGEN_HEAVY_BINARY_PROTOCOL
	if (sgen_binary_protocol_is_heavy_enabled ()) {
		size_t element_size = mono_class_value_size (klass, NULL);
		int i;
		for (i = 0; i < count; ++i) {
			scan_object_for_binary_protocol_copy_wbarrier ((char*)dest + i * element_size,
					(char*)src + i * element_size - MONO_ABI_SIZEOF (MonoObject),
					(mword) m_class_get_gc_descr (klass));
		}
	}
#endif

	sgen_get_remset ()->wbarrier_value_copy (dest, src, count, mono_class_value_size (klass, NULL));
}

/**
 * mono_gc_wbarrier_object_copy_internal:
 *
 * Write barrier to call when \p obj is the result of a clone or copy of an object.
 */
void
mono_gc_wbarrier_object_copy_internal (MonoObject* obj, MonoObject *src)
{
	int size;

	HEAVY_STAT (++stat_wbarrier_object_copy);

	SGEN_ASSERT (6, !ptr_on_stack (obj), "Why is this called for a non-reference type?");
	if (sgen_ptr_in_nursery (obj) || !SGEN_OBJECT_HAS_REFERENCES (src)) {
		size = m_class_get_instance_size (mono_object_class (obj));
		mono_gc_memmove_aligned ((char*)obj + MONO_ABI_SIZEOF (MonoObject), (char*)src + MONO_ABI_SIZEOF (MonoObject),
				size - MONO_ABI_SIZEOF (MonoObject));
		return;
	}

#ifdef SGEN_HEAVY_BINARY_PROTOCOL
	if (sgen_binary_protocol_is_heavy_enabled ())
		scan_object_for_binary_protocol_copy_wbarrier (obj, (char*)src, (mword) src->vtable->gc_descr);
#endif

	sgen_get_remset ()->wbarrier_object_copy (obj, src);
}

/**
 * mono_gc_wbarrier_set_arrayref_internal:
 */
void
mono_gc_wbarrier_set_arrayref_internal (MonoArray *arr, gpointer slot_ptr, MonoObject* value)
{
	HEAVY_STAT (++stat_wbarrier_set_arrayref);
	if (sgen_ptr_in_nursery (slot_ptr)) {
		*(void**)slot_ptr = value;
		return;
	}
	SGEN_LOG (8, "Adding remset at %p", slot_ptr);
	if (value)
		sgen_binary_protocol_wbarrier (slot_ptr, value, value->vtable);

	sgen_get_remset ()->wbarrier_set_field ((GCObject*)arr, slot_ptr, value);
}
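
/*
 * Usage sketch (hypothetical caller, not part of this file): every managed
 * reference store into the heap must go through the matching barrier so the
 * remembered set sees old-to-young pointers, e.g.:
 *
 *   static void
 *   set_element (MonoArray *arr, int idx, MonoObject *val)
 *   {
 *           gpointer slot = mono_array_addr_with_size_fast (arr, sizeof (gpointer), idx);
 *           mono_gc_wbarrier_set_arrayref_internal (arr, slot, val);
 *   }
 *
 * Note that the barrier performs the store itself (both the nursery fast
 * path above and the remset's wbarrier_set_field write the slot), so the
 * caller must not store separately.
 */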
187 * mono_gc_wbarrier_set_field_internal:
189 void
190 mono_gc_wbarrier_set_field_internal (MonoObject *obj, gpointer field_ptr, MonoObject* value)
192 mono_gc_wbarrier_set_arrayref_internal ((MonoArray*)obj, field_ptr, value);
195 void
196 mono_gc_wbarrier_range_copy (gpointer _dest, gconstpointer _src, int size)
198 sgen_wbarrier_range_copy (_dest, _src, size);
201 MonoRangeCopyFunction
202 mono_gc_get_range_copy_func (void)
204 return sgen_get_remset ()->wbarrier_range_copy;
208 mono_gc_get_suspend_signal (void)
210 return mono_threads_suspend_get_suspend_signal ();
214 mono_gc_get_restart_signal (void)
216 return mono_threads_suspend_get_restart_signal ();

static MonoMethod *write_barrier_conc_method;
static MonoMethod *write_barrier_noconc_method;

gboolean
sgen_is_critical_method (MonoMethod *method)
{
	return sgen_is_managed_allocator (method);
}

gboolean
sgen_has_critical_method (void)
{
	return sgen_has_managed_allocator ();
}

gboolean
mono_gc_is_critical_method (MonoMethod *method)
{
#ifdef HOST_WASM
	// Methods can't be critical under WASM due to its single-threadedness.
	return FALSE;
#else
	return sgen_is_critical_method (method);
#endif
}

static MonoSgenMonoCallbacks sgenmono_cb;
static gboolean cb_inited = FALSE;

void
mono_install_sgen_mono_callbacks (MonoSgenMonoCallbacks *cb)
{
	g_assert (!cb_inited);
	g_assert (cb->version == MONO_SGEN_MONO_CALLBACKS_VERSION);
	memcpy (&sgenmono_cb, cb, sizeof (MonoSgenMonoCallbacks));
	cb_inited = TRUE;
}

#if !ENABLE_ILGEN

static void
emit_nursery_check_noilgen (MonoMethodBuilder *mb, gboolean is_concurrent)
{
}

static void
emit_managed_allocator_noilgen (MonoMethodBuilder *mb, gboolean slowpath, gboolean profiler, int atype)
{
}

static void
install_noilgen (void)
{
	MonoSgenMonoCallbacks cb;
	cb.version = MONO_SGEN_MONO_CALLBACKS_VERSION;
	cb.emit_nursery_check = emit_nursery_check_noilgen;
	cb.emit_managed_allocator = emit_managed_allocator_noilgen;
	mono_install_sgen_mono_callbacks (&cb);
}

#endif

static MonoSgenMonoCallbacks *
get_sgen_mono_cb (void)
{
	if (G_UNLIKELY (!cb_inited)) {
#ifdef ENABLE_ILGEN
		mono_sgen_mono_ilgen_init ();
#else
		install_noilgen ();
#endif
	}
	return &sgenmono_cb;
}

MonoMethod*
mono_gc_get_specific_write_barrier (gboolean is_concurrent)
{
	MonoMethod *res;
	MonoMethodBuilder *mb;
	MonoMethodSignature *sig;
	MonoMethod **write_barrier_method_addr;
	WrapperInfo *info;
	// FIXME: Maybe create a separate version for ctors (the branch would be
	// correctly predicted more times)
	if (is_concurrent)
		write_barrier_method_addr = &write_barrier_conc_method;
	else
		write_barrier_method_addr = &write_barrier_noconc_method;

	if (*write_barrier_method_addr)
		return *write_barrier_method_addr;

	/* Create the IL version of mono_gc_barrier_generic_store () */
	sig = mono_metadata_signature_alloc (mono_defaults.corlib, 1);
	sig->ret = mono_get_void_type ();
	sig->params [0] = mono_get_int_type ();

	if (is_concurrent)
		mb = mono_mb_new (mono_defaults.object_class, "wbarrier_conc", MONO_WRAPPER_WRITE_BARRIER);
	else
		mb = mono_mb_new (mono_defaults.object_class, "wbarrier_noconc", MONO_WRAPPER_WRITE_BARRIER);

	get_sgen_mono_cb ()->emit_nursery_check (mb, is_concurrent);

	res = mono_mb_create_method (mb, sig, 16);
	info = mono_wrapper_info_create (mb, WRAPPER_SUBTYPE_NONE);
	mono_marshal_set_wrapper_info (res, info);
	mono_mb_free (mb);

	LOCK_GC;
	if (*write_barrier_method_addr) {
		/* Already created */
		mono_free_method (res);
	} else {
		/* double-checked locking */
		mono_memory_barrier ();
		*write_barrier_method_addr = res;
	}
	UNLOCK_GC;

	return *write_barrier_method_addr;
}
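
/*
 * A note on the locking above (sketch, not additional runtime code): the
 * wrapper is built outside the GC lock, so two racing threads may both build
 * one. The shape is the publish-once variant of double-checked locking:
 *
 *   build ();                       // expensive work, no lock held
 *   LOCK;
 *   if (already published)
 *           discard ours;
 *   else {
 *           mono_memory_barrier (); // make the method visible before the pointer
 *           publish;
 *   }
 *   UNLOCK;
 *
 * A reader that sees a non-NULL *write_barrier_method_addr therefore sees a
 * fully constructed method.
 */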

MonoMethod*
mono_gc_get_write_barrier (void)
{
	return mono_gc_get_specific_write_barrier (sgen_major_collector.is_concurrent);
}

/*
 * Dummy filler objects
 */

/* Vtable of the objects used to fill out nursery fragments before a collection */
static GCVTable array_fill_vtable;

static GCVTable
get_array_fill_vtable (void)
{
	if (!array_fill_vtable) {
		static char _vtable[sizeof(MonoVTable)+8];
		MonoVTable* vtable = (MonoVTable*) ALIGN_TO((mword)_vtable, 8);
		gsize bmap;

		MonoClass *klass = mono_class_create_array_fill_type ();
		MonoDomain *domain = mono_get_root_domain ();
		g_assert (domain);

		vtable->klass = klass;
		bmap = 0;
		vtable->gc_descr = mono_gc_make_descr_for_array (TRUE, &bmap, 0, 8);
		vtable->rank = 1;

		array_fill_vtable = vtable;
	}
	return array_fill_vtable;
}

gboolean
sgen_client_array_fill_range (char *start, size_t size)
{
	MonoArray *o;

	if (size < MONO_SIZEOF_MONO_ARRAY) {
		memset (start, 0, size);
		return FALSE;
	}

	o = (MonoArray*)start;
	o->obj.vtable = (MonoVTable*)get_array_fill_vtable ();
	/* Mark this as not a real object */
	o->obj.synchronisation = (MonoThreadsSync *)GINT_TO_POINTER (-1);
	o->bounds = NULL;
	/* We use an array of int64 elements */
	g_assert ((size - MONO_SIZEOF_MONO_ARRAY) % 8 == 0);
	o->max_length = (mono_array_size_t)((size - MONO_SIZEOF_MONO_ARRAY) / 8);

	return TRUE;
}
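
/*
 * Worked example (illustrative numbers): filling a 64-byte hole where
 * MONO_SIZEOF_MONO_ARRAY is 32 yields max_length = (64 - 32) / 8 = 4, i.e.
 * the hole parses as a 4-element int64[] so heap walkers can skip over it.
 */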

void
sgen_client_zero_array_fill_header (void *p, size_t size)
{
	if (size >= MONO_SIZEOF_MONO_ARRAY) {
		memset (p, 0, MONO_SIZEOF_MONO_ARRAY);
	} else {
		static guint8 zeros [MONO_SIZEOF_MONO_ARRAY];

		SGEN_ASSERT (0, !memcmp (p, zeros, size), "TLAB segment must be zeroed out.");
	}
}

MonoVTable *
mono_gc_get_vtable (MonoObject *obj)
{
	// See sgen/sgen-tagged-pointer.h.
	return SGEN_LOAD_VTABLE (obj);
}

/*
 * Finalization
 */

static MonoGCFinalizerCallbacks fin_callbacks;

guint
mono_gc_get_vtable_bits (MonoClass *klass)
{
	guint res = 0;
	/* FIXME: move this to the bridge code */
	if (sgen_need_bridge_processing ()) {
		switch (sgen_bridge_class_kind (klass)) {
		case GC_BRIDGE_TRANSPARENT_BRIDGE_CLASS:
		case GC_BRIDGE_OPAQUE_BRIDGE_CLASS:
			res = SGEN_GC_BIT_BRIDGE_OBJECT;
			break;
		case GC_BRIDGE_OPAQUE_CLASS:
			res = SGEN_GC_BIT_BRIDGE_OPAQUE_OBJECT;
			break;
		case GC_BRIDGE_TRANSPARENT_CLASS:
			break;
		}
	}
	if (fin_callbacks.is_class_finalization_aware) {
		if (fin_callbacks.is_class_finalization_aware (klass))
			res |= SGEN_GC_BIT_FINALIZER_AWARE;
	}
	return res;
}

static gboolean
is_finalization_aware (MonoObject *obj)
{
	MonoVTable *vt = SGEN_LOAD_VTABLE (obj);
	return (vt->gc_bits & SGEN_GC_BIT_FINALIZER_AWARE) == SGEN_GC_BIT_FINALIZER_AWARE;
}

void
sgen_client_object_queued_for_finalization (GCObject *obj)
{
	if (fin_callbacks.object_queued_for_finalization && is_finalization_aware (obj))
		fin_callbacks.object_queued_for_finalization (obj);

#ifdef ENABLE_DTRACE
	if (G_UNLIKELY (MONO_GC_FINALIZE_ENQUEUE_ENABLED ())) {
		int gen = sgen_ptr_in_nursery (obj) ? GENERATION_NURSERY : GENERATION_OLD;
		GCVTable vt = SGEN_LOAD_VTABLE (obj);
		MONO_GC_FINALIZE_ENQUEUE ((mword)obj, sgen_safe_object_get_size (obj),
				sgen_client_vtable_get_namespace (vt), sgen_client_vtable_get_name (vt), gen,
				sgen_client_object_has_critical_finalizer (obj));
	}
#endif
}

void
mono_gc_register_finalizer_callbacks (MonoGCFinalizerCallbacks *callbacks)
{
	if (callbacks->version != MONO_GC_FINALIZER_EXTENSION_VERSION)
		g_error ("Invalid finalizer callback version. Expected %d but got %d\n", MONO_GC_FINALIZER_EXTENSION_VERSION, callbacks->version);

	fin_callbacks = *callbacks;
}

void
sgen_client_run_finalize (MonoObject *obj)
{
	mono_gc_run_finalize (obj, NULL);
}

/**
 * mono_gc_invoke_finalizers:
 */
guint
mono_gc_invoke_finalizers (void)
{
	return sgen_gc_invoke_finalizers ();
}

/**
 * mono_gc_pending_finalizers:
 */
MonoBoolean
mono_gc_pending_finalizers (void)
{
	return sgen_have_pending_finalizers ();
}

void
sgen_client_finalize_notify (void)
{
	mono_gc_finalize_notify ();
}

void
mono_gc_register_for_finalization (MonoObject *obj, MonoFinalizationProc user_data)
{
	sgen_object_register_for_finalization (obj, user_data);
}

static gboolean
object_in_domain_predicate (MonoObject *obj, void *user_data)
{
	MonoDomain *domain = (MonoDomain *)user_data;
	if (mono_object_domain (obj) == domain) {
		SGEN_LOG (5, "Unregistering finalizer for object: %p (%s)", obj, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (obj)));
		return TRUE;
	}
	return FALSE;
}

/**
 * mono_gc_finalize_domain:
 * \param domain the unloading appdomain
 * Enqueue for finalization all objects that belong to the unloading appdomain \p domain.
 */
void
mono_gc_finalize_domain (MonoDomain *domain)
{
	sgen_finalize_if (object_in_domain_predicate, domain);
}

void
mono_gc_suspend_finalizers (void)
{
	sgen_set_suspend_finalizers ();
}

/*
 * Ephemerons
 */

typedef struct _EphemeronLinkNode EphemeronLinkNode;

struct _EphemeronLinkNode {
	EphemeronLinkNode *next;
	MonoArray *array;
};

typedef struct {
	GCObject *key;
	GCObject *value;
} Ephemeron;
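
/*
 * Background sketch: each managed ConditionalWeakTable keeps its entries in a
 * MonoArray of Ephemeron pairs, which the runtime registers here through
 * mono_gc_ephemeron_array_add (). Conceptually:
 *
 *   MonoArray *array;                              // element type Ephemeron
 *   Ephemeron *e = mono_array_addr_internal (array, Ephemeron, 0);
 *   // e[i].value is kept alive only while e[i].key is alive; a dead key is
 *   // replaced by the domain's tombstone object and the value is nulled.
 */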

static EphemeronLinkNode *ephemeron_list;

/* LOCKING: requires that the GC lock is held */
static MONO_PERMIT (need (sgen_gc_locked)) void
null_ephemerons_for_domain (MonoDomain *domain)
{
	EphemeronLinkNode *current = ephemeron_list, *prev = NULL;

	while (current) {
		MonoObject *object = (MonoObject*)current->array;

		if (object)
			SGEN_ASSERT (0, object->vtable, "Can't have objects without vtables.");

		if (object && object->vtable->domain == domain) {
			EphemeronLinkNode *tmp = current;

			if (prev)
				prev->next = current->next;
			else
				ephemeron_list = current->next;

			current = current->next;
			sgen_free_internal (tmp, INTERNAL_MEM_EPHEMERON_LINK);
		} else {
			prev = current;
			current = current->next;
		}
	}
}

/* LOCKING: requires that the GC lock is held */
void
sgen_client_clear_unreachable_ephemerons (ScanCopyContext ctx)
{
	CopyOrMarkObjectFunc copy_func = ctx.ops->copy_or_mark_object;
	SgenGrayQueue *queue = ctx.queue;
	EphemeronLinkNode *current = ephemeron_list, *prev = NULL;
	Ephemeron *cur, *array_end;
	GCObject *tombstone;

	while (current) {
		MonoArray *array = current->array;

		if (!sgen_is_object_alive_for_current_gen ((GCObject*)array)) {
			EphemeronLinkNode *tmp = current;

			SGEN_LOG (5, "Dead Ephemeron array at %p", array);

			if (prev)
				prev->next = current->next;
			else
				ephemeron_list = current->next;

			current = current->next;
			sgen_free_internal (tmp, INTERNAL_MEM_EPHEMERON_LINK);

			continue;
		}

		copy_func ((GCObject**)&array, queue);
		current->array = array;

		SGEN_LOG (5, "Clearing unreachable entries for ephemeron array at %p", array);

		cur = mono_array_addr_internal (array, Ephemeron, 0);
		array_end = cur + mono_array_length_internal (array);
		tombstone = SGEN_LOAD_VTABLE ((GCObject*)array)->domain->ephemeron_tombstone;

		for (; cur < array_end; ++cur) {
			GCObject *key = cur->key;

			if (!key || key == tombstone)
				continue;

			SGEN_LOG (5, "[%" G_GSIZE_FORMAT "d] key %p (%s) value %p (%s)", cur - mono_array_addr_internal (array, Ephemeron, 0),
					key, sgen_is_object_alive_for_current_gen (key) ? "reachable" : "unreachable",
					cur->value, cur->value && sgen_is_object_alive_for_current_gen (cur->value) ? "reachable" : "unreachable");

			if (!sgen_is_object_alive_for_current_gen (key)) {
				cur->key = tombstone;
				cur->value = NULL;
				continue;
			}
		}
		prev = current;
		current = current->next;
	}
}

/*
 * LOCKING: requires that the GC lock is held
 *
 * Limitations: We scan all ephemerons on every collection since the current design doesn't allow for a simple nursery/mature split.
 */
gboolean
sgen_client_mark_ephemerons (ScanCopyContext ctx)
{
	CopyOrMarkObjectFunc copy_func = ctx.ops->copy_or_mark_object;
	SgenGrayQueue *queue = ctx.queue;
	gboolean nothing_marked = TRUE;
	EphemeronLinkNode *current = ephemeron_list;
	Ephemeron *cur, *array_end;
	GCObject *tombstone;

	for (current = ephemeron_list; current; current = current->next) {
		MonoArray *array = current->array;
		SGEN_LOG (5, "Ephemeron array at %p", array);

		/* It has to be alive */
		if (!sgen_is_object_alive_for_current_gen ((GCObject*)array)) {
			SGEN_LOG (5, "\tnot reachable");
			continue;
		}

		copy_func ((GCObject**)&array, queue);

		cur = mono_array_addr_internal (array, Ephemeron, 0);
		array_end = cur + mono_array_length_internal (array);
		tombstone = SGEN_LOAD_VTABLE ((GCObject*)array)->domain->ephemeron_tombstone;

		for (; cur < array_end; ++cur) {
			GCObject *key = cur->key;

			if (!key || key == tombstone)
				continue;

			SGEN_LOG (5, "[%" G_GSIZE_FORMAT "d] key %p (%s) value %p (%s)", cur - mono_array_addr_internal (array, Ephemeron, 0),
					key, sgen_is_object_alive_for_current_gen (key) ? "reachable" : "unreachable",
					cur->value, cur->value && sgen_is_object_alive_for_current_gen (cur->value) ? "reachable" : "unreachable");

			if (sgen_is_object_alive_for_current_gen (key)) {
				GCObject *value = cur->value;

				copy_func (&cur->key, queue);
				if (value) {
					if (!sgen_is_object_alive_for_current_gen (value)) {
						nothing_marked = FALSE;
						sgen_binary_protocol_ephemeron_ref (current, key, value);
					}
					copy_func (&cur->value, queue);
				}
			}
		}
	}

	SGEN_LOG (5, "Ephemeron run finished. Is it done %d", nothing_marked);
	return nothing_marked;
}

gboolean
mono_gc_ephemeron_array_add (MonoObject *obj)
{
	EphemeronLinkNode *node;

	LOCK_GC;

	node = (EphemeronLinkNode *)sgen_alloc_internal (INTERNAL_MEM_EPHEMERON_LINK);
	if (!node) {
		UNLOCK_GC;
		return FALSE;
	}
	node->array = (MonoArray*)obj;
	node->next = ephemeron_list;
	ephemeron_list = node;

	SGEN_LOG (5, "Registered ephemeron array %p", obj);

	UNLOCK_GC;
	return TRUE;
}

/*
 * Appdomain handling
 */

static gboolean
need_remove_object_for_domain (GCObject *start, MonoDomain *domain)
{
	if (mono_object_domain (start) == domain) {
		SGEN_LOG (4, "Need to cleanup object %p", start);
		sgen_binary_protocol_cleanup (start, (gpointer)SGEN_LOAD_VTABLE (start), sgen_safe_object_get_size ((GCObject*)start));
		return TRUE;
	}
	return FALSE;
}

static void
process_object_for_domain_clearing (GCObject *start, MonoDomain *domain)
{
	MonoVTable *vt = SGEN_LOAD_VTABLE (start);
	if (vt->klass == mono_defaults.internal_thread_class)
		g_assert (mono_object_domain (start) == mono_get_root_domain ());
	/* The object could be a proxy for an object in the domain
	   we're deleting. */
#ifndef DISABLE_REMOTING
	if (m_class_get_supertypes (mono_defaults.real_proxy_class) && mono_class_has_parent_fast (vt->klass, mono_defaults.real_proxy_class)) {
		MonoObject *server = ((MonoRealProxy*)start)->unwrapped_server;

		/* The server could already have been zeroed out, so
		   we need to check for that, too. */
		if (server && (!SGEN_LOAD_VTABLE (server) || mono_object_domain (server) == domain)) {
			SGEN_LOG (4, "Cleaning up remote pointer in %p to object %p", start, server);
			((MonoRealProxy*)start)->unwrapped_server = NULL;
		}
	}
#endif
}

static gboolean
clear_domain_process_object (GCObject *obj, MonoDomain *domain)
{
	gboolean remove;

	process_object_for_domain_clearing (obj, domain);
	remove = need_remove_object_for_domain (obj, domain);

	if (remove && obj->synchronisation) {
		MonoGCHandle dislink = mono_monitor_get_object_monitor_gchandle (obj);
		if (dislink)
			mono_gchandle_free_internal (dislink);
	}

	return remove;
}

static void
clear_domain_process_minor_object_callback (GCObject *obj, size_t size, MonoDomain *domain)
{
	if (clear_domain_process_object (obj, domain)) {
		CANARIFY_SIZE (size);
		memset (obj, 0, size);
	}
}

static void
clear_domain_process_major_object_callback (GCObject *obj, size_t size, MonoDomain *domain)
{
	clear_domain_process_object (obj, domain);
}

static void
clear_domain_free_major_non_pinned_object_callback (GCObject *obj, size_t size, MonoDomain *domain)
{
	if (need_remove_object_for_domain (obj, domain))
		sgen_major_collector.free_non_pinned_object (obj, size);
}

static void
clear_domain_free_major_pinned_object_callback (GCObject *obj, size_t size, MonoDomain *domain)
{
	if (need_remove_object_for_domain (obj, domain))
		sgen_major_collector.free_pinned_object (obj, size);
}

static void
clear_domain_process_los_object_callback (GCObject *obj, size_t size, MonoDomain *domain)
{
	clear_domain_process_object (obj, domain);
}

static gboolean
clear_domain_free_los_object_callback (GCObject *obj, size_t size, MonoDomain *domain)
{
	return need_remove_object_for_domain (obj, domain);
}

static void
sgen_finish_concurrent_work (const char *reason, gboolean stw)
{
	if (sgen_get_concurrent_collection_in_progress ())
		sgen_perform_collection (0, GENERATION_OLD, reason, TRUE, stw);
	SGEN_ASSERT (0, !sgen_get_concurrent_collection_in_progress (), "We just ordered a synchronous collection. Why are we collecting concurrently?");

	sgen_major_collector.finish_sweeping ();
}

/*
 * When appdomains are unloaded we can easily remove objects that have finalizers,
 * but all the others could still be present in random places on the heap.
 * We need a sweep to get rid of them even though it's going to be costly
 * with big heaps.
 * The reason we need to remove them is because we access the vtable and class
 * structures to know the object size and the reference bitmap: once the domain is
 * unloaded they point to random memory.
 */

void
mono_gc_clear_domain (MonoDomain * domain)
{
	int i;

	LOCK_GC;

	sgen_binary_protocol_domain_unload_begin (domain);

	sgen_stop_world (0, FALSE);

	sgen_finish_concurrent_work ("clear domain", FALSE);

	sgen_process_fin_stage_entries ();

	sgen_clear_nursery_fragments ();

	FOREACH_THREAD_ALL (info) {
		mono_handle_stack_free_domain (info->client_info.info.handle_stack, domain);
	} FOREACH_THREAD_END

	if (sgen_mono_xdomain_checks && domain != mono_get_root_domain ()) {
		sgen_scan_for_registered_roots_in_domain (domain, ROOT_TYPE_NORMAL);
		sgen_scan_for_registered_roots_in_domain (domain, ROOT_TYPE_WBARRIER);
		sgen_check_for_xdomain_refs ();
	}

	/* Ephemerons and dislinks must be processed before LOS since they might end up pointing
	   to memory returned to the OS. */
	null_ephemerons_for_domain (domain);
	sgen_null_links_for_domain (domain);

	for (i = GENERATION_NURSERY; i < GENERATION_MAX; ++i)
		sgen_remove_finalizers_if (object_in_domain_predicate, domain, i);

	sgen_scan_area_with_callback (sgen_nursery_section->data, sgen_nursery_section->end_data,
			(IterateObjectCallbackFunc)clear_domain_process_minor_object_callback, domain, FALSE, TRUE);

	/* We need two passes over major and large objects because
	   freeing such objects might give their memory back to the OS
	   (in the case of large objects) or obliterate its vtable
	   (pinned objects with major-copying or pinned and non-pinned
	   objects with major-mark&sweep), but we might need to
	   dereference a pointer from an object to another object if
	   the first object is a proxy. */
	sgen_major_collector.iterate_objects (ITERATE_OBJECTS_SWEEP_ALL, (IterateObjectCallbackFunc)clear_domain_process_major_object_callback, domain);

	sgen_los_iterate_objects ((IterateObjectCallbackFunc)clear_domain_process_los_object_callback, domain);
	sgen_los_iterate_objects_free ((IterateObjectResultCallbackFunc)clear_domain_free_los_object_callback, domain);

	sgen_major_collector.iterate_objects (ITERATE_OBJECTS_SWEEP_NON_PINNED, (IterateObjectCallbackFunc)clear_domain_free_major_non_pinned_object_callback, domain);
	sgen_major_collector.iterate_objects (ITERATE_OBJECTS_SWEEP_PINNED, (IterateObjectCallbackFunc)clear_domain_free_major_pinned_object_callback, domain);

	if (domain == mono_get_root_domain ()) {
		sgen_pin_stats_report ();
		sgen_object_layout_dump (stdout);
	}

	sgen_restart_world (0, FALSE);

	sgen_binary_protocol_domain_unload_end (domain);
	sgen_binary_protocol_flush_buffers (FALSE);

	UNLOCK_GC;
}

/*
 * Allocation
 */

MonoObject*
mono_gc_alloc_obj (MonoVTable *vtable, size_t size)
{
	MonoObject *obj = sgen_alloc_obj (vtable, size);

	if (G_UNLIKELY (mono_profiler_allocations_enabled ()) && obj)
		MONO_PROFILER_RAISE (gc_allocation, (obj));

	return obj;
}

MonoObject*
mono_gc_alloc_pinned_obj (MonoVTable *vtable, size_t size)
{
	MonoObject *obj = sgen_alloc_obj_pinned (vtable, size);

	if (G_UNLIKELY (mono_profiler_allocations_enabled ()) && obj)
		MONO_PROFILER_RAISE (gc_allocation, (obj));

	return obj;
}

MonoObject*
mono_gc_alloc_mature (MonoVTable *vtable, size_t size)
{
	MonoObject *obj = sgen_alloc_obj_mature (vtable, size);

	if (G_UNLIKELY (mono_profiler_allocations_enabled ()) && obj)
		MONO_PROFILER_RAISE (gc_allocation, (obj));

	return obj;
}

/**
 * mono_gc_alloc_fixed:
 */
MonoObject*
mono_gc_alloc_fixed (size_t size, MonoGCDescriptor descr, MonoGCRootSource source, void *key, const char *msg)
{
	/* FIXME: do a single allocation */
	void *res = g_calloc (1, size);
	if (!res)
		return NULL;
	if (!mono_gc_register_root ((char *)res, size, descr, source, key, msg)) {
		g_free (res);
		res = NULL;
	}
	return (MonoObject*)res;
}

MonoObject*
mono_gc_alloc_fixed_no_descriptor (size_t size, MonoGCRootSource source, void *key, const char *msg)
{
	return mono_gc_alloc_fixed (size, 0, source, key, msg);
}

/**
 * mono_gc_free_fixed:
 */
void
mono_gc_free_fixed (void* addr)
{
	mono_gc_deregister_root ((char *)addr);
	g_free (addr);
}
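
/*
 * Usage sketch (hypothetical runtime-internal caller; the struct and root
 * source are assumptions for illustration): fixed allocations are
 * malloc-backed GC roots, so reference fields inside them keep their targets
 * alive and the block itself never moves:
 *
 *   typedef struct { MonoObject *cached; } Cache;
 *   Cache *c = (Cache *)mono_gc_alloc_fixed (sizeof (Cache), 0,
 *           MONO_ROOT_SOURCE_EXTERNAL, NULL, "illustrative cache root");
 *   ...
 *   mono_gc_free_fixed (c);  // must be paired, or the registered root leaks
 */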

/*
 * Managed allocator
 */

static MonoMethod* alloc_method_cache [ATYPE_NUM];
static MonoMethod* slowpath_alloc_method_cache [ATYPE_NUM];
static MonoMethod* profiler_alloc_method_cache [ATYPE_NUM];
static gboolean use_managed_allocator = TRUE;

#ifdef MANAGED_ALLOCATION

/* FIXME: Do this in the JIT, where specialized allocation sequences can be created
 * for each class. This is currently not easy to do, as it is hard to generate basic
 * blocks + branches, but it is easy with the linear IL codebase.
 *
 * For this to work we'd need to solve the TLAB race, first. Now we
 * require the allocator to be in a few known methods to make sure
 * that they are executed atomically via the restart mechanism.
 */
static MonoMethod*
create_allocator (int atype, ManagedAllocatorVariant variant)
{
	gboolean slowpath = variant == MANAGED_ALLOCATOR_SLOW_PATH;
	gboolean profiler = variant == MANAGED_ALLOCATOR_PROFILER;
	MonoMethodBuilder *mb;
	MonoMethod *res;
	MonoMethodSignature *csig;
	const char *name = NULL;
	WrapperInfo *info;
	int num_params, i;

	if (atype == ATYPE_SMALL) {
		name = slowpath ? "SlowAllocSmall" : (profiler ? "ProfilerAllocSmall" : "AllocSmall");
	} else if (atype == ATYPE_NORMAL) {
		name = slowpath ? "SlowAlloc" : (profiler ? "ProfilerAlloc" : "Alloc");
	} else if (atype == ATYPE_VECTOR) {
		name = slowpath ? "SlowAllocVector" : (profiler ? "ProfilerAllocVector" : "AllocVector");
	} else if (atype == ATYPE_STRING) {
		name = slowpath ? "SlowAllocString" : (profiler ? "ProfilerAllocString" : "AllocString");
	} else {
		g_assert_not_reached ();
	}

	if (atype == ATYPE_NORMAL)
		num_params = 1;
	else
		num_params = 2;

	MonoType *int_type = mono_get_int_type ();
	csig = mono_metadata_signature_alloc (mono_defaults.corlib, num_params);
	if (atype == ATYPE_STRING) {
		csig->ret = m_class_get_byval_arg (mono_defaults.string_class);
		csig->params [0] = int_type;
		csig->params [1] = mono_get_int32_type ();
	} else {
		csig->ret = mono_get_object_type ();
		for (i = 0; i < num_params; i++)
			csig->params [i] = int_type;
	}

	mb = mono_mb_new (mono_defaults.object_class, name, MONO_WRAPPER_ALLOC);

	get_sgen_mono_cb ()->emit_managed_allocator (mb, slowpath, profiler, atype);

	info = mono_wrapper_info_create (mb, WRAPPER_SUBTYPE_NONE);
	info->d.alloc.gc_name = "sgen";
	info->d.alloc.alloc_type = atype;

	res = mono_mb_create (mb, csig, 8, info);
	mono_mb_free (mb);

	return res;
}
#endif

int
mono_gc_get_aligned_size_for_allocator (int size)
{
	return SGEN_ALIGN_UP (size);
}

/*
 * Generate an allocator method implementing the fast path of mono_gc_alloc_obj ().
 * The signature of the called method is:
 * 	object allocate (MonoVTable *vtable)
 */
MonoMethod*
mono_gc_get_managed_allocator (MonoClass *klass, gboolean for_box, gboolean known_instance_size)
{
#ifdef MANAGED_ALLOCATION
	ManagedAllocatorVariant variant = mono_profiler_allocations_enabled () ?
		MANAGED_ALLOCATOR_PROFILER : MANAGED_ALLOCATOR_REGULAR;

	if (sgen_collect_before_allocs)
		return NULL;
	if (m_class_get_instance_size (klass) > sgen_tlab_size)
		return NULL;
	if (known_instance_size && ALIGN_TO (m_class_get_instance_size (klass), SGEN_ALLOC_ALIGN) >= SGEN_MAX_SMALL_OBJ_SIZE)
		return NULL;
	if (mono_class_has_finalizer (klass) || mono_class_is_marshalbyref (klass) || m_class_has_weak_fields (klass))
		return NULL;
	if (m_class_get_rank (klass))
		return NULL;
	if (m_class_get_byval_arg (klass)->type == MONO_TYPE_STRING)
		return mono_gc_get_managed_allocator_by_type (ATYPE_STRING, variant);
	/* Generic classes have dynamic field sizes and can go above MAX_SMALL_OBJ_SIZE. */
	if (known_instance_size)
		return mono_gc_get_managed_allocator_by_type (ATYPE_SMALL, variant);
	else
		return mono_gc_get_managed_allocator_by_type (ATYPE_NORMAL, variant);
#else
	return NULL;
#endif
}

MonoMethod*
mono_gc_get_managed_array_allocator (MonoClass *klass)
{
#ifdef MANAGED_ALLOCATION
	if (m_class_get_rank (klass) != 1)
		return NULL;
	if (sgen_has_per_allocation_action)
		return NULL;
	g_assert (!mono_class_has_finalizer (klass) && !mono_class_is_marshalbyref (klass));

	return mono_gc_get_managed_allocator_by_type (ATYPE_VECTOR, mono_profiler_allocations_enabled () ?
			MANAGED_ALLOCATOR_PROFILER : MANAGED_ALLOCATOR_REGULAR);
#else
	return NULL;
#endif
}

void
sgen_set_use_managed_allocator (gboolean flag)
{
	use_managed_allocator = flag;
}

MonoMethod*
mono_gc_get_managed_allocator_by_type (int atype, ManagedAllocatorVariant variant)
{
#ifdef MANAGED_ALLOCATION
	MonoMethod *res;
	MonoMethod **cache;

	if (variant != MANAGED_ALLOCATOR_SLOW_PATH && !use_managed_allocator)
		return NULL;

	switch (variant) {
	case MANAGED_ALLOCATOR_REGULAR: cache = alloc_method_cache; break;
	case MANAGED_ALLOCATOR_SLOW_PATH: cache = slowpath_alloc_method_cache; break;
	case MANAGED_ALLOCATOR_PROFILER: cache = profiler_alloc_method_cache; break;
	default: g_assert_not_reached (); break;
	}

	res = cache [atype];
	if (res)
		return res;

	res = create_allocator (atype, variant);
	LOCK_GC;
	if (cache [atype]) {
		mono_free_method (res);
		res = cache [atype];
	} else {
		mono_memory_barrier ();
		cache [atype] = res;
	}
	UNLOCK_GC;

	return res;
#else
	return NULL;
#endif
}

guint32
mono_gc_get_managed_allocator_types (void)
{
	return ATYPE_NUM;
}

gboolean
sgen_is_managed_allocator (MonoMethod *method)
{
	int i;

	for (i = 0; i < ATYPE_NUM; ++i)
		if (method == alloc_method_cache [i] || method == slowpath_alloc_method_cache [i] || method == profiler_alloc_method_cache [i])
			return TRUE;
	return FALSE;
}

gboolean
sgen_has_managed_allocator (void)
{
	int i;

	for (i = 0; i < ATYPE_NUM; ++i)
		if (alloc_method_cache [i] || slowpath_alloc_method_cache [i] || profiler_alloc_method_cache [i])
			return TRUE;
	return FALSE;
}

#define ARRAY_OBJ_INDEX(ptr,array,elem_size) (((char*)(ptr) - ((char*)(array) + G_STRUCT_OFFSET (MonoArray, vector))) / (elem_size))
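
/*
 * Worked example (illustrative numbers): if the vector starts 32 bytes into
 * the MonoArray and elements are 8 bytes wide, a pointer 96 bytes past the
 * array start maps to element (96 - 32) / 8 = 8. Card scanning below uses
 * this to turn a dirty-card address back into the first element it covers.
 */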

gboolean
sgen_client_cardtable_scan_object (GCObject *obj, guint8 *cards, ScanCopyContext ctx)
{
	MonoVTable *vt = SGEN_LOAD_VTABLE (obj);
	MonoClass *klass = vt->klass;

	SGEN_ASSERT (0, SGEN_VTABLE_HAS_REFERENCES (vt), "Why would we ever call this on reference-free objects?");

	if (vt->rank) {
		MonoArray *arr = (MonoArray*)obj;
		guint8 *card_data, *card_base;
		guint8 *card_data_end;
		char *obj_start = (char *)sgen_card_table_align_pointer (obj);
		mword bounds_size;
		mword obj_size = sgen_mono_array_size (vt, arr, &bounds_size, sgen_vtable_get_descriptor (vt));
		/* We don't want to scan the bounds entries at the end of multidimensional arrays */
		char *obj_end = (char*)obj + obj_size - bounds_size;
		size_t card_count;
		size_t extra_idx = 0;

		mword desc = (mword)m_class_get_gc_descr (m_class_get_element_class (klass));
		int elem_size = mono_array_element_size (klass);

#ifdef SGEN_OBJECT_LAYOUT_STATISTICS
		if (m_class_is_valuetype (m_class_get_element_class (klass)))
			sgen_object_layout_scanned_vtype_array ();
		else
			sgen_object_layout_scanned_ref_array ();
#endif

		if (cards)
			card_data = cards;
		else
			card_data = sgen_card_table_get_card_scan_address ((mword)obj);

		card_base = card_data;
		card_count = sgen_card_table_number_of_cards_in_range ((mword)obj, obj_size);

#ifdef SGEN_HAVE_OVERLAPPING_CARDS
LOOP_HEAD:
		card_data_end = card_base + card_count;

		/*
		 * Check for overflow and if so, scan only until the end of the shadow
		 * card table, leaving the rest for next iterations.
		 */
		if (!cards && card_data_end >= SGEN_SHADOW_CARDTABLE_END) {
			card_data_end = SGEN_SHADOW_CARDTABLE_END;
		}
		card_count -= (card_data_end - card_base);
#else
		card_data_end = card_data + card_count;
#endif

		card_data = sgen_find_next_card (card_data, card_data_end);
		for (; card_data < card_data_end; card_data = sgen_find_next_card (card_data + 1, card_data_end)) {
			size_t index;
			size_t idx = (card_data - card_base) + extra_idx;
			char *start = (char*)(obj_start + idx * CARD_SIZE_IN_BYTES);
			char *card_end = start + CARD_SIZE_IN_BYTES;
			char *first_elem, *elem;

			HEAVY_STAT (++los_marked_cards);

			if (!cards)
				sgen_card_table_prepare_card_for_scanning (card_data);

			card_end = MIN (card_end, obj_end);

			if (start <= (char*)arr->vector)
				index = 0;
			else
				index = ARRAY_OBJ_INDEX (start, obj, elem_size);

			elem = first_elem = (char*)mono_array_addr_with_size_fast ((MonoArray*)obj, elem_size, index);
			if (m_class_is_valuetype (m_class_get_element_class (klass))) {
				ScanVTypeFunc scan_vtype_func = ctx.ops->scan_vtype;

				for (; elem < card_end; elem += elem_size)
					scan_vtype_func (obj, elem, desc, ctx.queue BINARY_PROTOCOL_ARG (elem_size));
			} else {
				ScanPtrFieldFunc scan_ptr_field_func = ctx.ops->scan_ptr_field;

				HEAVY_STAT (++los_array_cards);
				for (; elem < card_end; elem += SIZEOF_VOID_P)
					scan_ptr_field_func (obj, (GCObject**)elem, ctx.queue);
			}

			sgen_binary_protocol_card_scan (first_elem, elem - first_elem);
		}

#ifdef SGEN_HAVE_OVERLAPPING_CARDS
		if (card_count > 0) {
			SGEN_ASSERT (0, card_data == SGEN_SHADOW_CARDTABLE_END, "Why didn't we stop at the shadow cardtable end?");
			extra_idx += card_data - card_base;
			card_base = card_data = sgen_shadow_cardtable;
			goto LOOP_HEAD;
		}
#endif
		return TRUE;
	}

	return FALSE;
}
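
/*
 * Worked example (illustrative numbers, assuming 512-byte cards): a dirty
 * card at index idx covers bytes [obj_start + idx * 512, obj_start + idx *
 * 512 + 512). For a reference array with 8-byte elements that is 512 / 8 =
 * 64 element scans per dirty card, instead of rescanning the whole array;
 * the shadow-table LOOP_HEAD above only handles the wrap-around case.
 */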

/*
 * Array and string allocation
 */

MonoArray*
mono_gc_alloc_vector (MonoVTable *vtable, size_t size, uintptr_t max_length)
{
	MonoArray *arr;
	TLAB_ACCESS_INIT;

	if (!SGEN_CAN_ALIGN_UP (size))
		return NULL;

#ifndef DISABLE_CRITICAL_REGION
	ENTER_CRITICAL_REGION;
	arr = (MonoArray*)sgen_try_alloc_obj_nolock (vtable, size);
	if (arr) {
		/* This doesn't require fencing since EXIT_CRITICAL_REGION already does it for us */
		arr->max_length = (mono_array_size_t)max_length;
		EXIT_CRITICAL_REGION;
		goto done;
	}
	EXIT_CRITICAL_REGION;
#endif

	LOCK_GC;

	arr = (MonoArray*)sgen_alloc_obj_nolock (vtable, size);
	if (G_UNLIKELY (!arr)) {
		UNLOCK_GC;
		return NULL;
	}

	arr->max_length = (mono_array_size_t)max_length;

	UNLOCK_GC;

 done:
	if (G_UNLIKELY (mono_profiler_allocations_enabled ()))
		MONO_PROFILER_RAISE (gc_allocation, (&arr->obj));

	SGEN_ASSERT (6, SGEN_ALIGN_UP (size) == SGEN_ALIGN_UP (sgen_client_par_object_get_size (vtable, (GCObject*)arr)), "Vector has incorrect size.");
	return arr;
}

MonoArray*
mono_gc_alloc_array (MonoVTable *vtable, size_t size, uintptr_t max_length, uintptr_t bounds_size)
{
	MonoArray *arr;
	MonoArrayBounds *bounds;
	TLAB_ACCESS_INIT;

	if (!SGEN_CAN_ALIGN_UP (size))
		return NULL;

#ifndef DISABLE_CRITICAL_REGION
	ENTER_CRITICAL_REGION;
	arr = (MonoArray*)sgen_try_alloc_obj_nolock (vtable, size);
	if (arr) {
		/* This doesn't require fencing since EXIT_CRITICAL_REGION already does it for us */
		arr->max_length = (mono_array_size_t)max_length;

		bounds = (MonoArrayBounds*)((char*)arr + size - bounds_size);
		arr->bounds = bounds;
		EXIT_CRITICAL_REGION;
		goto done;
	}
	EXIT_CRITICAL_REGION;
#endif

	LOCK_GC;

	arr = (MonoArray*)sgen_alloc_obj_nolock (vtable, size);
	if (G_UNLIKELY (!arr)) {
		UNLOCK_GC;
		return NULL;
	}

	arr->max_length = (mono_array_size_t)max_length;

	bounds = (MonoArrayBounds*)((char*)arr + size - bounds_size);
	arr->bounds = bounds;

	UNLOCK_GC;

 done:
	if (G_UNLIKELY (mono_profiler_allocations_enabled ()))
		MONO_PROFILER_RAISE (gc_allocation, (&arr->obj));

	SGEN_ASSERT (6, SGEN_ALIGN_UP (size) == SGEN_ALIGN_UP (sgen_client_par_object_get_size (vtable, (GCObject*)arr)), "Array has incorrect size.");
	return arr;
}

MonoString*
mono_gc_alloc_string (MonoVTable *vtable, size_t size, gint32 len)
{
	MonoString *str;
	TLAB_ACCESS_INIT;

	if (!SGEN_CAN_ALIGN_UP (size))
		return NULL;

#ifndef DISABLE_CRITICAL_REGION
	ENTER_CRITICAL_REGION;
	str = (MonoString*)sgen_try_alloc_obj_nolock (vtable, size);
	if (str) {
		/* This doesn't require fencing since EXIT_CRITICAL_REGION already does it for us */
		str->length = len;
		EXIT_CRITICAL_REGION;
		goto done;
	}
	EXIT_CRITICAL_REGION;
#endif

	LOCK_GC;

	str = (MonoString*)sgen_alloc_obj_nolock (vtable, size);
	if (G_UNLIKELY (!str)) {
		UNLOCK_GC;
		return NULL;
	}

	str->length = len;

	UNLOCK_GC;

 done:
	if (G_UNLIKELY (mono_profiler_allocations_enabled ()))
		MONO_PROFILER_RAISE (gc_allocation, (&str->object));

	return str;
}

/*
 * Strings
 */

void
mono_gc_set_string_length (MonoString *str, gint32 new_length)
{
	mono_unichar2 *new_end = str->chars + new_length;

	/* Zero the discarded part of the string. This null-delimits the string and
	 * allows the space to be reclaimed by SGen. */

	if (sgen_nursery_canaries_enabled () && sgen_ptr_in_nursery (str)) {
		CHECK_CANARY_FOR_OBJECT ((GCObject*)str, TRUE);
		memset (new_end, 0, (str->length - new_length + 1) * sizeof (mono_unichar2) + CANARY_SIZE);
		memcpy (new_end + 1, CANARY_STRING, CANARY_SIZE);
	} else {
		memset (new_end, 0, (str->length - new_length + 1) * sizeof (mono_unichar2));
	}

	str->length = new_length;
}

/*
 * Profiling
 */

#define GC_ROOT_NUM 32
#define SPECIAL_ADDRESS_FIN_QUEUE ((mono_byte*)1)
#define SPECIAL_ADDRESS_CRIT_FIN_QUEUE ((mono_byte*)2)
#define SPECIAL_ADDRESS_EPHEMERON ((mono_byte*)3)
#define SPECIAL_ADDRESS_TOGGLEREF ((mono_byte*)4)

typedef struct {
	int count;		/* must be the first field */
	void *addresses [GC_ROOT_NUM];
	void *objects [GC_ROOT_NUM];
} GCRootReport;

static void
notify_gc_roots (GCRootReport *report)
{
	if (!report->count)
		return;
	MONO_PROFILER_RAISE (gc_roots, (report->count, (const mono_byte *const *)report->addresses, (MonoObject *const *) report->objects));
	report->count = 0;
}

static void
report_gc_root (GCRootReport *report, void *address, void *object)
{
	if (report->count == GC_ROOT_NUM)
		notify_gc_roots (report);
	report->addresses [report->count] = address;
	report->objects [report->count] = object;
	report->count++;
}

static void
single_arg_report_root (MonoObject **obj, void *gc_data)
{
	GCRootReport *report = (GCRootReport*)gc_data;
	if (*obj)
		report_gc_root (report, obj, *obj);
}

static void
two_args_report_root (void *address, MonoObject *obj, void *gc_data)
{
	GCRootReport *report = (GCRootReport*)gc_data;
	if (obj)
		report_gc_root (report, address, obj);
}

static void
precisely_report_roots_from (GCRootReport *report, void** start_root, void** end_root, mword desc)
{
	switch (desc & ROOT_DESC_TYPE_MASK) {
	case ROOT_DESC_BITMAP:
		desc >>= ROOT_DESC_TYPE_SHIFT;
		while (desc) {
			if ((desc & 1) && *start_root)
				report_gc_root (report, start_root, *start_root);
			desc >>= 1;
			start_root++;
		}
		return;
	case ROOT_DESC_COMPLEX: {
		gsize *bitmap_data = (gsize *)sgen_get_complex_descriptor_bitmap (desc);
		gsize bwords = (*bitmap_data) - 1;
		void **start_run = start_root;
		bitmap_data++;
		while (bwords-- > 0) {
			gsize bmap = *bitmap_data++;
			void **objptr = start_run;
			while (bmap) {
				if ((bmap & 1) && *objptr)
					report_gc_root (report, objptr, *objptr);
				bmap >>= 1;
				++objptr;
			}
			start_run += GC_BITS_PER_WORD;
		}
		break;
	}
	case ROOT_DESC_VECTOR: {
		void **p;

		for (p = start_root; p < end_root; p++) {
			if (*p)
				report_gc_root (report, p, *p);
		}
		break;
	}
	case ROOT_DESC_USER: {
		MonoGCRootMarkFunc marker = (MonoGCRootMarkFunc)sgen_get_user_descriptor_func (desc);

		if ((void*)marker == (void*)sgen_mark_normal_gc_handles)
			sgen_gc_handles_report_roots (two_args_report_root, report);
		else
			marker ((MonoObject**)start_root, single_arg_report_root, report);
		break;
	}
	case ROOT_DESC_RUN_LEN:
		g_assert_not_reached ();
	default:
		g_assert_not_reached ();
	}
}
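
/*
 * Worked example for ROOT_DESC_BITMAP (illustrative value): after shifting
 * out the type tag, a descriptor whose remaining bits are binary ...101
 * means "slot 0 and slot 2 of the root area hold references"; the loop walks
 * one bit per pointer-sized slot and reports start_root[0] and start_root[2]
 * if they are non-NULL.
 */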

static void
report_pinning_roots (GCRootReport *report, void **start, void **end)
{
	while (start < end) {
		mword addr = (mword)*start;
		addr &= ~(SGEN_ALLOC_ALIGN - 1);
		if (addr)
			report_gc_root (report, start, (void*)addr);

		start++;
	}
}

static SgenPointerQueue pinned_objects = SGEN_POINTER_QUEUE_INIT (INTERNAL_MEM_MOVED_OBJECT);
static mword lower_bound, upper_bound;

static GCObject*
find_pinned_obj (char *addr)
{
	size_t idx = sgen_pointer_queue_search (&pinned_objects, addr);

	if (idx != pinned_objects.next_slot) {
		if (pinned_objects.data [idx] == addr)
			return (GCObject*)pinned_objects.data [idx];
		if (idx == 0)
			return NULL;
	}

	GCObject *obj = (GCObject*)pinned_objects.data [idx - 1];
	if (addr > (char*)obj && addr < ((char*)obj + sgen_safe_object_get_size (obj)))
		return obj;
	return NULL;
}

/*
 * We pass @root_report_address so registers are properly accounted towards their thread.
 */
static void
report_conservative_roots (GCRootReport *report, void *root_report_address, void **start, void **end)
{
	while (start < end) {
		mword addr = (mword)*start;
		addr &= ~(SGEN_ALLOC_ALIGN - 1);

		if (addr < lower_bound || addr > upper_bound) {
			++start;
			continue;
		}

		GCObject *obj = find_pinned_obj ((char*)addr);
		if (obj)
			report_gc_root (report, root_report_address, obj);
		start++;
	}
}

typedef struct {
	gboolean precise;
	GCRootReport *report;
	SgenThreadInfo *info;
} ReportHandleStackRoot;

static void
report_handle_stack_root (gpointer *ptr, gpointer user_data)
{
	ReportHandleStackRoot *ud = (ReportHandleStackRoot*)user_data;
	GCRootReport *report = ud->report;
	gpointer addr = ud->info->client_info.info.handle_stack;

	// Note: We know that *ptr != NULL.
	if (ud->precise)
		report_gc_root (report, addr, *ptr);
	else
		report_conservative_roots (report, addr, ptr, ptr + 1);
}

static void
report_handle_stack_roots (GCRootReport *report, SgenThreadInfo *info, gboolean precise)
{
	ReportHandleStackRoot ud;
	memset (&ud, 0, sizeof (ud));
	ud.precise = precise;
	ud.report = report;
	ud.info = info;

	mono_handle_stack_scan (info->client_info.info.handle_stack, report_handle_stack_root, &ud, ud.precise, FALSE);
}

static void*
get_aligned_stack_start (SgenThreadInfo *info)
{
	void* aligned_stack_start = (void*)(mword) ALIGN_TO ((mword)info->client_info.stack_start, SIZEOF_VOID_P);
#if _WIN32
	// Due to the guard page mechanism providing gradual commit of Windows stacks,
	// stack pages must be touched in order.
	//
	// This mechanism is only transparent (the kernel handles page faults and the user
	// never sees them) for the thread touching its own stack, not for cross-thread
	// stack references as are being done here.
	//
	// Here is a small program that demonstrates the behavior:
	//
	// #include <windows.h>
	// #include <stdio.h>
	//
	// #pragma optimize ("x", on)
	//
	// int volatile * volatile Event1;
	// int volatile Event2;
	// HANDLE ThreadHandle;
	//
	// DWORD __stdcall thread (void* x)
	// {
	//	while (!Event1)
	//		_mm_pause ();
	//
	//	__try {
	//		*Event1 = 0x123;
	//	} __except (GetExceptionCode () == STATUS_GUARD_PAGE_VIOLATION) {
	//		printf ("oops\n");
	//	}
	//	Event2 = 1;
	//	return 0;
	// }
	//
	// int unlucky;
	// int print = 1;
	//
	// __declspec (noinline)
	// __declspec (safebuffers)
	// void f (void)
	// {
	//	int local [5];
	//
	//	while (unlucky && ((size_t)_AddressOfReturnAddress () - 8) & 0xFFF)
	//		f ();
	//
	//	unlucky = 0;
	//	Event1 = local;
	//
	//	while (!Event2)
	//		_mm_pause ();
	//
	//	if (print) {
	//		printf ("%X\n", local [0]);
	//		print = 0;
	//	}
	//
	//	if (ThreadHandle) {
	//		WaitForSingleObject (ThreadHandle, INFINITE);
	//		ThreadHandle = NULL;
	//	}
	// }
	//
	// int main (int argc, char** argv)
	// {
	//	unlucky = argc > 1;
	//	ThreadHandle = CreateThread (0, 0, thread, 0, 0, 0);
	//	f ();
	// }
	//
	// This would seem to be a problem otherwise, not just for garbage collectors.
	//
	// We therefore have a few choices:
	//
	// 1. Historical slow code: VirtualQuery and check for guard page. Slow.
	//
	// MEMORY_BASIC_INFORMATION mem_info;
	// SIZE_T result = VirtualQuery (info->client_info.stack_start, &mem_info, sizeof(mem_info));
	// g_assert (result != 0);
	// if (mem_info.Protect & PAGE_GUARD) {
	//	aligned_stack_start = ((char*) mem_info.BaseAddress) + mem_info.RegionSize;
	// }
	//
	// VirtualQuery was historically not allowed in UWP, but it is now.
	//
	// 2. Touch the page under __try / __except and handle STATUS_GUARD_PAGE_VIOLATION.
	// Good but compiler specific.
	//
	// __try {
	//	*(volatile char*)aligned_stack_start;
	// } __except (GetExceptionCode () == STATUS_GUARD_PAGE_VIOLATION) {
	//	MEMORY_BASIC_INFORMATION mem_info;
	//	const SIZE_T result = VirtualQuery(aligned_stack_start, &mem_info, sizeof(mem_info));
	//	g_assert (result >= sizeof (mem_info));
	//	VirtualProtect (aligned_stack_start, 1, mem_info.Protect | PAGE_GUARD, &mem_info.Protect);
	// }
	//
	// 3. Vectored exception handler. Not terrible. Not compiler specific.
	//
	// 4. Check against the high watermark in the TIB. That is what is done here.
	// The TIB is the public prefix of the TEB; it is in Windows.h, ntddk.h, etc.
	aligned_stack_start = MAX (aligned_stack_start, info->client_info.info.windows_tib->StackLimit);
#endif
	return aligned_stack_start;
}

static void
report_stack_roots (void)
{
	GCRootReport report = {0};
	FOREACH_THREAD_EXCLUDE (info, MONO_THREAD_INFO_FLAGS_NO_GC) {
		void *aligned_stack_start;

		if (info->client_info.skip) {
			continue;
		} else if (!mono_thread_info_is_live (info)) {
			continue;
		} else if (!info->client_info.stack_start) {
			continue;
		}

		g_assert (info->client_info.stack_start);
		g_assert (info->client_info.info.stack_end);

		aligned_stack_start = get_aligned_stack_start (info);
		g_assert (info->client_info.suspend_done);

		report_conservative_roots (&report, aligned_stack_start, (void **)aligned_stack_start, (void **)info->client_info.info.stack_end);
		report_conservative_roots (&report, aligned_stack_start, (void**)&info->client_info.ctx, (void**)(&info->client_info.ctx + 1));

		report_handle_stack_roots (&report, info, FALSE);
		report_handle_stack_roots (&report, info, TRUE);
	} FOREACH_THREAD_END

	notify_gc_roots (&report);
}

static void
report_pin_queue (void)
{
	lower_bound = SIZE_MAX;
	upper_bound = 0;

	// Sort the addresses.
	sgen_pointer_queue_sort_uniq (&pinned_objects);

	for (int i = 0; i < pinned_objects.next_slot; ++i) {
		GCObject *obj = (GCObject*)pinned_objects.data [i];
		ssize_t size = sgen_safe_object_get_size (obj);

		ssize_t addr = (ssize_t)obj;
		lower_bound = MIN (lower_bound, addr);
		upper_bound = MAX (upper_bound, addr + size);
	}

	report_stack_roots ();
	sgen_pointer_queue_clear (&pinned_objects);
}

static void
report_finalizer_roots_from_queue (SgenPointerQueue *queue, void* queue_address)
{
	GCRootReport report;
	size_t i;

	report.count = 0;
	for (i = 0; i < queue->next_slot; ++i) {
		void *obj = queue->data [i];
		if (!obj)
			continue;
		report_gc_root (&report, queue_address, obj);
	}
	notify_gc_roots (&report);
}

static void
report_registered_roots_by_type (int root_type)
{
	GCRootReport report = { 0 };
	void **start_root;
	RootRecord *root;
	report.count = 0;
	SGEN_HASH_TABLE_FOREACH (&sgen_roots_hash [root_type], void **, start_root, RootRecord *, root) {
		SGEN_LOG (6, "Profiler root scan %p-%p (desc: %p)", start_root, root->end_root, (void*)(intptr_t)root->root_desc);
		if (root_type == ROOT_TYPE_PINNED)
			report_pinning_roots (&report, start_root, (void**)root->end_root);
		else
			precisely_report_roots_from (&report, start_root, (void**)root->end_root, root->root_desc);
	} SGEN_HASH_TABLE_FOREACH_END;
	notify_gc_roots (&report);
}

static void
report_registered_roots (void)
{
	for (int i = 0; i < ROOT_TYPE_NUM; ++i)
		report_registered_roots_by_type (i);
}

static void
report_ephemeron_roots (void)
{
	EphemeronLinkNode *current = ephemeron_list;
	Ephemeron *cur, *array_end;
	GCObject *tombstone;
	GCRootReport report = { 0 };

	for (current = ephemeron_list; current; current = current->next) {
		MonoArray *array = current->array;

		if (!sgen_is_object_alive_for_current_gen ((GCObject*)array))
			continue;

		cur = mono_array_addr_internal (array, Ephemeron, 0);
		array_end = cur + mono_array_length_internal (array);
		tombstone = SGEN_LOAD_VTABLE ((GCObject*)array)->domain->ephemeron_tombstone;

		for (; cur < array_end; ++cur) {
			GCObject *key = cur->key;

			if (!key || key == tombstone)
				continue;

			if (cur->value && sgen_is_object_alive_for_current_gen (key))
				report_gc_root (&report, SPECIAL_ADDRESS_EPHEMERON, cur->value);
		}
	}

	notify_gc_roots (&report);
}

static void
report_toggleref_root (MonoObject* obj, gpointer data)
{
	report_gc_root ((GCRootReport*)data, SPECIAL_ADDRESS_TOGGLEREF, obj);
}

static void
report_toggleref_roots (void)
{
	GCRootReport report = { 0 };
	sgen_foreach_toggleref_root (report_toggleref_root, &report);
	notify_gc_roots (&report);
}

static void
sgen_report_all_roots (SgenPointerQueue *fin_ready_queue, SgenPointerQueue *critical_fin_queue)
{
	if (!MONO_PROFILER_ENABLED (gc_roots))
		return;

	report_registered_roots ();
	report_ephemeron_roots ();
	report_toggleref_roots ();
	report_pin_queue ();
	report_finalizer_roots_from_queue (fin_ready_queue, SPECIAL_ADDRESS_FIN_QUEUE);
	report_finalizer_roots_from_queue (critical_fin_queue, SPECIAL_ADDRESS_CRIT_FIN_QUEUE);
}

void
sgen_client_pinning_start (void)
{
	if (!MONO_PROFILER_ENABLED (gc_roots))
		return;

	sgen_pointer_queue_clear (&pinned_objects);
}

void
sgen_client_pinning_end (void)
{
	if (!MONO_PROFILER_ENABLED (gc_roots))
		return;
}

void
sgen_client_nursery_objects_pinned (void **definitely_pinned, int count)
{
	if (!MONO_PROFILER_ENABLED (gc_roots))
		return;

	for (int i = 0; i < count; ++i)
		sgen_pointer_queue_add (&pinned_objects, definitely_pinned [i]);
}

void
sgen_client_pinned_los_object (GCObject *obj)
{
	if (!MONO_PROFILER_ENABLED (gc_roots))
		return;

	sgen_pointer_queue_add (&pinned_objects, obj);
}

void
sgen_client_pinned_cemented_object (GCObject *obj)
{
	if (!MONO_PROFILER_ENABLED (gc_roots))
		return;

	// TODO: How do we report this in a way that makes sense?
}

void
sgen_client_pinned_major_heap_object (GCObject *obj)
{
	if (!MONO_PROFILER_ENABLED (gc_roots))
		return;

	sgen_pointer_queue_add (&pinned_objects, obj);
}

void
sgen_client_collecting_minor_report_roots (SgenPointerQueue *fin_ready_queue, SgenPointerQueue *critical_fin_queue)
{
	sgen_report_all_roots (fin_ready_queue, critical_fin_queue);
}

void
sgen_client_collecting_major_report_roots (SgenPointerQueue *fin_ready_queue, SgenPointerQueue *critical_fin_queue)
{
	sgen_report_all_roots (fin_ready_queue, critical_fin_queue);
}
1979 #define MOVED_OBJECTS_NUM 64
1980 static void *moved_objects [MOVED_OBJECTS_NUM];
1981 static int moved_objects_idx = 0;
1983 static SgenPointerQueue moved_objects_queue = SGEN_POINTER_QUEUE_INIT (INTERNAL_MEM_MOVED_OBJECT);
void
mono_sgen_register_moved_object (void *obj, void *destination)
{
	/*
	 * This function can be called from SGen's worker threads. We want to try
	 * and avoid exposing those threads to the profiler API, so queue up move
	 * events and send them later when the main GC thread calls
	 * mono_sgen_gc_event_moves ().
	 *
	 * TODO: Once SGen has multiple worker threads, we need to switch to a
	 * lock-free data structure for the queue as multiple threads will be
	 * adding to it at the same time.
	 */
	if (sgen_workers_is_worker_thread (mono_native_thread_id_get ())) {
		sgen_pointer_queue_add (&moved_objects_queue, obj);
		sgen_pointer_queue_add (&moved_objects_queue, destination);
	} else {
		if (moved_objects_idx == MOVED_OBJECTS_NUM) {
			MONO_PROFILER_RAISE (gc_moves, ((MonoObject **) moved_objects, moved_objects_idx));
			moved_objects_idx = 0;
		}

		moved_objects [moved_objects_idx++] = obj;
		moved_objects [moved_objects_idx++] = destination;
	}
}

void
mono_sgen_gc_event_moves (void)
{
	while (!sgen_pointer_queue_is_empty (&moved_objects_queue)) {
		void *dst = sgen_pointer_queue_pop (&moved_objects_queue);
		void *src = sgen_pointer_queue_pop (&moved_objects_queue);

		mono_sgen_register_moved_object (src, dst);
	}

	if (moved_objects_idx) {
		MONO_PROFILER_RAISE (gc_moves, ((MonoObject **) moved_objects, moved_objects_idx));
		moved_objects_idx = 0;
	}
}

/*
 * Heap walking
 */

#define REFS_SIZE 128
typedef struct {
	void *data;
	MonoGCReferences callback;
	int flags;
	int count;
	int called;
	MonoObject *refs [REFS_SIZE];
	uintptr_t offsets [REFS_SIZE];
} HeapWalkInfo;

#undef HANDLE_PTR
#define HANDLE_PTR(ptr,obj)	do {	\
	if (*(ptr)) {	\
		if (hwi->count == REFS_SIZE) {	\
			hwi->callback ((MonoObject*)start, mono_object_class (start), hwi->called? 0: size, hwi->count, hwi->refs, hwi->offsets, hwi->data);	\
			hwi->count = 0;	\
			hwi->called = 1;	\
		}	\
		hwi->offsets [hwi->count] = (char*)(ptr)-(char*)start;	\
		hwi->refs [hwi->count++] = *(ptr);	\
	}	\
	} while (0)

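/*
 * Note: HANDLE_PTR buffers up to REFS_SIZE references per object and flushes
 * them through the user callback when the buffer fills; only the first
 * callback for a given object reports its size, later flushes pass 0 (see
 * mono_gc_walk_heap below).
 */
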
static void
collect_references (HeapWalkInfo *hwi, GCObject *obj, size_t size)
{
	char *start = (char*)obj;
	mword desc = sgen_obj_get_descriptor (obj);

#include "sgen/sgen-scan-object.h"
}

static void
walk_references (GCObject *start, size_t size, void *data)
{
	HeapWalkInfo *hwi = (HeapWalkInfo *)data;
	hwi->called = 0;
	hwi->count = 0;
	collect_references (hwi, start, size);
	if (hwi->count || !hwi->called)
		hwi->callback (start, mono_object_class (start), hwi->called? 0: size, hwi->count, hwi->refs, hwi->offsets, hwi->data);
}

/**
 * mono_gc_walk_heap:
 * \param flags flags for future use
 * \param callback a function pointer called for each object in the heap
 * \param data a user data pointer that is passed to callback
 * This function can be used to iterate over all the live objects in the heap;
 * for each object, \p callback is invoked, providing info about the object's
 * location in memory, its class, its size and the objects it references.
 * For each referenced object its offset from the object address is
 * reported in the offsets array.
 * The object references may be buffered, so the callback may be invoked
 * multiple times for the same object: in all but the first call, the size
 * argument will be zero.
 * Note that this function can only be called in the \c MONO_GC_EVENT_PRE_START_WORLD
 * profiler event handler.
 * \returns a non-zero value if the GC doesn't support heap walking
 */
int
mono_gc_walk_heap (int flags, MonoGCReferences callback, void *data)
{
	HeapWalkInfo hwi;

	hwi.flags = flags;
	hwi.callback = callback;
	hwi.data = data;

	sgen_clear_nursery_fragments ();
	sgen_scan_area_with_callback (sgen_nursery_section->data, sgen_nursery_section->end_data, walk_references, &hwi, FALSE, TRUE);

	sgen_major_collector.iterate_objects (ITERATE_OBJECTS_SWEEP_ALL, walk_references, &hwi);
	sgen_los_iterate_objects (walk_references, &hwi);

	return 0;
}

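/*
 * Illustrative heap-walk callback (a sketch, not part of the runtime;
 * `count_refs_cb` and `total_refs` are hypothetical names). It simply
 * accumulates the number of references reported for each object; the
 * parameter list mirrors the callback invocation in HANDLE_PTR above:
 *
 *	static int
 *	count_refs_cb (MonoObject *obj, MonoClass *klass, uintptr_t size, uintptr_t num, MonoObject **refs, uintptr_t *offsets, void *data)
 *	{
 *		*(size_t *)data += num;
 *		return 0;
 *	}
 *
 *	// from a MONO_GC_EVENT_PRE_START_WORLD profiler event handler:
 *	size_t total_refs = 0;
 *	mono_gc_walk_heap (0, count_refs_cb, &total_refs);
 */
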
/*
 * Threads
 */

void
mono_gc_set_gc_callbacks (MonoGCCallbacks *callbacks)
{
	gc_callbacks = *callbacks;
}

MonoGCCallbacks *
mono_gc_get_gc_callbacks ()
{
	return &gc_callbacks;
}

gpointer
mono_gc_thread_attach (SgenThreadInfo *info)
{
	return sgen_thread_attach (info);
}

void
sgen_client_thread_attach (SgenThreadInfo* info)
{
	mono_tls_set_sgen_thread_info (info);

	info->client_info.skip = FALSE;

	info->client_info.stack_start = NULL;

#ifdef SGEN_POSIX_STW
	info->client_info.stop_count = -1;
	info->client_info.signal = 0;
#endif

	memset (&info->client_info.ctx, 0, sizeof (MonoContext));

	if (mono_gc_get_gc_callbacks ()->thread_attach_func)
		info->client_info.runtime_data = mono_gc_get_gc_callbacks ()->thread_attach_func ();

	sgen_binary_protocol_thread_register ((gpointer)mono_thread_info_get_tid (info));

	SGEN_LOG (3, "registered thread %p (%p) stack end %p", info, (gpointer)mono_thread_info_get_tid (info), info->client_info.info.stack_end);

	info->client_info.info.handle_stack = mono_handle_stack_alloc ();
}

void
mono_gc_thread_detach (SgenThreadInfo *info)
{
}

void
mono_gc_thread_detach_with_lock (SgenThreadInfo *info)
{
	sgen_thread_detach_with_lock (info);
}

void
sgen_client_thread_detach_with_lock (SgenThreadInfo *p)
{
	MonoNativeThreadId tid;

	mono_tls_set_sgen_thread_info (NULL);

	sgen_increment_bytes_allocated_detached (p->total_bytes_allocated);

	tid = mono_thread_info_get_tid (p);

	mono_threads_add_joinable_runtime_thread (&p->client_info.info);

	if (mono_gc_get_gc_callbacks ()->thread_detach_func) {
		mono_gc_get_gc_callbacks ()->thread_detach_func (p->client_info.runtime_data);
		p->client_info.runtime_data = NULL;
	}

	sgen_binary_protocol_thread_unregister ((gpointer)tid);
	SGEN_LOG (3, "unregister thread %p (%p)", p, (gpointer)tid);

	HandleStack *handles = p->client_info.info.handle_stack;
	p->client_info.info.handle_stack = NULL;
	mono_handle_stack_free (handles);
}

void
mono_gc_skip_thread_changing (gboolean skip)
{
	/*
	 * SGen's STW will respect the thread info flags, but we do need to take
	 * the GC lock when changing them. If we don't do this, SGen might end up
	 * trying to resume a thread that wasn't suspended because it had
	 * MONO_THREAD_INFO_FLAGS_NO_GC set when STW began.
	 */
	LOCK_GC;

	if (skip) {
		/*
		 * If we skip scanning a thread with a non-empty handle stack, we may move an
		 * object but fail to update the reference in the handle.
		 */
		HandleStack *stack = mono_thread_info_current ()->client_info.info.handle_stack;
		g_assert (stack == NULL || mono_handle_stack_is_empty (stack));
	}
}

void
mono_gc_skip_thread_changed (gboolean skip)
{
	UNLOCK_GC;
}

gboolean
mono_gc_thread_in_critical_region (SgenThreadInfo *info)
{
	return info->client_info.in_critical_region;
}

/**
 * mono_gc_is_gc_thread:
 */
gboolean
mono_gc_is_gc_thread (void)
{
	gboolean result;
	LOCK_GC;
	result = mono_thread_info_current () != NULL;
	UNLOCK_GC;
	return result;
}

void
sgen_client_thread_register_worker (void)
{
	mono_thread_info_register_small_id ();
	mono_native_thread_set_name (mono_native_thread_id_get (), "SGen worker");
	mono_thread_set_name_windows (GetCurrentThread (), L"SGen worker");
}

/* Variables holding start/end nursery so it won't have to be passed at every call */
static void *scan_area_arg_start, *scan_area_arg_end;

void
mono_gc_conservatively_scan_area (void *start, void *end)
{
	sgen_conservatively_pin_objects_from ((void **)start, (void **)end, scan_area_arg_start, scan_area_arg_end, PIN_TYPE_STACK);
}

void*
mono_gc_scan_object (void *obj, void *gc_data)
{
	ScanCopyContext *ctx = (ScanCopyContext *)gc_data;
	ctx->ops->copy_or_mark_object ((GCObject**)&obj, ctx->queue);
	return obj;
}

typedef struct {
	void **start_nursery;
	void **end_nursery;
} PinHandleStackInteriorPtrData;

/* Called when we're scanning the handle stack imprecisely and we encounter a pointer into the
   middle of an object.
 */
static void
pin_handle_stack_interior_ptrs (void **ptr_slot, void *user_data)
{
	PinHandleStackInteriorPtrData *ud = (PinHandleStackInteriorPtrData *)user_data;
	sgen_conservatively_pin_objects_from (ptr_slot, ptr_slot+1, ud->start_nursery, ud->end_nursery, PIN_TYPE_STACK);
}

#ifdef HOST_WASM
extern gboolean mono_wasm_enable_gc;
#endif

/*
 * Mark from thread stacks and registers.
 */
void
sgen_client_scan_thread_data (void *start_nursery, void *end_nursery, gboolean precise, ScanCopyContext ctx)
{
	scan_area_arg_start = start_nursery;
	scan_area_arg_end = end_nursery;
#ifdef HOST_WASM
	//Under WASM we don't scan thread stacks and we can't trust the values we find there either.
	if (!mono_wasm_enable_gc)
		return;
#endif

	SGEN_TV_DECLARE (scan_thread_data_start);
	SGEN_TV_DECLARE (scan_thread_data_end);

	SGEN_TV_GETTIME (scan_thread_data_start);

	if (gc_callbacks.interp_mark_func)
		/* The interpreter code uses only compiler write barriers so have to synchronize with it */
		mono_memory_barrier_process_wide ();

	FOREACH_THREAD_EXCLUDE (info, MONO_THREAD_INFO_FLAGS_NO_GC) {
		int skip_reason = 0;
		void *aligned_stack_start;

		if (info->client_info.skip) {
			SGEN_LOG (3, "Skipping dead thread %p, range: %p-%p, size: %" G_GSIZE_FORMAT "d", info, info->client_info.stack_start, info->client_info.info.stack_end, (char*)info->client_info.info.stack_end - (char*)info->client_info.stack_start);
			skip_reason = 1;
		} else if (!mono_thread_info_is_live (info)) {
			SGEN_LOG (3, "Skipping non-running thread %p, range: %p-%p, size: %" G_GSIZE_FORMAT "d (state %x)", info, info->client_info.stack_start, info->client_info.info.stack_end, (char*)info->client_info.info.stack_end - (char*)info->client_info.stack_start, info->client_info.info.thread_state.raw);
			skip_reason = 3;
		} else if (!info->client_info.stack_start) {
			SGEN_LOG (3, "Skipping starting or detaching thread %p", info);
			skip_reason = 4;
		}

		sgen_binary_protocol_scan_stack ((gpointer)mono_thread_info_get_tid (info), info->client_info.stack_start, info->client_info.info.stack_end, skip_reason);

		if (skip_reason) {
			if (precise) {
				/* If we skip a thread with a non-empty handle stack and then it
				 * resumes running we may potentially move an object but fail to
				 * update the reference in the handle.
				 */
				HandleStack *stack = info->client_info.info.handle_stack;
				g_assert (stack == NULL || mono_handle_stack_is_empty (stack));
			}
			continue;
		}

		g_assert (info->client_info.stack_start);
		g_assert (info->client_info.info.stack_end);

		aligned_stack_start = get_aligned_stack_start (info);
		g_assert (info->client_info.suspend_done);
		SGEN_LOG (3, "Scanning thread %p, range: %p-%p, size: %" G_GSIZE_FORMAT "d, pinned=%" G_GSIZE_FORMAT "d", info, info->client_info.stack_start, info->client_info.info.stack_end, (char*)info->client_info.info.stack_end - (char*)info->client_info.stack_start, sgen_get_pinned_count ());
		if (mono_gc_get_gc_callbacks ()->thread_mark_func && !conservative_stack_mark) {
			mono_gc_get_gc_callbacks ()->thread_mark_func (info->client_info.runtime_data, (guint8 *)aligned_stack_start, (guint8 *)info->client_info.info.stack_end, precise, &ctx);
		} else if (!precise) {
			if (!conservative_stack_mark) {
				fprintf (stderr, "Precise stack mark not supported - disabling.\n");
				conservative_stack_mark = TRUE;
			}
			//FIXME we should eventually use the new stack_mark from coop
			sgen_conservatively_pin_objects_from ((void **)aligned_stack_start, (void **)info->client_info.info.stack_end, start_nursery, end_nursery, PIN_TYPE_STACK);
		}

		if (!precise) {
			sgen_conservatively_pin_objects_from ((void**)&info->client_info.ctx, (void**)(&info->client_info.ctx + 1),
				start_nursery, end_nursery, PIN_TYPE_STACK);

			{
				// This is used on Coop GC for platforms where we cannot get the data for individual registers.
				// We force a spill of all registers into the stack and pass a chunk of data into sgen.
				//FIXME under coop, for now, what we need to ensure is that we scan any extra memory from info->client_info.info.stack_end to stack_mark
				MonoThreadUnwindState *state = &info->client_info.info.thread_saved_state [SELF_SUSPEND_STATE_INDEX];
				if (state && state->gc_stackdata) {
					sgen_conservatively_pin_objects_from ((void **)state->gc_stackdata, (void**)((char*)state->gc_stackdata + state->gc_stackdata_size),
						start_nursery, end_nursery, PIN_TYPE_STACK);
				}
			}
		}
		if (gc_callbacks.interp_mark_func) {
			PinHandleStackInteriorPtrData ud;
			memset (&ud, 0, sizeof (ud));
			ud.start_nursery = (void**)start_nursery;
			ud.end_nursery = (void**)end_nursery;
			SGEN_LOG (3, "Scanning thread %p interp stack", info);
			gc_callbacks.interp_mark_func (&info->client_info.info, pin_handle_stack_interior_ptrs, &ud, precise);
		}
		if (info->client_info.info.handle_stack) {
			/*
			  Make two passes over the handle stack. On the imprecise pass, pin all
			  objects where the handle points into the interior of the object. On the
			  precise pass, copy or mark all the objects that have handles to the
			  beginning of the object.
			*/
			if (precise)
				mono_handle_stack_scan (info->client_info.info.handle_stack, (GcScanFunc)ctx.ops->copy_or_mark_object, ctx.queue, precise, TRUE);
			else {
				PinHandleStackInteriorPtrData ud;
				memset (&ud, 0, sizeof (ud));
				ud.start_nursery = (void**)start_nursery;
				ud.end_nursery = (void**)end_nursery;
				mono_handle_stack_scan (info->client_info.info.handle_stack, pin_handle_stack_interior_ptrs, &ud, precise, FALSE);
			}
		}
	} FOREACH_THREAD_END

	SGEN_TV_GETTIME (scan_thread_data_end);
	SGEN_LOG (2, "Scanning thread data: %lld usecs", (long long)(SGEN_TV_ELAPSED (scan_thread_data_start, scan_thread_data_end) / 10));
}

/*
 * mono_gc_set_stack_end:
 *
 * Set the end of the current thread's stack to STACK_END. The stack space between
 * STACK_END and the real end of the thread's stack will not be scanned during collections.
 */
void
mono_gc_set_stack_end (void *stack_end)
{
	SgenThreadInfo *info;

	LOCK_GC;
	info = mono_thread_info_current ();
	if (info) {
		SGEN_ASSERT (0, stack_end < info->client_info.info.stack_end, "Can only lower stack end");
		info->client_info.info.stack_end = stack_end;
	}
	UNLOCK_GC;
}

/*
 * Roots
 */

int
mono_gc_register_root (char *start, size_t size, MonoGCDescriptor descr, MonoGCRootSource source, void *key, const char *msg)
{
	return sgen_register_root (start, size, descr, descr ? ROOT_TYPE_NORMAL : ROOT_TYPE_PINNED, source, key, msg);
}

int
mono_gc_register_root_wbarrier (char *start, size_t size, MonoGCDescriptor descr, MonoGCRootSource source, void *key, const char *msg)
{
	return sgen_register_root (start, size, descr, ROOT_TYPE_WBARRIER, source, key, msg);
}

void
mono_gc_deregister_root (char* addr)
{
	sgen_deregister_root (addr);
}

/*
 * PThreads
 */

#ifndef HOST_WIN32
int
mono_gc_pthread_create (pthread_t *new_thread, const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg)
{
	int res;

	MONO_ENTER_GC_SAFE;
	mono_threads_join_lock ();
	res = pthread_create (new_thread, attr, start_routine, arg);
	mono_threads_join_unlock ();
	MONO_EXIT_GC_SAFE;

	return res;
}
#endif

/*
 * Miscellaneous
 */

static size_t last_heap_size = -1;
static size_t worker_heap_size;

void
sgen_client_total_allocated_heap_changed (size_t allocated_heap)
{
	mono_runtime_resource_check_limit (MONO_RESOURCE_GC_HEAP, allocated_heap);

	/*
	 * This function can be called from SGen's worker threads. We want to try
	 * and avoid exposing those threads to the profiler API, so save the heap
	 * size value and report it later when the main GC thread calls
	 * mono_sgen_gc_event_resize ().
	 */
	worker_heap_size = allocated_heap;
}

void
mono_sgen_gc_event_resize (void)
{
	if (worker_heap_size != last_heap_size) {
		last_heap_size = worker_heap_size;
		MONO_PROFILER_RAISE (gc_resize, (last_heap_size));
	}
}

gboolean
mono_gc_user_markers_supported (void)
{
	return TRUE;
}

gboolean
mono_object_is_alive (MonoObject* o)
{
	return TRUE;
}

int
mono_gc_get_generation (MonoObject *obj)
{
	if (sgen_ptr_in_nursery (obj))
		return 0;
	return 1;
}

const char *
mono_gc_get_gc_name (void)
{
	return "sgen";
}

char*
mono_gc_get_description (void)
{
#ifdef HAVE_CONC_GC_AS_DEFAULT
	return g_strdup ("sgen (concurrent by default)");
#else
	return g_strdup ("sgen");
#endif
}

void
mono_gc_set_desktop_mode (void)
{
}

gboolean
mono_gc_is_moving (void)
{
	return TRUE;
}

gboolean
mono_gc_is_disabled (void)
{
	return FALSE;
}

#ifdef HOST_WIN32
BOOL APIENTRY mono_gc_dllmain (HMODULE module_handle, DWORD reason, LPVOID reserved)
{
	return TRUE;
}
#endif

int
mono_gc_max_generation (void)
{
	return 1;
}

gboolean
mono_gc_precise_stack_mark_enabled (void)
{
	return !conservative_stack_mark;
}

void
mono_gc_collect (int generation)
{
	MONO_ENTER_GC_UNSAFE;
	sgen_gc_collect (generation);
	MONO_EXIT_GC_UNSAFE;
}

int
mono_gc_collection_count (int generation)
{
	return sgen_gc_collection_count (generation);
}

int64_t
mono_gc_get_used_size (void)
{
	return (int64_t)sgen_gc_get_used_size ();
}

int64_t
mono_gc_get_heap_size (void)
{
	return (int64_t)sgen_gc_get_total_heap_allocation ();
}

void
mono_gc_get_gcmemoryinfo (gint64* high_memory_load_threshold_bytes,
			  gint64* memory_load_bytes,
			  gint64* total_available_memory_bytes,
			  gint64* heap_size_bytes,
			  gint64* fragmented_bytes)
{
	*high_memory_load_threshold_bytes = sgen_gc_info.high_memory_load_threshold_bytes;
	*fragmented_bytes = sgen_gc_info.fragmented_bytes;

	*heap_size_bytes = sgen_gc_info.heap_size_bytes;

	*memory_load_bytes = sgen_gc_info.memory_load_bytes;
	*total_available_memory_bytes = sgen_gc_info.total_available_memory_bytes;
}

MonoGCDescriptor
mono_gc_make_root_descr_user (MonoGCRootMarkFunc marker)
{
	return sgen_make_user_root_descriptor (marker);
}

MonoGCDescriptor
mono_gc_make_descr_for_string (gsize *bitmap, int numbits)
{
	return SGEN_DESC_STRING;
}

void
mono_gc_register_obj_with_weak_fields (void *obj)
{
	sgen_register_obj_with_weak_fields ((MonoObject*)obj);
}

void*
mono_gc_get_nursery (int *shift_bits, size_t *size)
{
	*size = sgen_nursery_size;
	*shift_bits = sgen_nursery_bits;
	return sgen_get_nursery_start ();
}

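/*
 * Note (illustrative only, assuming a size-aligned nursery as used by the
 * inline checks the JIT emits): with the values returned above, nursery
 * membership of a pointer p can be tested with a single shift-and-compare
 * along the lines of
 *
 *	((mword)p >> shift_bits) == ((mword)nursery_start >> shift_bits)
 *
 * instead of two bounds checks.
 */
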
int
mono_gc_get_los_limit (void)
{
	return SGEN_MAX_SMALL_OBJ_SIZE;
}

guint64
mono_gc_get_allocated_bytes_for_current_thread (void)
{
	SgenThreadInfo* info;
	info = mono_thread_info_current ();

	/*There are some more allocated bytes in the current tlab that have not been recorded yet */
	return info->total_bytes_allocated + (ptrdiff_t)(info->tlab_next - info->tlab_start);
}

guint64
mono_gc_get_total_allocated_bytes (MonoBoolean precise)
{
	return sgen_get_total_allocated_bytes (precise);
}

gpointer
sgen_client_default_metadata (void)
{
	return mono_domain_get ();
}

gpointer
sgen_client_metadata_for_object (GCObject *obj)
{
	return mono_object_domain (obj);
}

/**
 * mono_gchandle_new_internal:
 * \param obj managed object to get a handle for
 * \param pinned whether the object should be pinned
 * This returns a handle that wraps the object; it is used to keep a
 * reference to a managed object from the unmanaged world and prevent the
 * object from being disposed.
 *
 * If \p pinned is false the address of the object cannot be obtained; if it is
 * true the address of the object can be obtained. This will also pin the
 * object so a moving garbage collector will not be able to move it.
 *
 * \returns a handle that can be used to access the object from unmanaged code.
 */
MonoGCHandle
mono_gchandle_new_internal (MonoObject *obj, gboolean pinned)
{
	return MONO_GC_HANDLE_FROM_UINT (sgen_gchandle_new (obj, pinned));
}

/**
 * mono_gchandle_new_weakref_internal:
 * \param obj managed object to get a handle for
 * \param track_resurrection Determines how long to track the object: if TRUE, the object is tracked after finalization; if FALSE, only up until the point of finalization.
 *
 * This returns a weak handle that wraps the object; it is used to
 * keep a reference to a managed object from the unmanaged world.
 * Unlike with \c mono_gchandle_new_internal, the object can be reclaimed by the
 * garbage collector, in which case the value of the GCHandle will be
 * set to zero.
 *
 * If \p track_resurrection is TRUE the object will be tracked through
 * finalization and if the object is resurrected during the execution
 * of the finalizer, then the returned weakref will continue to hold
 * a reference to the object. If \p track_resurrection is FALSE, then
 * the weak reference's target will become NULL as soon as the object
 * is passed on to the finalizer.
 *
 * \returns a handle that can be used to access the object from
 * unmanaged code.
 */
MonoGCHandle
mono_gchandle_new_weakref_internal (GCObject *obj, gboolean track_resurrection)
{
	return MONO_GC_HANDLE_FROM_UINT (sgen_gchandle_new_weakref (obj, track_resurrection));
}

/**
 * mono_gchandle_is_in_domain:
 * \param gchandle a GCHandle's handle.
 * \param domain An application domain.
 * \returns TRUE if the object wrapped by the \p gchandle belongs to the specific \p domain.
 */
gboolean
mono_gchandle_is_in_domain (MonoGCHandle gchandle, MonoDomain *domain)
{
	MonoDomain *gchandle_domain = (MonoDomain *)sgen_gchandle_get_metadata (MONO_GC_HANDLE_TO_UINT (gchandle));
	return domain->domain_id == gchandle_domain->domain_id;
}

/**
 * mono_gchandle_free_internal:
 * \param gchandle a GCHandle's handle.
 *
 * Frees the \p gchandle handle. If there are no outstanding
 * references, the garbage collector can reclaim the memory of the
 * object wrapped.
 */
void
mono_gchandle_free_internal (MonoGCHandle gchandle)
{
	sgen_gchandle_free (MONO_GC_HANDLE_TO_UINT (gchandle));
}

/**
 * mono_gchandle_free_domain:
 * \param unloading domain that is unloading
 *
 * Function used internally to cleanup any GC handle for objects belonging
 * to the specified domain during appdomain unload.
 */
void
mono_gchandle_free_domain (MonoDomain *unloading)
{
}

/**
 * mono_gchandle_get_target_internal:
 * \param gchandle a GCHandle's handle.
 *
 * The handle was previously created by calling \c mono_gchandle_new_internal or
 * \c mono_gchandle_new_weakref.
 *
 * \returns a pointer to the \c MonoObject* represented by the handle or
 * NULL for a collected object if using a weakref handle.
 */
MonoObject*
mono_gchandle_get_target_internal (MonoGCHandle gchandle)
{
	return sgen_gchandle_get_target (MONO_GC_HANDLE_TO_UINT (gchandle));
}

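/*
 * Illustrative use of the handle API above (a sketch; `use_object` is a
 * hypothetical consumer):
 *
 *	MonoGCHandle h = mono_gchandle_new_weakref_internal (obj, FALSE);
 *	...
 *	MonoObject *target = mono_gchandle_get_target_internal (h);
 *	if (target)
 *		use_object (target);	// NULL once the object has been collected
 *	mono_gchandle_free_internal (h);
 */
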
static gpointer
null_link_if_in_domain (gpointer hidden, GCHandleType handle_type, int max_generation, gpointer user)
{
	MonoDomain *unloading_domain = (MonoDomain *)user;
	MonoDomain *obj_domain;
	gboolean is_weak = MONO_GC_HANDLE_TYPE_IS_WEAK (handle_type);
	if (MONO_GC_HANDLE_IS_OBJECT_POINTER (hidden)) {
		MonoObject *obj = (MonoObject *)MONO_GC_REVEAL_POINTER (hidden, is_weak);
		obj_domain = mono_object_domain (obj);
	} else {
		obj_domain = (MonoDomain *)MONO_GC_REVEAL_POINTER (hidden, is_weak);
	}
	if (unloading_domain->domain_id == obj_domain->domain_id)
		return NULL;
	return hidden;
}

void
sgen_null_links_for_domain (MonoDomain *domain)
{
	guint type;
	for (type = HANDLE_TYPE_MIN; type < HANDLE_TYPE_MAX; ++type)
		sgen_gchandle_iterate ((GCHandleType)type, GENERATION_OLD, null_link_if_in_domain, domain);
}

void
mono_gchandle_set_target (MonoGCHandle gchandle, MonoObject *obj)
{
	sgen_gchandle_set_target (MONO_GC_HANDLE_TO_UINT (gchandle), obj);
}

void
sgen_client_gchandle_created (int handle_type, GCObject *obj, guint32 handle)
{
#ifndef DISABLE_PERFCOUNTERS
	mono_atomic_inc_i32 (&mono_perfcounters->gc_num_handles);
#endif

	MONO_PROFILER_RAISE (gc_handle_created, (handle, (MonoGCHandleType)handle_type, obj));
}

void
sgen_client_gchandle_destroyed (int handle_type, guint32 handle)
{
#ifndef DISABLE_PERFCOUNTERS
	mono_atomic_dec_i32 (&mono_perfcounters->gc_num_handles);
#endif

	MONO_PROFILER_RAISE (gc_handle_deleted, (handle, (MonoGCHandleType)handle_type));
}

void
sgen_client_ensure_weak_gchandles_accessible (void)
{
	/*
	 * During the second bridge processing step the world is
	 * running again. That step processes all weak links once
	 * more to null those that refer to dead objects. Before that
	 * is completed, those links must not be followed, so we
	 * conservatively wait for bridge processing when any weak
	 * link is dereferenced.
	 */
	/* FIXME: A GC can occur after this check fails, in which case we
	 * should wait for bridge processing but would fail to do so.
	 */
	if (G_UNLIKELY (mono_bridge_processing_in_progress))
		mono_gc_wait_for_bridge_processing ();
}

void*
mono_gc_invoke_with_gc_lock (MonoGCLockedCallbackFunc func, void *data)
{
	void *result;
	LOCK_INTERRUPTION;
	result = func (data);
	UNLOCK_INTERRUPTION;
	return result;
}

void
mono_gc_register_altstack (gpointer stack, gint32 stack_size, gpointer altstack, gint32 altstack_size)
{
	// FIXME:
}

guint8*
mono_gc_get_card_table (int *shift_bits, gpointer *mask)
{
	return sgen_get_card_table_configuration (shift_bits, mask);
}

guint8*
mono_gc_get_target_card_table (int *shift_bits, target_mgreg_t *mask)
{
	return sgen_get_target_card_table_configuration (shift_bits, mask);
}

gboolean
mono_gc_card_table_nursery_check (void)
{
	return !sgen_get_major_collector ()->is_concurrent;
}

/* Negative value to remove */
void
mono_gc_add_memory_pressure (gint64 value)
{
	/* FIXME: Implement at some point? */
}

/*
 * Logging
 */

void
sgen_client_degraded_allocation (void)
{
	/* The WASM target always triggers degraded allocation before collecting, so printing the warning would only confuse users. */
#ifndef HOST_WASM
	static gint32 last_major_gc_warned = -1;
	static gint32 num_degraded = 0;

	gint32 major_gc_count = mono_atomic_load_i32 (&mono_gc_stats.major_gc_count);
	if (mono_atomic_load_i32 (&last_major_gc_warned) < major_gc_count) {
		gint32 num = mono_atomic_inc_i32 (&num_degraded);
		if (num == 1 || num == 3)
			mono_trace (G_LOG_LEVEL_WARNING, MONO_TRACE_GC, "Warning: Degraded allocation. Consider increasing nursery-size if the warning persists.");
		else if (num == 10)
			mono_trace (G_LOG_LEVEL_WARNING, MONO_TRACE_GC, "Warning: Repeated degraded allocation. Consider increasing nursery-size.");

		mono_atomic_store_i32 (&last_major_gc_warned, major_gc_count);
	}
#endif
}

/*
 * Debugging
 */

const char*
sgen_client_description_for_internal_mem_type (int type)
{
	switch (type) {
	case INTERNAL_MEM_EPHEMERON_LINK: return "ephemeron-link";
	case INTERNAL_MEM_MOVED_OBJECT: return "moved-object";
	default:
		return NULL;
	}
}

void
sgen_client_pre_collection_checks (void)
{
	if (sgen_mono_xdomain_checks) {
		sgen_clear_nursery_fragments ();
		sgen_check_for_xdomain_refs ();
	}
}

gboolean
sgen_client_vtable_is_inited (MonoVTable *vt)
{
	return m_class_is_inited (vt->klass);
}

const char*
sgen_client_vtable_get_namespace (MonoVTable *vt)
{
	return m_class_get_name_space (vt->klass);
}

const char*
sgen_client_vtable_get_name (MonoVTable *vt)
{
	return m_class_get_name (vt->klass);
}

/*
 * Initialization
 */

void
sgen_client_init (void)
{
	mono_thread_callbacks_init ();
	mono_thread_info_init (sizeof (SgenThreadInfo));

	/* Keep this the default for now */
	/* Precise marking is broken on all supported targets. Disable until fixed. */
	conservative_stack_mark = TRUE;

	sgen_register_fixed_internal_mem_type (INTERNAL_MEM_EPHEMERON_LINK, sizeof (EphemeronLinkNode));

	mono_sgen_init_stw ();

	mono_tls_init_gc_keys ();

	mono_thread_info_attach ();
}

void
mono_gc_init_icalls (void)
{
	mono_register_jit_icall (mono_gc_alloc_obj, mono_icall_sig_object_ptr_int, FALSE);
	mono_register_jit_icall (mono_gc_alloc_vector, mono_icall_sig_object_ptr_int_int, FALSE);
	mono_register_jit_icall (mono_gc_alloc_string, mono_icall_sig_object_ptr_int_int32, FALSE);
	mono_register_jit_icall (mono_profiler_raise_gc_allocation, mono_icall_sig_void_object, FALSE);
}

gboolean
sgen_client_handle_gc_param (const char *opt)
{
	if (g_str_has_prefix (opt, "stack-mark=")) {
		opt = strchr (opt, '=') + 1;
		if (!strcmp (opt, "precise")) {
			conservative_stack_mark = FALSE;
		} else if (!strcmp (opt, "conservative")) {
			conservative_stack_mark = TRUE;
		} else {
			sgen_env_var_error (MONO_GC_PARAMS_NAME, conservative_stack_mark ? "Using `conservative`." : "Using `precise`.",
					"Invalid value `%s` for `stack-mark` option, possible values are: `precise`, `conservative`.", opt);
		}
	} else if (g_str_has_prefix (opt, "bridge-implementation=")) {
		opt = strchr (opt, '=') + 1;
		sgen_set_bridge_implementation (opt);
	} else if (g_str_has_prefix (opt, "toggleref-test")) {
		/* FIXME: This should probably be in MONO_GC_DEBUG */
		sgen_register_test_toggleref_callback ();
	} else if (!sgen_bridge_handle_gc_param (opt)) {
		return FALSE;
	}
	return TRUE;
}

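/*
 * Illustrative MONO_GC_PARAMS setting handled above (a sketch; the option
 * name and value shown are taken from the parsing code):
 *
 *	MONO_GC_PARAMS=stack-mark=precise
 */
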
void
sgen_client_print_gc_params_usage (void)
{
	fprintf (stderr, "  stack-mark=MARK-METHOD (where MARK-METHOD is 'precise' or 'conservative')\n");
}

gboolean
sgen_client_handle_gc_debug (const char *opt)
{
	if (!strcmp (opt, "xdomain-checks")) {
		sgen_mono_xdomain_checks = TRUE;
	} else if (!strcmp (opt, "do-not-finalize")) {
		mono_do_not_finalize = TRUE;
	} else if (g_str_has_prefix (opt, "do-not-finalize=")) {
		opt = strchr (opt, '=') + 1;
		mono_do_not_finalize = TRUE;
		mono_do_not_finalize_class_names = g_strsplit (opt, ",", 0);
	} else if (!strcmp (opt, "log-finalizers")) {
		mono_log_finalizers = TRUE;
	} else if (!strcmp (opt, "no-managed-allocator")) {
		sgen_set_use_managed_allocator (FALSE);
	} else if (!strcmp (opt, "managed-allocator")) {
		/*
		 * This option can be used to override the disabling of the managed allocator by
		 * the nursery canaries option. This can be used when knowing for sure that no
		 * aot code will be used by the application.
		 */
		sgen_set_use_managed_allocator (TRUE);
	} else if (!sgen_bridge_handle_gc_debug (opt)) {
		return FALSE;
	}
	return TRUE;
}

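/*
 * Illustrative MONO_GC_DEBUG setting handled above (a sketch; `MyNs.MyClass`
 * is a hypothetical class name, multiple names are comma-separated per the
 * g_strsplit call):
 *
 *	MONO_GC_DEBUG=do-not-finalize=MyNs.MyClass
 */
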
void
sgen_client_print_gc_debug_usage (void)
{
	fprintf (stderr, "  xdomain-checks\n");
	fprintf (stderr, "  do-not-finalize\n");
	fprintf (stderr, "  log-finalizers\n");
	fprintf (stderr, "  no-managed-allocator\n");
	sgen_bridge_print_gc_debug_usage ();
}

gpointer
sgen_client_get_provenance (void)
{
#ifdef SGEN_OBJECT_PROVENANCE
	MonoGCCallbacks *cb = mono_gc_get_gc_callbacks ();
	gpointer (*get_provenance_func) (void);
	if (!cb)
		return NULL;
	get_provenance_func = cb->get_provenance_func;
	if (get_provenance_func)
		return get_provenance_func ();
	return NULL;
#else
	return NULL;
#endif
}

void
sgen_client_describe_invalid_pointer (GCObject *ptr)
{
	sgen_bridge_describe_pointer (ptr);
}

static gboolean gc_inited;

/**
 * mono_gc_base_init:
 */
void
mono_gc_base_init (void)
{
	if (gc_inited)
		return;

	mono_counters_init ();

#ifndef HOST_WIN32
	mono_w32handle_init ();
#endif

#ifdef HEAVY_STATISTICS
	mono_counters_register ("los marked cards", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &los_marked_cards);
	mono_counters_register ("los array cards scanned ", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &los_array_cards);
	mono_counters_register ("los array remsets", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &los_array_remsets);

	mono_counters_register ("WBarrier set arrayref", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_wbarrier_set_arrayref);
	mono_counters_register ("WBarrier value copy", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_wbarrier_value_copy);
	mono_counters_register ("WBarrier object copy", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_wbarrier_object_copy);
#endif

	sgen_gc_init ();

	gc_inited = TRUE;
}

void
mono_gc_base_cleanup (void)
{
	/*
	 * Note we don't fully cleanup the GC here, but the threads mainly.
	 *
	 * We need to finish any work on the sgen threads before shutting down
	 * the sgen threadpool. After this point we can still trigger GCs as
	 * part of domain free, but they should all be forced and not use the
	 * threadpool.
	 */
	sgen_finish_concurrent_work ("cleanup", TRUE);
	sgen_thread_pool_shutdown ();

	// We should have consumed any outstanding moves.
	g_assert (sgen_pointer_queue_is_empty (&moved_objects_queue));
}

gboolean
mono_gc_is_null (void)
{
	return FALSE;
}

gsize *
sgen_client_get_weak_bitmap (MonoVTable *vt, int *nbits)
{
	MonoClass *klass = vt->klass;

	return mono_class_get_weak_bitmap (klass, nbits);
}

void
sgen_client_binary_protocol_collection_begin (int minor_gc_count, int generation)
{
	static gboolean pseudo_roots_registered;

	MONO_GC_BEGIN (generation);

	MONO_PROFILER_RAISE (gc_event, (MONO_GC_EVENT_START, generation, generation == GENERATION_OLD && sgen_concurrent_collection_in_progress));

	if (!pseudo_roots_registered) {
		pseudo_roots_registered = TRUE;
		MONO_PROFILER_RAISE (gc_root_register, (SPECIAL_ADDRESS_FIN_QUEUE, 1, MONO_ROOT_SOURCE_FINALIZER_QUEUE, NULL, "Finalizer Queue"));
		MONO_PROFILER_RAISE (gc_root_register, (SPECIAL_ADDRESS_CRIT_FIN_QUEUE, 1, MONO_ROOT_SOURCE_FINALIZER_QUEUE, NULL, "Finalizer Queue (Critical)"));
		MONO_PROFILER_RAISE (gc_root_register, (SPECIAL_ADDRESS_EPHEMERON, 1, MONO_ROOT_SOURCE_EPHEMERON, NULL, "Ephemerons"));
		MONO_PROFILER_RAISE (gc_root_register, (SPECIAL_ADDRESS_TOGGLEREF, 1, MONO_ROOT_SOURCE_TOGGLEREF, NULL, "ToggleRefs"));
	}

#ifndef DISABLE_PERFCOUNTERS
	if (generation == GENERATION_NURSERY)
		mono_atomic_inc_i32 (&mono_perfcounters->gc_collections0);
	else
		mono_atomic_inc_i32 (&mono_perfcounters->gc_collections1);
#endif
}

void
sgen_client_binary_protocol_collection_end (int minor_gc_count, int generation, long long num_objects_scanned, long long num_unique_objects_scanned)
{
	MONO_GC_END (generation);

	MONO_PROFILER_RAISE (gc_event, (MONO_GC_EVENT_END, generation, generation == GENERATION_OLD && sgen_concurrent_collection_in_progress));
}

#ifdef HOST_WASM
void
sgen_client_schedule_background_job (void (*cb)(void))
{
	mono_threads_schedule_background_job (cb);
}
#endif

#endif