/**
 * \file
 * SGen features specific to Mono.
 *
 * Copyright (C) 2014 Xamarin Inc
 *
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
 */
#include "config.h"
#ifdef HAVE_SGEN_GC

#include "sgen/sgen-gc.h"
#include "sgen/sgen-protocol.h"
#include "metadata/monitor.h"
#include "sgen/sgen-layout-stats.h"
#include "sgen/sgen-client.h"
#include "sgen/sgen-cardtable.h"
#include "sgen/sgen-pinning.h"
#include "sgen/sgen-workers.h"
#include "metadata/class-init.h"
#include "metadata/marshal.h"
#include "metadata/method-builder.h"
#include "metadata/abi-details.h"
#include "metadata/class-abi-details.h"
#include "metadata/mono-gc.h"
#include "metadata/runtime.h"
#include "metadata/sgen-bridge-internals.h"
#include "metadata/sgen-mono.h"
#include "metadata/sgen-mono-ilgen.h"
#include "metadata/gc-internals.h"
#include "metadata/handle.h"
#include "metadata/abi-details.h"
#include "utils/mono-memory-model.h"
#include "utils/mono-logger-internals.h"
#include "utils/mono-threads-coop.h"
#include "utils/mono-threads.h"
#include "metadata/w32handle.h"
#include "icall-signatures.h"
#ifdef HEAVY_STATISTICS
static guint64 stat_wbarrier_set_arrayref = 0;
static guint64 stat_wbarrier_value_copy = 0;
static guint64 stat_wbarrier_object_copy = 0;

static guint64 los_marked_cards;
static guint64 los_array_cards;
static guint64 los_array_remsets;
#endif
/* If set, mark stacks conservatively, even if precise marking is possible */
static gboolean conservative_stack_mark = FALSE;
/* If set, check that there are no references to the domain left at domain unload */
gboolean sgen_mono_xdomain_checks = FALSE;

/* Functions supplied by the runtime to be called by the GC */
static MonoGCCallbacks gc_callbacks;

/* The total number of bytes allocated so far in program execution.
 * This is not constantly synchronized, but only updated on each GC. */
static gint64 total_bytes_allocated = 0;
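/*
 * Note: the OPDEF expansion below builds an enum of CIL opcode constants,
 * one entry per line of mono/cil/opcode.def, so this file can refer to
 * opcodes by their CEE_* names without pulling in the full opcode tables.
 */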
#define OPDEF(a,b,c,d,e,f,g,h,i,j) \
	a = i,

enum {
#include "mono/cil/opcode.def"
	CEE_LAST
};

#undef OPDEF

/*
 * Write barriers
 */

static gboolean
ptr_on_stack (void *ptr)
{
	gpointer stack_start = &stack_start;
	SgenThreadInfo *info = mono_thread_info_current ();

	if (ptr >= stack_start && ptr < (gpointer)info->client_info.info.stack_end)
		return TRUE;
	return FALSE;
}

#ifdef SGEN_HEAVY_BINARY_PROTOCOL
#undef HANDLE_PTR
#define HANDLE_PTR(ptr,obj) do {					\
		gpointer o = *(gpointer*)(ptr);				\
		if ((o)) {						\
			gpointer d = ((char*)dest) + ((char*)(ptr) - (char*)(obj)); \
			sgen_binary_protocol_wbarrier (d, o, (gpointer) SGEN_LOAD_VTABLE (o)); \
		}							\
	} while (0)

static void
scan_object_for_binary_protocol_copy_wbarrier (gpointer dest, char *start, mword desc)
{
#define SCAN_OBJECT_NOVTABLE
#include "sgen/sgen-scan-object.h"
}
#endif
void
mono_gc_wbarrier_value_copy_internal (gpointer dest, gpointer src, int count, MonoClass *klass)
{
	HEAVY_STAT (++stat_wbarrier_value_copy);
	g_assert (m_class_is_valuetype (klass));

	SGEN_LOG (8, "Adding value remset at %p, count %d, descr %p for class %s (%p)", dest, count, (gpointer)(uintptr_t)m_class_get_gc_descr (klass), m_class_get_name (klass), klass);

	if (sgen_ptr_in_nursery (dest) || ptr_on_stack (dest) || !sgen_gc_descr_has_references ((mword)m_class_get_gc_descr (klass))) {
		size_t element_size = mono_class_value_size (klass, NULL);
		size_t size = count * element_size;
		mono_gc_memmove_atomic (dest, src, size);
		return;
	}

#ifdef SGEN_HEAVY_BINARY_PROTOCOL
	if (sgen_binary_protocol_is_heavy_enabled ()) {
		size_t element_size = mono_class_value_size (klass, NULL);
		int i;
		for (i = 0; i < count; ++i) {
			scan_object_for_binary_protocol_copy_wbarrier ((char*)dest + i * element_size,
					(char*)src + i * element_size - MONO_ABI_SIZEOF (MonoObject),
					(mword) klass->gc_descr);
		}
	}
#endif

	sgen_get_remset ()->wbarrier_value_copy (dest, src, count, mono_class_value_size (klass, NULL));
}
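/*
 * Illustrative call site (not from this file): copying a value type with
 * reference fields into a heap object must go through the barrier above so
 * the remembered set learns about any new old->young references:
 *
 *     // 'dest' points into a major-heap object, 'local' is a stack struct
 *     mono_gc_wbarrier_value_copy_internal (dest, &local, 1, vt_klass);
 */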
/**
 * mono_gc_wbarrier_object_copy_internal:
 *
 * Write barrier to call when \p obj is the result of a clone or copy of an object.
 */
void
mono_gc_wbarrier_object_copy_internal (MonoObject* obj, MonoObject *src)
{
	int size;

	HEAVY_STAT (++stat_wbarrier_object_copy);

	SGEN_ASSERT (6, !ptr_on_stack (obj), "Why is this called for a non-reference type?");
	if (sgen_ptr_in_nursery (obj) || !SGEN_OBJECT_HAS_REFERENCES (src)) {
		size = m_class_get_instance_size (mono_object_class (obj));
		mono_gc_memmove_aligned ((char*)obj + MONO_ABI_SIZEOF (MonoObject), (char*)src + MONO_ABI_SIZEOF (MonoObject),
				size - MONO_ABI_SIZEOF (MonoObject));
		return;
	}

#ifdef SGEN_HEAVY_BINARY_PROTOCOL
	if (sgen_binary_protocol_is_heavy_enabled ())
		scan_object_for_binary_protocol_copy_wbarrier (obj, (char*)src, (mword) src->vtable->gc_descr);
#endif

	sgen_get_remset ()->wbarrier_object_copy (obj, src);
}

/**
 * mono_gc_wbarrier_set_arrayref_internal:
 */
void
mono_gc_wbarrier_set_arrayref_internal (MonoArray *arr, gpointer slot_ptr, MonoObject* value)
{
	HEAVY_STAT (++stat_wbarrier_set_arrayref);
	if (sgen_ptr_in_nursery (slot_ptr)) {
		*(void**)slot_ptr = value;
		return;
	}
	SGEN_LOG (8, "Adding remset at %p", slot_ptr);
	if (value)
		sgen_binary_protocol_wbarrier (slot_ptr, value, value->vtable);

	sgen_get_remset ()->wbarrier_set_field ((GCObject*)arr, slot_ptr, value);
}
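/*
 * Note that stores into the nursery take the early-return fast path above:
 * a slot that itself lives in the nursery never needs a remembered-set
 * entry, because the nursery is scanned in full on every minor collection.
 */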
/**
 * mono_gc_wbarrier_set_field_internal:
 */
void
mono_gc_wbarrier_set_field_internal (MonoObject *obj, gpointer field_ptr, MonoObject* value)
{
	mono_gc_wbarrier_set_arrayref_internal ((MonoArray*)obj, field_ptr, value);
}

void
mono_gc_wbarrier_range_copy (gpointer _dest, gconstpointer _src, int size)
{
	sgen_wbarrier_range_copy (_dest, _src, size);
}

MonoRangeCopyFunction
mono_gc_get_range_copy_func (void)
{
	return sgen_get_remset ()->wbarrier_range_copy;
}

int
mono_gc_get_suspend_signal (void)
{
	return mono_threads_suspend_get_suspend_signal ();
}

int
mono_gc_get_restart_signal (void)
{
	return mono_threads_suspend_get_restart_signal ();
}

static MonoMethod *write_barrier_conc_method;
static MonoMethod *write_barrier_noconc_method;

gboolean
sgen_is_critical_method (MonoMethod *method)
{
	return sgen_is_managed_allocator (method);
}

gboolean
sgen_has_critical_method (void)
{
	return sgen_has_managed_allocator ();
}

gboolean
mono_gc_is_critical_method (MonoMethod *method)
{
#ifdef HOST_WASM
	// methods can't be critical under WASM because it is single-threaded
	return FALSE;
#else
	return sgen_is_critical_method (method);
#endif
}
static MonoSgenMonoCallbacks sgenmono_cb;
static gboolean cb_inited = FALSE;

void
mono_install_sgen_mono_callbacks (MonoSgenMonoCallbacks *cb)
{
	g_assert (!cb_inited);
	g_assert (cb->version == MONO_SGEN_MONO_CALLBACKS_VERSION);
	memcpy (&sgenmono_cb, cb, sizeof (MonoSgenMonoCallbacks));
	cb_inited = TRUE;
}

#if !ENABLE_ILGEN
static void
emit_nursery_check_noilgen (MonoMethodBuilder *mb, gboolean is_concurrent)
{
}

static void
emit_managed_allocater_noilgen (MonoMethodBuilder *mb, gboolean slowpath, gboolean profiler, int atype)
{
}

static void
install_noilgen (void)
{
	MonoSgenMonoCallbacks cb;
	cb.version = MONO_SGEN_MONO_CALLBACKS_VERSION;
	cb.emit_nursery_check = emit_nursery_check_noilgen;
	cb.emit_managed_allocater = emit_managed_allocater_noilgen;
	mono_install_sgen_mono_callbacks (&cb);
}
#endif

static MonoSgenMonoCallbacks *
get_sgen_mono_cb (void)
{
	if (G_UNLIKELY (!cb_inited)) {
#ifdef ENABLE_ILGEN
		mono_sgen_mono_ilgen_init ();
#else
		install_noilgen ();
#endif
	}
	return &sgenmono_cb;
}
MonoMethod*
mono_gc_get_specific_write_barrier (gboolean is_concurrent)
{
	MonoMethod *res;
	MonoMethodBuilder *mb;
	MonoMethodSignature *sig;
	MonoMethod **write_barrier_method_addr;
	WrapperInfo *info;
	// FIXME: Maybe create a separate version for ctors (the branch would be
	// correctly predicted more times)
	if (is_concurrent)
		write_barrier_method_addr = &write_barrier_conc_method;
	else
		write_barrier_method_addr = &write_barrier_noconc_method;

	if (*write_barrier_method_addr)
		return *write_barrier_method_addr;

	/* Create the IL version of mono_gc_barrier_generic_store () */
	sig = mono_metadata_signature_alloc (mono_defaults.corlib, 1);
	sig->ret = mono_get_void_type ();
	sig->params [0] = mono_get_int_type ();

	if (is_concurrent)
		mb = mono_mb_new (mono_defaults.object_class, "wbarrier_conc", MONO_WRAPPER_WRITE_BARRIER);
	else
		mb = mono_mb_new (mono_defaults.object_class, "wbarrier_noconc", MONO_WRAPPER_WRITE_BARRIER);

	get_sgen_mono_cb ()->emit_nursery_check (mb, is_concurrent);

	res = mono_mb_create_method (mb, sig, 16);
	info = mono_wrapper_info_create (mb, WRAPPER_SUBTYPE_NONE);
	mono_marshal_set_wrapper_info (res, info);
	mono_mb_free (mb);

	LOCK_GC;
	if (*write_barrier_method_addr) {
		/* Already created */
		mono_free_method (res);
	} else {
		/* double-checked locking */
		mono_memory_barrier ();
		*write_barrier_method_addr = res;
	}
	UNLOCK_GC;

	return *write_barrier_method_addr;
}
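/*
 * The mono_memory_barrier () above orders the wrapper's construction before
 * the publishing store, so a thread that observes a non-NULL
 * *write_barrier_method_addr on the lock-free fast path at the top of this
 * function is guaranteed to see a fully created method.
 */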
MonoMethod*
mono_gc_get_write_barrier (void)
{
	return mono_gc_get_specific_write_barrier (sgen_major_collector.is_concurrent);
}

/*
 * Dummy filler objects
 */

/* Vtable of the objects used to fill out nursery fragments before a collection */
static GCVTable array_fill_vtable;

static GCVTable
get_array_fill_vtable (void)
{
	if (!array_fill_vtable) {
		static char _vtable[sizeof(MonoVTable)+8];
		MonoVTable* vtable = (MonoVTable*) ALIGN_TO((mword)_vtable, 8);
		gsize bmap;

		MonoClass *klass = mono_class_create_array_fill_type ();
		MonoDomain *domain = mono_get_root_domain ();
		g_assert (domain);

		vtable->klass = klass;
		bmap = 0;
		vtable->gc_descr = mono_gc_make_descr_for_array (TRUE, &bmap, 0, 8);
		vtable->rank = 1;

		array_fill_vtable = vtable;
	}
	return array_fill_vtable;
}

gboolean
sgen_client_array_fill_range (char *start, size_t size)
{
	MonoArray *o;

	if (size < MONO_SIZEOF_MONO_ARRAY) {
		memset (start, 0, size);
		return FALSE;
	}

	o = (MonoArray*)start;
	o->obj.vtable = (MonoVTable*)get_array_fill_vtable ();
	/* Mark this as not a real object */
	o->obj.synchronisation = (MonoThreadsSync *)GINT_TO_POINTER (-1);
	o->bounds = NULL;
	/* We use an array of int64 */
	g_assert ((size - MONO_SIZEOF_MONO_ARRAY) % 8 == 0);
	o->max_length = (mono_array_size_t)((size - MONO_SIZEOF_MONO_ARRAY) / 8);

	return TRUE;
}
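/*
 * Filler objects let the collector walk the nursery linearly: instead of
 * tracking free holes separately, each unused fragment is dressed up as a
 * dead int64 array whose length covers the hole, with the -1 synchronisation
 * word marking it as not being a real object. Holes smaller than an array
 * header are simply zeroed (the FALSE return above).
 */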
void
sgen_client_zero_array_fill_header (void *p, size_t size)
{
	if (size >= MONO_SIZEOF_MONO_ARRAY) {
		memset (p, 0, MONO_SIZEOF_MONO_ARRAY);
	} else {
		static guint8 zeros [MONO_SIZEOF_MONO_ARRAY];

		SGEN_ASSERT (0, !memcmp (p, zeros, size), "TLAB segment must be zeroed out.");
	}
}

MonoVTable *
mono_gc_get_vtable (MonoObject *obj)
{
	// See sgen/sgen-tagged-pointer.h.
	return SGEN_LOAD_VTABLE (obj);
}

/*
 * Finalization
 */

static MonoGCFinalizerCallbacks fin_callbacks;

guint
mono_gc_get_vtable_bits (MonoClass *klass)
{
	guint res = 0;
	/* FIXME move this to the bridge code */
	if (sgen_need_bridge_processing ()) {
		switch (sgen_bridge_class_kind (klass)) {
		case GC_BRIDGE_TRANSPARENT_BRIDGE_CLASS:
		case GC_BRIDGE_OPAQUE_BRIDGE_CLASS:
			res = SGEN_GC_BIT_BRIDGE_OBJECT;
			break;
		case GC_BRIDGE_OPAQUE_CLASS:
			res = SGEN_GC_BIT_BRIDGE_OPAQUE_OBJECT;
			break;
		case GC_BRIDGE_TRANSPARENT_CLASS:
			break;
		}
	}
	if (fin_callbacks.is_class_finalization_aware) {
		if (fin_callbacks.is_class_finalization_aware (klass))
			res |= SGEN_GC_BIT_FINALIZER_AWARE;
	}
	return res;
}

static gboolean
is_finalization_aware (MonoObject *obj)
{
	MonoVTable *vt = SGEN_LOAD_VTABLE (obj);
	return (vt->gc_bits & SGEN_GC_BIT_FINALIZER_AWARE) == SGEN_GC_BIT_FINALIZER_AWARE;
}

void
sgen_client_object_queued_for_finalization (GCObject *obj)
{
	if (fin_callbacks.object_queued_for_finalization && is_finalization_aware (obj))
		fin_callbacks.object_queued_for_finalization (obj);

#ifdef ENABLE_DTRACE
	if (G_UNLIKELY (MONO_GC_FINALIZE_ENQUEUE_ENABLED ())) {
		int gen = sgen_ptr_in_nursery (obj) ? GENERATION_NURSERY : GENERATION_OLD;
		GCVTable vt = SGEN_LOAD_VTABLE (obj);
		MONO_GC_FINALIZE_ENQUEUE ((mword)obj, sgen_safe_object_get_size (obj),
				sgen_client_vtable_get_namespace (vt), sgen_client_vtable_get_name (vt), gen,
				sgen_client_object_has_critical_finalizer (obj));
	}
#endif
}
void
mono_gc_register_finalizer_callbacks (MonoGCFinalizerCallbacks *callbacks)
{
	if (callbacks->version != MONO_GC_FINALIZER_EXTENSION_VERSION)
		g_error ("Invalid finalizer callback version. Expected %d but got %d\n", MONO_GC_FINALIZER_EXTENSION_VERSION, callbacks->version);

	fin_callbacks = *callbacks;
}

void
sgen_client_run_finalize (MonoObject *obj)
{
	mono_gc_run_finalize (obj, NULL);
}

/**
 * mono_gc_invoke_finalizers:
 */
int
mono_gc_invoke_finalizers (void)
{
	return sgen_gc_invoke_finalizers ();
}

/**
 * mono_gc_pending_finalizers:
 */
MonoBoolean
mono_gc_pending_finalizers (void)
{
	return sgen_have_pending_finalizers ();
}

void
sgen_client_finalize_notify (void)
{
	mono_gc_finalize_notify ();
}

void
mono_gc_register_for_finalization (MonoObject *obj, MonoFinalizationProc user_data)
{
	sgen_object_register_for_finalization (obj, user_data);
}

static gboolean
object_in_domain_predicate (MonoObject *obj, void *user_data)
{
	MonoDomain *domain = (MonoDomain *)user_data;
	if (mono_object_domain (obj) == domain) {
		SGEN_LOG (5, "Unregistering finalizer for object: %p (%s)", obj, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (obj)));
		return TRUE;
	}
	return FALSE;
}

/**
 * mono_gc_finalize_domain:
 * \param domain the unloading appdomain
 * Enqueue for finalization all objects that belong to the unloading appdomain \p domain.
 */
void
mono_gc_finalize_domain (MonoDomain *domain)
{
	sgen_finalize_if (object_in_domain_predicate, domain);
}

void
mono_gc_suspend_finalizers (void)
{
	sgen_set_suspend_finalizers ();
}
/*
 * Ephemerons
 */

typedef struct _EphemeronLinkNode EphemeronLinkNode;

struct _EphemeronLinkNode {
	EphemeronLinkNode *next;
	MonoArray *array;
};

typedef struct {
	GCObject *key;
	GCObject *value;
} Ephemeron;
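/*
 * An Ephemeron is a (key, value) pair whose value is considered reachable
 * only while its key is reachable; the ephemeron's own reference to the key
 * does not keep the key alive. In Mono these arrays back
 * System.Runtime.CompilerServices.ConditionalWeakTable.
 */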
static EphemeronLinkNode *ephemeron_list;

/* LOCKING: requires that the GC lock is held */
static MONO_PERMIT (need (sgen_gc_locked)) void
null_ephemerons_for_domain (MonoDomain *domain)
{
	EphemeronLinkNode *current = ephemeron_list, *prev = NULL;

	while (current) {
		MonoObject *object = (MonoObject*)current->array;

		if (object)
			SGEN_ASSERT (0, object->vtable, "Can't have objects without vtables.");

		if (object && object->vtable->domain == domain) {
			EphemeronLinkNode *tmp = current;

			if (prev)
				prev->next = current->next;
			else
				ephemeron_list = current->next;

			current = current->next;
			sgen_free_internal (tmp, INTERNAL_MEM_EPHEMERON_LINK);
		} else {
			prev = current;
			current = current->next;
		}
	}
}

/* LOCKING: requires that the GC lock is held */
void
sgen_client_clear_unreachable_ephemerons (ScanCopyContext ctx)
{
	CopyOrMarkObjectFunc copy_func = ctx.ops->copy_or_mark_object;
	SgenGrayQueue *queue = ctx.queue;
	EphemeronLinkNode *current = ephemeron_list, *prev = NULL;
	Ephemeron *cur, *array_end;
	GCObject *tombstone;

	while (current) {
		MonoArray *array = current->array;

		if (!sgen_is_object_alive_for_current_gen ((GCObject*)array)) {
			EphemeronLinkNode *tmp = current;

			SGEN_LOG (5, "Dead Ephemeron array at %p", array);

			if (prev)
				prev->next = current->next;
			else
				ephemeron_list = current->next;

			current = current->next;
			sgen_free_internal (tmp, INTERNAL_MEM_EPHEMERON_LINK);

			continue;
		}

		copy_func ((GCObject**)&array, queue);
		current->array = array;

		SGEN_LOG (5, "Clearing unreachable entries for ephemeron array at %p", array);

		cur = mono_array_addr_internal (array, Ephemeron, 0);
		array_end = cur + mono_array_length_internal (array);
		tombstone = SGEN_LOAD_VTABLE ((GCObject*)array)->domain->ephemeron_tombstone;

		for (; cur < array_end; ++cur) {
			GCObject *key = cur->key;

			if (!key || key == tombstone)
				continue;

			SGEN_LOG (5, "[%zd] key %p (%s) value %p (%s)", cur - mono_array_addr_internal (array, Ephemeron, 0),
				key, sgen_is_object_alive_for_current_gen (key) ? "reachable" : "unreachable",
				cur->value, cur->value && sgen_is_object_alive_for_current_gen (cur->value) ? "reachable" : "unreachable");

			if (!sgen_is_object_alive_for_current_gen (key)) {
				cur->key = tombstone;
				cur->value = NULL;
				continue;
			}
		}

		prev = current;
		current = current->next;
	}
}
/*
 * LOCKING: requires that the GC lock is held
 *
 * Limitations: We scan all ephemerons on every collection since the current design doesn't allow for a simple nursery/mature split.
 */
gboolean
sgen_client_mark_ephemerons (ScanCopyContext ctx)
{
	CopyOrMarkObjectFunc copy_func = ctx.ops->copy_or_mark_object;
	SgenGrayQueue *queue = ctx.queue;
	gboolean nothing_marked = TRUE;
	EphemeronLinkNode *current = ephemeron_list;
	Ephemeron *cur, *array_end;
	GCObject *tombstone;

	for (current = ephemeron_list; current; current = current->next) {
		MonoArray *array = current->array;
		SGEN_LOG (5, "Ephemeron array at %p", array);

		/* It has to be alive */
		if (!sgen_is_object_alive_for_current_gen ((GCObject*)array)) {
			SGEN_LOG (5, "\tnot reachable");
			continue;
		}

		copy_func ((GCObject**)&array, queue);

		cur = mono_array_addr_internal (array, Ephemeron, 0);
		array_end = cur + mono_array_length_internal (array);
		tombstone = SGEN_LOAD_VTABLE ((GCObject*)array)->domain->ephemeron_tombstone;

		for (; cur < array_end; ++cur) {
			GCObject *key = cur->key;

			if (!key || key == tombstone)
				continue;

			SGEN_LOG (5, "[%zd] key %p (%s) value %p (%s)", cur - mono_array_addr_internal (array, Ephemeron, 0),
				key, sgen_is_object_alive_for_current_gen (key) ? "reachable" : "unreachable",
				cur->value, cur->value && sgen_is_object_alive_for_current_gen (cur->value) ? "reachable" : "unreachable");

			if (sgen_is_object_alive_for_current_gen (key)) {
				GCObject *value = cur->value;

				copy_func (&cur->key, queue);
				if (value) {
					if (!sgen_is_object_alive_for_current_gen (value)) {
						nothing_marked = FALSE;
						sgen_binary_protocol_ephemeron_ref (current, key, value);
					}
					copy_func (&cur->value, queue);
				}
			}
		}
	}

	SGEN_LOG (5, "Ephemeron run finished. Is it done %d", nothing_marked);
	return nothing_marked;
}
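/*
 * The caller runs this to a fixed point: marking a previously unreachable
 * value can make another ephemeron's key reachable, so passes are repeated
 * until one marks nothing new, i.e. until this returns TRUE.
 */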
gboolean
mono_gc_ephemeron_array_add (MonoObject *obj)
{
	EphemeronLinkNode *node;

	LOCK_GC;

	node = (EphemeronLinkNode *)sgen_alloc_internal (INTERNAL_MEM_EPHEMERON_LINK);
	if (!node) {
		UNLOCK_GC;
		return FALSE;
	}
	node->array = (MonoArray*)obj;
	node->next = ephemeron_list;
	ephemeron_list = node;

	SGEN_LOG (5, "Registered ephemeron array %p", obj);

	UNLOCK_GC;
	return TRUE;
}

/*
 * Appdomain handling
 */

static gboolean
need_remove_object_for_domain (GCObject *start, MonoDomain *domain)
{
	if (mono_object_domain (start) == domain) {
		SGEN_LOG (4, "Need to cleanup object %p", start);
		sgen_binary_protocol_cleanup (start, (gpointer)SGEN_LOAD_VTABLE (start), sgen_safe_object_get_size ((GCObject*)start));
		return TRUE;
	}
	return FALSE;
}

static void
process_object_for_domain_clearing (GCObject *start, MonoDomain *domain)
{
	MonoVTable *vt = SGEN_LOAD_VTABLE (start);
	if (vt->klass == mono_defaults.internal_thread_class)
		g_assert (mono_object_domain (start) == mono_get_root_domain ());
	/* The object could be a proxy for an object in the domain
	   we're deleting. */
#ifndef DISABLE_REMOTING
	if (m_class_get_supertypes (mono_defaults.real_proxy_class) && mono_class_has_parent_fast (vt->klass, mono_defaults.real_proxy_class)) {
		MonoObject *server = ((MonoRealProxy*)start)->unwrapped_server;

		/* The server could already have been zeroed out, so
		   we need to check for that, too. */
		if (server && (!SGEN_LOAD_VTABLE (server) || mono_object_domain (server) == domain)) {
			SGEN_LOG (4, "Cleaning up remote pointer in %p to object %p", start, server);
			((MonoRealProxy*)start)->unwrapped_server = NULL;
		}
	}
#endif
}

static gboolean
clear_domain_process_object (GCObject *obj, MonoDomain *domain)
{
	gboolean remove;

	process_object_for_domain_clearing (obj, domain);
	remove = need_remove_object_for_domain (obj, domain);

	if (remove && obj->synchronisation) {
		guint32 dislink = mono_monitor_get_object_monitor_gchandle (obj);
		if (dislink)
			mono_gchandle_free_internal (dislink);
	}

	return remove;
}

static void
clear_domain_process_minor_object_callback (GCObject *obj, size_t size, MonoDomain *domain)
{
	if (clear_domain_process_object (obj, domain)) {
		CANARIFY_SIZE (size);
		memset (obj, 0, size);
	}
}

static void
clear_domain_process_major_object_callback (GCObject *obj, size_t size, MonoDomain *domain)
{
	clear_domain_process_object (obj, domain);
}

static void
clear_domain_free_major_non_pinned_object_callback (GCObject *obj, size_t size, MonoDomain *domain)
{
	if (need_remove_object_for_domain (obj, domain))
		sgen_major_collector.free_non_pinned_object (obj, size);
}

static void
clear_domain_free_major_pinned_object_callback (GCObject *obj, size_t size, MonoDomain *domain)
{
	if (need_remove_object_for_domain (obj, domain))
		sgen_major_collector.free_pinned_object (obj, size);
}

static void
sgen_finish_concurrent_work (const char *reason, gboolean stw)
{
	if (sgen_get_concurrent_collection_in_progress ())
		sgen_perform_collection (0, GENERATION_OLD, reason, TRUE, stw);
	SGEN_ASSERT (0, !sgen_get_concurrent_collection_in_progress (), "We just ordered a synchronous collection. Why are we collecting concurrently?");

	sgen_major_collector.finish_sweeping ();
}
/*
 * When appdomains are unloaded we can easily remove objects that have finalizers,
 * but all the others could still be present in random places on the heap.
 * We need a sweep to get rid of them even though it's going to be costly
 * with big heaps.
 * The reason we need to remove them is because we access the vtable and class
 * structures to know the object size and the reference bitmap: once the domain is
 * unloaded they point to random memory.
 */
void
mono_gc_clear_domain (MonoDomain * domain)
{
	LOSObject *bigobj, *prev;
	int i;

	LOCK_GC;

	sgen_binary_protocol_domain_unload_begin (domain);

	sgen_stop_world (0, FALSE);

	sgen_finish_concurrent_work ("clear domain", FALSE);

	sgen_process_fin_stage_entries ();

	sgen_clear_nursery_fragments ();

	FOREACH_THREAD_ALL (info) {
		mono_handle_stack_free_domain (info->client_info.info.handle_stack, domain);
	} FOREACH_THREAD_END

	if (sgen_mono_xdomain_checks && domain != mono_get_root_domain ()) {
		sgen_scan_for_registered_roots_in_domain (domain, ROOT_TYPE_NORMAL);
		sgen_scan_for_registered_roots_in_domain (domain, ROOT_TYPE_WBARRIER);
		sgen_check_for_xdomain_refs ();
	}

	/* Ephemerons and dislinks must be processed before LOS since they might end up pointing
	   to memory returned to the OS. */
	null_ephemerons_for_domain (domain);
	sgen_null_links_for_domain (domain);

	for (i = GENERATION_NURSERY; i < GENERATION_MAX; ++i)
		sgen_remove_finalizers_if (object_in_domain_predicate, domain, i);

	sgen_scan_area_with_callback (sgen_nursery_section->data, sgen_nursery_section->end_data,
			(IterateObjectCallbackFunc)clear_domain_process_minor_object_callback, domain, FALSE, TRUE);

	/* We need two passes over major and large objects because
	   freeing such objects might give their memory back to the OS
	   (in the case of large objects) or obliterate its vtable
	   (pinned objects with major-copying or pinned and non-pinned
	   objects with major-mark&sweep), but we might need to
	   dereference a pointer from an object to another object if
	   the first object is a proxy. */
	sgen_major_collector.iterate_objects (ITERATE_OBJECTS_SWEEP_ALL, (IterateObjectCallbackFunc)clear_domain_process_major_object_callback, domain);
	for (bigobj = sgen_los_object_list; bigobj; bigobj = bigobj->next)
		clear_domain_process_object ((GCObject*)bigobj->data, domain);

	prev = NULL;
	for (bigobj = sgen_los_object_list; bigobj;) {
		if (need_remove_object_for_domain ((GCObject*)bigobj->data, domain)) {
			LOSObject *to_free = bigobj;
			if (prev)
				prev->next = bigobj->next;
			else
				sgen_los_object_list = bigobj->next;
			bigobj = bigobj->next;
			SGEN_LOG (4, "Freeing large object %p", bigobj->data);
			sgen_los_free_object (to_free);
			continue;
		}
		prev = bigobj;
		bigobj = bigobj->next;
	}
	sgen_major_collector.iterate_objects (ITERATE_OBJECTS_SWEEP_NON_PINNED, (IterateObjectCallbackFunc)clear_domain_free_major_non_pinned_object_callback, domain);
	sgen_major_collector.iterate_objects (ITERATE_OBJECTS_SWEEP_PINNED, (IterateObjectCallbackFunc)clear_domain_free_major_pinned_object_callback, domain);

	if (domain == mono_get_root_domain ()) {
		sgen_pin_stats_report ();
		sgen_object_layout_dump (stdout);
	}

	sgen_restart_world (0, FALSE);

	sgen_binary_protocol_domain_unload_end (domain);
	sgen_binary_protocol_flush_buffers (FALSE);

	UNLOCK_GC;
}
/*
 * Allocation
 */

MonoObject*
mono_gc_alloc_obj (MonoVTable *vtable, size_t size)
{
	MonoObject *obj = sgen_alloc_obj (vtable, size);

	if (G_UNLIKELY (mono_profiler_allocations_enabled ()) && obj)
		MONO_PROFILER_RAISE (gc_allocation, (obj));

	return obj;
}

MonoObject*
mono_gc_alloc_pinned_obj (MonoVTable *vtable, size_t size)
{
	MonoObject *obj = sgen_alloc_obj_pinned (vtable, size);

	if (G_UNLIKELY (mono_profiler_allocations_enabled ()) && obj)
		MONO_PROFILER_RAISE (gc_allocation, (obj));

	return obj;
}

MonoObject*
mono_gc_alloc_mature (MonoVTable *vtable, size_t size)
{
	MonoObject *obj = sgen_alloc_obj_mature (vtable, size);

	if (G_UNLIKELY (mono_profiler_allocations_enabled ()) && obj)
		MONO_PROFILER_RAISE (gc_allocation, (obj));

	return obj;
}

/**
 * mono_gc_alloc_fixed:
 */
MonoObject*
mono_gc_alloc_fixed (size_t size, MonoGCDescriptor descr, MonoGCRootSource source, void *key, const char *msg)
{
	/* FIXME: do a single allocation */
	void *res = g_calloc (1, size);
	if (!res)
		return NULL;
	if (!mono_gc_register_root ((char *)res, size, descr, source, key, msg)) {
		g_free (res);
		res = NULL;
	}
	return (MonoObject*)res;
}

MonoObject*
mono_gc_alloc_fixed_no_descriptor (size_t size, MonoGCRootSource source, void *key, const char *msg)
{
	return mono_gc_alloc_fixed (size, 0, source, key, msg);
}

/**
 * mono_gc_free_fixed:
 */
void
mono_gc_free_fixed (void* addr)
{
	mono_gc_deregister_root ((char *)addr);
	g_free (addr);
}
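/*
 * Illustrative pairing (not from this file): a block of GC-visible pointer
 * slots allocated with mono_gc_alloc_fixed () stays registered as a root
 * until released with mono_gc_free_fixed (), which both deregisters and
 * frees it. The 'descr' descriptor and root-source value here are only
 * placeholders:
 *
 *     MonoObject **slots = (MonoObject **) mono_gc_alloc_fixed (
 *             16 * sizeof (MonoObject*), descr, MONO_ROOT_SOURCE_EXTERNAL, NULL, "example slots");
 *     ... use slots [0..15] ...
 *     mono_gc_free_fixed (slots);
 */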
/*
 * Managed allocator
 */

static MonoMethod* alloc_method_cache [ATYPE_NUM];
static MonoMethod* slowpath_alloc_method_cache [ATYPE_NUM];
static MonoMethod* profiler_alloc_method_cache [ATYPE_NUM];
static gboolean use_managed_allocator = TRUE;

#ifdef MANAGED_ALLOCATION
/* FIXME: Do this in the JIT, where specialized allocation sequences can be created
 * for each class. This is currently not easy to do, as it is hard to generate basic
 * blocks + branches, but it is easy with the linear IL codebase.
 *
 * For this to work we'd need to solve the TLAB race, first. Now we
 * require the allocator to be in a few known methods to make sure
 * that they are executed atomically via the restart mechanism.
 */
static MonoMethod*
create_allocator (int atype, ManagedAllocatorVariant variant)
{
	gboolean slowpath = variant == MANAGED_ALLOCATOR_SLOW_PATH;
	gboolean profiler = variant == MANAGED_ALLOCATOR_PROFILER;
	MonoMethodBuilder *mb;
	MonoMethod *res;
	MonoMethodSignature *csig;
	const char *name = NULL;
	WrapperInfo *info;
	int num_params, i;

	if (atype == ATYPE_SMALL) {
		name = slowpath ? "SlowAllocSmall" : (profiler ? "ProfilerAllocSmall" : "AllocSmall");
	} else if (atype == ATYPE_NORMAL) {
		name = slowpath ? "SlowAlloc" : (profiler ? "ProfilerAlloc" : "Alloc");
	} else if (atype == ATYPE_VECTOR) {
		name = slowpath ? "SlowAllocVector" : (profiler ? "ProfilerAllocVector" : "AllocVector");
	} else if (atype == ATYPE_STRING) {
		name = slowpath ? "SlowAllocString" : (profiler ? "ProfilerAllocString" : "AllocString");
	} else {
		g_assert_not_reached ();
	}

	if (atype == ATYPE_NORMAL)
		num_params = 1;
	else
		num_params = 2;

	MonoType *int_type = mono_get_int_type ();
	csig = mono_metadata_signature_alloc (mono_defaults.corlib, num_params);
	if (atype == ATYPE_STRING) {
		csig->ret = m_class_get_byval_arg (mono_defaults.string_class);
		csig->params [0] = int_type;
		csig->params [1] = mono_get_int32_type ();
	} else {
		csig->ret = mono_get_object_type ();
		for (i = 0; i < num_params; i++)
			csig->params [i] = int_type;
	}

	mb = mono_mb_new (mono_defaults.object_class, name, MONO_WRAPPER_ALLOC);

	get_sgen_mono_cb ()->emit_managed_allocater (mb, slowpath, profiler, atype);

	info = mono_wrapper_info_create (mb, WRAPPER_SUBTYPE_NONE);
	info->d.alloc.gc_name = "sgen";
	info->d.alloc.alloc_type = atype;

	res = mono_mb_create (mb, csig, 8, info);
	mono_mb_free (mb);

	return res;
}
#endif
int
mono_gc_get_aligned_size_for_allocator (int size)
{
	return SGEN_ALIGN_UP (size);
}
/*
 * Generate an allocator method implementing the fast path of mono_gc_alloc_obj ().
 * The signature of the called method is:
 *	object allocate (MonoVTable *vtable)
 */
MonoMethod*
mono_gc_get_managed_allocator (MonoClass *klass, gboolean for_box, gboolean known_instance_size)
{
#ifdef MANAGED_ALLOCATION
	ManagedAllocatorVariant variant = mono_profiler_allocations_enabled () ?
		MANAGED_ALLOCATOR_PROFILER : MANAGED_ALLOCATOR_REGULAR;

	if (sgen_collect_before_allocs)
		return NULL;
	if (m_class_get_instance_size (klass) > sgen_tlab_size)
		return NULL;
	if (known_instance_size && ALIGN_TO (m_class_get_instance_size (klass), SGEN_ALLOC_ALIGN) >= SGEN_MAX_SMALL_OBJ_SIZE)
		return NULL;
	if (mono_class_has_finalizer (klass) || mono_class_is_marshalbyref (klass) || m_class_has_weak_fields (klass))
		return NULL;
	if (m_class_get_rank (klass))
		return NULL;
	if (m_class_get_byval_arg (klass)->type == MONO_TYPE_STRING)
		return mono_gc_get_managed_allocator_by_type (ATYPE_STRING, variant);
	/* Generic classes have dynamic field sizes and can go above MAX_SMALL_OBJ_SIZE. */
	if (known_instance_size)
		return mono_gc_get_managed_allocator_by_type (ATYPE_SMALL, variant);
	else
		return mono_gc_get_managed_allocator_by_type (ATYPE_NORMAL, variant);
#else
	return NULL;
#endif
}
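/*
 * The NULL returns above force a fall-back to the C allocation path:
 * classes with finalizers, remoting proxies, or weak fields need extra
 * bookkeeping at allocation time that the inlined managed fast path does
 * not perform, and an object larger than a TLAB can never be satisfied
 * from one.
 */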
MonoMethod*
mono_gc_get_managed_array_allocator (MonoClass *klass)
{
#ifdef MANAGED_ALLOCATION
	if (m_class_get_rank (klass) != 1)
		return NULL;
	if (sgen_has_per_allocation_action)
		return NULL;
	g_assert (!mono_class_has_finalizer (klass) && !mono_class_is_marshalbyref (klass));

	return mono_gc_get_managed_allocator_by_type (ATYPE_VECTOR, mono_profiler_allocations_enabled () ?
			MANAGED_ALLOCATOR_PROFILER : MANAGED_ALLOCATOR_REGULAR);
#else
	return NULL;
#endif
}

void
sgen_set_use_managed_allocator (gboolean flag)
{
	use_managed_allocator = flag;
}

MonoMethod*
mono_gc_get_managed_allocator_by_type (int atype, ManagedAllocatorVariant variant)
{
#ifdef MANAGED_ALLOCATION
	MonoMethod *res;
	MonoMethod **cache;

	if (variant != MANAGED_ALLOCATOR_SLOW_PATH && !use_managed_allocator)
		return NULL;

	switch (variant) {
	case MANAGED_ALLOCATOR_REGULAR: cache = alloc_method_cache; break;
	case MANAGED_ALLOCATOR_SLOW_PATH: cache = slowpath_alloc_method_cache; break;
	case MANAGED_ALLOCATOR_PROFILER: cache = profiler_alloc_method_cache; break;
	default: g_assert_not_reached (); break;
	}

	res = cache [atype];
	if (res)
		return res;

	res = create_allocator (atype, variant);
	LOCK_GC;
	if (cache [atype]) {
		mono_free_method (res);
		res = cache [atype];
	} else {
		mono_memory_barrier ();
		cache [atype] = res;
	}
	UNLOCK_GC;

	return res;
#else
	return NULL;
#endif
}

guint32
mono_gc_get_managed_allocator_types (void)
{
	return ATYPE_NUM;
}

gboolean
sgen_is_managed_allocator (MonoMethod *method)
{
	int i;

	for (i = 0; i < ATYPE_NUM; ++i)
		if (method == alloc_method_cache [i] || method == slowpath_alloc_method_cache [i] || method == profiler_alloc_method_cache [i])
			return TRUE;
	return FALSE;
}

gboolean
sgen_has_managed_allocator (void)
{
	int i;

	for (i = 0; i < ATYPE_NUM; ++i)
		if (alloc_method_cache [i] || slowpath_alloc_method_cache [i] || profiler_alloc_method_cache [i])
			return TRUE;
	return FALSE;
}
#define ARRAY_OBJ_INDEX(ptr,array,elem_size) (((char*)(ptr) - ((char*)(array) + G_STRUCT_OFFSET (MonoArray, vector))) / (elem_size))
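/*
 * Card table scanning: the heap is divided into fixed-size cards and the
 * write barrier dirties the card covering each mutated slot. The scan below
 * visits only the dirty cards of an array, mapping each card back to the
 * first array element it covers (via ARRAY_OBJ_INDEX above) and scanning
 * elements up to the card's end, instead of walking the whole array.
 */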
gboolean
sgen_client_cardtable_scan_object (GCObject *obj, guint8 *cards, ScanCopyContext ctx)
{
	MonoVTable *vt = SGEN_LOAD_VTABLE (obj);
	MonoClass *klass = vt->klass;

	SGEN_ASSERT (0, SGEN_VTABLE_HAS_REFERENCES (vt), "Why would we ever call this on reference-free objects?");

	if (vt->rank) {
		MonoArray *arr = (MonoArray*)obj;
		guint8 *card_data, *card_base;
		guint8 *card_data_end;
		char *obj_start = (char *)sgen_card_table_align_pointer (obj);
		mword bounds_size;
		mword obj_size = sgen_mono_array_size (vt, arr, &bounds_size, sgen_vtable_get_descriptor (vt));
		/* We don't want to scan the bounds entries at the end of multidimensional arrays */
		char *obj_end = (char*)obj + obj_size - bounds_size;
		size_t card_count;
		size_t extra_idx = 0;

		mword desc = (mword)m_class_get_gc_descr (m_class_get_element_class (klass));
		int elem_size = mono_array_element_size (klass);

#ifdef SGEN_OBJECT_LAYOUT_STATISTICS
		if (m_class_is_valuetype (m_class_get_element_class (klass)))
			sgen_object_layout_scanned_vtype_array ();
		else
			sgen_object_layout_scanned_ref_array ();
#endif

		if (cards)
			card_data = cards;
		else
			card_data = sgen_card_table_get_card_scan_address ((mword)obj);

		card_base = card_data;
		card_count = sgen_card_table_number_of_cards_in_range ((mword)obj, obj_size);

#ifdef SGEN_HAVE_OVERLAPPING_CARDS
LOOP_HEAD:
		card_data_end = card_base + card_count;

		/*
		 * Check for overflow and if so, scan only until the end of the shadow
		 * card table, leaving the rest for next iterations.
		 */
		if (!cards && card_data_end >= SGEN_SHADOW_CARDTABLE_END) {
			card_data_end = SGEN_SHADOW_CARDTABLE_END;
		}
		card_count -= (card_data_end - card_base);
#else
		card_data_end = card_data + card_count;
#endif

		card_data = sgen_find_next_card (card_data, card_data_end);
		for (; card_data < card_data_end; card_data = sgen_find_next_card (card_data + 1, card_data_end)) {
			size_t index;
			size_t idx = (card_data - card_base) + extra_idx;
			char *start = (char*)(obj_start + idx * CARD_SIZE_IN_BYTES);
			char *card_end = start + CARD_SIZE_IN_BYTES;
			char *first_elem, *elem;

			HEAVY_STAT (++los_marked_cards);

			if (!cards)
				sgen_card_table_prepare_card_for_scanning (card_data);

			card_end = MIN (card_end, obj_end);

			if (start <= (char*)arr->vector)
				index = 0;
			else
				index = ARRAY_OBJ_INDEX (start, obj, elem_size);

			elem = first_elem = (char*)mono_array_addr_with_size_fast ((MonoArray*)obj, elem_size, index);
			if (m_class_is_valuetype (m_class_get_element_class (klass))) {
				ScanVTypeFunc scan_vtype_func = ctx.ops->scan_vtype;

				for (; elem < card_end; elem += elem_size)
					scan_vtype_func (obj, elem, desc, ctx.queue BINARY_PROTOCOL_ARG (elem_size));
			} else {
				ScanPtrFieldFunc scan_ptr_field_func = ctx.ops->scan_ptr_field;

				HEAVY_STAT (++los_array_cards);
				for (; elem < card_end; elem += SIZEOF_VOID_P)
					scan_ptr_field_func (obj, (GCObject**)elem, ctx.queue);
			}

			sgen_binary_protocol_card_scan (first_elem, elem - first_elem);
		}

#ifdef SGEN_HAVE_OVERLAPPING_CARDS
		if (card_count > 0) {
			SGEN_ASSERT (0, card_data == SGEN_SHADOW_CARDTABLE_END, "Why we didn't stop at shadow cardtable end ?");
			extra_idx += card_data - card_base;
			card_base = card_data = sgen_shadow_cardtable;
			goto LOOP_HEAD;
		}
#endif

		return TRUE;
	}

	return FALSE;
}
/*
 * Array and string allocation
 */

MonoArray*
mono_gc_alloc_vector (MonoVTable *vtable, size_t size, uintptr_t max_length)
{
	MonoArray *arr;
	TLAB_ACCESS_INIT;

	if (!SGEN_CAN_ALIGN_UP (size))
		return NULL;

#ifndef DISABLE_CRITICAL_REGION
	ENTER_CRITICAL_REGION;
	arr = (MonoArray*)sgen_try_alloc_obj_nolock (vtable, size);
	if (arr) {
		/* This doesn't require fencing since EXIT_CRITICAL_REGION already does it for us */
		arr->max_length = (mono_array_size_t)max_length;
		EXIT_CRITICAL_REGION;
		goto done;
	}
	EXIT_CRITICAL_REGION;
#endif

	LOCK_GC;

	arr = (MonoArray*)sgen_alloc_obj_nolock (vtable, size);
	if (G_UNLIKELY (!arr)) {
		UNLOCK_GC;
		return NULL;
	}

	arr->max_length = (mono_array_size_t)max_length;

	UNLOCK_GC;

 done:
	if (G_UNLIKELY (mono_profiler_allocations_enabled ()))
		MONO_PROFILER_RAISE (gc_allocation, (&arr->obj));

	SGEN_ASSERT (6, SGEN_ALIGN_UP (size) == SGEN_ALIGN_UP (sgen_client_par_object_get_size (vtable, (GCObject*)arr)), "Vector has incorrect size.");
	return arr;
}

MonoArray*
mono_gc_alloc_array (MonoVTable *vtable, size_t size, uintptr_t max_length, uintptr_t bounds_size)
{
	MonoArray *arr;
	MonoArrayBounds *bounds;
	TLAB_ACCESS_INIT;

	if (!SGEN_CAN_ALIGN_UP (size))
		return NULL;

#ifndef DISABLE_CRITICAL_REGION
	ENTER_CRITICAL_REGION;
	arr = (MonoArray*)sgen_try_alloc_obj_nolock (vtable, size);
	if (arr) {
		/* This doesn't require fencing since EXIT_CRITICAL_REGION already does it for us */
		arr->max_length = (mono_array_size_t)max_length;

		bounds = (MonoArrayBounds*)((char*)arr + size - bounds_size);
		arr->bounds = bounds;
		EXIT_CRITICAL_REGION;
		goto done;
	}
	EXIT_CRITICAL_REGION;
#endif

	LOCK_GC;

	arr = (MonoArray*)sgen_alloc_obj_nolock (vtable, size);
	if (G_UNLIKELY (!arr)) {
		UNLOCK_GC;
		return NULL;
	}

	arr->max_length = (mono_array_size_t)max_length;

	bounds = (MonoArrayBounds*)((char*)arr + size - bounds_size);
	arr->bounds = bounds;

	UNLOCK_GC;

 done:
	if (G_UNLIKELY (mono_profiler_allocations_enabled ()))
		MONO_PROFILER_RAISE (gc_allocation, (&arr->obj));

	SGEN_ASSERT (6, SGEN_ALIGN_UP (size) == SGEN_ALIGN_UP (sgen_client_par_object_get_size (vtable, (GCObject*)arr)), "Array has incorrect size.");
	return arr;
}

MonoString*
mono_gc_alloc_string (MonoVTable *vtable, size_t size, gint32 len)
{
	MonoString *str;
	TLAB_ACCESS_INIT;

	if (!SGEN_CAN_ALIGN_UP (size))
		return NULL;

#ifndef DISABLE_CRITICAL_REGION
	ENTER_CRITICAL_REGION;
	str = (MonoString*)sgen_try_alloc_obj_nolock (vtable, size);
	if (str) {
		/* This doesn't require fencing since EXIT_CRITICAL_REGION already does it for us */
		str->length = len;
		EXIT_CRITICAL_REGION;
		goto done;
	}
	EXIT_CRITICAL_REGION;
#endif

	LOCK_GC;

	str = (MonoString*)sgen_alloc_obj_nolock (vtable, size);
	if (G_UNLIKELY (!str)) {
		UNLOCK_GC;
		return NULL;
	}

	str->length = len;

	UNLOCK_GC;

 done:
	if (G_UNLIKELY (mono_profiler_allocations_enabled ()))
		MONO_PROFILER_RAISE (gc_allocation, (&str->object));

	return str;
}

/*
 * Strings
 */

void
mono_gc_set_string_length (MonoString *str, gint32 new_length)
{
	mono_unichar2 *new_end = str->chars + new_length;

	/* Zero the discarded part of the string. This null-delimits the string and allows
	 * the space to be reclaimed by SGen. */

	if (sgen_nursery_canaries_enabled () && sgen_ptr_in_nursery (str)) {
		CHECK_CANARY_FOR_OBJECT ((GCObject*)str, TRUE);
		memset (new_end, 0, (str->length - new_length + 1) * sizeof (mono_unichar2) + CANARY_SIZE);
		memcpy (new_end + 1, CANARY_STRING, CANARY_SIZE);
	} else {
		memset (new_end, 0, (str->length - new_length + 1) * sizeof (mono_unichar2));
	}

	str->length = new_length;
}
/*
 * Profiling
 */

#define GC_ROOT_NUM 32
#define SPECIAL_ADDRESS_FIN_QUEUE ((mono_byte*)1)
#define SPECIAL_ADDRESS_CRIT_FIN_QUEUE ((mono_byte*)2)
#define SPECIAL_ADDRESS_EPHEMERON ((mono_byte*)3)

typedef struct {
	int count;		/* must be the first field */
	void *addresses [GC_ROOT_NUM];
	void *objects [GC_ROOT_NUM];
} GCRootReport;
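/*
 * Roots are reported to the profiler in batches of up to GC_ROOT_NUM
 * entries: report_gc_root () below flushes the buffer through
 * MONO_PROFILER_RAISE (gc_roots, ...) whenever it fills up, and each
 * reporting pass flushes once more at the end via notify_gc_roots ().
 */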
static void
notify_gc_roots (GCRootReport *report)
{
	if (!report->count)
		return;
	MONO_PROFILER_RAISE (gc_roots, (report->count, (const mono_byte *const *)report->addresses, (MonoObject *const *) report->objects));
	report->count = 0;
}

static void
report_gc_root (GCRootReport *report, void *address, void *object)
{
	if (report->count == GC_ROOT_NUM)
		notify_gc_roots (report);
	report->addresses [report->count] = address;
	report->objects [report->count] = object;
	report->count++;
}

static void
single_arg_report_root (MonoObject **obj, void *gc_data)
{
	GCRootReport *report = (GCRootReport*)gc_data;
	if (*obj)
		report_gc_root (report, obj, *obj);
}

static void
two_args_report_root (void *address, MonoObject *obj, void *gc_data)
{
	GCRootReport *report = (GCRootReport*)gc_data;
	if (obj)
		report_gc_root (report, address, obj);
}

static void
precisely_report_roots_from (GCRootReport *report, void** start_root, void** end_root, mword desc)
{
	switch (desc & ROOT_DESC_TYPE_MASK) {
	case ROOT_DESC_BITMAP:
		desc >>= ROOT_DESC_TYPE_SHIFT;
		while (desc) {
			if ((desc & 1) && *start_root)
				report_gc_root (report, start_root, *start_root);
			desc >>= 1;
			start_root++;
		}
		return;
	case ROOT_DESC_COMPLEX: {
		gsize *bitmap_data = (gsize *)sgen_get_complex_descriptor_bitmap (desc);
		gsize bwords = (*bitmap_data) - 1;
		void **start_run = start_root;
		bitmap_data++;
		while (bwords-- > 0) {
			gsize bmap = *bitmap_data++;
			void **objptr = start_run;
			while (bmap) {
				if ((bmap & 1) && *objptr)
					report_gc_root (report, objptr, *objptr);
				bmap >>= 1;
				++objptr;
			}
			start_run += GC_BITS_PER_WORD;
		}
		break;
	}
	case ROOT_DESC_VECTOR: {
		void **p;

		for (p = start_root; p < end_root; p++) {
			if (*p)
				report_gc_root (report, p, *p);
		}
		break;
	}
	case ROOT_DESC_USER: {
		MonoGCRootMarkFunc marker = (MonoGCRootMarkFunc)sgen_get_user_descriptor_func (desc);

		if ((void*)marker == (void*)sgen_mark_normal_gc_handles)
			sgen_gc_handles_report_roots (two_args_report_root, report);
		else
			marker ((MonoObject**)start_root, single_arg_report_root, report);
		break;
	}
	case ROOT_DESC_RUN_LEN:
		g_assert_not_reached ();
	default:
		g_assert_not_reached ();
	}
}

static void
report_pinning_roots (GCRootReport *report, void **start, void **end)
{
	while (start < end) {
		mword addr = (mword)*start;
		addr &= ~(SGEN_ALLOC_ALIGN - 1);
		if (addr)
			report_gc_root (report, start, (void*)addr);

		start++;
	}
}

static SgenPointerQueue pinned_objects = SGEN_POINTER_QUEUE_INIT (INTERNAL_MEM_MOVED_OBJECT);
static mword lower_bound, upper_bound;

static GCObject*
find_pinned_obj (char *addr)
{
	size_t idx = sgen_pointer_queue_search (&pinned_objects, addr);

	if (idx != pinned_objects.next_slot) {
		if (pinned_objects.data [idx] == addr)
			return (GCObject*)pinned_objects.data [idx];
		if (idx == 0)
			return NULL;
	}

	GCObject *obj = (GCObject*)pinned_objects.data [idx - 1];
	if (addr > (char*)obj && addr < ((char*)obj + sgen_safe_object_get_size (obj)))
		return obj;
	return NULL;
}
/*
 * We pass @root_report_address so registers are properly accounted towards their thread
 */
static void
report_conservative_roots (GCRootReport *report, void *root_report_address, void **start, void **end)
{
	while (start < end) {
		mword addr = (mword)*start;
		addr &= ~(SGEN_ALLOC_ALIGN - 1);

		if (addr < lower_bound || addr > upper_bound) {
			++start;
			continue;
		}

		GCObject *obj = find_pinned_obj ((char*)addr);
		if (obj)
			report_gc_root (report, root_report_address, obj);
		start++;
	}
}
typedef struct {
	gboolean precise;
	GCRootReport *report;
	SgenThreadInfo *info;
} ReportHandleStackRoot;

static void
report_handle_stack_root (gpointer *ptr, gpointer user_data)
{
	ReportHandleStackRoot *ud = (ReportHandleStackRoot*)user_data;
	GCRootReport *report = ud->report;
	gpointer addr = ud->info->client_info.info.handle_stack;

	// Note: We know that *ptr != NULL.
	if (ud->precise)
		report_gc_root (report, addr, *ptr);
	else
		report_conservative_roots (report, addr, ptr, ptr + 1);
}

static void
report_handle_stack_roots (GCRootReport *report, SgenThreadInfo *info, gboolean precise)
{
	ReportHandleStackRoot ud;
	memset (&ud, 0, sizeof (ud));
	ud.precise = precise;
	ud.report = report;
	ud.info = info;

	mono_handle_stack_scan (info->client_info.info.handle_stack, report_handle_stack_root, &ud, ud.precise, FALSE);
}

static void
report_stack_roots (void)
{
	GCRootReport report = {0};
	FOREACH_THREAD_EXCLUDE (info, MONO_THREAD_INFO_FLAGS_NO_GC) {
		void *aligned_stack_start;

		if (info->client_info.skip) {
			continue;
		} else if (!mono_thread_info_is_live (info)) {
			continue;
		} else if (!info->client_info.stack_start) {
			continue;
		}

		g_assert (info->client_info.stack_start);
		g_assert (info->client_info.info.stack_end);

		aligned_stack_start = (void*)(mword) ALIGN_TO ((mword)info->client_info.stack_start, SIZEOF_VOID_P);
#ifdef HOST_WIN32
		/* Windows uses a guard page before the committed stack memory pages to detect when the
		   stack needs to be grown. If we suspend a thread just after a function prolog has
		   decremented the stack pointer to point into the guard page but before the thread has
		   been able to read or write to that page, starting the stack scan at aligned_stack_start
		   will raise a STATUS_GUARD_PAGE_VIOLATION and the process will crash. This code uses
		   VirtualQuery() to determine whether stack_start points into the guard page and then
		   updates aligned_stack_start to point at the next non-guard page. */
		MEMORY_BASIC_INFORMATION mem_info;
		SIZE_T result = VirtualQuery (info->client_info.stack_start, &mem_info, sizeof(mem_info));
		g_assert (result != 0);
		if (mem_info.Protect & PAGE_GUARD) {
			aligned_stack_start = ((char*) mem_info.BaseAddress) + mem_info.RegionSize;
		}
#endif

		g_assert (info->client_info.suspend_done);

		report_conservative_roots (&report, aligned_stack_start, (void **)aligned_stack_start, (void **)info->client_info.info.stack_end);
		report_conservative_roots (&report, aligned_stack_start, (void**)&info->client_info.ctx, (void**)(&info->client_info.ctx + 1));

		report_handle_stack_roots (&report, info, FALSE);
		report_handle_stack_roots (&report, info, TRUE);
	} FOREACH_THREAD_END

	notify_gc_roots (&report);
}
static void
report_pin_queue (void)
{
	lower_bound = SIZE_MAX;
	upper_bound = 0;

	// sort the addresses
	sgen_pointer_queue_sort_uniq (&pinned_objects);

	for (int i = 0; i < pinned_objects.next_slot; ++i) {
		GCObject *obj = (GCObject*)pinned_objects.data [i];
		ssize_t size = sgen_safe_object_get_size (obj);

		ssize_t addr = (ssize_t)obj;
		lower_bound = MIN (lower_bound, addr);
		upper_bound = MAX (upper_bound, addr + size);
	}

	report_stack_roots ();
	sgen_pointer_queue_clear (&pinned_objects);
}

static void
report_finalizer_roots_from_queue (SgenPointerQueue *queue, void* queue_address)
{
	GCRootReport report;
	size_t i;

	report.count = 0;
	for (i = 0; i < queue->next_slot; ++i) {
		void *obj = queue->data [i];
		if (!obj)
			continue;
		report_gc_root (&report, queue_address, obj);
	}
	notify_gc_roots (&report);
}

static void
report_registered_roots_by_type (int root_type)
{
	GCRootReport report = { 0 };
	void **start_root;
	RootRecord *root;
	report.count = 0;
	SGEN_HASH_TABLE_FOREACH (&sgen_roots_hash [root_type], void **, start_root, RootRecord *, root) {
		SGEN_LOG (6, "Profiler root scan %p-%p (desc: %p)", start_root, root->end_root, (void*)(intptr_t)root->root_desc);
		if (root_type == ROOT_TYPE_PINNED)
			report_pinning_roots (&report, start_root, (void**)root->end_root);
		else
			precisely_report_roots_from (&report, start_root, (void**)root->end_root, root->root_desc);
	} SGEN_HASH_TABLE_FOREACH_END;
	notify_gc_roots (&report);
}

static void
report_registered_roots (void)
{
	for (int i = 0; i < ROOT_TYPE_NUM; ++i)
		report_registered_roots_by_type (i);
}

static void
report_ephemeron_roots (void)
{
	EphemeronLinkNode *current = ephemeron_list;
	Ephemeron *cur, *array_end;
	GCObject *tombstone;
	GCRootReport report = { 0 };

	for (current = ephemeron_list; current; current = current->next) {
		MonoArray *array = current->array;

		if (!sgen_is_object_alive_for_current_gen ((GCObject*)array))
			continue;

		cur = mono_array_addr_internal (array, Ephemeron, 0);
		array_end = cur + mono_array_length_internal (array);
		tombstone = SGEN_LOAD_VTABLE ((GCObject*)array)->domain->ephemeron_tombstone;

		for (; cur < array_end; ++cur) {
			GCObject *key = cur->key;

			if (!key || key == tombstone)
				continue;

			if (cur->value && sgen_is_object_alive_for_current_gen (key))
				report_gc_root (&report, SPECIAL_ADDRESS_EPHEMERON, cur->value);
		}
	}

	notify_gc_roots (&report);
}

static void
sgen_report_all_roots (SgenPointerQueue *fin_ready_queue, SgenPointerQueue *critical_fin_queue)
{
	if (!MONO_PROFILER_ENABLED (gc_roots))
		return;

	report_registered_roots ();
	report_ephemeron_roots ();
	report_pin_queue ();
	report_finalizer_roots_from_queue (fin_ready_queue, SPECIAL_ADDRESS_FIN_QUEUE);
	report_finalizer_roots_from_queue (critical_fin_queue, SPECIAL_ADDRESS_CRIT_FIN_QUEUE);
}

void
sgen_client_pinning_start (void)
{
	if (!MONO_PROFILER_ENABLED (gc_roots))
		return;

	sgen_pointer_queue_clear (&pinned_objects);
}

void
sgen_client_pinning_end (void)
{
	if (!MONO_PROFILER_ENABLED (gc_roots))
		return;
}

void
sgen_client_nursery_objects_pinned (void **definitely_pinned, int count)
{
	if (!MONO_PROFILER_ENABLED (gc_roots))
		return;

	for (int i = 0; i < count; ++i)
		sgen_pointer_queue_add (&pinned_objects, definitely_pinned [i]);
}

void
sgen_client_pinned_los_object (GCObject *obj)
{
	if (!MONO_PROFILER_ENABLED (gc_roots))
		return;

	sgen_pointer_queue_add (&pinned_objects, obj);
}

void
sgen_client_pinned_cemented_object (GCObject *obj)
{
	if (!MONO_PROFILER_ENABLED (gc_roots))
		return;

	// TODO: How do we report this in a way that makes sense?
}

void
sgen_client_pinned_major_heap_object (GCObject *obj)
{
	if (!MONO_PROFILER_ENABLED (gc_roots))
		return;

	sgen_pointer_queue_add (&pinned_objects, obj);
}

void
sgen_client_collecting_minor_report_roots (SgenPointerQueue *fin_ready_queue, SgenPointerQueue *critical_fin_queue)
{
	sgen_report_all_roots (fin_ready_queue, critical_fin_queue);
}

void
sgen_client_collecting_major_report_roots (SgenPointerQueue *fin_ready_queue, SgenPointerQueue *critical_fin_queue)
{
	sgen_report_all_roots (fin_ready_queue, critical_fin_queue);
}
#define MOVED_OBJECTS_NUM 64
static void *moved_objects [MOVED_OBJECTS_NUM];
static int moved_objects_idx = 0;

static SgenPointerQueue moved_objects_queue = SGEN_POINTER_QUEUE_INIT (INTERNAL_MEM_MOVED_OBJECT);

void
mono_sgen_register_moved_object (void *obj, void *destination)
{
	/*
	 * This function can be called from SGen's worker threads. We want to try
	 * and avoid exposing those threads to the profiler API, so queue up move
	 * events and send them later when the main GC thread calls
	 * mono_sgen_gc_event_moves ().
	 *
	 * TODO: Once SGen has multiple worker threads, we need to switch to a
	 * lock-free data structure for the queue as multiple threads will be
	 * adding to it at the same time.
	 */
	if (sgen_workers_is_worker_thread (mono_native_thread_id_get ())) {
		sgen_pointer_queue_add (&moved_objects_queue, obj);
		sgen_pointer_queue_add (&moved_objects_queue, destination);
	} else {
		if (moved_objects_idx == MOVED_OBJECTS_NUM) {
			MONO_PROFILER_RAISE (gc_moves, ((MonoObject **) moved_objects, moved_objects_idx));
			moved_objects_idx = 0;
		}

		moved_objects [moved_objects_idx++] = obj;
		moved_objects [moved_objects_idx++] = destination;
	}
}

void
mono_sgen_gc_event_moves (void)
{
	while (!sgen_pointer_queue_is_empty (&moved_objects_queue)) {
		void *dst = sgen_pointer_queue_pop (&moved_objects_queue);
		void *src = sgen_pointer_queue_pop (&moved_objects_queue);

		mono_sgen_register_moved_object (src, dst);
	}

	if (moved_objects_idx) {
		MONO_PROFILER_RAISE (gc_moves, ((MonoObject **) moved_objects, moved_objects_idx));
		moved_objects_idx = 0;
	}
}
/*
 * Heap walking
 */

#define REFS_SIZE 128
typedef struct {
	void *data;
	MonoGCReferences callback;
	int flags;
	int count;
	int called;
	MonoObject *refs [REFS_SIZE];
	uintptr_t offsets [REFS_SIZE];
} HeapWalkInfo;

#undef HANDLE_PTR
#define HANDLE_PTR(ptr,obj)	do {					\
		if (*(ptr)) {						\
			if (hwi->count == REFS_SIZE) {			\
				hwi->callback ((MonoObject*)start, mono_object_class (start), hwi->called? 0: size, hwi->count, hwi->refs, hwi->offsets, hwi->data); \
				hwi->count = 0;				\
				hwi->called = 1;			\
			}						\
			hwi->offsets [hwi->count] = (char*)(ptr)-(char*)start; \
			hwi->refs [hwi->count++] = *(ptr);		\
		}							\
	} while (0)

static void
collect_references (HeapWalkInfo *hwi, GCObject *obj, size_t size)
{
	char *start = (char*)obj;
	mword desc = sgen_obj_get_descriptor (obj);

#include "sgen/sgen-scan-object.h"
}

static void
walk_references (GCObject *start, size_t size, void *data)
{
	HeapWalkInfo *hwi = (HeapWalkInfo *)data;
	hwi->called = 0;
	hwi->count = 0;
	collect_references (hwi, start, size);
	if (hwi->count || !hwi->called)
		hwi->callback (start, mono_object_class (start), hwi->called? 0: size, hwi->count, hwi->refs, hwi->offsets, hwi->data);
}
1967 * mono_gc_walk_heap:
1968 * \param flags flags for future use
1969 * \param callback a function pointer called for each object in the heap
1970 * \param data a user data pointer that is passed to callback
1971 * This function can be used to iterate over all the live objects in the heap;
1972 * for each object, \p callback is invoked, providing info about the object's
1973 * location in memory, its class, its size and the objects it references.
1974 * For each referenced object its offset from the object address is
1975 * reported in the offsets array.
1976 * The object references may be buffered, so the callback may be invoked
1977 * multiple times for the same object: in all but the first call, the size
1978 * argument will be zero.
1979 * Note that this function can be only called in the \c MONO_GC_EVENT_PRE_START_WORLD
1980 * profiler event handler.
1981 * \returns a non-zero value if the GC doesn't support heap walking
1984 mono_gc_walk_heap (int flags, MonoGCReferences callback, void *data)
1986 HeapWalkInfo hwi;
1988 hwi.flags = flags;
1989 hwi.callback = callback;
1990 hwi.data = data;
1992 sgen_clear_nursery_fragments ();
1993 sgen_scan_area_with_callback (sgen_nursery_section->data, sgen_nursery_section->end_data, walk_references, &hwi, FALSE, TRUE);
1995 sgen_major_collector.iterate_objects (ITERATE_OBJECTS_SWEEP_ALL, walk_references, &hwi);
1996 sgen_los_iterate_objects (walk_references, &hwi);
1998 return 0;
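/*
 * A minimal sketch (disabled) of a mono_gc_walk_heap () consumer; HeapStats
 * and heap_stats_cb are hypothetical names. A zero size identifies the
 * buffered continuation calls described above, so each object is only
 * counted once.
 */
#if 0
typedef struct {
	uintptr_t num_objects;
	uintptr_t num_refs;
	uintptr_t heap_bytes;
} HeapStats;

static int
heap_stats_cb (MonoObject *obj, MonoClass *klass, uintptr_t size, uintptr_t num, MonoObject **refs, uintptr_t *offsets, void *data)
{
	HeapStats *stats = (HeapStats *)data;

	if (size) {
		/* First callback for this object: count the object itself. */
		stats->num_objects++;
		stats->heap_bytes += size;
	}
	stats->num_refs += num;
	return 0;
}

/* To be called from a MONO_GC_EVENT_PRE_START_WORLD profiler handler: */
static void
dump_heap_stats (void)
{
	HeapStats stats = { 0 };
	mono_gc_walk_heap (0, heap_stats_cb, &stats);
}
#endif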
/*
 * Threads
 */

void
mono_gc_set_gc_callbacks (MonoGCCallbacks *callbacks)
{
	gc_callbacks = *callbacks;
}

MonoGCCallbacks *
mono_gc_get_gc_callbacks (void)
{
	return &gc_callbacks;
}

gpointer
mono_gc_thread_attach (SgenThreadInfo *info)
{
	return sgen_thread_attach (info);
}

void
sgen_client_thread_attach (SgenThreadInfo* info)
{
	mono_tls_set_sgen_thread_info (info);

	info->client_info.skip = FALSE;

	info->client_info.stack_start = NULL;

#ifdef SGEN_POSIX_STW
	info->client_info.stop_count = -1;
	info->client_info.signal = 0;
#endif

	memset (&info->client_info.ctx, 0, sizeof (MonoContext));

	if (mono_gc_get_gc_callbacks ()->thread_attach_func)
		info->client_info.runtime_data = mono_gc_get_gc_callbacks ()->thread_attach_func ();

	sgen_binary_protocol_thread_register ((gpointer)mono_thread_info_get_tid (info));

	SGEN_LOG (3, "registered thread %p (%p) stack end %p", info, (gpointer)mono_thread_info_get_tid (info), info->client_info.info.stack_end);

	info->client_info.info.handle_stack = mono_handle_stack_alloc ();
}
void
mono_gc_thread_detach_with_lock (SgenThreadInfo *info)
{
	sgen_thread_detach_with_lock (info);
}

void
sgen_client_thread_detach_with_lock (SgenThreadInfo *p)
{
	MonoNativeThreadId tid;

	mono_tls_set_sgen_thread_info (NULL);

	tid = mono_thread_info_get_tid (p);

	mono_threads_add_joinable_runtime_thread (&p->client_info.info);

	if (mono_gc_get_gc_callbacks ()->thread_detach_func) {
		mono_gc_get_gc_callbacks ()->thread_detach_func (p->client_info.runtime_data);
		p->client_info.runtime_data = NULL;
	}

	sgen_binary_protocol_thread_unregister ((gpointer)tid);
	SGEN_LOG (3, "unregister thread %p (%p)", p, (gpointer)tid);

	HandleStack *handles = p->client_info.info.handle_stack;
	p->client_info.info.handle_stack = NULL;
	mono_handle_stack_free (handles);
}
void
mono_gc_skip_thread_changing (gboolean skip)
{
	/*
	 * SGen's STW will respect the thread info flags, but we do need to take
	 * the GC lock when changing them. If we don't do this, SGen might end up
	 * trying to resume a thread that wasn't suspended because it had
	 * MONO_THREAD_INFO_FLAGS_NO_GC set when STW began.
	 */
	LOCK_GC;

	if (skip) {
		/*
		 * If we skip scanning a thread with a non-empty handle stack, we may move an
		 * object but fail to update the reference in the handle.
		 */
		HandleStack *stack = mono_thread_info_current ()->client_info.info.handle_stack;
		g_assert (stack == NULL || mono_handle_stack_is_empty (stack));
	}
}

void
mono_gc_skip_thread_changed (gboolean skip)
{
	/* Releases the lock taken in mono_gc_skip_thread_changing () above. */
	UNLOCK_GC;
}

gboolean
mono_gc_thread_in_critical_region (SgenThreadInfo *info)
{
	return info->client_info.in_critical_region;
}

/**
 * mono_gc_is_gc_thread:
 */
gboolean
mono_gc_is_gc_thread (void)
{
	gboolean result;
	LOCK_GC;
	result = mono_thread_info_current () != NULL;
	UNLOCK_GC;
	return result;
}

void
sgen_client_thread_register_worker (void)
{
	mono_thread_info_register_small_id ();
	mono_native_thread_set_name (mono_native_thread_id_get (), "SGen worker");
	mono_thread_set_name_windows (GetCurrentThread (), L"SGen worker");
}
/* Variables holding the start/end nursery so they won't have to be passed at every call */
static void *scan_area_arg_start, *scan_area_arg_end;

void
mono_gc_conservatively_scan_area (void *start, void *end)
{
	sgen_conservatively_pin_objects_from ((void **)start, (void **)end, scan_area_arg_start, scan_area_arg_end, PIN_TYPE_STACK);
}

void*
mono_gc_scan_object (void *obj, void *gc_data)
{
	ScanCopyContext *ctx = (ScanCopyContext *)gc_data;
	ctx->ops->copy_or_mark_object ((GCObject**)&obj, ctx->queue);
	return obj;
}

typedef struct {
	void **start_nursery;
	void **end_nursery;
} PinHandleStackInteriorPtrData;

/*
 * Called when we're scanning the handle stack imprecisely and we encounter a pointer
 * into the middle of an object.
 */
static void
pin_handle_stack_interior_ptrs (void **ptr_slot, void *user_data)
{
	PinHandleStackInteriorPtrData *ud = (PinHandleStackInteriorPtrData *)user_data;
	sgen_conservatively_pin_objects_from (ptr_slot, ptr_slot + 1, ud->start_nursery, ud->end_nursery, PIN_TYPE_STACK);
}
#ifdef HOST_WASM
extern gboolean mono_wasm_enable_gc;
#endif

/*
 * Mark from thread stacks and registers.
 */
void
sgen_client_scan_thread_data (void *start_nursery, void *end_nursery, gboolean precise, ScanCopyContext ctx)
{
	scan_area_arg_start = start_nursery;
	scan_area_arg_end = end_nursery;
#ifdef HOST_WASM
	// Under WASM we don't scan thread stacks, and we can't trust the values we find there either.
	if (!mono_wasm_enable_gc)
		return;
#endif

	FOREACH_THREAD_EXCLUDE (info, MONO_THREAD_INFO_FLAGS_NO_GC) {
		int skip_reason = 0;
		void *aligned_stack_start;

		if (info->client_info.skip) {
			SGEN_LOG (3, "Skipping dead thread %p, range: %p-%p, size: %zd", info, info->client_info.stack_start, info->client_info.info.stack_end, (char*)info->client_info.info.stack_end - (char*)info->client_info.stack_start);
			skip_reason = 1;
		} else if (!mono_thread_info_is_live (info)) {
			SGEN_LOG (3, "Skipping non-running thread %p, range: %p-%p, size: %zd (state %x)", info, info->client_info.stack_start, info->client_info.info.stack_end, (char*)info->client_info.info.stack_end - (char*)info->client_info.stack_start, info->client_info.info.thread_state);
			skip_reason = 3;
		} else if (!info->client_info.stack_start) {
			SGEN_LOG (3, "Skipping starting or detaching thread %p", info);
			skip_reason = 4;
		}

		sgen_binary_protocol_scan_stack ((gpointer)mono_thread_info_get_tid (info), info->client_info.stack_start, info->client_info.info.stack_end, skip_reason);

		if (skip_reason) {
			if (precise) {
				/* If we skip a thread with a non-empty handle stack and it then
				 * resumes running, we may potentially move an object but fail to
				 * update the reference in the handle.
				 */
				HandleStack *stack = info->client_info.info.handle_stack;
				g_assert (stack == NULL || mono_handle_stack_is_empty (stack));
			}
			continue;
		}

		g_assert (info->client_info.stack_start);
		g_assert (info->client_info.info.stack_end);

		aligned_stack_start = (void*)(mword) ALIGN_TO ((mword)info->client_info.stack_start, SIZEOF_VOID_P);
#ifdef HOST_WIN32
		/* Windows uses a guard page before the committed stack memory pages to detect when the
		   stack needs to be grown. If we suspend a thread just after a function prolog has
		   decremented the stack pointer to point into the guard page but before the thread has
		   been able to read or write to that page, starting the stack scan at aligned_stack_start
		   will raise a STATUS_GUARD_PAGE_VIOLATION and the process will crash. This code uses
		   VirtualQuery() to determine whether stack_start points into the guard page and then
		   updates aligned_stack_start to point at the next non-guard page. */
		MEMORY_BASIC_INFORMATION mem_info;
		SIZE_T result = VirtualQuery (info->client_info.stack_start, &mem_info, sizeof (mem_info));
		g_assert (result != 0);
		if (mem_info.Protect & PAGE_GUARD) {
			aligned_stack_start = ((char*) mem_info.BaseAddress) + mem_info.RegionSize;
		}
#endif

		g_assert (info->client_info.suspend_done);
		SGEN_LOG (3, "Scanning thread %p, range: %p-%p, size: %zd, pinned=%zd", info, info->client_info.stack_start, info->client_info.info.stack_end, (char*)info->client_info.info.stack_end - (char*)info->client_info.stack_start, sgen_get_pinned_count ());
		if (mono_gc_get_gc_callbacks ()->thread_mark_func && !conservative_stack_mark) {
			mono_gc_get_gc_callbacks ()->thread_mark_func (info->client_info.runtime_data, (guint8 *)aligned_stack_start, (guint8 *)info->client_info.info.stack_end, precise, &ctx);
		} else if (!precise) {
			if (!conservative_stack_mark) {
				fprintf (stderr, "Precise stack mark not supported - disabling.\n");
				conservative_stack_mark = TRUE;
			}
			//FIXME we should eventually use the new stack_mark from coop
			sgen_conservatively_pin_objects_from ((void **)aligned_stack_start, (void **)info->client_info.info.stack_end, start_nursery, end_nursery, PIN_TYPE_STACK);
		}

		if (!precise) {
			sgen_conservatively_pin_objects_from ((void**)&info->client_info.ctx, (void**)(&info->client_info.ctx + 1),
				start_nursery, end_nursery, PIN_TYPE_STACK);

			// This is used on Coop GC for platforms where we cannot get the data for individual registers.
			// We force a spill of all registers into the stack and pass a chunk of data into SGen.
			//FIXME under coop, for now, what we need to ensure is that we scan any extra memory from info->client_info.info.stack_end to stack_mark
			MonoThreadUnwindState *state = &info->client_info.info.thread_saved_state [SELF_SUSPEND_STATE_INDEX];
			if (state && state->gc_stackdata) {
				sgen_conservatively_pin_objects_from ((void **)state->gc_stackdata, (void**)((char*)state->gc_stackdata + state->gc_stackdata_size),
					start_nursery, end_nursery, PIN_TYPE_STACK);
			}
		}
		if (info->client_info.info.handle_stack) {
			/*
			  Make two passes over the handle stack. On the imprecise pass, pin all
			  objects where the handle points into the interior of the object. On the
			  precise pass, copy or mark all the objects that have handles to the
			  beginning of the object.
			*/
			if (precise)
				mono_handle_stack_scan (info->client_info.info.handle_stack, (GcScanFunc)ctx.ops->copy_or_mark_object, ctx.queue, precise, TRUE);
			else {
				PinHandleStackInteriorPtrData ud;
				memset (&ud, 0, sizeof (ud));
				ud.start_nursery = (void**)start_nursery;
				ud.end_nursery = (void**)end_nursery;
				mono_handle_stack_scan (info->client_info.info.handle_stack, pin_handle_stack_interior_ptrs, &ud, precise, FALSE);
			}
		}
	} FOREACH_THREAD_END
}
/**
 * mono_gc_set_stack_end:
 *
 * Set the end of the current thread's stack to STACK_END. The stack space between
 * STACK_END and the real end of the thread's stack will not be scanned during collections.
 */
void
mono_gc_set_stack_end (void *stack_end)
{
	SgenThreadInfo *info;

	LOCK_GC;
	info = mono_thread_info_current ();
	if (info) {
		SGEN_ASSERT (0, stack_end < info->client_info.info.stack_end, "Can only lower stack end");
		info->client_info.info.stack_end = stack_end;
	}
	UNLOCK_GC;
}
/*
 * Roots
 */

int
mono_gc_register_root (char *start, size_t size, MonoGCDescriptor descr, MonoGCRootSource source, void *key, const char *msg)
{
	return sgen_register_root (start, size, descr, descr ? ROOT_TYPE_NORMAL : ROOT_TYPE_PINNED, source, key, msg);
}

int
mono_gc_register_root_wbarrier (char *start, size_t size, MonoGCDescriptor descr, MonoGCRootSource source, void *key, const char *msg)
{
	return sgen_register_root (start, size, descr, ROOT_TYPE_WBARRIER, source, key, msg);
}

void
mono_gc_deregister_root (char* addr)
{
	sgen_deregister_root (addr);
}
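/*
 * A minimal sketch (disabled) of registering an embedder-owned location as a
 * GC root; my_global_obj is a hypothetical variable. Passing a NULL
 * descriptor selects the ROOT_TYPE_PINNED fallback above, so the slot is
 * scanned conservatively and its target is pinned.
 */
#if 0
static MonoObject *my_global_obj;

static void
register_my_root (void)
{
	mono_gc_register_root ((char *)&my_global_obj, sizeof (my_global_obj),
			NULL, MONO_ROOT_SOURCE_EXTERNAL, NULL, "my-embedder-root");
}

static void
unregister_my_root (void)
{
	mono_gc_deregister_root ((char *)&my_global_obj);
}
#endif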
/*
 * PThreads
 */

#ifndef HOST_WIN32
int
mono_gc_pthread_create (pthread_t *new_thread, const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg)
{
	int res;

	MONO_ENTER_GC_SAFE;
	mono_threads_join_lock ();
	res = pthread_create (new_thread, attr, start_routine, arg);
	mono_threads_join_unlock ();
	MONO_EXIT_GC_SAFE;

	return res;
}
#endif
/*
 * Miscellaneous
 */

static size_t last_heap_size = -1;
static size_t worker_heap_size;

void
sgen_client_total_allocated_heap_changed (size_t allocated_heap)
{
	mono_runtime_resource_check_limit (MONO_RESOURCE_GC_HEAP, allocated_heap);

	/*
	 * This function can be called from SGen's worker threads. We want to try
	 * and avoid exposing those threads to the profiler API, so save the heap
	 * size value and report it later when the main GC thread calls
	 * mono_sgen_gc_event_resize ().
	 */
	worker_heap_size = allocated_heap;
}

void
mono_sgen_gc_event_resize (void)
{
	if (worker_heap_size != last_heap_size) {
		last_heap_size = worker_heap_size;
		MONO_PROFILER_RAISE (gc_resize, (last_heap_size));
	}
}
gboolean
mono_gc_user_markers_supported (void)
{
	return TRUE;
}

gboolean
mono_object_is_alive (MonoObject* o)
{
	return TRUE;
}

int
mono_gc_get_generation (MonoObject *obj)
{
	if (sgen_ptr_in_nursery (obj))
		return 0;
	return 1;
}

const char *
mono_gc_get_gc_name (void)
{
	return "sgen";
}

char*
mono_gc_get_description (void)
{
#ifdef HAVE_CONC_GC_AS_DEFAULT
	return g_strdup ("sgen (concurrent by default)");
#else
	return g_strdup ("sgen");
#endif
}

void
mono_gc_set_desktop_mode (void)
{
}

gboolean
mono_gc_is_moving (void)
{
	return TRUE;
}

gboolean
mono_gc_is_disabled (void)
{
	return FALSE;
}

#ifdef HOST_WIN32
BOOL APIENTRY mono_gc_dllmain (HMODULE module_handle, DWORD reason, LPVOID reserved)
{
	return TRUE;
}
#endif

int
mono_gc_max_generation (void)
{
	return 1;
}

gboolean
mono_gc_precise_stack_mark_enabled (void)
{
	return !conservative_stack_mark;
}

void
mono_gc_collect (int generation)
{
	MONO_ENTER_GC_UNSAFE;
	sgen_gc_collect (generation);
	MONO_EXIT_GC_UNSAFE;
}
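/*
 * A minimal usage sketch (disabled): force a full collection by passing the
 * highest generation index the collector reports.
 */
#if 0
static void
force_full_gc (void)
{
	mono_gc_collect (mono_gc_max_generation ());
}
#endif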
int
mono_gc_collection_count (int generation)
{
	return sgen_gc_collection_count (generation);
}

int64_t
mono_gc_get_used_size (void)
{
	return (int64_t)sgen_gc_get_used_size ();
}

int64_t
mono_gc_get_heap_size (void)
{
	return (int64_t)sgen_gc_get_total_heap_allocation ();
}

MonoGCDescriptor
mono_gc_make_root_descr_user (MonoGCRootMarkFunc marker)
{
	return sgen_make_user_root_descriptor (marker);
}

MonoGCDescriptor
mono_gc_make_descr_for_string (gsize *bitmap, int numbits)
{
	return SGEN_DESC_STRING;
}

void
mono_gc_register_obj_with_weak_fields (void *obj)
{
	sgen_register_obj_with_weak_fields ((MonoObject*)obj);
}

void*
mono_gc_get_nursery (int *shift_bits, size_t *size)
{
	*size = sgen_nursery_size;
	*shift_bits = sgen_nursery_bits;
	return sgen_get_nursery_start ();
}
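/*
 * A minimal sketch (disabled) of the fast nursery check these values enable,
 * assuming (as sgen arranges for its canonical nursery) that the nursery is
 * aligned to its power-of-two size: a pointer is inside the nursery exactly
 * when it equals the nursery start once the low shift_bits bits are dropped.
 */
#if 0
static gboolean
ptr_in_nursery_fast (void *ptr)
{
	int shift_bits;
	size_t size;
	char *start = (char *) mono_gc_get_nursery (&shift_bits, &size);

	return ((mword) ptr >> shift_bits) == ((mword) start >> shift_bits);
}
#endif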
int
mono_gc_get_los_limit (void)
{
	return SGEN_MAX_SMALL_OBJ_SIZE;
}

void
sgen_set_total_bytes_allocated (guint64 bytes)
{
	total_bytes_allocated = bytes;
}

guint64
mono_gc_get_allocated_bytes_for_current_thread (void)
{
	SgenThreadInfo *info;
	info = mono_thread_info_current ();

	/* Include the bytes allocated in the current TLAB that have not been recorded yet. */
	return info->total_bytes_allocated + info->tlab_next - info->tlab_start;
}
guint64
mono_gc_get_total_allocated_bytes (MonoBoolean precise)
{
	/* total_bytes_allocated is only refreshed at each GC, even when precise is requested. */
	return total_bytes_allocated;
}

gpointer
sgen_client_default_metadata (void)
{
	return mono_domain_get ();
}

gpointer
sgen_client_metadata_for_object (GCObject *obj)
{
	return mono_object_domain (obj);
}
/**
 * mono_gchandle_new_internal:
 * \param obj managed object to get a handle for
 * \param pinned whether the object should be pinned
 * This returns a handle that wraps the object; it is used to keep a
 * reference to a managed object from the unmanaged world and prevent the
 * object from being disposed.
 *
 * If \p pinned is false the address of the object can not be obtained; if it is
 * true the address of the object can be obtained. This will also pin the
 * object so that a moving garbage collector will not be able to move it.
 *
 * \returns a handle that can be used to access the object from unmanaged code.
 */
guint32
mono_gchandle_new_internal (MonoObject *obj, gboolean pinned)
{
	return sgen_gchandle_new (obj, pinned);
}

/**
 * mono_gchandle_new_weakref_internal:
 * \param obj managed object to get a handle for
 * \param track_resurrection determines how long to track the object: if TRUE, the object is tracked through finalization; if FALSE, it is only tracked up until the point of finalization.
 *
 * This returns a weak handle that wraps the object; it is used to
 * keep a reference to a managed object from the unmanaged world.
 * Unlike \c mono_gchandle_new_internal, the wrapped object can be reclaimed by the
 * garbage collector. In this case the value of the GCHandle will be
 * set to zero.
 *
 * If \p track_resurrection is TRUE the object will be tracked through
 * finalization, and if the object is resurrected during the execution
 * of the finalizer, then the returned weakref will continue to hold
 * a reference to the object. If \p track_resurrection is FALSE, then
 * the weak reference's target will become NULL as soon as the object
 * is passed on to the finalizer.
 *
 * \returns a handle that can be used to access the object from
 * unmanaged code.
 */
guint32
mono_gchandle_new_weakref_internal (GCObject *obj, gboolean track_resurrection)
{
	return sgen_gchandle_new_weakref (obj, track_resurrection);
}
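/*
 * A minimal lifecycle sketch (disabled) using the internal entry points in
 * this file (mono_gchandle_get_target_internal () and
 * mono_gchandle_free_internal () are defined below); embedders would normally
 * use the public mono_gchandle_new () / mono_gchandle_get_target () /
 * mono_gchandle_free () wrappers instead.
 */
#if 0
static void
gchandle_example (MonoObject *obj)
{
	/* Keep obj alive and pinned so its address stays valid for native code. */
	guint32 pinned = mono_gchandle_new_internal (obj, TRUE);

	/* Weak handle: the target reads as NULL once obj has been collected. */
	guint32 weak = mono_gchandle_new_weakref_internal (obj, FALSE);

	MonoObject *target = mono_gchandle_get_target_internal (weak);
	if (!target) {
		/* obj was already reclaimed. */
	}

	mono_gchandle_free_internal (pinned);
	mono_gchandle_free_internal (weak);
}
#endif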
/**
 * mono_gchandle_is_in_domain:
 * \param gchandle a GCHandle's handle.
 * \param domain An application domain.
 * \returns TRUE if the object wrapped by the \p gchandle belongs to the specific \p domain.
 */
gboolean
mono_gchandle_is_in_domain (guint32 gchandle, MonoDomain *domain)
{
	MonoDomain *gchandle_domain = (MonoDomain *)sgen_gchandle_get_metadata (gchandle);
	return domain->domain_id == gchandle_domain->domain_id;
}

/**
 * mono_gchandle_free_internal:
 * \param gchandle a GCHandle's handle.
 *
 * Frees the \p gchandle handle. If there are no outstanding
 * references, the garbage collector can reclaim the memory of the
 * wrapped object.
 */
void
mono_gchandle_free_internal (guint32 gchandle)
{
	sgen_gchandle_free (gchandle);
}

/**
 * mono_gchandle_free_domain:
 * \param unloading domain that is unloading
 *
 * Function used internally to clean up any GC handles for objects belonging
 * to the specified domain during appdomain unload.
 */
void
mono_gchandle_free_domain (MonoDomain *unloading)
{
}

/**
 * mono_gchandle_get_target_internal:
 * \param gchandle a GCHandle's handle.
 *
 * The handle was previously created by calling \c mono_gchandle_new_internal or
 * \c mono_gchandle_new_weakref.
 *
 * \returns the \c MonoObject* wrapped by the handle, or
 * NULL for a collected object when using a weakref handle.
 */
MonoObject*
mono_gchandle_get_target_internal (guint32 gchandle)
{
	return sgen_gchandle_get_target (gchandle);
}
static gpointer
null_link_if_in_domain (gpointer hidden, GCHandleType handle_type, int max_generation, gpointer user)
{
	MonoDomain *unloading_domain = (MonoDomain *)user;
	MonoDomain *obj_domain;
	gboolean is_weak = MONO_GC_HANDLE_TYPE_IS_WEAK (handle_type);
	if (MONO_GC_HANDLE_IS_OBJECT_POINTER (hidden)) {
		MonoObject *obj = (MonoObject *)MONO_GC_REVEAL_POINTER (hidden, is_weak);
		obj_domain = mono_object_domain (obj);
	} else {
		obj_domain = (MonoDomain *)MONO_GC_REVEAL_POINTER (hidden, is_weak);
	}
	if (unloading_domain->domain_id == obj_domain->domain_id)
		return NULL;
	return hidden;
}

void
sgen_null_links_for_domain (MonoDomain *domain)
{
	guint type;
	for (type = HANDLE_TYPE_MIN; type < HANDLE_TYPE_MAX; ++type)
		sgen_gchandle_iterate ((GCHandleType)type, GENERATION_OLD, null_link_if_in_domain, domain);
}
void
mono_gchandle_set_target (guint32 gchandle, MonoObject *obj)
{
	sgen_gchandle_set_target (gchandle, obj);
}

void
sgen_client_gchandle_created (int handle_type, GCObject *obj, guint32 handle)
{
#ifndef DISABLE_PERFCOUNTERS
	mono_atomic_inc_i32 (&mono_perfcounters->gc_num_handles);
#endif

	MONO_PROFILER_RAISE (gc_handle_created, (handle, (MonoGCHandleType)handle_type, obj));
}

void
sgen_client_gchandle_destroyed (int handle_type, guint32 handle)
{
#ifndef DISABLE_PERFCOUNTERS
	mono_atomic_dec_i32 (&mono_perfcounters->gc_num_handles);
#endif

	MONO_PROFILER_RAISE (gc_handle_deleted, (handle, (MonoGCHandleType)handle_type));
}

void
sgen_client_ensure_weak_gchandles_accessible (void)
{
	/*
	 * During the second bridge processing step the world is
	 * running again. That step processes all weak links once
	 * more to null those that refer to dead objects. Before that
	 * is completed, those links must not be followed, so we
	 * conservatively wait for bridge processing when any weak
	 * link is dereferenced.
	 */
	/* FIXME: A GC can occur after this check fails, in which case we
	 * should wait for bridge processing but would fail to do so.
	 */
	if (G_UNLIKELY (mono_bridge_processing_in_progress))
		mono_gc_wait_for_bridge_processing ();
}
void*
mono_gc_invoke_with_gc_lock (MonoGCLockedCallbackFunc func, void *data)
{
	void *result;
	LOCK_INTERRUPTION;
	result = func (data);
	UNLOCK_INTERRUPTION;
	return result;
}
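/*
 * A minimal usage sketch (disabled); inspect_heap_locked is a hypothetical
 * callback name. The callback runs under the interruption lock taken above,
 * so it will not race with a collection being triggered through that path.
 */
#if 0
static void*
inspect_heap_locked (void *data)
{
	/* Heap inspection code goes here. */
	return NULL;
}

static void
inspect_heap (void)
{
	mono_gc_invoke_with_gc_lock (inspect_heap_locked, NULL);
}
#endif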
void
mono_gc_register_altstack (gpointer stack, gint32 stack_size, gpointer altstack, gint32 altstack_size)
{
	// FIXME:
}

guint8*
mono_gc_get_card_table (int *shift_bits, gpointer *mask)
{
	return sgen_get_card_table_configuration (shift_bits, mask);
}

guint8*
mono_gc_get_target_card_table (int *shift_bits, target_mgreg_t *mask)
{
	return sgen_get_target_card_table_configuration (shift_bits, mask);
}
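/*
 * A minimal sketch (disabled) of the card-marking fast path built from these
 * values: the card for a written slot lives at table + (address >> shift_bits).
 * When the returned mask is non-NULL the table is not linearly addressable
 * and the shifted address must be masked first; this sketch assumes the
 * simple NULL-mask configuration.
 */
#if 0
static void
mark_card_for_slot (gpointer slot)
{
	int shift_bits;
	gpointer mask;
	guint8 *table = mono_gc_get_card_table (&shift_bits, &mask);

	g_assert (!mask); /* assumption: linearly addressable card table */
	table [(mword) slot >> shift_bits] = 1;
}
#endif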
gboolean
mono_gc_card_table_nursery_check (void)
{
	return !sgen_get_major_collector ()->is_concurrent;
}

/* A negative value removes previously added pressure. */
void
mono_gc_add_memory_pressure (gint64 value)
{
	/* FIXME: Implement at some point? */
}
/*
 * Logging
 */

void
sgen_client_degraded_allocation (void)
{
	static gint32 last_major_gc_warned = -1;
	static gint32 num_degraded = 0;

	gint32 major_gc_count = mono_atomic_load_i32 (&mono_gc_stats.major_gc_count);
	// The WASM target always triggers degraded allocation before collecting, so there is
	// no point in printing the warning: it would just confuse users.
#if !defined (TARGET_WASM)
	if (mono_atomic_load_i32 (&last_major_gc_warned) < major_gc_count) {
		gint32 num = mono_atomic_inc_i32 (&num_degraded);
		if (num == 1 || num == 3)
			mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_GC, "Warning: Degraded allocation. Consider increasing nursery-size if the warning persists.");
		else if (num == 10)
			mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_GC, "Warning: Repeated degraded allocation. Consider increasing nursery-size.");

		mono_atomic_store_i32 (&last_major_gc_warned, major_gc_count);
	}
#endif
}
/*
 * Debugging
 */

const char*
sgen_client_description_for_internal_mem_type (int type)
{
	switch (type) {
	case INTERNAL_MEM_EPHEMERON_LINK: return "ephemeron-link";
	case INTERNAL_MEM_MOVED_OBJECT: return "moved-object";
	default:
		return NULL;
	}
}

void
sgen_client_pre_collection_checks (void)
{
	if (sgen_mono_xdomain_checks) {
		sgen_clear_nursery_fragments ();
		sgen_check_for_xdomain_refs ();
	}
}

gboolean
sgen_client_vtable_is_inited (MonoVTable *vt)
{
	return m_class_is_inited (vt->klass);
}

const char*
sgen_client_vtable_get_namespace (MonoVTable *vt)
{
	return m_class_get_name_space (vt->klass);
}

const char*
sgen_client_vtable_get_name (MonoVTable *vt)
{
	return m_class_get_name (vt->klass);
}
/*
 * Initialization
 */

void
sgen_client_init (void)
{
	mono_thread_callbacks_init ();
	mono_thread_info_init (sizeof (SgenThreadInfo));

	/* Keep this the default for now: precise marking is broken on all supported targets. Disable until fixed. */
	conservative_stack_mark = TRUE;

	sgen_register_fixed_internal_mem_type (INTERNAL_MEM_EPHEMERON_LINK, sizeof (EphemeronLinkNode));

	mono_sgen_init_stw ();

	mono_tls_init_gc_keys ();

	mono_thread_info_attach ();
}

void
mono_gc_init_icalls (void)
{
	mono_register_jit_icall (mono_gc_alloc_obj, mono_icall_sig_object_ptr_int, FALSE);
	mono_register_jit_icall (mono_gc_alloc_vector, mono_icall_sig_object_ptr_int_int, FALSE);
	mono_register_jit_icall (mono_gc_alloc_string, mono_icall_sig_object_ptr_int_int32, FALSE);
	mono_register_jit_icall (mono_profiler_raise_gc_allocation, mono_icall_sig_void_object, FALSE);
}
gboolean
sgen_client_handle_gc_param (const char *opt)
{
	if (g_str_has_prefix (opt, "stack-mark=")) {
		opt = strchr (opt, '=') + 1;
		if (!strcmp (opt, "precise")) {
			conservative_stack_mark = FALSE;
		} else if (!strcmp (opt, "conservative")) {
			conservative_stack_mark = TRUE;
		} else {
			sgen_env_var_error (MONO_GC_PARAMS_NAME, conservative_stack_mark ? "Using `conservative`." : "Using `precise`.",
					"Invalid value `%s` for `stack-mark` option, possible values are: `precise`, `conservative`.", opt);
		}
	} else if (g_str_has_prefix (opt, "bridge-implementation=")) {
		opt = strchr (opt, '=') + 1;
		sgen_set_bridge_implementation (opt);
	} else if (g_str_has_prefix (opt, "toggleref-test")) {
		/* FIXME: This should probably be in MONO_GC_DEBUG. */
		sgen_register_test_toggleref_callback ();
	} else if (!sgen_bridge_handle_gc_param (opt)) {
		return FALSE;
	}
	return TRUE;
}

void
sgen_client_print_gc_params_usage (void)
{
	fprintf (stderr, "  stack-mark=MARK-METHOD (where MARK-METHOD is 'precise' or 'conservative')\n");
}
gboolean
sgen_client_handle_gc_debug (const char *opt)
{
	if (!strcmp (opt, "xdomain-checks")) {
		sgen_mono_xdomain_checks = TRUE;
	} else if (!strcmp (opt, "do-not-finalize")) {
		mono_do_not_finalize = TRUE;
	} else if (g_str_has_prefix (opt, "do-not-finalize=")) {
		opt = strchr (opt, '=') + 1;
		mono_do_not_finalize = TRUE;
		mono_do_not_finalize_class_names = g_strsplit (opt, ",", 0);
	} else if (!strcmp (opt, "log-finalizers")) {
		mono_log_finalizers = TRUE;
	} else if (!strcmp (opt, "no-managed-allocator")) {
		sgen_set_use_managed_allocator (FALSE);
	} else if (!sgen_bridge_handle_gc_debug (opt)) {
		return FALSE;
	}
	return TRUE;
}

void
sgen_client_print_gc_debug_usage (void)
{
	fprintf (stderr, "  xdomain-checks\n");
	fprintf (stderr, "  do-not-finalize\n");
	fprintf (stderr, "  log-finalizers\n");
	fprintf (stderr, "  no-managed-allocator\n");
	sgen_bridge_print_gc_debug_usage ();
}
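/*
 * A minimal sketch (disabled; embedding-host code, not part of this file) of
 * exercising the option parsers above: the options arrive via the
 * MONO_GC_PARAMS and MONO_GC_DEBUG environment variables, which must be set
 * before the runtime initializes.
 */
#if 0
#include <glib.h>
#include <mono/jit/jit.h>

static void
start_runtime_with_gc_options (void)
{
	/* Comma-separated lists, handed to the handlers above entry by entry. */
	g_setenv ("MONO_GC_PARAMS", "stack-mark=conservative,bridge-implementation=tarjan", TRUE);
	g_setenv ("MONO_GC_DEBUG", "xdomain-checks,no-managed-allocator", TRUE);

	mono_jit_init ("example");
}
#endif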
gpointer
sgen_client_get_provenance (void)
{
#ifdef SGEN_OBJECT_PROVENANCE
	MonoGCCallbacks *cb = mono_gc_get_gc_callbacks ();
	gpointer (*get_provenance_func) (void);
	if (!cb)
		return NULL;
	get_provenance_func = cb->get_provenance_func;
	if (get_provenance_func)
		return get_provenance_func ();
	return NULL;
#else
	return NULL;
#endif
}

void
sgen_client_describe_invalid_pointer (GCObject *ptr)
{
	sgen_bridge_describe_pointer (ptr);
}
static gboolean gc_inited;

/**
 * mono_gc_base_init:
 */
void
mono_gc_base_init (void)
{
	if (gc_inited)
		return;

	mono_counters_init ();

#ifndef HOST_WIN32
	mono_w32handle_init ();
#endif

#ifdef HEAVY_STATISTICS
	mono_counters_register ("los marked cards", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &los_marked_cards);
	mono_counters_register ("los array cards scanned", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &los_array_cards);
	mono_counters_register ("los array remsets", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &los_array_remsets);

	mono_counters_register ("WBarrier set arrayref", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_wbarrier_set_arrayref);
	mono_counters_register ("WBarrier value copy", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_wbarrier_value_copy);
	mono_counters_register ("WBarrier object copy", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_wbarrier_object_copy);
#endif

	sgen_gc_init ();

	gc_inited = TRUE;
}
void
mono_gc_base_cleanup (void)
{
	/*
	 * Note we don't fully clean up the GC here, but mainly the threads.
	 *
	 * We need to finish any work on the sgen threads before shutting down
	 * the sgen threadpool. After this point we can still trigger GCs as
	 * part of domain free, but they should all be forced and not use the
	 * threadpool.
	 */
	sgen_finish_concurrent_work ("cleanup", TRUE);
	sgen_thread_pool_shutdown ();

	// We should have consumed any outstanding moves.
	g_assert (sgen_pointer_queue_is_empty (&moved_objects_queue));
}
gboolean
mono_gc_is_null (void)
{
	return FALSE;
}

gsize *
sgen_client_get_weak_bitmap (MonoVTable *vt, int *nbits)
{
	MonoClass *klass = vt->klass;

	return mono_class_get_weak_bitmap (klass, nbits);
}
void
sgen_client_binary_protocol_collection_begin (int minor_gc_count, int generation)
{
	static gboolean pseudo_roots_registered;

	MONO_GC_BEGIN (generation);

	MONO_PROFILER_RAISE (gc_event, (MONO_GC_EVENT_START, generation, generation == GENERATION_OLD && sgen_concurrent_collection_in_progress));

	if (!pseudo_roots_registered) {
		pseudo_roots_registered = TRUE;

		MONO_PROFILER_RAISE (gc_root_register, (SPECIAL_ADDRESS_FIN_QUEUE, 1, MONO_ROOT_SOURCE_FINALIZER_QUEUE, NULL, "Finalizer Queue"));
		MONO_PROFILER_RAISE (gc_root_register, (SPECIAL_ADDRESS_CRIT_FIN_QUEUE, 1, MONO_ROOT_SOURCE_FINALIZER_QUEUE, NULL, "Finalizer Queue (Critical)"));
		MONO_PROFILER_RAISE (gc_root_register, (SPECIAL_ADDRESS_EPHEMERON, 1, MONO_ROOT_SOURCE_EPHEMERON, NULL, "Ephemerons"));
	}

#ifndef DISABLE_PERFCOUNTERS
	if (generation == GENERATION_NURSERY)
		mono_atomic_inc_i32 (&mono_perfcounters->gc_collections0);
	else
		mono_atomic_inc_i32 (&mono_perfcounters->gc_collections1);
#endif
}

void
sgen_client_binary_protocol_collection_end (int minor_gc_count, int generation, long long num_objects_scanned, long long num_unique_objects_scanned)
{
	MONO_GC_END (generation);

	MONO_PROFILER_RAISE (gc_event, (MONO_GC_EVENT_END, generation, generation == GENERATION_OLD && sgen_concurrent_collection_in_progress));
}
#ifdef HOST_WASM
void
sgen_client_schedule_background_job (void (*cb)(void))
{
	mono_threads_schedule_background_job (cb);
}
#endif

#endif /* HAVE_SGEN_GC */