/*
 * SGen features specific to Mono.
 *
 * Copyright (C) 2014 Xamarin Inc
 *
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
 */
13 #include "sgen/sgen-gc.h"
14 #include "sgen/sgen-protocol.h"
15 #include "metadata/monitor.h"
16 #include "sgen/sgen-layout-stats.h"
17 #include "sgen/sgen-client.h"
18 #include "sgen/sgen-cardtable.h"
19 #include "sgen/sgen-pinning.h"
20 #include "sgen/sgen-workers.h"
21 #include "metadata/class-init.h"
22 #include "metadata/marshal.h"
23 #include "metadata/method-builder.h"
24 #include "metadata/abi-details.h"
25 #include "metadata/class-abi-details.h"
26 #include "metadata/mono-gc.h"
27 #include "metadata/runtime.h"
28 #include "metadata/sgen-bridge-internals.h"
29 #include "metadata/sgen-mono.h"
30 #include "metadata/sgen-mono-ilgen.h"
31 #include "metadata/gc-internals.h"
32 #include "metadata/handle.h"
33 #include "metadata/abi-details.h"
34 #include "utils/mono-memory-model.h"
35 #include "utils/mono-logger-internals.h"
36 #include "utils/mono-threads-coop.h"
37 #include "utils/mono-threads.h"
38 #include "metadata/w32handle.h"
39 #include "icall-signatures.h"
#ifdef HEAVY_STATISTICS
/* Counters for the three write-barrier entry points below (HEAVY_STAT only). */
static guint64 stat_wbarrier_set_arrayref = 0;
static guint64 stat_wbarrier_value_copy = 0;
static guint64 stat_wbarrier_object_copy = 0;

/* Card-table statistics for large-object-space array scanning. */
static guint64 los_marked_cards;
static guint64 los_array_cards;
static guint64 los_array_remsets;
#endif
/* If set, mark stacks conservatively, even if precise marking is possible */
static gboolean conservative_stack_mark = FALSE;
/* If set, check that there are no references to the domain left at domain unload */
gboolean sgen_mono_xdomain_checks = FALSE;

/* Functions supplied by the runtime to be called by the GC */
static MonoGCCallbacks gc_callbacks;
59 #define OPDEF(a,b,c,d,e,f,g,h,i,j) \
63 #include "mono/cil/opcode.def"
74 ptr_on_stack (void *ptr
)
76 gpointer stack_start
= &stack_start
;
77 SgenThreadInfo
*info
= mono_thread_info_current ();
79 if (ptr
>= stack_start
&& ptr
< (gpointer
)info
->client_info
.info
.stack_end
)
#ifdef SGEN_HEAVY_BINARY_PROTOCOL
#undef HANDLE_PTR
/* For each reference slot found while scanning, log a wbarrier record at the
 * corresponding offset inside `dest` (the copy destination). */
#define HANDLE_PTR(ptr,obj) do {					\
		gpointer o = *(gpointer*)(ptr);				\
		if (o) {						\
			gpointer d = ((char*)dest) + ((char*)(ptr) - (char*)(obj)); \
			sgen_binary_protocol_wbarrier (d, o, (gpointer) SGEN_LOAD_VTABLE (o)); \
		}							\
	} while (0)

/* Walk the object at `start` using `desc` and emit binary-protocol wbarrier
 * entries for every reference it contains, relocated onto `dest`. */
static void
scan_object_for_binary_protocol_copy_wbarrier (gpointer dest, char *start, mword desc)
{
#define SCAN_OBJECT_NOVTABLE
#include "sgen/sgen-scan-object.h"
}
#endif
103 mono_gc_wbarrier_value_copy_internal (gpointer dest
, gconstpointer src
, int count
, MonoClass
*klass
)
105 HEAVY_STAT (++stat_wbarrier_value_copy
);
106 g_assert (m_class_is_valuetype (klass
));
108 SGEN_LOG (8, "Adding value remset at %p, count %d, descr %p for class %s (%p)", dest
, count
, (gpointer
)(uintptr_t)m_class_get_gc_descr (klass
), m_class_get_name (klass
), klass
);
110 if (sgen_ptr_in_nursery (dest
) || ptr_on_stack (dest
) || !sgen_gc_descr_has_references ((mword
)m_class_get_gc_descr (klass
))) {
111 size_t element_size
= mono_class_value_size (klass
, NULL
);
112 size_t size
= count
* element_size
;
113 mono_gc_memmove_atomic (dest
, src
, size
);
117 #ifdef SGEN_HEAVY_BINARY_PROTOCOL
118 if (sgen_binary_protocol_is_heavy_enabled ()) {
119 size_t element_size
= mono_class_value_size (klass
, NULL
);
121 for (i
= 0; i
< count
; ++i
) {
122 scan_object_for_binary_protocol_copy_wbarrier ((char*)dest
+ i
* element_size
,
123 (char*)src
+ i
* element_size
- MONO_ABI_SIZEOF (MonoObject
),
124 (mword
) m_class_get_gc_descr (klass
));
129 sgen_get_remset ()->wbarrier_value_copy (dest
, src
, count
, mono_class_value_size (klass
, NULL
));
133 * mono_gc_wbarrier_object_copy_internal:
135 * Write barrier to call when \p obj is the result of a clone or copy of an object.
138 mono_gc_wbarrier_object_copy_internal (MonoObject
* obj
, MonoObject
*src
)
142 HEAVY_STAT (++stat_wbarrier_object_copy
);
144 SGEN_ASSERT (6, !ptr_on_stack (obj
), "Why is this called for a non-reference type?");
145 if (sgen_ptr_in_nursery (obj
) || !SGEN_OBJECT_HAS_REFERENCES (src
)) {
146 size
= m_class_get_instance_size (mono_object_class (obj
));
147 mono_gc_memmove_aligned ((char*)obj
+ MONO_ABI_SIZEOF (MonoObject
), (char*)src
+ MONO_ABI_SIZEOF (MonoObject
),
148 size
- MONO_ABI_SIZEOF (MonoObject
));
152 #ifdef SGEN_HEAVY_BINARY_PROTOCOL
153 if (sgen_binary_protocol_is_heavy_enabled ())
154 scan_object_for_binary_protocol_copy_wbarrier (obj
, (char*)src
, (mword
) src
->vtable
->gc_descr
);
157 sgen_get_remset ()->wbarrier_object_copy (obj
, src
);
161 * mono_gc_wbarrier_set_arrayref_internal:
164 mono_gc_wbarrier_set_arrayref_internal (MonoArray
*arr
, gpointer slot_ptr
, MonoObject
* value
)
166 HEAVY_STAT (++stat_wbarrier_set_arrayref
);
167 if (sgen_ptr_in_nursery (slot_ptr
)) {
168 *(void**)slot_ptr
= value
;
171 SGEN_LOG (8, "Adding remset at %p", slot_ptr
);
173 sgen_binary_protocol_wbarrier (slot_ptr
, value
, value
->vtable
);
175 sgen_get_remset ()->wbarrier_set_field ((GCObject
*)arr
, slot_ptr
, value
);
179 * mono_gc_wbarrier_set_field_internal:
182 mono_gc_wbarrier_set_field_internal (MonoObject
*obj
, gpointer field_ptr
, MonoObject
* value
)
184 mono_gc_wbarrier_set_arrayref_internal ((MonoArray
*)obj
, field_ptr
, value
);
188 mono_gc_wbarrier_range_copy (gpointer _dest
, gconstpointer _src
, int size
)
190 sgen_wbarrier_range_copy (_dest
, _src
, size
);
193 MonoRangeCopyFunction
194 mono_gc_get_range_copy_func (void)
196 return sgen_get_remset ()->wbarrier_range_copy
;
/* Signal number used to suspend threads for STW (platform dependent). */
int
mono_gc_get_suspend_signal (void)
{
	return mono_threads_suspend_get_suspend_signal ();
}
/* Signal number used to resume suspended threads after STW. */
int
mono_gc_get_restart_signal (void)
{
	return mono_threads_suspend_get_restart_signal ();
}
/* Lazily-created managed write-barrier wrappers, one per collector mode
 * (concurrent vs. non-concurrent); see mono_gc_get_specific_write_barrier (). */
static MonoMethod *write_barrier_conc_method;
static MonoMethod *write_barrier_noconc_method;
215 sgen_is_critical_method (MonoMethod
*method
)
217 return sgen_is_managed_allocator (method
);
221 sgen_has_critical_method (void)
223 return sgen_has_managed_allocator ();
227 mono_gc_is_critical_method (MonoMethod
*method
)
230 //methods can't be critical under wasm due to the single thread'ness of it
233 return sgen_is_critical_method (method
);
/* Callback table installed by the IL generator (or the no-ilgen fallback);
 * cb_inited guards against double installation. */
static MonoSgenMonoCallbacks sgenmono_cb;
static gboolean cb_inited = FALSE;
241 mono_install_sgen_mono_callbacks (MonoSgenMonoCallbacks
*cb
)
243 g_assert (!cb_inited
);
244 g_assert (cb
->version
== MONO_SGEN_MONO_CALLBACKS_VERSION
);
245 memcpy (&sgenmono_cb
, cb
, sizeof (MonoSgenMonoCallbacks
));
252 emit_nursery_check_noilgen (MonoMethodBuilder
*mb
, gboolean is_concurrent
)
257 emit_managed_allocator_noilgen (MonoMethodBuilder
*mb
, gboolean slowpath
, gboolean profiler
, int atype
)
262 install_noilgen (void)
264 MonoSgenMonoCallbacks cb
;
265 cb
.version
= MONO_SGEN_MONO_CALLBACKS_VERSION
;
266 cb
.emit_nursery_check
= emit_nursery_check_noilgen
;
267 cb
.emit_managed_allocator
= emit_managed_allocator_noilgen
;
268 mono_install_sgen_mono_callbacks (&cb
);
273 static MonoSgenMonoCallbacks
*
274 get_sgen_mono_cb (void)
276 if (G_UNLIKELY (!cb_inited
)) {
278 mono_sgen_mono_ilgen_init ();
287 mono_gc_get_specific_write_barrier (gboolean is_concurrent
)
290 MonoMethodBuilder
*mb
;
291 MonoMethodSignature
*sig
;
292 MonoMethod
**write_barrier_method_addr
;
294 // FIXME: Maybe create a separate version for ctors (the branch would be
295 // correctly predicted more times)
297 write_barrier_method_addr
= &write_barrier_conc_method
;
299 write_barrier_method_addr
= &write_barrier_noconc_method
;
301 if (*write_barrier_method_addr
)
302 return *write_barrier_method_addr
;
304 /* Create the IL version of mono_gc_barrier_generic_store () */
305 sig
= mono_metadata_signature_alloc (mono_defaults
.corlib
, 1);
306 sig
->ret
= mono_get_void_type ();
307 sig
->params
[0] = mono_get_int_type ();
310 mb
= mono_mb_new (mono_defaults
.object_class
, "wbarrier_conc", MONO_WRAPPER_WRITE_BARRIER
);
312 mb
= mono_mb_new (mono_defaults
.object_class
, "wbarrier_noconc", MONO_WRAPPER_WRITE_BARRIER
);
314 get_sgen_mono_cb ()->emit_nursery_check (mb
, is_concurrent
);
316 res
= mono_mb_create_method (mb
, sig
, 16);
317 info
= mono_wrapper_info_create (mb
, WRAPPER_SUBTYPE_NONE
);
318 mono_marshal_set_wrapper_info (res
, info
);
322 if (*write_barrier_method_addr
) {
323 /* Already created */
324 mono_free_method (res
);
326 /* double-checked locking */
327 mono_memory_barrier ();
328 *write_barrier_method_addr
= res
;
332 return *write_barrier_method_addr
;
336 mono_gc_get_write_barrier (void)
338 return mono_gc_get_specific_write_barrier (sgen_major_collector
.is_concurrent
);
/*
 * Dummy filler objects
 */

/* Vtable of the objects used to fill out nursery fragments before a collection */
static GCVTable array_fill_vtable;
349 get_array_fill_vtable (void)
351 if (!array_fill_vtable
) {
352 static char _vtable
[sizeof(MonoVTable
)+8];
353 MonoVTable
* vtable
= (MonoVTable
*) ALIGN_TO((mword
)_vtable
, 8);
356 MonoClass
*klass
= mono_class_create_array_fill_type ();
357 MonoDomain
*domain
= mono_get_root_domain ();
360 vtable
->klass
= klass
;
362 vtable
->gc_descr
= mono_gc_make_descr_for_array (TRUE
, &bmap
, 0, 8);
365 array_fill_vtable
= vtable
;
367 return array_fill_vtable
;
371 sgen_client_array_fill_range (char *start
, size_t size
)
375 if (size
< MONO_SIZEOF_MONO_ARRAY
) {
376 memset (start
, 0, size
);
380 o
= (MonoArray
*)start
;
381 o
->obj
.vtable
= (MonoVTable
*)get_array_fill_vtable ();
382 /* Mark this as not a real object */
383 o
->obj
.synchronisation
= (MonoThreadsSync
*)GINT_TO_POINTER (-1);
385 /* We use array of int64 */
386 g_assert ((size
- MONO_SIZEOF_MONO_ARRAY
) % 8 == 0);
387 o
->max_length
= (mono_array_size_t
)((size
- MONO_SIZEOF_MONO_ARRAY
) / 8);
393 sgen_client_zero_array_fill_header (void *p
, size_t size
)
395 if (size
>= MONO_SIZEOF_MONO_ARRAY
) {
396 memset (p
, 0, MONO_SIZEOF_MONO_ARRAY
);
398 static guint8 zeros
[MONO_SIZEOF_MONO_ARRAY
];
400 SGEN_ASSERT (0, !memcmp (p
, zeros
, size
), "TLAB segment must be zeroed out.");
405 mono_gc_get_vtable (MonoObject
*obj
)
407 // See sgen/sgen-tagged-pointer.h.
408 return SGEN_LOAD_VTABLE (obj
);
/* Finalization-aware callbacks registered via mono_gc_register_finalizer_callbacks (). */
static MonoGCFinalizerCallbacks fin_callbacks;
418 mono_gc_get_vtable_bits (MonoClass
*klass
)
421 /* FIXME move this to the bridge code */
422 if (sgen_need_bridge_processing ()) {
423 switch (sgen_bridge_class_kind (klass
)) {
424 case GC_BRIDGE_TRANSPARENT_BRIDGE_CLASS
:
425 case GC_BRIDGE_OPAQUE_BRIDGE_CLASS
:
426 res
= SGEN_GC_BIT_BRIDGE_OBJECT
;
428 case GC_BRIDGE_OPAQUE_CLASS
:
429 res
= SGEN_GC_BIT_BRIDGE_OPAQUE_OBJECT
;
431 case GC_BRIDGE_TRANSPARENT_CLASS
:
435 if (fin_callbacks
.is_class_finalization_aware
) {
436 if (fin_callbacks
.is_class_finalization_aware (klass
))
437 res
|= SGEN_GC_BIT_FINALIZER_AWARE
;
443 is_finalization_aware (MonoObject
*obj
)
445 MonoVTable
*vt
= SGEN_LOAD_VTABLE (obj
);
446 return (vt
->gc_bits
& SGEN_GC_BIT_FINALIZER_AWARE
) == SGEN_GC_BIT_FINALIZER_AWARE
;
450 sgen_client_object_queued_for_finalization (GCObject
*obj
)
452 if (fin_callbacks
.object_queued_for_finalization
&& is_finalization_aware (obj
))
453 fin_callbacks
.object_queued_for_finalization (obj
);
456 if (G_UNLIKELY (MONO_GC_FINALIZE_ENQUEUE_ENABLED ())) {
457 int gen
= sgen_ptr_in_nursery (obj
) ? GENERATION_NURSERY
: GENERATION_OLD
;
458 GCVTable vt
= SGEN_LOAD_VTABLE (obj
);
459 MONO_GC_FINALIZE_ENQUEUE ((mword
)obj
, sgen_safe_object_get_size (obj
),
460 sgen_client_vtable_get_namespace (vt
), sgen_client_vtable_get_name (vt
), gen
,
461 sgen_client_object_has_critical_finalizer (obj
));
467 mono_gc_register_finalizer_callbacks (MonoGCFinalizerCallbacks
*callbacks
)
469 if (callbacks
->version
!= MONO_GC_FINALIZER_EXTENSION_VERSION
)
470 g_error ("Invalid finalizer callback version. Expected %d but got %d\n", MONO_GC_FINALIZER_EXTENSION_VERSION
, callbacks
->version
);
472 fin_callbacks
= *callbacks
;
476 sgen_client_run_finalize (MonoObject
*obj
)
478 mono_gc_run_finalize (obj
, NULL
);
482 * mono_gc_invoke_finalizers:
485 mono_gc_invoke_finalizers (void)
487 return sgen_gc_invoke_finalizers ();
491 * mono_gc_pending_finalizers:
494 mono_gc_pending_finalizers (void)
496 return sgen_have_pending_finalizers ();
/* Wake the finalizer thread. */
void
sgen_client_finalize_notify (void)
{
	mono_gc_finalize_notify ();
}
506 mono_gc_register_for_finalization (MonoObject
*obj
, MonoFinalizationProc user_data
)
508 sgen_object_register_for_finalization (obj
, user_data
);
512 object_in_domain_predicate (MonoObject
*obj
, void *user_data
)
514 MonoDomain
*domain
= (MonoDomain
*)user_data
;
515 if (mono_object_domain (obj
) == domain
) {
516 SGEN_LOG (5, "Unregistering finalizer for object: %p (%s)", obj
, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (obj
)));
523 * mono_gc_finalizers_for_domain:
524 * \param domain the unloading appdomain
525 * \param out_array output array
526 * \param out_size size of output array
527 * Enqueue for finalization all objects that belong to the unloading appdomain \p domain.
528 * \p suspend is used for early termination of the enqueuing process.
531 mono_gc_finalize_domain (MonoDomain
*domain
)
533 sgen_finalize_if (object_in_domain_predicate
, domain
);
/* Stop the finalizer machinery from running further finalizers. */
void
mono_gc_suspend_finalizers (void)
{
	sgen_set_suspend_finalizers ();
}
/* Singly-linked registry of ephemeron arrays (ConditionalWeakTable backing
 * stores) that the collector must treat specially. */
typedef struct _EphemeronLinkNode EphemeronLinkNode;

struct _EphemeronLinkNode {
	EphemeronLinkNode *next;
	/* NOTE(review): `array` member reconstructed from the `current->array`
	 * uses below — confirm field order against the original. */
	MonoArray *array;
};

static EphemeronLinkNode *ephemeron_list;
560 /* LOCKING: requires that the GC lock is held */
561 static MONO_PERMIT (need (sgen_gc_locked
)) void
562 null_ephemerons_for_domain (MonoDomain
*domain
)
564 EphemeronLinkNode
*current
= ephemeron_list
, *prev
= NULL
;
567 MonoObject
*object
= (MonoObject
*)current
->array
;
570 SGEN_ASSERT (0, object
->vtable
, "Can't have objects without vtables.");
572 if (object
&& object
->vtable
->domain
== domain
) {
573 EphemeronLinkNode
*tmp
= current
;
576 prev
->next
= current
->next
;
578 ephemeron_list
= current
->next
;
580 current
= current
->next
;
581 sgen_free_internal (tmp
, INTERNAL_MEM_EPHEMERON_LINK
);
584 current
= current
->next
;
589 /* LOCKING: requires that the GC lock is held */
591 sgen_client_clear_unreachable_ephemerons (ScanCopyContext ctx
)
593 CopyOrMarkObjectFunc copy_func
= ctx
.ops
->copy_or_mark_object
;
594 SgenGrayQueue
*queue
= ctx
.queue
;
595 EphemeronLinkNode
*current
= ephemeron_list
, *prev
= NULL
;
596 Ephemeron
*cur
, *array_end
;
600 MonoArray
*array
= current
->array
;
602 if (!sgen_is_object_alive_for_current_gen ((GCObject
*)array
)) {
603 EphemeronLinkNode
*tmp
= current
;
605 SGEN_LOG (5, "Dead Ephemeron array at %p", array
);
608 prev
->next
= current
->next
;
610 ephemeron_list
= current
->next
;
612 current
= current
->next
;
613 sgen_free_internal (tmp
, INTERNAL_MEM_EPHEMERON_LINK
);
618 copy_func ((GCObject
**)&array
, queue
);
619 current
->array
= array
;
621 SGEN_LOG (5, "Clearing unreachable entries for ephemeron array at %p", array
);
623 cur
= mono_array_addr_internal (array
, Ephemeron
, 0);
624 array_end
= cur
+ mono_array_length_internal (array
);
625 tombstone
= SGEN_LOAD_VTABLE ((GCObject
*)array
)->domain
->ephemeron_tombstone
;
627 for (; cur
< array_end
; ++cur
) {
628 GCObject
*key
= cur
->key
;
630 if (!key
|| key
== tombstone
)
633 SGEN_LOG (5, "[%zd] key %p (%s) value %p (%s)", cur
- mono_array_addr_internal (array
, Ephemeron
, 0),
634 key
, sgen_is_object_alive_for_current_gen (key
) ? "reachable" : "unreachable",
635 cur
->value
, cur
->value
&& sgen_is_object_alive_for_current_gen (cur
->value
) ? "reachable" : "unreachable");
637 if (!sgen_is_object_alive_for_current_gen (key
)) {
638 cur
->key
= tombstone
;
644 current
= current
->next
;
649 LOCKING: requires that the GC lock is held
651 Limitations: We scan all ephemerons on every collection since the current design doesn't allow for a simple nursery/mature split.
654 sgen_client_mark_ephemerons (ScanCopyContext ctx
)
656 CopyOrMarkObjectFunc copy_func
= ctx
.ops
->copy_or_mark_object
;
657 SgenGrayQueue
*queue
= ctx
.queue
;
658 gboolean nothing_marked
= TRUE
;
659 EphemeronLinkNode
*current
= ephemeron_list
;
660 Ephemeron
*cur
, *array_end
;
663 for (current
= ephemeron_list
; current
; current
= current
->next
) {
664 MonoArray
*array
= current
->array
;
665 SGEN_LOG (5, "Ephemeron array at %p", array
);
667 /*It has to be alive*/
668 if (!sgen_is_object_alive_for_current_gen ((GCObject
*)array
)) {
669 SGEN_LOG (5, "\tnot reachable");
673 copy_func ((GCObject
**)&array
, queue
);
675 cur
= mono_array_addr_internal (array
, Ephemeron
, 0);
676 array_end
= cur
+ mono_array_length_internal (array
);
677 tombstone
= SGEN_LOAD_VTABLE ((GCObject
*)array
)->domain
->ephemeron_tombstone
;
679 for (; cur
< array_end
; ++cur
) {
680 GCObject
*key
= cur
->key
;
682 if (!key
|| key
== tombstone
)
685 SGEN_LOG (5, "[%zd] key %p (%s) value %p (%s)", cur
- mono_array_addr_internal (array
, Ephemeron
, 0),
686 key
, sgen_is_object_alive_for_current_gen (key
) ? "reachable" : "unreachable",
687 cur
->value
, cur
->value
&& sgen_is_object_alive_for_current_gen (cur
->value
) ? "reachable" : "unreachable");
689 if (sgen_is_object_alive_for_current_gen (key
)) {
690 GCObject
*value
= cur
->value
;
692 copy_func (&cur
->key
, queue
);
694 if (!sgen_is_object_alive_for_current_gen (value
)) {
695 nothing_marked
= FALSE
;
696 sgen_binary_protocol_ephemeron_ref (current
, key
, value
);
698 copy_func (&cur
->value
, queue
);
704 SGEN_LOG (5, "Ephemeron run finished. Is it done %d", nothing_marked
);
705 return nothing_marked
;
709 mono_gc_ephemeron_array_add (MonoObject
*obj
)
711 EphemeronLinkNode
*node
;
715 node
= (EphemeronLinkNode
*)sgen_alloc_internal (INTERNAL_MEM_EPHEMERON_LINK
);
720 node
->array
= (MonoArray
*)obj
;
721 node
->next
= ephemeron_list
;
722 ephemeron_list
= node
;
724 SGEN_LOG (5, "Registered ephemeron array %p", obj
);
735 need_remove_object_for_domain (GCObject
*start
, MonoDomain
*domain
)
737 if (mono_object_domain (start
) == domain
) {
738 SGEN_LOG (4, "Need to cleanup object %p", start
);
739 sgen_binary_protocol_cleanup (start
, (gpointer
)SGEN_LOAD_VTABLE (start
), sgen_safe_object_get_size ((GCObject
*)start
));
746 process_object_for_domain_clearing (GCObject
*start
, MonoDomain
*domain
)
748 MonoVTable
*vt
= SGEN_LOAD_VTABLE (start
);
749 if (vt
->klass
== mono_defaults
.internal_thread_class
)
750 g_assert (mono_object_domain (start
) == mono_get_root_domain ());
751 /* The object could be a proxy for an object in the domain
753 #ifndef DISABLE_REMOTING
754 if (m_class_get_supertypes (mono_defaults
.real_proxy_class
) && mono_class_has_parent_fast (vt
->klass
, mono_defaults
.real_proxy_class
)) {
755 MonoObject
*server
= ((MonoRealProxy
*)start
)->unwrapped_server
;
757 /* The server could already have been zeroed out, so
758 we need to check for that, too. */
759 if (server
&& (!SGEN_LOAD_VTABLE (server
) || mono_object_domain (server
) == domain
)) {
760 SGEN_LOG (4, "Cleaning up remote pointer in %p to object %p", start
, server
);
761 ((MonoRealProxy
*)start
)->unwrapped_server
= NULL
;
768 clear_domain_process_object (GCObject
*obj
, MonoDomain
*domain
)
772 process_object_for_domain_clearing (obj
, domain
);
773 remove
= need_remove_object_for_domain (obj
, domain
);
775 if (remove
&& obj
->synchronisation
) {
776 guint32 dislink
= mono_monitor_get_object_monitor_gchandle (obj
);
778 mono_gchandle_free_internal (dislink
);
785 clear_domain_process_minor_object_callback (GCObject
*obj
, size_t size
, MonoDomain
*domain
)
787 if (clear_domain_process_object (obj
, domain
)) {
788 CANARIFY_SIZE (size
);
789 memset (obj
, 0, size
);
794 clear_domain_process_major_object_callback (GCObject
*obj
, size_t size
, MonoDomain
*domain
)
796 clear_domain_process_object (obj
, domain
);
800 clear_domain_free_major_non_pinned_object_callback (GCObject
*obj
, size_t size
, MonoDomain
*domain
)
802 if (need_remove_object_for_domain (obj
, domain
))
803 sgen_major_collector
.free_non_pinned_object (obj
, size
);
807 clear_domain_free_major_pinned_object_callback (GCObject
*obj
, size_t size
, MonoDomain
*domain
)
809 if (need_remove_object_for_domain (obj
, domain
))
810 sgen_major_collector
.free_pinned_object (obj
, size
);
814 sgen_finish_concurrent_work (const char *reason
, gboolean stw
)
816 if (sgen_get_concurrent_collection_in_progress ())
817 sgen_perform_collection (0, GENERATION_OLD
, reason
, TRUE
, stw
);
818 SGEN_ASSERT (0, !sgen_get_concurrent_collection_in_progress (), "We just ordered a synchronous collection. Why are we collecting concurrently?");
820 sgen_major_collector
.finish_sweeping ();
/*
 * When appdomains are unloaded we can easily remove objects that have finalizers,
 * but all the others could still be present in random places on the heap.
 * We need a sweep to get rid of them even though it's going to be costly.
 *
 * The reason we need to remove them is because we access the vtable and class
 * structures to know the object size and the reference bitmap: once the domain is
 * unloaded, they point to random memory.
 */
833 mono_gc_clear_domain (MonoDomain
* domain
)
835 LOSObject
*bigobj
, *prev
;
840 sgen_binary_protocol_domain_unload_begin (domain
);
842 sgen_stop_world (0, FALSE
);
844 sgen_finish_concurrent_work ("clear domain", FALSE
);
846 sgen_process_fin_stage_entries ();
848 sgen_clear_nursery_fragments ();
850 FOREACH_THREAD_ALL (info
) {
851 mono_handle_stack_free_domain (info
->client_info
.info
.handle_stack
, domain
);
854 if (sgen_mono_xdomain_checks
&& domain
!= mono_get_root_domain ()) {
855 sgen_scan_for_registered_roots_in_domain (domain
, ROOT_TYPE_NORMAL
);
856 sgen_scan_for_registered_roots_in_domain (domain
, ROOT_TYPE_WBARRIER
);
857 sgen_check_for_xdomain_refs ();
860 /*Ephemerons and dislinks must be processed before LOS since they might end up pointing
861 to memory returned to the OS.*/
862 null_ephemerons_for_domain (domain
);
863 sgen_null_links_for_domain (domain
);
865 for (i
= GENERATION_NURSERY
; i
< GENERATION_MAX
; ++i
)
866 sgen_remove_finalizers_if (object_in_domain_predicate
, domain
, i
);
868 sgen_scan_area_with_callback (sgen_nursery_section
->data
, sgen_nursery_section
->end_data
,
869 (IterateObjectCallbackFunc
)clear_domain_process_minor_object_callback
, domain
, FALSE
, TRUE
);
871 /* We need two passes over major and large objects because
872 freeing such objects might give their memory back to the OS
873 (in the case of large objects) or obliterate its vtable
874 (pinned objects with major-copying or pinned and non-pinned
875 objects with major-mark&sweep), but we might need to
876 dereference a pointer from an object to another object if
877 the first object is a proxy. */
878 sgen_major_collector
.iterate_objects (ITERATE_OBJECTS_SWEEP_ALL
, (IterateObjectCallbackFunc
)clear_domain_process_major_object_callback
, domain
);
879 for (bigobj
= sgen_los_object_list
; bigobj
; bigobj
= bigobj
->next
)
880 clear_domain_process_object ((GCObject
*)bigobj
->data
, domain
);
883 for (bigobj
= sgen_los_object_list
; bigobj
;) {
884 if (need_remove_object_for_domain ((GCObject
*)bigobj
->data
, domain
)) {
885 LOSObject
*to_free
= bigobj
;
887 prev
->next
= bigobj
->next
;
889 sgen_los_object_list
= bigobj
->next
;
890 bigobj
= bigobj
->next
;
891 SGEN_LOG (4, "Freeing large object %p", bigobj
->data
);
892 sgen_los_free_object (to_free
);
896 bigobj
= bigobj
->next
;
898 sgen_major_collector
.iterate_objects (ITERATE_OBJECTS_SWEEP_NON_PINNED
, (IterateObjectCallbackFunc
)clear_domain_free_major_non_pinned_object_callback
, domain
);
899 sgen_major_collector
.iterate_objects (ITERATE_OBJECTS_SWEEP_PINNED
, (IterateObjectCallbackFunc
)clear_domain_free_major_pinned_object_callback
, domain
);
901 if (domain
== mono_get_root_domain ()) {
902 sgen_pin_stats_report ();
903 sgen_object_layout_dump (stdout
);
906 sgen_restart_world (0, FALSE
);
908 sgen_binary_protocol_domain_unload_end (domain
);
909 sgen_binary_protocol_flush_buffers (FALSE
);
919 mono_gc_alloc_obj (MonoVTable
*vtable
, size_t size
)
921 MonoObject
*obj
= sgen_alloc_obj (vtable
, size
);
923 if (G_UNLIKELY (mono_profiler_allocations_enabled ()) && obj
)
924 MONO_PROFILER_RAISE (gc_allocation
, (obj
));
930 mono_gc_alloc_pinned_obj (MonoVTable
*vtable
, size_t size
)
932 MonoObject
*obj
= sgen_alloc_obj_pinned (vtable
, size
);
934 if (G_UNLIKELY (mono_profiler_allocations_enabled ()) && obj
)
935 MONO_PROFILER_RAISE (gc_allocation
, (obj
));
941 mono_gc_alloc_mature (MonoVTable
*vtable
, size_t size
)
943 MonoObject
*obj
= sgen_alloc_obj_mature (vtable
, size
);
945 if (G_UNLIKELY (mono_profiler_allocations_enabled ()) && obj
)
946 MONO_PROFILER_RAISE (gc_allocation
, (obj
));
952 * mono_gc_alloc_fixed:
955 mono_gc_alloc_fixed (size_t size
, MonoGCDescriptor descr
, MonoGCRootSource source
, void *key
, const char *msg
)
957 /* FIXME: do a single allocation */
958 void *res
= g_calloc (1, size
);
961 if (!mono_gc_register_root ((char *)res
, size
, descr
, source
, key
, msg
)) {
965 return (MonoObject
*)res
;
969 mono_gc_alloc_fixed_no_descriptor (size_t size
, MonoGCRootSource source
, void *key
, const char *msg
)
971 return mono_gc_alloc_fixed (size
, 0, source
, key
, msg
);
/**
 * mono_gc_free_fixed:
 *
 * Deregister and release memory obtained from mono_gc_alloc_fixed ().
 */
void
mono_gc_free_fixed (void* addr)
{
	mono_gc_deregister_root ((char *)addr);
	g_free (addr);
}
/* Caches of generated managed allocator wrappers, indexed by allocator type
 * (ATYPE_*), one cache per variant. */
static MonoMethod* alloc_method_cache [ATYPE_NUM];
static MonoMethod* slowpath_alloc_method_cache [ATYPE_NUM];
static MonoMethod* profiler_alloc_method_cache [ATYPE_NUM];
/* Can be cleared at runtime to force all allocations through the C path. */
static gboolean use_managed_allocator = TRUE;
#ifdef MANAGED_ALLOCATION
/* FIXME: Do this in the JIT, where specialized allocation sequences can be created
 * for each class. This is currently not easy to do, as it is hard to generate basic
 * blocks + branches, but it is easy with the linear IL codebase.
 *
 * For this to work we'd need to solve the TLAB race, first. Now we
 * require the allocator to be in a few known methods to make sure
 * that they are executed atomically via the restart mechanism.
 */
/* Build the managed allocator wrapper for the given allocator type and
 * variant (regular/slow-path/profiler). */
static MonoMethod*
create_allocator (int atype, ManagedAllocatorVariant variant)
{
	gboolean slowpath = variant == MANAGED_ALLOCATOR_SLOW_PATH;
	gboolean profiler = variant == MANAGED_ALLOCATOR_PROFILER;
	MonoMethodBuilder *mb;
	MonoMethod *res;
	MonoMethodSignature *csig;
	const char *name = NULL;
	WrapperInfo *info;
	int num_params, i;

	if (atype == ATYPE_SMALL) {
		name = slowpath ? "SlowAllocSmall" : (profiler ? "ProfilerAllocSmall" : "AllocSmall");
	} else if (atype == ATYPE_NORMAL) {
		name = slowpath ? "SlowAlloc" : (profiler ? "ProfilerAlloc" : "Alloc");
	} else if (atype == ATYPE_VECTOR) {
		name = slowpath ? "SlowAllocVector" : (profiler ? "ProfilerAllocVector" : "AllocVector");
	} else if (atype == ATYPE_STRING) {
		name = slowpath ? "SlowAllocString" : (profiler ? "ProfilerAllocString" : "AllocString");
	} else {
		g_assert_not_reached ();
	}

	/* NOTE(review): num_params selection reconstructed — confirm that only
	 * ATYPE_NORMAL takes a single (vtable) parameter. */
	if (atype == ATYPE_NORMAL)
		num_params = 1;
	else
		num_params = 2;

	MonoType *int_type = mono_get_int_type ();
	csig = mono_metadata_signature_alloc (mono_defaults.corlib, num_params);
	if (atype == ATYPE_STRING) {
		csig->ret = m_class_get_byval_arg (mono_defaults.string_class);
		csig->params [0] = int_type;
		csig->params [1] = mono_get_int32_type ();
	} else {
		csig->ret = mono_get_object_type ();
		for (i = 0; i < num_params; i++)
			csig->params [i] = int_type;
	}

	mb = mono_mb_new (mono_defaults.object_class, name, MONO_WRAPPER_ALLOC);

	get_sgen_mono_cb ()->emit_managed_allocator (mb, slowpath, profiler, atype);

	info = mono_wrapper_info_create (mb, WRAPPER_SUBTYPE_NONE);
	info->d.alloc.gc_name = "sgen";
	info->d.alloc.alloc_type = atype;

	res = mono_mb_create (mb, csig, 8, info);
	mono_mb_free (mb);

	return res;
}
#endif
/* Round `size` up to SGen's allocation alignment. */
int
mono_gc_get_aligned_size_for_allocator (int size)
{
	return SGEN_ALIGN_UP (size);
}
1065 * Generate an allocator method implementing the fast path of mono_gc_alloc_obj ().
1066 * The signature of the called method is:
1067 * object allocate (MonoVTable *vtable)
1070 mono_gc_get_managed_allocator (MonoClass
*klass
, gboolean for_box
, gboolean known_instance_size
)
1072 #ifdef MANAGED_ALLOCATION
1073 ManagedAllocatorVariant variant
= mono_profiler_allocations_enabled () ?
1074 MANAGED_ALLOCATOR_PROFILER
: MANAGED_ALLOCATOR_REGULAR
;
1076 if (sgen_collect_before_allocs
)
1078 if (m_class_get_instance_size (klass
) > sgen_tlab_size
)
1080 if (known_instance_size
&& ALIGN_TO (m_class_get_instance_size (klass
), SGEN_ALLOC_ALIGN
) >= SGEN_MAX_SMALL_OBJ_SIZE
)
1082 if (mono_class_has_finalizer (klass
) || mono_class_is_marshalbyref (klass
) || m_class_has_weak_fields (klass
))
1084 if (m_class_get_rank (klass
))
1086 if (m_class_get_byval_arg (klass
)->type
== MONO_TYPE_STRING
)
1087 return mono_gc_get_managed_allocator_by_type (ATYPE_STRING
, variant
);
1088 /* Generic classes have dynamic field and can go above MAX_SMALL_OBJ_SIZE. */
1089 if (known_instance_size
)
1090 return mono_gc_get_managed_allocator_by_type (ATYPE_SMALL
, variant
);
1092 return mono_gc_get_managed_allocator_by_type (ATYPE_NORMAL
, variant
);
1099 mono_gc_get_managed_array_allocator (MonoClass
*klass
)
1101 #ifdef MANAGED_ALLOCATION
1102 if (m_class_get_rank (klass
) != 1)
1104 if (sgen_has_per_allocation_action
)
1106 g_assert (!mono_class_has_finalizer (klass
) && !mono_class_is_marshalbyref (klass
));
1108 return mono_gc_get_managed_allocator_by_type (ATYPE_VECTOR
, mono_profiler_allocations_enabled () ?
1109 MANAGED_ALLOCATOR_PROFILER
: MANAGED_ALLOCATOR_REGULAR
);
1116 sgen_set_use_managed_allocator (gboolean flag
)
1118 use_managed_allocator
= flag
;
1122 mono_gc_get_managed_allocator_by_type (int atype
, ManagedAllocatorVariant variant
)
1124 #ifdef MANAGED_ALLOCATION
1128 if (variant
!= MANAGED_ALLOCATOR_SLOW_PATH
&& !use_managed_allocator
)
1132 case MANAGED_ALLOCATOR_REGULAR
: cache
= alloc_method_cache
; break;
1133 case MANAGED_ALLOCATOR_SLOW_PATH
: cache
= slowpath_alloc_method_cache
; break;
1134 case MANAGED_ALLOCATOR_PROFILER
: cache
= profiler_alloc_method_cache
; break;
1135 default: g_assert_not_reached (); break;
1138 res
= cache
[atype
];
1142 res
= create_allocator (atype
, variant
);
1144 if (cache
[atype
]) {
1145 mono_free_method (res
);
1146 res
= cache
[atype
];
1148 mono_memory_barrier ();
1149 cache
[atype
] = res
;
1160 mono_gc_get_managed_allocator_types (void)
1166 sgen_is_managed_allocator (MonoMethod
*method
)
1170 for (i
= 0; i
< ATYPE_NUM
; ++i
)
1171 if (method
== alloc_method_cache
[i
] || method
== slowpath_alloc_method_cache
[i
] || method
== profiler_alloc_method_cache
[i
])
1177 sgen_has_managed_allocator (void)
1181 for (i
= 0; i
< ATYPE_NUM
; ++i
)
1182 if (alloc_method_cache
[i
] || slowpath_alloc_method_cache
[i
] || profiler_alloc_method_cache
[i
])
1187 #define ARRAY_OBJ_INDEX(ptr,array,elem_size) (((char*)(ptr) - ((char*)(array) + G_STRUCT_OFFSET (MonoArray, vector))) / (elem_size))
1190 sgen_client_cardtable_scan_object (GCObject
*obj
, guint8
*cards
, ScanCopyContext ctx
)
1192 MonoVTable
*vt
= SGEN_LOAD_VTABLE (obj
);
1193 MonoClass
*klass
= vt
->klass
;
1195 SGEN_ASSERT (0, SGEN_VTABLE_HAS_REFERENCES (vt
), "Why would we ever call this on reference-free objects?");
1198 MonoArray
*arr
= (MonoArray
*)obj
;
1199 guint8
*card_data
, *card_base
;
1200 guint8
*card_data_end
;
1201 char *obj_start
= (char *)sgen_card_table_align_pointer (obj
);
1203 mword obj_size
= sgen_mono_array_size (vt
, arr
, &bounds_size
, sgen_vtable_get_descriptor (vt
));
1204 /* We don't want to scan the bounds entries at the end of multidimensional arrays */
1205 char *obj_end
= (char*)obj
+ obj_size
- bounds_size
;
1207 size_t extra_idx
= 0;
1209 mword desc
= (mword
)m_class_get_gc_descr (m_class_get_element_class (klass
));
1210 int elem_size
= mono_array_element_size (klass
);
1212 #ifdef SGEN_OBJECT_LAYOUT_STATISTICS
1213 if (m_class_is_valuetype (m_class_get_element_class (klass
)))
1214 sgen_object_layout_scanned_vtype_array ();
1216 sgen_object_layout_scanned_ref_array ();
1222 card_data
= sgen_card_table_get_card_scan_address ((mword
)obj
);
1224 card_base
= card_data
;
1225 card_count
= sgen_card_table_number_of_cards_in_range ((mword
)obj
, obj_size
);
1227 #ifdef SGEN_HAVE_OVERLAPPING_CARDS
1229 card_data_end
= card_base
+ card_count
;
1232 * Check for overflow and if so, scan only until the end of the shadow
1233 * card table, leaving the rest for next iterations.
1235 if (!cards
&& card_data_end
>= SGEN_SHADOW_CARDTABLE_END
) {
1236 card_data_end
= SGEN_SHADOW_CARDTABLE_END
;
1238 card_count
-= (card_data_end
- card_base
);
1241 card_data_end
= card_data
+ card_count
;
1244 card_data
= sgen_find_next_card (card_data
, card_data_end
);
1245 for (; card_data
< card_data_end
; card_data
= sgen_find_next_card (card_data
+ 1, card_data_end
)) {
1247 size_t idx
= (card_data
- card_base
) + extra_idx
;
1248 char *start
= (char*)(obj_start
+ idx
* CARD_SIZE_IN_BYTES
);
1249 char *card_end
= start
+ CARD_SIZE_IN_BYTES
;
1250 char *first_elem
, *elem
;
1252 HEAVY_STAT (++los_marked_cards
);
1255 sgen_card_table_prepare_card_for_scanning (card_data
);
1257 card_end
= MIN (card_end
, obj_end
);
1259 if (start
<= (char*)arr
->vector
)
1262 index
= ARRAY_OBJ_INDEX (start
, obj
, elem_size
);
1264 elem
= first_elem
= (char*)mono_array_addr_with_size_fast ((MonoArray
*)obj
, elem_size
, index
);
1265 if (m_class_is_valuetype (m_class_get_element_class (klass
))) {
1266 ScanVTypeFunc scan_vtype_func
= ctx
.ops
->scan_vtype
;
1268 for (; elem
< card_end
; elem
+= elem_size
)
1269 scan_vtype_func (obj
, elem
, desc
, ctx
.queue
BINARY_PROTOCOL_ARG (elem_size
));
1271 ScanPtrFieldFunc scan_ptr_field_func
= ctx
.ops
->scan_ptr_field
;
1273 HEAVY_STAT (++los_array_cards
);
1274 for (; elem
< card_end
; elem
+= SIZEOF_VOID_P
)
1275 scan_ptr_field_func (obj
, (GCObject
**)elem
, ctx
.queue
);
1278 sgen_binary_protocol_card_scan (first_elem
, elem
- first_elem
);
1281 #ifdef SGEN_HAVE_OVERLAPPING_CARDS
1282 if (card_count
> 0) {
1283 SGEN_ASSERT (0, card_data
== SGEN_SHADOW_CARDTABLE_END
, "Why we didn't stop at shadow cardtable end ?");
1284 extra_idx
+= card_data
- card_base
;
1285 card_base
= card_data
= sgen_shadow_cardtable
;
1296 * Array and string allocation
1300 mono_gc_alloc_vector (MonoVTable
*vtable
, size_t size
, uintptr_t max_length
)
1305 if (!SGEN_CAN_ALIGN_UP (size
))
1308 #ifndef DISABLE_CRITICAL_REGION
1309 ENTER_CRITICAL_REGION
;
1310 arr
= (MonoArray
*)sgen_try_alloc_obj_nolock (vtable
, size
);
1312 /*This doesn't require fencing since EXIT_CRITICAL_REGION already does it for us*/
1313 arr
->max_length
= (mono_array_size_t
)max_length
;
1314 EXIT_CRITICAL_REGION
;
1317 EXIT_CRITICAL_REGION
;
1322 arr
= (MonoArray
*)sgen_alloc_obj_nolock (vtable
, size
);
1323 if (G_UNLIKELY (!arr
)) {
1328 arr
->max_length
= (mono_array_size_t
)max_length
;
1333 if (G_UNLIKELY (mono_profiler_allocations_enabled ()))
1334 MONO_PROFILER_RAISE (gc_allocation
, (&arr
->obj
));
1336 SGEN_ASSERT (6, SGEN_ALIGN_UP (size
) == SGEN_ALIGN_UP (sgen_client_par_object_get_size (vtable
, (GCObject
*)arr
)), "Vector has incorrect size.");
1341 mono_gc_alloc_array (MonoVTable
*vtable
, size_t size
, uintptr_t max_length
, uintptr_t bounds_size
)
1344 MonoArrayBounds
*bounds
;
1347 if (!SGEN_CAN_ALIGN_UP (size
))
1350 #ifndef DISABLE_CRITICAL_REGION
1351 ENTER_CRITICAL_REGION
;
1352 arr
= (MonoArray
*)sgen_try_alloc_obj_nolock (vtable
, size
);
1354 /*This doesn't require fencing since EXIT_CRITICAL_REGION already does it for us*/
1355 arr
->max_length
= (mono_array_size_t
)max_length
;
1357 bounds
= (MonoArrayBounds
*)((char*)arr
+ size
- bounds_size
);
1358 arr
->bounds
= bounds
;
1359 EXIT_CRITICAL_REGION
;
1362 EXIT_CRITICAL_REGION
;
1367 arr
= (MonoArray
*)sgen_alloc_obj_nolock (vtable
, size
);
1368 if (G_UNLIKELY (!arr
)) {
1373 arr
->max_length
= (mono_array_size_t
)max_length
;
1375 bounds
= (MonoArrayBounds
*)((char*)arr
+ size
- bounds_size
);
1376 arr
->bounds
= bounds
;
1381 if (G_UNLIKELY (mono_profiler_allocations_enabled ()))
1382 MONO_PROFILER_RAISE (gc_allocation
, (&arr
->obj
));
1384 SGEN_ASSERT (6, SGEN_ALIGN_UP (size
) == SGEN_ALIGN_UP (sgen_client_par_object_get_size (vtable
, (GCObject
*)arr
)), "Array has incorrect size.");
1389 mono_gc_alloc_string (MonoVTable
*vtable
, size_t size
, gint32 len
)
1394 if (!SGEN_CAN_ALIGN_UP (size
))
1397 #ifndef DISABLE_CRITICAL_REGION
1398 ENTER_CRITICAL_REGION
;
1399 str
= (MonoString
*)sgen_try_alloc_obj_nolock (vtable
, size
);
1401 /*This doesn't require fencing since EXIT_CRITICAL_REGION already does it for us*/
1403 EXIT_CRITICAL_REGION
;
1406 EXIT_CRITICAL_REGION
;
1411 str
= (MonoString
*)sgen_alloc_obj_nolock (vtable
, size
);
1412 if (G_UNLIKELY (!str
)) {
1422 if (G_UNLIKELY (mono_profiler_allocations_enabled ()))
1423 MONO_PROFILER_RAISE (gc_allocation
, (&str
->object
));
1433 mono_gc_set_string_length (MonoString
*str
, gint32 new_length
)
1435 mono_unichar2
*new_end
= str
->chars
+ new_length
;
1437 /* zero the discarded string. This null-delimits the string and allows
1438 * the space to be reclaimed by SGen. */
1440 if (sgen_nursery_canaries_enabled () && sgen_ptr_in_nursery (str
)) {
1441 CHECK_CANARY_FOR_OBJECT ((GCObject
*)str
, TRUE
);
1442 memset (new_end
, 0, (str
->length
- new_length
+ 1) * sizeof (mono_unichar2
) + CANARY_SIZE
);
1443 memcpy (new_end
+ 1 , CANARY_STRING
, CANARY_SIZE
);
1445 memset (new_end
, 0, (str
->length
- new_length
+ 1) * sizeof (mono_unichar2
));
1448 str
->length
= new_length
;
1455 #define GC_ROOT_NUM 32
1456 #define SPECIAL_ADDRESS_FIN_QUEUE ((mono_byte*)1)
1457 #define SPECIAL_ADDRESS_CRIT_FIN_QUEUE ((mono_byte*)2)
1458 #define SPECIAL_ADDRESS_EPHEMERON ((mono_byte*)3)
1459 #define SPECIAL_ADDRESS_TOGGLEREF ((mono_byte*)4)
1462 int count
; /* must be the first field */
1463 void *addresses
[GC_ROOT_NUM
];
1464 void *objects
[GC_ROOT_NUM
];
1468 notify_gc_roots (GCRootReport
*report
)
1472 MONO_PROFILER_RAISE (gc_roots
, (report
->count
, (const mono_byte
*const *)report
->addresses
, (MonoObject
*const *) report
->objects
));
1477 report_gc_root (GCRootReport
*report
, void *address
, void *object
)
1479 if (report
->count
== GC_ROOT_NUM
)
1480 notify_gc_roots (report
);
1481 report
->addresses
[report
->count
] = address
;
1482 report
->objects
[report
->count
] = object
;
1487 single_arg_report_root (MonoObject
**obj
, void *gc_data
)
1489 GCRootReport
*report
= (GCRootReport
*)gc_data
;
1491 report_gc_root (report
, obj
, *obj
);
1495 two_args_report_root (void *address
, MonoObject
*obj
, void *gc_data
)
1497 GCRootReport
*report
= (GCRootReport
*)gc_data
;
1499 report_gc_root (report
, address
, obj
);
1503 precisely_report_roots_from (GCRootReport
*report
, void** start_root
, void** end_root
, mword desc
)
1505 switch (desc
& ROOT_DESC_TYPE_MASK
) {
1506 case ROOT_DESC_BITMAP
:
1507 desc
>>= ROOT_DESC_TYPE_SHIFT
;
1509 if ((desc
& 1) && *start_root
)
1510 report_gc_root (report
, start_root
, *start_root
);
1515 case ROOT_DESC_COMPLEX
: {
1516 gsize
*bitmap_data
= (gsize
*)sgen_get_complex_descriptor_bitmap (desc
);
1517 gsize bwords
= (*bitmap_data
) - 1;
1518 void **start_run
= start_root
;
1520 while (bwords
-- > 0) {
1521 gsize bmap
= *bitmap_data
++;
1522 void **objptr
= start_run
;
1524 if ((bmap
& 1) && *objptr
)
1525 report_gc_root (report
, objptr
, *objptr
);
1529 start_run
+= GC_BITS_PER_WORD
;
1533 case ROOT_DESC_VECTOR
: {
1536 for (p
= start_root
; p
< end_root
; p
++) {
1538 report_gc_root (report
, p
, *p
);
1542 case ROOT_DESC_USER
: {
1543 MonoGCRootMarkFunc marker
= (MonoGCRootMarkFunc
)sgen_get_user_descriptor_func (desc
);
1545 if ((void*)marker
== (void*)sgen_mark_normal_gc_handles
)
1546 sgen_gc_handles_report_roots (two_args_report_root
, report
);
1548 marker ((MonoObject
**)start_root
, single_arg_report_root
, report
);
1551 case ROOT_DESC_RUN_LEN
:
1552 g_assert_not_reached ();
1554 g_assert_not_reached ();
1559 report_pinning_roots (GCRootReport
*report
, void **start
, void **end
)
1561 while (start
< end
) {
1562 mword addr
= (mword
)*start
;
1563 addr
&= ~(SGEN_ALLOC_ALIGN
- 1);
1565 report_gc_root (report
, start
, (void*)addr
);
1571 static SgenPointerQueue pinned_objects
= SGEN_POINTER_QUEUE_INIT (INTERNAL_MEM_MOVED_OBJECT
);
1572 static mword lower_bound
, upper_bound
;
1575 find_pinned_obj (char *addr
)
1577 size_t idx
= sgen_pointer_queue_search (&pinned_objects
, addr
);
1579 if (idx
!= pinned_objects
.next_slot
) {
1580 if (pinned_objects
.data
[idx
] == addr
)
1581 return (GCObject
*)pinned_objects
.data
[idx
];
1586 GCObject
*obj
= (GCObject
*)pinned_objects
.data
[idx
- 1];
1587 if (addr
> (char*)obj
&& addr
< ((char*)obj
+ sgen_safe_object_get_size (obj
)))
1594 * We pass @root_report_address so register are properly accounted towards their thread
1597 report_conservative_roots (GCRootReport
*report
, void *root_report_address
, void **start
, void **end
)
1599 while (start
< end
) {
1600 mword addr
= (mword
)*start
;
1601 addr
&= ~(SGEN_ALLOC_ALIGN
- 1);
1603 if (addr
< lower_bound
|| addr
> upper_bound
) {
1608 GCObject
*obj
= find_pinned_obj ((char*)addr
);
1610 report_gc_root (report
, root_report_address
, obj
);
1617 GCRootReport
*report
;
1618 SgenThreadInfo
*info
;
1619 } ReportHandleStackRoot
;
1622 report_handle_stack_root (gpointer
*ptr
, gpointer user_data
)
1624 ReportHandleStackRoot
*ud
= (ReportHandleStackRoot
*)user_data
;
1625 GCRootReport
*report
= ud
->report
;
1626 gpointer addr
= ud
->info
->client_info
.info
.handle_stack
;
1628 // Note: We know that *ptr != NULL.
1630 report_gc_root (report
, addr
, *ptr
);
1632 report_conservative_roots (report
, addr
, ptr
, ptr
+ 1);
1636 report_handle_stack_roots (GCRootReport
*report
, SgenThreadInfo
*info
, gboolean precise
)
1638 ReportHandleStackRoot ud
;
1639 memset (&ud
, 0, sizeof (ud
));
1640 ud
.precise
= precise
;
1644 mono_handle_stack_scan (info
->client_info
.info
.handle_stack
, report_handle_stack_root
, &ud
, ud
.precise
, FALSE
);
1648 get_aligned_stack_start (SgenThreadInfo
*info
)
1650 void* aligned_stack_start
= (void*)(mword
) ALIGN_TO ((mword
)info
->client_info
.stack_start
, SIZEOF_VOID_P
);
1652 // Due to the guard page mechanism providing gradual commit of Windows stacks,
1653 // stack pages must be touched in order.
1655 // This mechanism is only transparent (kernel handles page faults and user never sees them),
1656 // for the thread touching its own stack. Not for cross-thread stack references as are being
1659 // Here is a small program that demonstrates the behavior:
1661 // #include <windows.h>
1662 // #include <stdio.h>
1664 // #pragma optimize ("x", on)
1666 // int volatile * volatile Event1;
1667 // int volatile Event2;
1668 // HANDLE ThreadHandle;
1670 // DWORD __stdcall thread (void* x)
1677 // } __except (GetExceptionCode () == STATUS_GUARD_PAGE_VIOLATION) {
1678 // printf ("oops\n");
1687 // __declspec (noinline)
1688 // __declspec (safebuffers)
1693 // while (unlucky && ((size_t)_AddressOfReturnAddress () - 8) & 0xFFF)
1703 // printf ("%X\n", local [0]);
1707 // if (ThreadHandle) {
1708 // WaitForSingleObject (ThreadHandle, INFINITE);
1709 // ThreadHandle = NULL;
1713 // int main (int argc, char** argv)
1715 // unlucky = argc > 1;
1716 // ThreadHandle = CreateThread (0, 0, thread, 0, 0, 0);
1720 // This would seem to be a problem otherwise, not just for garbage collectors.
1722 // We therefore have a few choices:
1724 // 1. Historical slow code: VirtualQuery and check for guard page. Slow.
1726 // MEMORY_BASIC_INFORMATION mem_info;
1727 // SIZE_T result = VirtualQuery (info->client_info.stack_start, &mem_info, sizeof(mem_info));
1728 // g_assert (result != 0);
1729 // if (mem_info.Protect & PAGE_GUARD) {
1730 // aligned_stack_start = ((char*) mem_info.BaseAddress) + mem_info.RegionSize;
1733 // VirtualQuery not historically allowed in UWP, but it is now.
1735 // 2. Touch page under __try / __except and handle STATUS_GUARD_PAGE_VIOLATION.
1736 // Good but compiler specific.
1739 // *(volatile char*)aligned_stack_start;
1740 // } __except (GetExceptionCode () == STATUS_GUARD_PAGE_VIOLATION) {
1741 // MEMORY_BASIC_INFORMATION mem_info;
1742 // const SIZE_T result = VirtualQuery(aligned_stack_start, &mem_info, sizeof(mem_info));
1743 // g_assert (result >= sizeof (mem_info));
1744 // VirtualProtect (aligned_stack_start, 1, mem_info.Protect | PAGE_GUARD, &mem_info.Protect);
1747 // 3. Vectored exception handler. Not terrible. Not compiler specific.
1749 // 4. Check against the high watermark in the TIB. That is done.
1750 // TIB is the public prefix TEB. It is Windows.h, ntddk.h, etc.
1752 aligned_stack_start
= MAX (aligned_stack_start
, info
->client_info
.info
.windows_tib
->StackLimit
);
1754 return aligned_stack_start
;
1758 report_stack_roots (void)
1760 GCRootReport report
= {0};
1761 FOREACH_THREAD_EXCLUDE (info
, MONO_THREAD_INFO_FLAGS_NO_GC
) {
1762 void *aligned_stack_start
;
1764 if (info
->client_info
.skip
) {
1766 } else if (!mono_thread_info_is_live (info
)) {
1768 } else if (!info
->client_info
.stack_start
) {
1772 g_assert (info
->client_info
.stack_start
);
1773 g_assert (info
->client_info
.info
.stack_end
);
1775 aligned_stack_start
= get_aligned_stack_start (info
);
1776 g_assert (info
->client_info
.suspend_done
);
1778 report_conservative_roots (&report
, aligned_stack_start
, (void **)aligned_stack_start
, (void **)info
->client_info
.info
.stack_end
);
1779 report_conservative_roots (&report
, aligned_stack_start
, (void**)&info
->client_info
.ctx
, (void**)(&info
->client_info
.ctx
+ 1));
1781 report_handle_stack_roots (&report
, info
, FALSE
);
1782 report_handle_stack_roots (&report
, info
, TRUE
);
1783 } FOREACH_THREAD_END
1785 notify_gc_roots (&report
);
1789 report_pin_queue (void)
1791 lower_bound
= SIZE_MAX
;
1794 //sort the addresses
1795 sgen_pointer_queue_sort_uniq (&pinned_objects
);
1797 for (int i
= 0; i
< pinned_objects
.next_slot
; ++i
) {
1798 GCObject
*obj
= (GCObject
*)pinned_objects
.data
[i
];
1799 ssize_t size
= sgen_safe_object_get_size (obj
);
1801 ssize_t addr
= (ssize_t
)obj
;
1802 lower_bound
= MIN (lower_bound
, addr
);
1803 upper_bound
= MAX (upper_bound
, addr
+ size
);
1806 report_stack_roots ();
1807 sgen_pointer_queue_clear (&pinned_objects
);
1811 report_finalizer_roots_from_queue (SgenPointerQueue
*queue
, void* queue_address
)
1813 GCRootReport report
;
1817 for (i
= 0; i
< queue
->next_slot
; ++i
) {
1818 void *obj
= queue
->data
[i
];
1821 report_gc_root (&report
, queue_address
, obj
);
1823 notify_gc_roots (&report
);
1827 report_registered_roots_by_type (int root_type
)
1829 GCRootReport report
= { 0 };
1833 SGEN_HASH_TABLE_FOREACH (&sgen_roots_hash
[root_type
], void **, start_root
, RootRecord
*, root
) {
1834 SGEN_LOG (6, "Profiler root scan %p-%p (desc: %p)", start_root
, root
->end_root
, (void*)(intptr_t)root
->root_desc
);
1835 if (root_type
== ROOT_TYPE_PINNED
)
1836 report_pinning_roots (&report
, start_root
, (void**)root
->end_root
);
1838 precisely_report_roots_from (&report
, start_root
, (void**)root
->end_root
, root
->root_desc
);
1839 } SGEN_HASH_TABLE_FOREACH_END
;
1840 notify_gc_roots (&report
);
1844 report_registered_roots (void)
1846 for (int i
= 0; i
< ROOT_TYPE_NUM
; ++i
)
1847 report_registered_roots_by_type (i
);
1851 report_ephemeron_roots (void)
1853 EphemeronLinkNode
*current
= ephemeron_list
;
1854 Ephemeron
*cur
, *array_end
;
1855 GCObject
*tombstone
;
1856 GCRootReport report
= { 0 };
1858 for (current
= ephemeron_list
; current
; current
= current
->next
) {
1859 MonoArray
*array
= current
->array
;
1861 if (!sgen_is_object_alive_for_current_gen ((GCObject
*)array
))
1864 cur
= mono_array_addr_internal (array
, Ephemeron
, 0);
1865 array_end
= cur
+ mono_array_length_internal (array
);
1866 tombstone
= SGEN_LOAD_VTABLE ((GCObject
*)array
)->domain
->ephemeron_tombstone
;
1868 for (; cur
< array_end
; ++cur
) {
1869 GCObject
*key
= cur
->key
;
1871 if (!key
|| key
== tombstone
)
1874 if (cur
->value
&& sgen_is_object_alive_for_current_gen (key
))
1875 report_gc_root (&report
, SPECIAL_ADDRESS_EPHEMERON
, cur
->value
);
1879 notify_gc_roots (&report
);
1883 report_toggleref_root (MonoObject
* obj
, gpointer data
)
1885 report_gc_root ((GCRootReport
*)data
, SPECIAL_ADDRESS_TOGGLEREF
, obj
);
1889 report_toggleref_roots (void)
1891 GCRootReport report
= { 0 };
1892 sgen_foreach_toggleref_root (report_toggleref_root
, &report
);
1893 notify_gc_roots (&report
);
1897 sgen_report_all_roots (SgenPointerQueue
*fin_ready_queue
, SgenPointerQueue
*critical_fin_queue
)
1899 if (!MONO_PROFILER_ENABLED (gc_roots
))
1902 report_registered_roots ();
1903 report_ephemeron_roots ();
1904 report_toggleref_roots ();
1905 report_pin_queue ();
1906 report_finalizer_roots_from_queue (fin_ready_queue
, SPECIAL_ADDRESS_FIN_QUEUE
);
1907 report_finalizer_roots_from_queue (critical_fin_queue
, SPECIAL_ADDRESS_CRIT_FIN_QUEUE
);
1911 sgen_client_pinning_start (void)
1913 if (!MONO_PROFILER_ENABLED (gc_roots
))
1916 sgen_pointer_queue_clear (&pinned_objects
);
1920 sgen_client_pinning_end (void)
1922 if (!MONO_PROFILER_ENABLED (gc_roots
))
1927 sgen_client_nursery_objects_pinned (void **definitely_pinned
, int count
)
1929 if (!MONO_PROFILER_ENABLED (gc_roots
))
1932 for (int i
= 0; i
< count
; ++i
)
1933 sgen_pointer_queue_add (&pinned_objects
, definitely_pinned
[i
]);
1937 sgen_client_pinned_los_object (GCObject
*obj
)
1939 if (!MONO_PROFILER_ENABLED (gc_roots
))
1942 sgen_pointer_queue_add (&pinned_objects
, obj
);
1946 sgen_client_pinned_cemented_object (GCObject
*obj
)
1948 if (!MONO_PROFILER_ENABLED (gc_roots
))
1951 // TODO: How do we report this in a way that makes sense?
1955 sgen_client_pinned_major_heap_object (GCObject
*obj
)
1957 if (!MONO_PROFILER_ENABLED (gc_roots
))
1960 sgen_pointer_queue_add (&pinned_objects
, obj
);
1964 sgen_client_collecting_minor_report_roots (SgenPointerQueue
*fin_ready_queue
, SgenPointerQueue
*critical_fin_queue
)
1966 sgen_report_all_roots (fin_ready_queue
, critical_fin_queue
);
1970 sgen_client_collecting_major_report_roots (SgenPointerQueue
*fin_ready_queue
, SgenPointerQueue
*critical_fin_queue
)
1972 sgen_report_all_roots (fin_ready_queue
, critical_fin_queue
);
1975 #define MOVED_OBJECTS_NUM 64
1976 static void *moved_objects
[MOVED_OBJECTS_NUM
];
1977 static int moved_objects_idx
= 0;
1979 static SgenPointerQueue moved_objects_queue
= SGEN_POINTER_QUEUE_INIT (INTERNAL_MEM_MOVED_OBJECT
);
1982 mono_sgen_register_moved_object (void *obj
, void *destination
)
1985 * This function can be called from SGen's worker threads. We want to try
1986 * and avoid exposing those threads to the profiler API, so queue up move
1987 * events and send them later when the main GC thread calls
1988 * mono_sgen_gc_event_moves ().
1990 * TODO: Once SGen has multiple worker threads, we need to switch to a
1991 * lock-free data structure for the queue as multiple threads will be
1992 * adding to it at the same time.
1994 if (sgen_workers_is_worker_thread (mono_native_thread_id_get ())) {
1995 sgen_pointer_queue_add (&moved_objects_queue
, obj
);
1996 sgen_pointer_queue_add (&moved_objects_queue
, destination
);
1998 if (moved_objects_idx
== MOVED_OBJECTS_NUM
) {
1999 MONO_PROFILER_RAISE (gc_moves
, ((MonoObject
**) moved_objects
, moved_objects_idx
));
2000 moved_objects_idx
= 0;
2003 moved_objects
[moved_objects_idx
++] = obj
;
2004 moved_objects
[moved_objects_idx
++] = destination
;
2009 mono_sgen_gc_event_moves (void)
2011 while (!sgen_pointer_queue_is_empty (&moved_objects_queue
)) {
2012 void *dst
= sgen_pointer_queue_pop (&moved_objects_queue
);
2013 void *src
= sgen_pointer_queue_pop (&moved_objects_queue
);
2015 mono_sgen_register_moved_object (src
, dst
);
2018 if (moved_objects_idx
) {
2019 MONO_PROFILER_RAISE (gc_moves
, ((MonoObject
**) moved_objects
, moved_objects_idx
));
2020 moved_objects_idx
= 0;
2028 #define REFS_SIZE 128
2031 MonoGCReferences callback
;
2035 MonoObject
*refs
[REFS_SIZE
];
2036 uintptr_t offsets
[REFS_SIZE
];
2040 #define HANDLE_PTR(ptr,obj) do { \
2042 if (hwi->count == REFS_SIZE) { \
2043 hwi->callback ((MonoObject*)start, mono_object_class (start), hwi->called? 0: size, hwi->count, hwi->refs, hwi->offsets, hwi->data); \
2047 hwi->offsets [hwi->count] = (char*)(ptr)-(char*)start; \
2048 hwi->refs [hwi->count++] = *(ptr); \
2053 collect_references (HeapWalkInfo
*hwi
, GCObject
*obj
, size_t size
)
2055 char *start
= (char*)obj
;
2056 mword desc
= sgen_obj_get_descriptor (obj
);
2058 #include "sgen/sgen-scan-object.h"
2062 walk_references (GCObject
*start
, size_t size
, void *data
)
2064 HeapWalkInfo
*hwi
= (HeapWalkInfo
*)data
;
2067 collect_references (hwi
, start
, size
);
2068 if (hwi
->count
|| !hwi
->called
)
2069 hwi
->callback (start
, mono_object_class (start
), hwi
->called
? 0: size
, hwi
->count
, hwi
->refs
, hwi
->offsets
, hwi
->data
);
2073 * mono_gc_walk_heap:
2074 * \param flags flags for future use
2075 * \param callback a function pointer called for each object in the heap
2076 * \param data a user data pointer that is passed to callback
2077 * This function can be used to iterate over all the live objects in the heap;
2078 * for each object, \p callback is invoked, providing info about the object's
2079 * location in memory, its class, its size and the objects it references.
2080 * For each referenced object its offset from the object address is
2081 * reported in the offsets array.
2082 * The object references may be buffered, so the callback may be invoked
2083 * multiple times for the same object: in all but the first call, the size
2084 * argument will be zero.
2085 * Note that this function can be only called in the \c MONO_GC_EVENT_PRE_START_WORLD
2086 * profiler event handler.
2087 * \returns a non-zero value if the GC doesn't support heap walking
2090 mono_gc_walk_heap (int flags
, MonoGCReferences callback
, void *data
)
2095 hwi
.callback
= callback
;
2098 sgen_clear_nursery_fragments ();
2099 sgen_scan_area_with_callback (sgen_nursery_section
->data
, sgen_nursery_section
->end_data
, walk_references
, &hwi
, FALSE
, TRUE
);
2101 sgen_major_collector
.iterate_objects (ITERATE_OBJECTS_SWEEP_ALL
, walk_references
, &hwi
);
2102 sgen_los_iterate_objects (walk_references
, &hwi
);
2112 mono_gc_set_gc_callbacks (MonoGCCallbacks
*callbacks
)
2114 gc_callbacks
= *callbacks
;
2118 mono_gc_get_gc_callbacks ()
2120 return &gc_callbacks
;
2124 mono_gc_thread_attach (SgenThreadInfo
*info
)
2126 return sgen_thread_attach (info
);
2130 sgen_client_thread_attach (SgenThreadInfo
* info
)
2132 mono_tls_set_sgen_thread_info (info
);
2134 info
->client_info
.skip
= FALSE
;
2136 info
->client_info
.stack_start
= NULL
;
2138 #ifdef SGEN_POSIX_STW
2139 info
->client_info
.stop_count
= -1;
2140 info
->client_info
.signal
= 0;
2143 memset (&info
->client_info
.ctx
, 0, sizeof (MonoContext
));
2145 if (mono_gc_get_gc_callbacks ()->thread_attach_func
)
2146 info
->client_info
.runtime_data
= mono_gc_get_gc_callbacks ()->thread_attach_func ();
2148 sgen_binary_protocol_thread_register ((gpointer
)mono_thread_info_get_tid (info
));
2150 SGEN_LOG (3, "registered thread %p (%p) stack end %p", info
, (gpointer
)mono_thread_info_get_tid (info
), info
->client_info
.info
.stack_end
);
2152 info
->client_info
.info
.handle_stack
= mono_handle_stack_alloc ();
2156 mono_gc_thread_detach_with_lock (SgenThreadInfo
*info
)
2158 return sgen_thread_detach_with_lock (info
);
2162 sgen_client_thread_detach_with_lock (SgenThreadInfo
*p
)
2164 MonoNativeThreadId tid
;
2166 mono_tls_set_sgen_thread_info (NULL
);
2168 sgen_increment_bytes_allocated_detached (p
->total_bytes_allocated
);
2170 tid
= mono_thread_info_get_tid (p
);
2172 mono_threads_add_joinable_runtime_thread (&p
->client_info
.info
);
2174 if (mono_gc_get_gc_callbacks ()->thread_detach_func
) {
2175 mono_gc_get_gc_callbacks ()->thread_detach_func (p
->client_info
.runtime_data
);
2176 p
->client_info
.runtime_data
= NULL
;
2179 sgen_binary_protocol_thread_unregister ((gpointer
)tid
);
2180 SGEN_LOG (3, "unregister thread %p (%p)", p
, (gpointer
)tid
);
2182 HandleStack
*handles
= p
->client_info
.info
.handle_stack
;
2183 p
->client_info
.info
.handle_stack
= NULL
;
2184 mono_handle_stack_free (handles
);
2188 mono_gc_skip_thread_changing (gboolean skip
)
2191 * SGen's STW will respect the thread info flags, but we do need to take
2192 * the GC lock when changing them. If we don't do this, SGen might end up
2193 * trying to resume a thread that wasn't suspended because it had
2194 * MONO_THREAD_INFO_FLAGS_NO_GC set when STW began.
2200 * If we skip scanning a thread with a non-empty handle stack, we may move an
2201 * object but fail to update the reference in the handle.
2203 HandleStack
*stack
= mono_thread_info_current ()->client_info
.info
.handle_stack
;
2204 g_assert (stack
== NULL
|| mono_handle_stack_is_empty (stack
));
2209 mono_gc_skip_thread_changed (gboolean skip
)
2215 mono_gc_thread_in_critical_region (SgenThreadInfo
*info
)
2217 return info
->client_info
.in_critical_region
;
2221 * mono_gc_is_gc_thread:
2224 mono_gc_is_gc_thread (void)
2228 result
= mono_thread_info_current () != NULL
;
/* Register an SGen worker thread with the thread machinery and name it. */
void
sgen_client_thread_register_worker (void)
{
	mono_thread_info_register_small_id ();
	mono_native_thread_set_name (mono_native_thread_id_get (), "SGen worker");
	mono_thread_set_name_windows (GetCurrentThread (), L"SGen worker");
}
2241 /* Variables holding start/end nursery so it won't have to be passed at every call */
2242 static void *scan_area_arg_start
, *scan_area_arg_end
;
2245 mono_gc_conservatively_scan_area (void *start
, void *end
)
2247 sgen_conservatively_pin_objects_from ((void **)start
, (void **)end
, scan_area_arg_start
, scan_area_arg_end
, PIN_TYPE_STACK
);
2251 mono_gc_scan_object (void *obj
, void *gc_data
)
2253 ScanCopyContext
*ctx
= (ScanCopyContext
*)gc_data
;
2254 ctx
->ops
->copy_or_mark_object ((GCObject
**)&obj
, ctx
->queue
);
2259 void **start_nursery
;
2261 } PinHandleStackInteriorPtrData
;
2263 /* Called when we're scanning the handle stack imprecisely and we encounter a pointer into the
2264 middle of an object.
2267 pin_handle_stack_interior_ptrs (void **ptr_slot
, void *user_data
)
2269 PinHandleStackInteriorPtrData
*ud
= (PinHandleStackInteriorPtrData
*)user_data
;
2270 sgen_conservatively_pin_objects_from (ptr_slot
, ptr_slot
+1, ud
->start_nursery
, ud
->end_nursery
, PIN_TYPE_STACK
);
2274 extern gboolean mono_wasm_enable_gc
;
2278 * Mark from thread stacks and registers.
2281 sgen_client_scan_thread_data (void *start_nursery
, void *end_nursery
, gboolean precise
, ScanCopyContext ctx
)
2283 scan_area_arg_start
= start_nursery
;
2284 scan_area_arg_end
= end_nursery
;
2286 //Under WASM we don't scan thread stacks and we can't trust the values we find there either.
2287 if (!mono_wasm_enable_gc
)
2291 SGEN_TV_DECLARE (scan_thread_data_start
);
2292 SGEN_TV_DECLARE (scan_thread_data_end
);
2294 SGEN_TV_GETTIME (scan_thread_data_start
);
2296 FOREACH_THREAD_EXCLUDE (info
, MONO_THREAD_INFO_FLAGS_NO_GC
) {
2297 int skip_reason
= 0;
2298 void *aligned_stack_start
;
2300 if (info
->client_info
.skip
) {
2301 SGEN_LOG (3, "Skipping dead thread %p, range: %p-%p, size: %zd", info
, info
->client_info
.stack_start
, info
->client_info
.info
.stack_end
, (char*)info
->client_info
.info
.stack_end
- (char*)info
->client_info
.stack_start
);
2303 } else if (!mono_thread_info_is_live (info
)) {
2304 SGEN_LOG (3, "Skipping non-running thread %p, range: %p-%p, size: %zd (state %x)", info
, info
->client_info
.stack_start
, info
->client_info
.info
.stack_end
, (char*)info
->client_info
.info
.stack_end
- (char*)info
->client_info
.stack_start
, info
->client_info
.info
.thread_state
);
2306 } else if (!info
->client_info
.stack_start
) {
2307 SGEN_LOG (3, "Skipping starting or detaching thread %p", info
);
2311 sgen_binary_protocol_scan_stack ((gpointer
)mono_thread_info_get_tid (info
), info
->client_info
.stack_start
, info
->client_info
.info
.stack_end
, skip_reason
);
2315 /* If we skip a thread with a non-empty handle stack and then it
2316 * resumes running we may potentially move an object but fail to
2317 * update the reference in the handle.
2319 HandleStack
*stack
= info
->client_info
.info
.handle_stack
;
2320 g_assert (stack
== NULL
|| mono_handle_stack_is_empty (stack
));
2325 g_assert (info
->client_info
.stack_start
);
2326 g_assert (info
->client_info
.info
.stack_end
);
2328 aligned_stack_start
= get_aligned_stack_start (info
);
2329 g_assert (info
->client_info
.suspend_done
);
2330 SGEN_LOG (3, "Scanning thread %p, range: %p-%p, size: %zd, pinned=%zd", info
, info
->client_info
.stack_start
, info
->client_info
.info
.stack_end
, (char*)info
->client_info
.info
.stack_end
- (char*)info
->client_info
.stack_start
, sgen_get_pinned_count ());
2331 if (mono_gc_get_gc_callbacks ()->thread_mark_func
&& !conservative_stack_mark
) {
2332 mono_gc_get_gc_callbacks ()->thread_mark_func (info
->client_info
.runtime_data
, (guint8
*)aligned_stack_start
, (guint8
*)info
->client_info
.info
.stack_end
, precise
, &ctx
);
2333 } else if (!precise
) {
2334 if (!conservative_stack_mark
) {
2335 fprintf (stderr
, "Precise stack mark not supported - disabling.\n");
2336 conservative_stack_mark
= TRUE
;
2338 //FIXME we should eventually use the new stack_mark from coop
2339 sgen_conservatively_pin_objects_from ((void **)aligned_stack_start
, (void **)info
->client_info
.info
.stack_end
, start_nursery
, end_nursery
, PIN_TYPE_STACK
);
2343 sgen_conservatively_pin_objects_from ((void**)&info
->client_info
.ctx
, (void**)(&info
->client_info
.ctx
+ 1),
2344 start_nursery
, end_nursery
, PIN_TYPE_STACK
);
2347 // This is used on Coop GC for platforms where we cannot get the data for individual registers.
2348 // We force a spill of all registers into the stack and pass a chunk of data into sgen.
2349 //FIXME under coop, for now, what we need to ensure is that we scan any extra memory from info->client_info.info.stack_end to stack_mark
2350 MonoThreadUnwindState
*state
= &info
->client_info
.info
.thread_saved_state
[SELF_SUSPEND_STATE_INDEX
];
2351 if (state
&& state
->gc_stackdata
) {
2352 sgen_conservatively_pin_objects_from ((void **)state
->gc_stackdata
, (void**)((char*)state
->gc_stackdata
+ state
->gc_stackdata_size
),
2353 start_nursery
, end_nursery
, PIN_TYPE_STACK
);
2357 if (info
->client_info
.info
.handle_stack
) {
2359 Make two passes over the handle stack. On the imprecise pass, pin all
2360 objects where the handle points into the interior of the object. On the
2361 precise pass, copy or mark all the objects that have handles to the
2362 beginning of the object.
2365 mono_handle_stack_scan (info
->client_info
.info
.handle_stack
, (GcScanFunc
)ctx
.ops
->copy_or_mark_object
, ctx
.queue
, precise
, TRUE
);
2367 PinHandleStackInteriorPtrData ud
;
2368 memset (&ud
, 0, sizeof (ud
));
2369 ud
.start_nursery
= (void**)start_nursery
;
2370 ud
.end_nursery
= (void**)end_nursery
;
2371 mono_handle_stack_scan (info
->client_info
.info
.handle_stack
, pin_handle_stack_interior_ptrs
, &ud
, precise
, FALSE
);
2374 } FOREACH_THREAD_END
2376 SGEN_TV_GETTIME (scan_thread_data_end
);
2377 SGEN_LOG (2, "Scanning thread data: %lld usecs", (long long)(SGEN_TV_ELAPSED (scan_thread_data_start
, scan_thread_data_end
) / 10));
2381 * mono_gc_set_stack_end:
2383 * Set the end of the current threads stack to STACK_END. The stack space between
2384 * STACK_END and the real end of the threads stack will not be scanned during collections.
2387 mono_gc_set_stack_end (void *stack_end
)
2389 SgenThreadInfo
*info
;
2392 info
= mono_thread_info_current ();
2394 SGEN_ASSERT (0, stack_end
< info
->client_info
.info
.stack_end
, "Can only lower stack end");
2395 info
->client_info
.info
.stack_end
= stack_end
;
2405 mono_gc_register_root (char *start
, size_t size
, MonoGCDescriptor descr
, MonoGCRootSource source
, void *key
, const char *msg
)
2407 return sgen_register_root (start
, size
, descr
, descr
? ROOT_TYPE_NORMAL
: ROOT_TYPE_PINNED
, source
, key
, msg
);
/*
 * Like mono_gc_register_root, but the root is updated only through write
 * barriers, so SGen can track it via remembered sets (ROOT_TYPE_WBARRIER).
 * NOTE(review): return type line lost in extraction.
 */
mono_gc_register_root_wbarrier (char *start, size_t size, MonoGCDescriptor descr, MonoGCRootSource source, void *key, const char *msg)
	return sgen_register_root (start, size, descr, ROOT_TYPE_WBARRIER, source, key, msg);
/* Remove a previously registered GC root; thin wrapper over sgen_deregister_root. */
mono_gc_deregister_root (char* addr)
	sgen_deregister_root (addr);
/*
 * pthread_create wrapper that holds the runtime's thread-join lock around
 * thread creation, so the new thread cannot race with a concurrent join/
 * shutdown while it is being set up.
 * NOTE(review): the declaration of `res` and the final `return res;` were
 * lost in extraction.
 */
mono_gc_pthread_create (pthread_t *new_thread, const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg)
	mono_threads_join_lock ();
	res = pthread_create (new_thread, attr, start_routine, arg);
	mono_threads_join_unlock ();
/* Last heap size reported to the profiler; -1 until the first report. */
static size_t last_heap_size = -1;
/* Heap size recorded from SGen worker threads, published to the profiler
 * later from the main GC thread (see mono_sgen_gc_event_resize). */
static size_t worker_heap_size;
/* Called by SGen whenever the total allocated heap size changes. */
sgen_client_total_allocated_heap_changed (size_t allocated_heap)
	mono_runtime_resource_check_limit (MONO_RESOURCE_GC_HEAP, allocated_heap);

	/*
	 * This function can be called from SGen's worker threads. We want to try
	 * and avoid exposing those threads to the profiler API, so save the heap
	 * size value and report it later when the main GC thread calls
	 * mono_sgen_gc_event_resize ().
	 */
	worker_heap_size = allocated_heap;
/* Runs on the main GC thread: raises the profiler gc_resize event if the
 * heap size recorded by worker threads changed since the last report. */
mono_sgen_gc_event_resize (void)
	if (worker_heap_size != last_heap_size) {
		last_heap_size = worker_heap_size;
		MONO_PROFILER_RAISE (gc_resize, (last_heap_size));
/* Whether user-supplied mark callbacks are supported. NOTE(review): the body
 * (a constant return) was lost in extraction. */
mono_gc_user_markers_supported (void)
/* Liveness query for an object. NOTE(review): the body (a constant return)
 * was lost in extraction. */
mono_object_is_alive (MonoObject* o)
/* Returns the GC generation of OBJ, distinguishing nursery objects from the
 * rest. NOTE(review): the two return statements were lost in extraction. */
mono_gc_get_generation (MonoObject *obj)
	if (sgen_ptr_in_nursery (obj))
/* Returns the collector's short name. NOTE(review): body lost in extraction. */
mono_gc_get_gc_name (void)
/* Returns a newly allocated, human-readable description of the collector;
 * caller frees. NOTE(review): the #else/#endif between the two returns was
 * lost in extraction. */
mono_gc_get_description (void)
#ifdef HAVE_CONC_GC_AS_DEFAULT
	return g_strdup ("sgen (concurrent by default)");
	return g_strdup ("sgen");
/* Desktop-mode knob; no-op for SGen (body empty in the visible source). */
mono_gc_set_desktop_mode (void)
/* Whether this collector moves objects. NOTE(review): body lost in extraction. */
mono_gc_is_moving (void)
/* Whether the GC is disabled. NOTE(review): body lost in extraction. */
mono_gc_is_disabled (void)
/* Windows DllMain hook for the GC; parameters unused in the visible source.
 * NOTE(review): body lost in extraction. */
BOOL APIENTRY
mono_gc_dllmain (HMODULE module_handle, DWORD reason, LPVOID reserved)
/* Highest generation number supported. NOTE(review): body lost in extraction. */
mono_gc_max_generation (void)
/* TRUE when stacks are marked precisely rather than conservatively. */
mono_gc_precise_stack_mark_enabled (void)
	return !conservative_stack_mark;
/* Trigger a collection of GENERATION, entering GC-unsafe mode around the
 * call so the coop suspend machinery sees this thread correctly. */
mono_gc_collect (int generation)
	MONO_ENTER_GC_UNSAFE;
	sgen_gc_collect (generation);
	MONO_EXIT_GC_UNSAFE;
/* Number of collections performed so far for GENERATION. */
mono_gc_collection_count (int generation)
	return sgen_gc_collection_count (generation);
/* Bytes currently in use by live objects, as reported by SGen. */
mono_gc_get_used_size (void)
	return (int64_t)sgen_gc_get_used_size ();
/* Total bytes of heap currently allocated from the OS, as reported by SGen. */
mono_gc_get_heap_size (void)
	return (int64_t)sgen_gc_get_total_heap_allocation ();
/* Build a root descriptor that invokes MARKER to scan the root's contents. */
mono_gc_make_root_descr_user (MonoGCRootMarkFunc marker)
	return sgen_make_user_root_descriptor (marker);
/* Strings all share the canonical string descriptor, so the bitmap arguments
 * are ignored here. */
mono_gc_make_descr_for_string (gsize *bitmap, int numbits)
	return SGEN_DESC_STRING;
/* Register OBJ as an object containing weak fields so SGen treats those
 * fields with weak-reference semantics. */
mono_gc_register_obj_with_weak_fields (void *obj)
	return sgen_register_obj_with_weak_fields ((MonoObject *)obj);
/* Return the nursery start address and report its size and size-in-bits
 * through the out parameters (used by JIT fast paths). */
mono_gc_get_nursery (int *shift_bits, size_t *size)
	*size = sgen_nursery_size;
	*shift_bits = sgen_nursery_bits;
	return sgen_get_nursery_start ();
/* Objects larger than this threshold go to the large-object space. */
mono_gc_get_los_limit (void)
	return SGEN_MAX_SMALL_OBJ_SIZE;
/* Total bytes allocated by the calling thread, including the portion of the
 * current TLAB that has been handed out but not yet recorded. */
mono_gc_get_allocated_bytes_for_current_thread (void)
	SgenThreadInfo *info;
	info = mono_thread_info_current ();

	/* There are some more allocated bytes in the current tlab that have not been recorded yet */
	return info->total_bytes_allocated + info->tlab_next - info->tlab_start;
/* Process-wide allocated-bytes counter; PRECISE requests an exact (slower)
 * figure rather than an approximation. */
mono_gc_get_total_allocated_bytes (MonoBoolean precise)
	return sgen_get_total_allocated_bytes (precise);
/* Default metadata attached to GC handles: the current domain. */
sgen_client_default_metadata (void)
	return mono_domain_get ();
/* Metadata for a handle that wraps OBJ: the object's owning domain. */
sgen_client_metadata_for_object (GCObject *obj)
	return mono_object_domain (obj);
/**
 * mono_gchandle_new_internal:
 * \param obj managed object to get a handle for
 * \param pinned whether the object should be pinned
 * This returns a handle that wraps the object, this is used to keep a
 * reference to a managed object from the unmanaged world and preventing the
 * object from being disposed.
 *
 * If \p pinned is false the address of the object can not be obtained, if it is
 * true the address of the object can be obtained. This will also pin the
 * object so it will not be possible by a moving garbage collector to move the
 * object.
 *
 * \returns a handle that can be used to access the object from unmanaged code.
 */
mono_gchandle_new_internal (MonoObject *obj, gboolean pinned)
	return sgen_gchandle_new (obj, pinned);
/**
 * mono_gchandle_new_weakref_internal:
 * \param obj managed object to get a handle for
 * \param track_resurrection Determines how long to track the object, if this is set to TRUE, the object is tracked after finalization, if FALSE, the object is only tracked up until the point of finalization.
 *
 * This returns a weak handle that wraps the object, this is used to
 * keep a reference to a managed object from the unmanaged world.
 * Unlike the \c mono_gchandle_new_internal the object can be reclaimed by the
 * garbage collector. In this case the value of the GCHandle will be
 * NULL.
 *
 * If \p track_resurrection is TRUE the object will be tracked through
 * finalization and if the object is resurrected during the execution
 * of the finalizer, then the returned weakref will continue to hold
 * a reference to the object. If \p track_resurrection is FALSE, then
 * the weak reference's target will become NULL as soon as the object
 * is passed on to the finalizer.
 *
 * \returns a handle that can be used to access the object from
 * unmanaged code.
 */
mono_gchandle_new_weakref_internal (GCObject *obj, gboolean track_resurrection)
	return sgen_gchandle_new_weakref (obj, track_resurrection);
/**
 * mono_gchandle_is_in_domain:
 * \param gchandle a GCHandle's handle.
 * \param domain An application domain.
 * \returns TRUE if the object wrapped by the \p gchandle belongs to the specific \p domain.
 */
mono_gchandle_is_in_domain (guint32 gchandle, MonoDomain *domain)
	/* The handle's metadata is the owning domain (see sgen_client_metadata_for_object). */
	MonoDomain *gchandle_domain = (MonoDomain *)sgen_gchandle_get_metadata (gchandle);
	return domain->domain_id == gchandle_domain->domain_id;
/**
 * mono_gchandle_free_internal:
 * \param gchandle a GCHandle's handle.
 *
 * Frees the \p gchandle handle. If there are no outstanding
 * references, the garbage collector can reclaim the memory of the
 * object wrapped.
 */
mono_gchandle_free_internal (guint32 gchandle)
	sgen_gchandle_free (gchandle);
/**
 * mono_gchandle_free_domain:
 * \param unloading domain that is unloading
 *
 * Function used internally to cleanup any GC handle for objects belonging
 * to the specified domain during appdomain unload.
 */
/* NOTE(review): no body is visible here — presumably empty for SGen (the work
 * is done by sgen_null_links_for_domain); confirm against upstream. */
mono_gchandle_free_domain (MonoDomain *unloading)
/**
 * mono_gchandle_get_target_internal:
 * \param gchandle a GCHandle's handle.
 *
 * The handle was previously created by calling \c mono_gchandle_new_internal or
 * \c mono_gchandle_new_weakref.
 *
 * \returns a pointer to the \c MonoObject* represented by the handle or
 * NULL for a collected object if using a weakref handle.
 */
mono_gchandle_get_target_internal (guint32 gchandle)
	return sgen_gchandle_get_target (gchandle);
/*
 * GC-handle iteration callback: decides whether a handle slot should be
 * nulled because its referent (or its stored metadata domain) belongs to the
 * domain being unloaded. HIDDEN is the clottered slot value, which holds
 * either an object pointer or, for already-collected weak handles, the
 * metadata (domain) pointer.
 * NOTE(review): the `else` between the two obj_domain assignments and the
 * return statements were lost in extraction.
 */
null_link_if_in_domain (gpointer hidden, GCHandleType handle_type, int max_generation, gpointer user)
	MonoDomain *unloading_domain = (MonoDomain *)user;
	MonoDomain *obj_domain;
	gboolean is_weak = MONO_GC_HANDLE_TYPE_IS_WEAK (handle_type);
	if (MONO_GC_HANDLE_IS_OBJECT_POINTER (hidden)) {
		MonoObject *obj = (MonoObject *)MONO_GC_REVEAL_POINTER (hidden, is_weak);
		obj_domain = mono_object_domain (obj);
		/* Slot holds metadata (a domain) rather than an object. */
		obj_domain = (MonoDomain *)MONO_GC_REVEAL_POINTER (hidden, is_weak);
	if (unloading_domain->domain_id == obj_domain->domain_id)
/* During appdomain unload, walk every handle type and null links that refer
 * to objects of DOMAIN. NOTE(review): the declaration of `type` was lost in
 * extraction. */
sgen_null_links_for_domain (MonoDomain *domain)
	for (type = HANDLE_TYPE_MIN; type < HANDLE_TYPE_MAX; ++type)
		sgen_gchandle_iterate ((GCHandleType)type, GENERATION_OLD, null_link_if_in_domain, domain);
/* Point GCHANDLE at a new target object. */
mono_gchandle_set_target (guint32 gchandle, MonoObject *obj)
	sgen_gchandle_set_target (gchandle, obj);
/* SGen callback on handle creation: bump the perf counter and raise the
 * profiler event. */
sgen_client_gchandle_created (int handle_type, GCObject *obj, guint32 handle)
#ifndef DISABLE_PERFCOUNTERS
	mono_atomic_inc_i32 (&mono_perfcounters->gc_num_handles);
	MONO_PROFILER_RAISE (gc_handle_created, (handle, (MonoGCHandleType)handle_type, obj));
/* SGen callback on handle destruction: mirror of sgen_client_gchandle_created. */
sgen_client_gchandle_destroyed (int handle_type, guint32 handle)
#ifndef DISABLE_PERFCOUNTERS
	mono_atomic_dec_i32 (&mono_perfcounters->gc_num_handles);
	MONO_PROFILER_RAISE (gc_handle_deleted, (handle, (MonoGCHandleType)handle_type));
/* Called before a weak handle is dereferenced, to keep weak links consistent
 * with concurrent bridge processing. */
sgen_client_ensure_weak_gchandles_accessible (void)
	/*
	 * During the second bridge processing step the world is
	 * running again. That step processes all weak links once
	 * more to null those that refer to dead objects. Before that
	 * is completed, those links must not be followed, so we
	 * conservatively wait for bridge processing when any weak
	 * link is dereferenced.
	 */
	/* FIXME: A GC can occur after this check fails, in which case we
	 * should wait for bridge processing but would fail to do so.
	 */
	if (G_UNLIKELY (mono_bridge_processing_in_progress))
		mono_gc_wait_for_bridge_processing ();
/* Run FUNC(DATA) while holding the GC interruption lock.
 * NOTE(review): the LOCK_INTERRUPTION line, the declaration of `result` and
 * the final return were lost in extraction. */
mono_gc_invoke_with_gc_lock (MonoGCLockedCallbackFunc func, void *data)
	result = func (data);
	UNLOCK_INTERRUPTION;
/* Altstack registration hook; no-op for SGen (body empty in the visible source). */
mono_gc_register_altstack (gpointer stack, gint32 stack_size, gpointer altstack, gint32 altstack_size)
/* Expose the card-table base plus shift/mask so the JIT can emit inline
 * write barriers. */
mono_gc_get_card_table (int *shift_bits, gpointer *mask)
	return sgen_get_card_table_configuration (shift_bits, mask);
/* Same as mono_gc_get_card_table, but with the mask sized for the AOT
 * compilation target rather than the host. */
mono_gc_get_target_card_table (int *shift_bits, target_mgreg_t *mask)
	return sgen_get_target_card_table_configuration (shift_bits, mask);
/* Whether barrier fast paths may skip marking cards for nursery targets;
 * not allowed when the major collector is concurrent. */
mono_gc_card_table_nursery_check (void)
	return !sgen_get_major_collector ()->is_concurrent;
/* Negative value to remove */
mono_gc_add_memory_pressure (gint64 value)
	/* FIXME: Implement at some point? */
/*
 * Called when an allocation had to be satisfied in degraded mode (directly in
 * the major heap because the nursery was unavailable). Emits rate-limited
 * log warnings: at most once per major-GC cycle, with escalating wording.
 * NOTE(review): the `else if` guarding the "Repeated degraded allocation"
 * message was lost in extraction.
 */
sgen_client_degraded_allocation (void)
	static gint32 last_major_gc_warned = -1;
	static gint32 num_degraded = 0;

	gint32 major_gc_count = mono_atomic_load_i32 (&mono_gc_stats.major_gc_count);
	/* The WASM target always triggers degraded allocation before collecting, so there is no point in printing the warning — it would just confuse users. */
#if !defined (TARGET_WASM)
	if (mono_atomic_load_i32 (&last_major_gc_warned) < major_gc_count) {
		gint32 num = mono_atomic_inc_i32 (&num_degraded);
		if (num == 1 || num == 3)
			mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_GC, "Warning: Degraded allocation. Consider increasing nursery-size if the warning persists.");
		mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_GC, "Warning: Repeated degraded allocation. Consider increasing nursery-size.");
		mono_atomic_store_i32 (&last_major_gc_warned, major_gc_count);
/* Human-readable names for client-specific internal memory types; visible
 * cases only — the switch header, default and returns were lost in extraction. */
sgen_client_description_for_internal_mem_type (int type)
	case INTERNAL_MEM_EPHEMERON_LINK: return "ephemeron-link";
	case INTERNAL_MEM_MOVED_OBJECT: return "moved-object";
/* Debug hook run before each collection: with xdomain checks enabled, clear
 * nursery fragments and scan for stray cross-domain references. */
sgen_client_pre_collection_checks (void)
	if (sgen_mono_xdomain_checks) {
		sgen_clear_nursery_fragments ();
		sgen_check_for_xdomain_refs ();
/* Whether the class behind VT has completed initialization. */
sgen_client_vtable_is_inited (MonoVTable *vt)
	return m_class_is_inited (vt->klass);
/* Namespace of the class behind VT (for logging/diagnostics). */
sgen_client_vtable_get_namespace (MonoVTable *vt)
	return m_class_get_name_space (vt->klass);
/* Name of the class behind VT (for logging/diagnostics). */
sgen_client_vtable_get_name (MonoVTable *vt)
	return m_class_get_name (vt->klass);
/* One-time runtime-side initialization called by SGen during GC startup:
 * thread infrastructure, STW machinery, TLS keys, and attaching the
 * initializing thread. */
sgen_client_init (void)
	mono_thread_callbacks_init ();
	mono_thread_info_init (sizeof (SgenThreadInfo));

	///* Keep this the default for now */
	/* Precise marking is broken on all supported targets. Disable until fixed. */
	conservative_stack_mark = TRUE;

	sgen_register_fixed_internal_mem_type (INTERNAL_MEM_EPHEMERON_LINK, sizeof (EphemeronLinkNode));

	mono_sgen_init_stw ();

	mono_tls_init_gc_keys ();

	mono_thread_info_attach ();
/* Register the GC allocation helpers as JIT icalls so managed code can call
 * them through the fast managed-allocator paths. */
mono_gc_init_icalls (void)
	mono_register_jit_icall (mono_gc_alloc_obj, mono_icall_sig_object_ptr_int, FALSE);
	mono_register_jit_icall (mono_gc_alloc_vector, mono_icall_sig_object_ptr_int_int, FALSE);
	mono_register_jit_icall (mono_gc_alloc_string, mono_icall_sig_object_ptr_int_int32, FALSE);
	mono_register_jit_icall (mono_profiler_raise_gc_allocation, mono_icall_sig_void_object, FALSE);
/*
 * Parse one MONO_GC_PARAMS option that is Mono-specific rather than core
 * SGen: stack-mark mode, bridge implementation, toggleref test hook, or any
 * bridge-specific option.
 * NOTE(review): the `} else {` before sgen_env_var_error and the return
 * statements were lost in extraction.
 */
sgen_client_handle_gc_param (const char *opt)
	if (g_str_has_prefix (opt, "stack-mark=")) {
		opt = strchr (opt, '=') + 1;
		if (!strcmp (opt, "precise")) {
			conservative_stack_mark = FALSE;
		} else if (!strcmp (opt, "conservative")) {
			conservative_stack_mark = TRUE;
			sgen_env_var_error (MONO_GC_PARAMS_NAME, conservative_stack_mark ? "Using `conservative`." : "Using `precise`.",
				"Invalid value `%s` for `stack-mark` option, possible values are: `precise`, `conservative`.", opt);
	} else if (g_str_has_prefix (opt, "bridge-implementation=")) {
		opt = strchr (opt, '=') + 1;
		sgen_set_bridge_implementation (opt);
	} else if (g_str_has_prefix (opt, "toggleref-test")) {
		/* FIXME: This should probably in MONO_GC_DEBUG */
		sgen_register_test_toggleref_callback ();
	} else if (!sgen_bridge_handle_gc_param (opt)) {
/* Print usage for the Mono-specific MONO_GC_PARAMS options. */
sgen_client_print_gc_params_usage (void)
	fprintf (stderr, "  stack-mark=MARK-METHOD (where MARK-METHOD is 'precise' or 'conservative')\n");
/*
 * Parse one MONO_GC_DEBUG option that is Mono-specific: xdomain checks,
 * finalization suppression (global or per-class list), finalizer logging,
 * disabling the managed allocator, or a bridge debug option.
 * NOTE(review): the return statements were lost in extraction.
 */
sgen_client_handle_gc_debug (const char *opt)
	if (!strcmp (opt, "xdomain-checks")) {
		sgen_mono_xdomain_checks = TRUE;
	} else if (!strcmp (opt, "do-not-finalize")) {
		mono_do_not_finalize = TRUE;
	} else if (g_str_has_prefix (opt, "do-not-finalize=")) {
		opt = strchr (opt, '=') + 1;
		mono_do_not_finalize = TRUE;
		/* Comma-separated list of fully qualified class names to skip. */
		mono_do_not_finalize_class_names = g_strsplit (opt, ",", 0);
	} else if (!strcmp (opt, "log-finalizers")) {
		mono_log_finalizers = TRUE;
	} else if (!strcmp (opt, "no-managed-allocator")) {
		sgen_set_use_managed_allocator (FALSE);
	} else if (!sgen_bridge_handle_gc_debug (opt)) {
/* Print usage for the Mono-specific MONO_GC_DEBUG options. */
sgen_client_print_gc_debug_usage (void)
	fprintf (stderr, "  xdomain-checks\n");
	fprintf (stderr, "  do-not-finalize\n");
	fprintf (stderr, "  log-finalizers\n");
	fprintf (stderr, "  no-managed-allocator\n");
	sgen_bridge_print_gc_debug_usage ();
/* With SGEN_OBJECT_PROVENANCE builds, ask the embedder-registered callback
 * for the current allocation provenance (used to tag objects for debugging).
 * NOTE(review): the NULL-callback guard and fallback return were lost in
 * extraction. */
sgen_client_get_provenance (void)
#ifdef SGEN_OBJECT_PROVENANCE
	MonoGCCallbacks *cb = mono_gc_get_gc_callbacks ();
	gpointer (*get_provenance_func) (void);
	get_provenance_func = cb->get_provenance_func;
	if (get_provenance_func)
		return get_provenance_func ();
/* Diagnostic hook for invalid pointers found by SGen checks: let the bridge
 * describe what it knows about PTR. */
sgen_client_describe_invalid_pointer (GCObject *ptr)
	sgen_bridge_describe_pointer (ptr);
/* Guards mono_gc_base_init against double initialization. */
static gboolean gc_inited;
/*
 * mono_gc_base_init:
 *
 * Base GC initialization: counters, w32 handle layer, and (with
 * HEAVY_STATISTICS) registration of the LOS/write-barrier counters declared
 * at the top of this file. NOTE(review): the gc_inited guard check/set and
 * the sgen_gc_init call appear to have been lost in extraction.
 */
mono_gc_base_init (void)
	mono_counters_init ();

	mono_w32handle_init ();

#ifdef HEAVY_STATISTICS
	mono_counters_register ("los marked cards", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &los_marked_cards);
	mono_counters_register ("los array cards scanned ", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &los_array_cards);
	mono_counters_register ("los array remsets", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &los_array_remsets);

	mono_counters_register ("WBarrier set arrayref", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_wbarrier_set_arrayref);
	mono_counters_register ("WBarrier value copy", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_wbarrier_value_copy);
	mono_counters_register ("WBarrier object copy", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_wbarrier_object_copy);
/* Shut down the GC's thread machinery at runtime cleanup. */
mono_gc_base_cleanup (void)
	/*
	 * Note we don't fully cleanup the GC here, but the threads mainly.
	 *
	 * We need to finish any work on the sgen threads before shutting down
	 * the sgen threadpool. After this point we can still trigger GCs as
	 * part of domain free, but they should all be forced and not use the
	 * threadpool.
	 */
	sgen_finish_concurrent_work ("cleanup", TRUE);
	sgen_thread_pool_shutdown ();

	// We should have consumed any outstanding moves.
	g_assert (sgen_pointer_queue_is_empty (&moved_objects_queue));
/* Whether this is the null GC backend; for SGen this cannot be TRUE.
 * NOTE(review): body lost in extraction. */
mono_gc_is_null (void)
/* Return the bitmap of weak fields for the class behind VT, with the bit
 * count stored through NBITS. */
sgen_client_get_weak_bitmap (MonoVTable *vt, int *nbits)
	MonoClass *klass = vt->klass;

	return mono_class_get_weak_bitmap (klass, nbits);
/*
 * Runtime-side notification that a collection is starting: fire the DTrace
 * probe and the profiler gc_event, lazily register the pseudo-root addresses
 * used to attribute finalizer-queue/ephemeron/toggleref references in heap
 * reports, and bump the per-generation perf counter.
 * NOTE(review): the `else` before the gc_collections1 increment was lost in
 * extraction.
 */
sgen_client_binary_protocol_collection_begin (int minor_gc_count, int generation)
	static gboolean pseudo_roots_registered;

	MONO_GC_BEGIN (generation);

	/* The event is flagged concurrent only for a concurrent major collection. */
	MONO_PROFILER_RAISE (gc_event, (MONO_GC_EVENT_START, generation, generation == GENERATION_OLD && sgen_concurrent_collection_in_progress));

	if (!pseudo_roots_registered) {
		pseudo_roots_registered = TRUE;
		MONO_PROFILER_RAISE (gc_root_register, (SPECIAL_ADDRESS_FIN_QUEUE, 1, MONO_ROOT_SOURCE_FINALIZER_QUEUE, NULL, "Finalizer Queue"));
		MONO_PROFILER_RAISE (gc_root_register, (SPECIAL_ADDRESS_CRIT_FIN_QUEUE, 1, MONO_ROOT_SOURCE_FINALIZER_QUEUE, NULL, "Finalizer Queue (Critical)"));
		MONO_PROFILER_RAISE (gc_root_register, (SPECIAL_ADDRESS_EPHEMERON, 1, MONO_ROOT_SOURCE_EPHEMERON, NULL, "Ephemerons"));
		MONO_PROFILER_RAISE (gc_root_register, (SPECIAL_ADDRESS_TOGGLEREF, 1, MONO_ROOT_SOURCE_TOGGLEREF, NULL, "ToggleRefs"));

#ifndef DISABLE_PERFCOUNTERS
	if (generation == GENERATION_NURSERY)
		mono_atomic_inc_i32 (&mono_perfcounters->gc_collections0);
		mono_atomic_inc_i32 (&mono_perfcounters->gc_collections1);
/* Runtime-side notification that a collection finished: fire the DTrace probe
 * and the matching profiler gc_event (concurrent flag mirrors the begin event). */
sgen_client_binary_protocol_collection_end (int minor_gc_count, int generation, long long num_objects_scanned, long long num_unique_objects_scanned)
	MONO_GC_END (generation);

	MONO_PROFILER_RAISE (gc_event, (MONO_GC_EVENT_END, generation, generation == GENERATION_OLD && sgen_concurrent_collection_in_progress));
3127 sgen_client_schedule_background_job (void (*cb
)(void))
3129 mono_threads_schedule_background_job (cb
);