/*
 * sgen-mono.c: SGen features specific to Mono.
 *
 * Copyright (C) 2014 Xamarin Inc
 *
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
 */
#include "sgen/sgen-gc.h"
#include "sgen/sgen-protocol.h"
#include "metadata/monitor.h"
#include "sgen/sgen-layout-stats.h"
#include "sgen/sgen-client.h"
#include "sgen/sgen-cardtable.h"
#include "sgen/sgen-pinning.h"
#include "sgen/sgen-thread-pool.h"
#include "metadata/marshal.h"
#include "metadata/method-builder.h"
#include "metadata/abi-details.h"
#include "metadata/mono-gc.h"
#include "metadata/runtime.h"
#include "metadata/sgen-bridge-internals.h"
#include "metadata/gc-internals.h"
#include "metadata/handle.h"
#include "utils/mono-memory-model.h"
#include "utils/mono-logger-internals.h"
#include "utils/mono-threads-coop.h"
#ifdef HEAVY_STATISTICS
static guint64 stat_wbarrier_set_arrayref = 0;
static guint64 stat_wbarrier_value_copy = 0;
static guint64 stat_wbarrier_object_copy = 0;

static guint64 los_marked_cards;
static guint64 los_array_cards;
static guint64 los_array_remsets;
#endif
/* If set, mark stacks conservatively, even if precise marking is possible */
static gboolean conservative_stack_mark = FALSE;
/* If set, check that there are no references to the domain left at domain unload */
gboolean sgen_mono_xdomain_checks = FALSE;

/* Functions supplied by the runtime to be called by the GC */
static MonoGCCallbacks gc_callbacks;

#ifdef HAVE_KW_THREAD
__thread SgenThreadInfo *sgen_thread_info;
#else
MonoNativeTlsKey thread_info_key;
#endif
#define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))
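/*
 * A worked example of the rounding above (the mask trick requires a
 * power-of-two alignment): with align = 8, (align - 1) = 7 sets the low
 * three bits and ~7 clears them, so
 *
 *   ALIGN_TO (13, 8) = (13 + 7) & ~7 = 16
 *   ALIGN_TO (16, 8) = (16 + 7) & ~7 = 16
 *
 * i.e. values are rounded up to the next multiple of align, and already
 * aligned values are left unchanged.
 */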
#define OPDEF(a,b,c,d,e,f,g,h,i,j) \
	a = i,

enum {
#include "mono/cil/opcode.def"
	CEE_LAST
};

#undef OPDEF
static gboolean
ptr_on_stack (void *ptr)
{
	gpointer stack_start = &stack_start;
	SgenThreadInfo *info = mono_thread_info_current ();

	if (ptr >= stack_start && ptr < (gpointer)info->client_info.stack_end)
		return TRUE;
	return FALSE;
}
#ifdef SGEN_HEAVY_BINARY_PROTOCOL
#define HANDLE_PTR(ptr,obj) do {					\
		gpointer o = *(gpointer*)(ptr);				\
		if ((o)) {						\
			gpointer d = ((char*)dest) + ((char*)(ptr) - (char*)(obj)); \
			binary_protocol_wbarrier (d, o, (gpointer) SGEN_LOAD_VTABLE (o)); \
		}							\
	} while (0)

static void
scan_object_for_binary_protocol_copy_wbarrier (gpointer dest, char *start, mword desc)
{
#define SCAN_OBJECT_NOVTABLE
#include "sgen/sgen-scan-object.h"
}
#endif
void
mono_gc_wbarrier_value_copy (gpointer dest, gpointer src, int count, MonoClass *klass)
{
	HEAVY_STAT (++stat_wbarrier_value_copy);
	g_assert (klass->valuetype);

	SGEN_LOG (8, "Adding value remset at %p, count %d, descr %p for class %s (%p)", dest, count, (gpointer)klass->gc_descr, klass->name, klass);

	if (sgen_ptr_in_nursery (dest) || ptr_on_stack (dest) || !sgen_gc_descr_has_references ((mword)klass->gc_descr)) {
		size_t element_size = mono_class_value_size (klass, NULL);
		size_t size = count * element_size;
		mono_gc_memmove_atomic (dest, src, size);
		return;
	}

#ifdef SGEN_HEAVY_BINARY_PROTOCOL
	if (binary_protocol_is_heavy_enabled ()) {
		size_t element_size = mono_class_value_size (klass, NULL);
		int i;
		for (i = 0; i < count; ++i) {
			scan_object_for_binary_protocol_copy_wbarrier ((char*)dest + i * element_size,
					(char*)src + i * element_size - sizeof (MonoObject),
					(mword) klass->gc_descr);
		}
	}
#endif

	sgen_get_remset ()->wbarrier_value_copy (dest, src, count, mono_class_value_size (klass, NULL));
}
/**
 * mono_gc_wbarrier_object_copy:
 *
 * Write barrier to call when obj is the result of a clone or copy of an object.
 */
void
mono_gc_wbarrier_object_copy (MonoObject* obj, MonoObject *src)
{
	int size;

	HEAVY_STAT (++stat_wbarrier_object_copy);

	SGEN_ASSERT (6, !ptr_on_stack (obj), "Why is this called for a non-reference type?");
	if (sgen_ptr_in_nursery (obj) || !SGEN_OBJECT_HAS_REFERENCES (src)) {
		size = mono_object_class (obj)->instance_size;
		mono_gc_memmove_aligned ((char*)obj + sizeof (MonoObject), (char*)src + sizeof (MonoObject),
				size - sizeof (MonoObject));
		return;
	}

#ifdef SGEN_HEAVY_BINARY_PROTOCOL
	if (binary_protocol_is_heavy_enabled ())
		scan_object_for_binary_protocol_copy_wbarrier (obj, (char*)src, (mword) src->vtable->gc_descr);
#endif

	sgen_get_remset ()->wbarrier_object_copy (obj, src);
}
void
mono_gc_wbarrier_set_arrayref (MonoArray *arr, gpointer slot_ptr, MonoObject* value)
{
	HEAVY_STAT (++stat_wbarrier_set_arrayref);
	if (sgen_ptr_in_nursery (slot_ptr)) {
		*(void**)slot_ptr = value;
		return;
	}
	SGEN_LOG (8, "Adding remset at %p", slot_ptr);
	if (value)
		binary_protocol_wbarrier (slot_ptr, value, value->vtable);

	sgen_get_remset ()->wbarrier_set_field ((GCObject*)arr, slot_ptr, value);
}
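/*
 * Illustration (not compiled; a sketch of how callers use the barrier
 * above): storing a reference into a managed array slot must go through
 * mono_gc_wbarrier_set_arrayref so the store gets remembered when the
 * array lives outside the nursery. This is essentially what the
 * mono_array_setref machinery expands to.
 */
#if 0
	MonoObject **slot = mono_array_addr (arr, MonoObject*, idx);
	mono_gc_wbarrier_set_arrayref (arr, slot, value);
#endif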
void
mono_gc_wbarrier_set_field (MonoObject *obj, gpointer field_ptr, MonoObject* value)
{
	mono_gc_wbarrier_set_arrayref ((MonoArray*)obj, field_ptr, value);
}
void
mono_gc_wbarrier_value_copy_bitmap (gpointer _dest, gpointer _src, int size, unsigned bitmap)
{
	sgen_wbarrier_value_copy_bitmap (_dest, _src, size, bitmap);
}
static MonoMethod *write_barrier_conc_method;
static MonoMethod *write_barrier_noconc_method;

gboolean
sgen_is_critical_method (MonoMethod *method)
{
	return sgen_is_managed_allocator (method);
}

gboolean
sgen_has_critical_method (void)
{
	return sgen_has_managed_allocator ();
}
static void
emit_nursery_check (MonoMethodBuilder *mb, int *nursery_check_return_labels, gboolean is_concurrent)
{
	int shifted_nursery_start = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);

	memset (nursery_check_return_labels, 0, sizeof (int) * 2);
	// if (ptr_in_nursery (ptr)) return;
	/*
	 * Masking out the bits might be faster, but we would have to use 64 bit
	 * immediates, which might be slower.
	 */
	mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
	mono_mb_emit_byte (mb, CEE_MONO_LDPTR_NURSERY_START);
	mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
	mono_mb_emit_byte (mb, CEE_MONO_LDPTR_NURSERY_BITS);
	mono_mb_emit_byte (mb, CEE_SHR_UN);
	mono_mb_emit_stloc (mb, shifted_nursery_start);

	mono_mb_emit_ldarg (mb, 0);
	mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
	mono_mb_emit_byte (mb, CEE_MONO_LDPTR_NURSERY_BITS);
	mono_mb_emit_byte (mb, CEE_SHR_UN);
	mono_mb_emit_ldloc (mb, shifted_nursery_start);
	nursery_check_return_labels [0] = mono_mb_emit_branch (mb, CEE_BEQ);

	if (!is_concurrent) {
		// if (!ptr_in_nursery (*ptr)) return;
		mono_mb_emit_ldarg (mb, 0);
		mono_mb_emit_byte (mb, CEE_LDIND_I);
		mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
		mono_mb_emit_byte (mb, CEE_MONO_LDPTR_NURSERY_BITS);
		mono_mb_emit_byte (mb, CEE_SHR_UN);
		mono_mb_emit_ldloc (mb, shifted_nursery_start);
		nursery_check_return_labels [1] = mono_mb_emit_branch (mb, CEE_BNE_UN);
	}
}
MonoMethod*
mono_gc_get_specific_write_barrier (gboolean is_concurrent)
{
	MonoMethod *res;
	MonoMethodBuilder *mb;
	MonoMethodSignature *sig;
	MonoMethod **write_barrier_method_addr;
	WrapperInfo *info;
#ifdef MANAGED_WBARRIER
	int i, nursery_check_labels [2];
#endif

	// FIXME: Maybe create a separate version for ctors (the branch would be
	// correctly predicted more times)
	if (is_concurrent)
		write_barrier_method_addr = &write_barrier_conc_method;
	else
		write_barrier_method_addr = &write_barrier_noconc_method;

	if (*write_barrier_method_addr)
		return *write_barrier_method_addr;

	/* Create the IL version of mono_gc_barrier_generic_store () */
	sig = mono_metadata_signature_alloc (mono_defaults.corlib, 1);
	sig->ret = &mono_defaults.void_class->byval_arg;
	sig->params [0] = &mono_defaults.int_class->byval_arg;

	if (is_concurrent)
		mb = mono_mb_new (mono_defaults.object_class, "wbarrier_conc", MONO_WRAPPER_WRITE_BARRIER);
	else
		mb = mono_mb_new (mono_defaults.object_class, "wbarrier_noconc", MONO_WRAPPER_WRITE_BARRIER);

#ifdef MANAGED_WBARRIER
	emit_nursery_check (mb, nursery_check_labels, is_concurrent);
	/*
	addr = sgen_cardtable + ((address >> CARD_BITS) & CARD_MASK)
	*addr = 1;

	sgen_cardtable:
		LDC_PTR sgen_cardtable

	address >> CARD_BITS
		LDARG_0
		LDC_I4 CARD_BITS
		SHR_UN
	if (SGEN_HAVE_OVERLAPPING_CARDS) {
		LDC_PTR card_table_mask
		AND
	}
	AND
	ADD
	LDC_I4 1
	STIND_I1
	*/
	mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
	mono_mb_emit_byte (mb, CEE_MONO_LDPTR_CARD_TABLE);
	mono_mb_emit_ldarg (mb, 0);
	mono_mb_emit_icon (mb, CARD_BITS);
	mono_mb_emit_byte (mb, CEE_SHR_UN);
	mono_mb_emit_byte (mb, CEE_CONV_I);
#ifdef SGEN_HAVE_OVERLAPPING_CARDS
#if SIZEOF_VOID_P == 8
	mono_mb_emit_icon8 (mb, CARD_MASK);
#else
	mono_mb_emit_icon (mb, CARD_MASK);
#endif
	mono_mb_emit_byte (mb, CEE_CONV_I);
	mono_mb_emit_byte (mb, CEE_AND);
#endif
	mono_mb_emit_byte (mb, CEE_ADD);
	mono_mb_emit_icon (mb, 1);
	mono_mb_emit_byte (mb, CEE_STIND_I1);

	// return;
	for (i = 0; i < 2; ++i) {
		if (nursery_check_labels [i])
			mono_mb_patch_branch (mb, nursery_check_labels [i]);
	}
	mono_mb_emit_byte (mb, CEE_RET);
#else
	mono_mb_emit_ldarg (mb, 0);
	mono_mb_emit_icall (mb, mono_gc_wbarrier_generic_nostore);
	mono_mb_emit_byte (mb, CEE_RET);
#endif

	res = mono_mb_create_method (mb, sig, 16);
	info = mono_wrapper_info_create (mb, WRAPPER_SUBTYPE_NONE);
	mono_marshal_set_wrapper_info (res, info);
	mono_mb_free (mb);

	LOCK_GC;
	if (*write_barrier_method_addr) {
		/* Already created */
		mono_free_method (res);
	} else {
		/* double-checked locking */
		mono_memory_barrier ();
		*write_barrier_method_addr = res;
	}
	UNLOCK_GC;

	return *write_barrier_method_addr;
}
MonoMethod*
mono_gc_get_write_barrier (void)
{
	return mono_gc_get_specific_write_barrier (major_collector.is_concurrent);
}
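/*
 * For reference, a C sketch of what the emitted IL above computes per
 * store address `ptr` when SGEN_HAVE_OVERLAPPING_CARDS is defined
 * (illustration only; the real unmanaged fallback is
 * mono_gc_wbarrier_generic_nostore):
 */
#if 0
static void
write_barrier_sketch (gpointer ptr)
{
	guint8 *card = sgen_cardtable + (((mword)ptr >> CARD_BITS) & CARD_MASK);
	*card = 1;	/* dirty the card covering ptr */
}
#endif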
/*
 * Dummy filler objects
 */

/* Vtable of the objects used to fill out nursery fragments before a collection */
static GCVTable array_fill_vtable;

static GCVTable
get_array_fill_vtable (void)
{
	if (!array_fill_vtable) {
		static MonoClass klass;
		static char _vtable [sizeof (MonoVTable) + 8];
		MonoVTable *vtable = (MonoVTable*) ALIGN_TO ((mword)_vtable, 8);
		gsize bmap = 0;

		MonoDomain *domain = mono_get_root_domain ();
		g_assert (domain);

		klass.element_class = mono_defaults.byte_class;
		klass.rank = 1;
		klass.instance_size = MONO_SIZEOF_MONO_ARRAY;
		klass.sizes.element_size = 1;
		klass.name = "array_filler_type";

		vtable->klass = &klass;
		vtable->gc_descr = mono_gc_make_descr_for_array (TRUE, &bmap, 0, 1);
		vtable->rank = 1;

		array_fill_vtable = vtable;
	}
	return array_fill_vtable;
}
gboolean
sgen_client_array_fill_range (char *start, size_t size)
{
	MonoArray *o;

	if (size < MONO_SIZEOF_MONO_ARRAY) {
		memset (start, 0, size);
		return FALSE;
	}

	o = (MonoArray*)start;
	o->obj.vtable = (MonoVTable*)get_array_fill_vtable ();
	/* Mark this as not a real object */
	o->obj.synchronisation = (MonoThreadsSync *)GINT_TO_POINTER (-1);
	o->bounds = NULL;
	o->max_length = (mono_array_size_t)(size - MONO_SIZEOF_MONO_ARRAY);

	return TRUE;
}
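/*
 * Size accounting example for the filler above (illustrative numbers;
 * MONO_SIZEOF_MONO_ARRAY is platform dependent): if a 64-byte hole is
 * filled and the array header takes 24 bytes, the resulting byte array
 * gets max_length = 64 - 24 = 40, so its computed object size covers the
 * hole exactly.
 */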
void
sgen_client_zero_array_fill_header (void *p, size_t size)
{
	if (size >= MONO_SIZEOF_MONO_ARRAY) {
		memset (p, 0, MONO_SIZEOF_MONO_ARRAY);
	} else {
		static guint8 zeros [MONO_SIZEOF_MONO_ARRAY];

		SGEN_ASSERT (0, !memcmp (p, zeros, size), "TLAB segment must be zeroed out.");
	}
}
static MonoGCFinalizerCallbacks fin_callbacks;

guint
mono_gc_get_vtable_bits (MonoClass *klass)
{
	guint res = 0;
	/* FIXME move this to the bridge code */
	if (sgen_need_bridge_processing ()) {
		switch (sgen_bridge_class_kind (klass)) {
		case GC_BRIDGE_TRANSPARENT_BRIDGE_CLASS:
		case GC_BRIDGE_OPAQUE_BRIDGE_CLASS:
			res = SGEN_GC_BIT_BRIDGE_OBJECT;
			break;
		case GC_BRIDGE_OPAQUE_CLASS:
			res = SGEN_GC_BIT_BRIDGE_OPAQUE_OBJECT;
			break;
		case GC_BRIDGE_TRANSPARENT_CLASS:
			break;
		}
	}
	if (fin_callbacks.is_class_finalization_aware) {
		if (fin_callbacks.is_class_finalization_aware (klass))
			res |= SGEN_GC_BIT_FINALIZER_AWARE;
	}
	return res;
}
static gboolean
is_finalization_aware (MonoObject *obj)
{
	MonoVTable *vt = SGEN_LOAD_VTABLE (obj);
	return (vt->gc_bits & SGEN_GC_BIT_FINALIZER_AWARE) == SGEN_GC_BIT_FINALIZER_AWARE;
}

void
sgen_client_object_queued_for_finalization (GCObject *obj)
{
	if (fin_callbacks.object_queued_for_finalization && is_finalization_aware (obj))
		fin_callbacks.object_queued_for_finalization (obj);

#ifdef ENABLE_DTRACE
	if (G_UNLIKELY (MONO_GC_FINALIZE_ENQUEUE_ENABLED ())) {
		int gen = sgen_ptr_in_nursery (obj) ? GENERATION_NURSERY : GENERATION_OLD;
		GCVTable vt = SGEN_LOAD_VTABLE (obj);
		MONO_GC_FINALIZE_ENQUEUE ((mword)obj, sgen_safe_object_get_size (obj),
				sgen_client_vtable_get_namespace (vt), sgen_client_vtable_get_name (vt), gen,
				sgen_client_object_has_critical_finalizer (obj));
	}
#endif
}
void
mono_gc_register_finalizer_callbacks (MonoGCFinalizerCallbacks *callbacks)
{
	if (callbacks->version != MONO_GC_FINALIZER_EXTENSION_VERSION)
		g_error ("Invalid finalizer callback version. Expected %d but got %d\n", MONO_GC_FINALIZER_EXTENSION_VERSION, callbacks->version);

	fin_callbacks = *callbacks;
}

void
sgen_client_run_finalize (MonoObject *obj)
{
	mono_gc_run_finalize (obj, NULL);
}

int
mono_gc_invoke_finalizers (void)
{
	return sgen_gc_invoke_finalizers ();
}

gboolean
mono_gc_pending_finalizers (void)
{
	return sgen_have_pending_finalizers ();
}

void
sgen_client_finalize_notify (void)
{
	mono_gc_finalize_notify ();
}

void
mono_gc_register_for_finalization (MonoObject *obj, void *user_data)
{
	sgen_object_register_for_finalization (obj, user_data);
}
static gboolean
object_in_domain_predicate (MonoObject *obj, void *user_data)
{
	MonoDomain *domain = (MonoDomain *)user_data;
	if (mono_object_domain (obj) == domain) {
		SGEN_LOG (5, "Unregistering finalizer for object: %p (%s)", obj, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (obj)));
		return TRUE;
	}
	return FALSE;
}

/**
 * mono_gc_finalize_domain:
 * @domain: the unloading appdomain
 *
 * Enqueue for finalization all objects that belong to the unloading appdomain @domain.
 */
void
mono_gc_finalize_domain (MonoDomain *domain)
{
	sgen_finalize_if (object_in_domain_predicate, domain);
}

void
mono_gc_suspend_finalizers (void)
{
	sgen_set_suspend_finalizers ();
}
/*
 * Ephemerons
 */

typedef struct _EphemeronLinkNode EphemeronLinkNode;

struct _EphemeronLinkNode {
	EphemeronLinkNode *next;
	MonoArray *array;
};

typedef struct {
	GCObject *key;
	GCObject *value;
} Ephemeron;

static EphemeronLinkNode *ephemeron_list;

/* LOCKING: requires that the GC lock is held */
static void
null_ephemerons_for_domain (MonoDomain *domain)
{
	EphemeronLinkNode *current = ephemeron_list, *prev = NULL;

	while (current) {
		MonoObject *object = (MonoObject *)current->array;

		if (object)
			SGEN_ASSERT (0, object->vtable, "Can't have objects without vtables.");

		if (object && object->vtable->domain == domain) {
			EphemeronLinkNode *tmp = current;

			if (prev)
				prev->next = current->next;
			else
				ephemeron_list = current->next;

			current = current->next;
			sgen_free_internal (tmp, INTERNAL_MEM_EPHEMERON_LINK);
		} else {
			prev = current;
			current = current->next;
		}
	}
}
/* LOCKING: requires that the GC lock is held */
void
sgen_client_clear_unreachable_ephemerons (ScanCopyContext ctx)
{
	CopyOrMarkObjectFunc copy_func = ctx.ops->copy_or_mark_object;
	SgenGrayQueue *queue = ctx.queue;
	EphemeronLinkNode *current = ephemeron_list, *prev = NULL;
	Ephemeron *cur, *array_end;
	GCObject *tombstone;

	while (current) {
		MonoArray *array = current->array;

		if (!sgen_is_object_alive_for_current_gen ((GCObject*)array)) {
			EphemeronLinkNode *tmp = current;

			SGEN_LOG (5, "Dead Ephemeron array at %p", array);

			if (prev)
				prev->next = current->next;
			else
				ephemeron_list = current->next;

			current = current->next;
			sgen_free_internal (tmp, INTERNAL_MEM_EPHEMERON_LINK);

			continue;
		}

		copy_func ((GCObject**)&array, queue);
		current->array = array;

		SGEN_LOG (5, "Clearing unreachable entries for ephemeron array at %p", array);

		cur = mono_array_addr (array, Ephemeron, 0);
		array_end = cur + mono_array_length_fast (array);
		tombstone = SGEN_LOAD_VTABLE ((GCObject*)array)->domain->ephemeron_tombstone;

		for (; cur < array_end; ++cur) {
			GCObject *key = cur->key;

			if (!key || key == tombstone)
				continue;

			SGEN_LOG (5, "[%zd] key %p (%s) value %p (%s)", cur - mono_array_addr (array, Ephemeron, 0),
				key, sgen_is_object_alive_for_current_gen (key) ? "reachable" : "unreachable",
				cur->value, cur->value && sgen_is_object_alive_for_current_gen (cur->value) ? "reachable" : "unreachable");

			if (!sgen_is_object_alive_for_current_gen (key)) {
				cur->key = tombstone;
				cur->value = NULL;
				continue;
			}
		}

		prev = current;
		current = current->next;
	}
}
/*
 * LOCKING: requires that the GC lock is held
 *
 * Limitations: We scan all ephemerons on every collection since the current design doesn't allow for a simple nursery/mature split.
 */
gboolean
sgen_client_mark_ephemerons (ScanCopyContext ctx)
{
	CopyOrMarkObjectFunc copy_func = ctx.ops->copy_or_mark_object;
	SgenGrayQueue *queue = ctx.queue;
	gboolean nothing_marked = TRUE;
	EphemeronLinkNode *current = ephemeron_list;
	Ephemeron *cur, *array_end;
	GCObject *tombstone;

	for (current = ephemeron_list; current; current = current->next) {
		MonoArray *array = current->array;
		SGEN_LOG (5, "Ephemeron array at %p", array);

		/*It has to be alive*/
		if (!sgen_is_object_alive_for_current_gen ((GCObject*)array)) {
			SGEN_LOG (5, "\tnot reachable");
			continue;
		}

		copy_func ((GCObject**)&array, queue);

		cur = mono_array_addr (array, Ephemeron, 0);
		array_end = cur + mono_array_length_fast (array);
		tombstone = SGEN_LOAD_VTABLE ((GCObject*)array)->domain->ephemeron_tombstone;

		for (; cur < array_end; ++cur) {
			GCObject *key = cur->key;

			if (!key || key == tombstone)
				continue;

			SGEN_LOG (5, "[%zd] key %p (%s) value %p (%s)", cur - mono_array_addr (array, Ephemeron, 0),
				key, sgen_is_object_alive_for_current_gen (key) ? "reachable" : "unreachable",
				cur->value, cur->value && sgen_is_object_alive_for_current_gen (cur->value) ? "reachable" : "unreachable");

			if (sgen_is_object_alive_for_current_gen (key)) {
				GCObject *value = cur->value;

				copy_func (&cur->key, queue);
				if (value) {
					if (!sgen_is_object_alive_for_current_gen (value))
						nothing_marked = FALSE;
					copy_func (&cur->value, queue);
				}
			}
		}
	}

	SGEN_LOG (5, "Ephemeron run finished. Is it done %d", nothing_marked);
	return nothing_marked;
}
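/*
 * The collector drives the function above to a fixpoint, along the lines
 * of the following sketch (the actual driver lives in the generic sgen
 * code; illustration only):
 */
#if 0
	for (;;) {
		if (sgen_client_mark_ephemerons (ctx))
			break;	/* no value was newly marked: fixpoint reached */
		/* otherwise drain the gray queue and rescan */
	}
#endif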
gboolean
mono_gc_ephemeron_array_add (MonoObject *obj)
{
	EphemeronLinkNode *node;

	LOCK_GC;

	node = (EphemeronLinkNode *)sgen_alloc_internal (INTERNAL_MEM_EPHEMERON_LINK);
	if (!node) {
		UNLOCK_GC;
		return FALSE;
	}
	node->array = (MonoArray*)obj;
	node->next = ephemeron_list;
	ephemeron_list = node;

	SGEN_LOG (5, "Registered ephemeron array %p", obj);

	UNLOCK_GC;
	return TRUE;
}
void
mono_gc_set_current_thread_appdomain (MonoDomain *domain)
{
	SgenThreadInfo *info = mono_thread_info_current ();

	/* Could be called from sgen_thread_unregister () with a NULL info */
	if (info)
		info->client_info.stopped_domain = domain;
}

static gboolean
need_remove_object_for_domain (GCObject *start, MonoDomain *domain)
{
	if (mono_object_domain (start) == domain) {
		SGEN_LOG (4, "Need to cleanup object %p", start);
		binary_protocol_cleanup (start, (gpointer)SGEN_LOAD_VTABLE (start), sgen_safe_object_get_size ((GCObject*)start));
		return TRUE;
	}
	return FALSE;
}

static void
process_object_for_domain_clearing (GCObject *start, MonoDomain *domain)
{
	MonoVTable *vt = SGEN_LOAD_VTABLE (start);
	if (vt->klass == mono_defaults.internal_thread_class)
		g_assert (mono_object_domain (start) == mono_get_root_domain ());
	/* The object could be a proxy for an object in the domain
	   we're deleting. */
#ifndef DISABLE_REMOTING
	if (mono_defaults.real_proxy_class->supertypes && mono_class_has_parent_fast (vt->klass, mono_defaults.real_proxy_class)) {
		MonoObject *server = ((MonoRealProxy*)start)->unwrapped_server;

		/* The server could already have been zeroed out, so
		   we need to check for that, too. */
		if (server && (!SGEN_LOAD_VTABLE (server) || mono_object_domain (server) == domain)) {
			SGEN_LOG (4, "Cleaning up remote pointer in %p to object %p", start, server);
			((MonoRealProxy*)start)->unwrapped_server = NULL;
		}
	}
#endif
}

static gboolean
clear_domain_process_object (GCObject *obj, MonoDomain *domain)
{
	gboolean remove;

	process_object_for_domain_clearing (obj, domain);
	remove = need_remove_object_for_domain (obj, domain);

	if (remove && obj->synchronisation) {
		guint32 dislink = mono_monitor_get_object_monitor_gchandle (obj);
		if (dislink)
			mono_gchandle_free (dislink);
	}

	return remove;
}

static void
clear_domain_process_minor_object_callback (GCObject *obj, size_t size, MonoDomain *domain)
{
	if (clear_domain_process_object (obj, domain)) {
		CANARIFY_SIZE (size);
		memset (obj, 0, size);
	}
}

static void
clear_domain_process_major_object_callback (GCObject *obj, size_t size, MonoDomain *domain)
{
	clear_domain_process_object (obj, domain);
}

static void
clear_domain_free_major_non_pinned_object_callback (GCObject *obj, size_t size, MonoDomain *domain)
{
	if (need_remove_object_for_domain (obj, domain))
		major_collector.free_non_pinned_object (obj, size);
}

static void
clear_domain_free_major_pinned_object_callback (GCObject *obj, size_t size, MonoDomain *domain)
{
	if (need_remove_object_for_domain (obj, domain))
		major_collector.free_pinned_object (obj, size);
}
/*
 * When appdomains are unloaded we can easily remove objects that have finalizers,
 * but all the others could still be present in random places on the heap.
 * We need a sweep to get rid of them even though it's going to be costly
 * with big heaps.
 * The reason we need to remove them is because we access the vtable and class
 * structures to know the object size and the reference bitmap: once the domain is
 * unloaded they point to random memory.
 */
void
mono_gc_clear_domain (MonoDomain * domain)
{
	LOSObject *bigobj, *prev;
	int i;

	LOCK_GC;

	binary_protocol_domain_unload_begin (domain);

	sgen_stop_world (0);

	if (sgen_concurrent_collection_in_progress ())
		sgen_perform_collection (0, GENERATION_OLD, "clear domain", TRUE, FALSE);
	SGEN_ASSERT (0, !sgen_concurrent_collection_in_progress (), "We just ordered a synchronous collection. Why are we collecting concurrently?");

	major_collector.finish_sweeping ();

	sgen_process_fin_stage_entries ();

	sgen_clear_nursery_fragments ();

	if (sgen_mono_xdomain_checks && domain != mono_get_root_domain ()) {
		sgen_scan_for_registered_roots_in_domain (domain, ROOT_TYPE_NORMAL);
		sgen_scan_for_registered_roots_in_domain (domain, ROOT_TYPE_WBARRIER);
		sgen_check_for_xdomain_refs ();
	}

	/*Ephemerons and dislinks must be processed before LOS since they might end up pointing
	to memory returned to the OS.*/
	null_ephemerons_for_domain (domain);
	sgen_null_links_for_domain (domain);

	for (i = GENERATION_NURSERY; i < GENERATION_MAX; ++i)
		sgen_remove_finalizers_if (object_in_domain_predicate, domain, i);

	sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data,
			(IterateObjectCallbackFunc)clear_domain_process_minor_object_callback, domain, FALSE, TRUE);

	/* We need two passes over major and large objects because
	   freeing such objects might give their memory back to the OS
	   (in the case of large objects) or obliterate its vtable
	   (pinned objects with major-copying or pinned and non-pinned
	   objects with major-mark&sweep), but we might need to
	   dereference a pointer from an object to another object if
	   the first object is a proxy. */
	major_collector.iterate_objects (ITERATE_OBJECTS_SWEEP_ALL, (IterateObjectCallbackFunc)clear_domain_process_major_object_callback, domain);
	for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
		clear_domain_process_object ((GCObject*)bigobj->data, domain);

	prev = NULL;
	for (bigobj = los_object_list; bigobj;) {
		if (need_remove_object_for_domain ((GCObject*)bigobj->data, domain)) {
			LOSObject *to_free = bigobj;
			if (prev)
				prev->next = bigobj->next;
			else
				los_object_list = bigobj->next;
			bigobj = bigobj->next;
			/* log the object being freed, not the next one in the list */
			SGEN_LOG (4, "Freeing large object %p", to_free->data);
			sgen_los_free_object (to_free);
			continue;
		}
		prev = bigobj;
		bigobj = bigobj->next;
	}
	major_collector.iterate_objects (ITERATE_OBJECTS_SWEEP_NON_PINNED, (IterateObjectCallbackFunc)clear_domain_free_major_non_pinned_object_callback, domain);
	major_collector.iterate_objects (ITERATE_OBJECTS_SWEEP_PINNED, (IterateObjectCallbackFunc)clear_domain_free_major_pinned_object_callback, domain);

	if (domain == mono_get_root_domain ()) {
		sgen_pin_stats_report ();
		sgen_object_layout_dump (stdout);
	}

	sgen_restart_world (0);

	binary_protocol_domain_unload_end (domain);
	binary_protocol_flush_buffers (FALSE);

	UNLOCK_GC;
}
void*
mono_gc_alloc_obj (MonoVTable *vtable, size_t size)
{
	MonoObject *obj = sgen_alloc_obj (vtable, size);

	if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_ALLOCATIONS)) {
		if (obj)
			mono_profiler_allocation (obj);
	}

	return obj;
}

void*
mono_gc_alloc_pinned_obj (MonoVTable *vtable, size_t size)
{
	MonoObject *obj = sgen_alloc_obj_pinned (vtable, size);

	if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_ALLOCATIONS)) {
		if (obj)
			mono_profiler_allocation (obj);
	}

	return obj;
}

void*
mono_gc_alloc_mature (MonoVTable *vtable, size_t size)
{
	MonoObject *obj = sgen_alloc_obj_mature (vtable, size);

	if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_ALLOCATIONS)) {
		if (obj)
			mono_profiler_allocation (obj);
	}

	return obj;
}

void*
mono_gc_alloc_fixed (size_t size, MonoGCDescriptor descr, MonoGCRootSource source, const char *msg)
{
	/* FIXME: do a single allocation */
	void *res = g_calloc (1, size);
	if (!res)
		return NULL;
	if (!mono_gc_register_root ((char *)res, size, descr, source, msg)) {
		g_free (res);
		res = NULL;
	}
	return res;
}

void
mono_gc_free_fixed (void* addr)
{
	mono_gc_deregister_root ((char *)addr);
	g_free (addr);
}
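/*
 * Usage sketch for the pair above (illustration only; the descriptor and
 * root-source values are placeholders, not prescribed by this file): a
 * fixed, GC-visible block whose words are treated as potential object
 * references.
 */
#if 0
	void **slots = (void **)mono_gc_alloc_fixed (16 * sizeof (void*), MONO_GC_DESCRIPTOR_NULL, MONO_ROOT_SOURCE_EXTERNAL, "example root");
	/* ... store object references into slots [0..15] ... */
	mono_gc_free_fixed (slots);
#endif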
static MonoMethod* alloc_method_cache [ATYPE_NUM];
static MonoMethod* slowpath_alloc_method_cache [ATYPE_NUM];
static gboolean use_managed_allocator = TRUE;

#ifdef MANAGED_ALLOCATION

#if defined(HAVE_KW_THREAD) || defined(TARGET_OSX) || defined(TARGET_WIN32) || defined(TARGET_ANDROID) || defined(TARGET_IOS)

// Cache the SgenThreadInfo pointer in a local 'var'.
#define EMIT_TLS_ACCESS_VAR(mb, var) \
	do { \
	var = mono_mb_add_local ((mb), &mono_defaults.int_class->byval_arg); \
	mono_mb_emit_byte ((mb), MONO_CUSTOM_PREFIX); \
	mono_mb_emit_byte ((mb), CEE_MONO_TLS); \
	mono_mb_emit_i4 ((mb), TLS_KEY_SGEN_THREAD_INFO); \
	mono_mb_emit_stloc ((mb), (var)); \
	} while (0)

#define EMIT_TLS_ACCESS_IN_CRITICAL_REGION_ADDR(mb, var) \
	do { \
	mono_mb_emit_ldloc ((mb), (var)); \
	mono_mb_emit_icon ((mb), MONO_STRUCT_OFFSET (SgenClientThreadInfo, in_critical_region)); \
	mono_mb_emit_byte ((mb), CEE_ADD); \
	} while (0)

#define EMIT_TLS_ACCESS_NEXT_ADDR(mb, var) do { \
	mono_mb_emit_ldloc ((mb), (var)); \
	mono_mb_emit_icon ((mb), MONO_STRUCT_OFFSET (SgenThreadInfo, tlab_next)); \
	mono_mb_emit_byte ((mb), CEE_ADD); \
	} while (0)

#define EMIT_TLS_ACCESS_TEMP_END(mb, var) do { \
	mono_mb_emit_ldloc ((mb), (var)); \
	mono_mb_emit_icon ((mb), MONO_STRUCT_OFFSET (SgenThreadInfo, tlab_temp_end)); \
	mono_mb_emit_byte ((mb), CEE_ADD); \
	mono_mb_emit_byte ((mb), CEE_LDIND_I); \
	} while (0)

#else
#define EMIT_TLS_ACCESS_VAR(mb, _var)	do { g_error ("sgen is not supported when using --with-tls=pthread.\n"); } while (0)
#define EMIT_TLS_ACCESS_NEXT_ADDR(mb, _var)	do { g_error ("sgen is not supported when using --with-tls=pthread.\n"); } while (0)
#define EMIT_TLS_ACCESS_TEMP_END(mb, _var)	do { g_error ("sgen is not supported when using --with-tls=pthread.\n"); } while (0)
#define EMIT_TLS_ACCESS_IN_CRITICAL_REGION_ADDR(mb, _var)	do { g_error ("sgen is not supported when using --with-tls=pthread.\n"); } while (0)
#endif
/* FIXME: Do this in the JIT, where specialized allocation sequences can be created
 * for each class. This is currently not easy to do, as it is hard to generate basic
 * blocks + branches, but it is easy with the linear IL codebase.
 *
 * For this to work we'd need to solve the TLAB race, first. Now we
 * require the allocator to be in a few known methods to make sure
 * that they are executed atomically via the restart mechanism.
 */
static MonoMethod*
create_allocator (int atype, ManagedAllocatorVariant variant)
{
	int p_var, size_var, thread_var G_GNUC_UNUSED;
	gboolean slowpath = variant == MANAGED_ALLOCATOR_SLOW_PATH;
	guint32 slowpath_branch, max_size_branch;
	MonoMethodBuilder *mb;
	MonoMethod *res;
	MonoMethodSignature *csig;
	static gboolean registered = FALSE;
	int tlab_next_addr_var, new_next_var;
	const char *name = NULL;
	WrapperInfo *info;
	int num_params, i;

	if (!registered) {
		mono_register_jit_icall (mono_gc_alloc_obj, "mono_gc_alloc_obj", mono_create_icall_signature ("object ptr int"), FALSE);
		mono_register_jit_icall (mono_gc_alloc_vector, "mono_gc_alloc_vector", mono_create_icall_signature ("object ptr int int"), FALSE);
		mono_register_jit_icall (mono_gc_alloc_string, "mono_gc_alloc_string", mono_create_icall_signature ("object ptr int int32"), FALSE);
		registered = TRUE;
	}

	if (atype == ATYPE_SMALL) {
		name = slowpath ? "SlowAllocSmall" : "AllocSmall";
	} else if (atype == ATYPE_NORMAL) {
		name = slowpath ? "SlowAlloc" : "Alloc";
	} else if (atype == ATYPE_VECTOR) {
		name = slowpath ? "SlowAllocVector" : "AllocVector";
	} else if (atype == ATYPE_STRING) {
		name = slowpath ? "SlowAllocString" : "AllocString";
	} else {
		g_assert_not_reached ();
	}

	if (atype == ATYPE_NORMAL)
		num_params = 1;
	else
		num_params = 2;

	csig = mono_metadata_signature_alloc (mono_defaults.corlib, num_params);
	if (atype == ATYPE_STRING) {
		csig->ret = &mono_defaults.string_class->byval_arg;
		csig->params [0] = &mono_defaults.int_class->byval_arg;
		csig->params [1] = &mono_defaults.int32_class->byval_arg;
	} else {
		csig->ret = &mono_defaults.object_class->byval_arg;
		for (i = 0; i < num_params; i++)
			csig->params [i] = &mono_defaults.int_class->byval_arg;
	}

	mb = mono_mb_new (mono_defaults.object_class, name, MONO_WRAPPER_ALLOC);

	if (slowpath) {
		switch (atype) {
		case ATYPE_NORMAL:
		case ATYPE_SMALL:
			mono_mb_emit_ldarg (mb, 0);
			mono_mb_emit_icall (mb, ves_icall_object_new_specific);
			break;
		case ATYPE_VECTOR:
			mono_mb_emit_ldarg (mb, 0);
			mono_mb_emit_ldarg (mb, 1);
			mono_mb_emit_icall (mb, ves_icall_array_new_specific);
			break;
		case ATYPE_STRING:
			mono_mb_emit_ldarg (mb, 1);
			mono_mb_emit_icall (mb, ves_icall_string_alloc);
			break;
		default:
			g_assert_not_reached ();
		}

		goto done;
	}

	/*
	 * Tls access might call foreign code or code without jinfo. This can
	 * only happen if we are outside of the critical region.
	 */
	EMIT_TLS_ACCESS_VAR (mb, thread_var);

	size_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
	if (atype == ATYPE_SMALL) {
		/* size_var = size_arg */
		mono_mb_emit_ldarg (mb, 1);
		mono_mb_emit_stloc (mb, size_var);
	} else if (atype == ATYPE_NORMAL) {
		/* size = vtable->klass->instance_size; */
		mono_mb_emit_ldarg (mb, 0);
		mono_mb_emit_icon (mb, MONO_STRUCT_OFFSET (MonoVTable, klass));
		mono_mb_emit_byte (mb, CEE_ADD);
		mono_mb_emit_byte (mb, CEE_LDIND_I);
		mono_mb_emit_icon (mb, MONO_STRUCT_OFFSET (MonoClass, instance_size));
		mono_mb_emit_byte (mb, CEE_ADD);
		/* FIXME: assert instance_size stays a 4 byte integer */
		mono_mb_emit_byte (mb, CEE_LDIND_U4);
		mono_mb_emit_byte (mb, CEE_CONV_I);
		mono_mb_emit_stloc (mb, size_var);
	} else if (atype == ATYPE_VECTOR) {
		MonoExceptionClause *clause;
		int pos, pos_leave, pos_error;
		MonoClass *oom_exc_class;
		MonoMethod *ctor;

		/*
		 * n > MONO_ARRAY_MAX_INDEX => OutOfMemoryException
		 * n < 0                    => OverflowException
		 *
		 * We can do an unsigned comparison to catch both cases, then in the error
		 * case compare signed to distinguish between them.
		 */
		mono_mb_emit_ldarg (mb, 1);
		mono_mb_emit_icon (mb, MONO_ARRAY_MAX_INDEX);
		mono_mb_emit_byte (mb, CEE_CONV_U);
		pos = mono_mb_emit_short_branch (mb, CEE_BLE_UN_S);

		mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
		mono_mb_emit_byte (mb, CEE_MONO_NOT_TAKEN);
		mono_mb_emit_ldarg (mb, 1);
		mono_mb_emit_icon (mb, 0);
		pos_error = mono_mb_emit_short_branch (mb, CEE_BLT_S);
		mono_mb_emit_exception (mb, "OutOfMemoryException", NULL);
		mono_mb_patch_short_branch (mb, pos_error);
		mono_mb_emit_exception (mb, "OverflowException", NULL);

		mono_mb_patch_short_branch (mb, pos);

		clause = (MonoExceptionClause *)mono_image_alloc0 (mono_defaults.corlib, sizeof (MonoExceptionClause));
		clause->try_offset = mono_mb_get_label (mb);

		/* vtable->klass->sizes.element_size */
		mono_mb_emit_ldarg (mb, 0);
		mono_mb_emit_icon (mb, MONO_STRUCT_OFFSET (MonoVTable, klass));
		mono_mb_emit_byte (mb, CEE_ADD);
		mono_mb_emit_byte (mb, CEE_LDIND_I);
		mono_mb_emit_icon (mb, MONO_STRUCT_OFFSET (MonoClass, sizes));
		mono_mb_emit_byte (mb, CEE_ADD);
		mono_mb_emit_byte (mb, CEE_LDIND_U4);
		mono_mb_emit_byte (mb, CEE_CONV_I);

		/* * n */
		mono_mb_emit_ldarg (mb, 1);
		mono_mb_emit_byte (mb, CEE_MUL_OVF_UN);
		/* + sizeof (MonoArray) */
		mono_mb_emit_icon (mb, MONO_SIZEOF_MONO_ARRAY);
		mono_mb_emit_byte (mb, CEE_ADD_OVF_UN);
		mono_mb_emit_stloc (mb, size_var);

		pos_leave = mono_mb_emit_branch (mb, CEE_LEAVE);

		/* catch */
		clause->flags = MONO_EXCEPTION_CLAUSE_NONE;
		clause->try_len = mono_mb_get_pos (mb) - clause->try_offset;
		clause->data.catch_class = mono_class_load_from_name (mono_defaults.corlib,
				"System", "OverflowException");
		clause->handler_offset = mono_mb_get_label (mb);

		oom_exc_class = mono_class_load_from_name (mono_defaults.corlib,
				"System", "OutOfMemoryException");
		ctor = mono_class_get_method_from_name (oom_exc_class, ".ctor", 0);
		g_assert (ctor);

		mono_mb_emit_byte (mb, CEE_POP);
		mono_mb_emit_op (mb, CEE_NEWOBJ, ctor);
		mono_mb_emit_byte (mb, CEE_THROW);

		clause->handler_len = mono_mb_get_pos (mb) - clause->handler_offset;
		mono_mb_set_clauses (mb, 1, clause);
		mono_mb_patch_branch (mb, pos_leave);
		/* end catch */
	} else if (atype == ATYPE_STRING) {
		int pos;

		/*
		 * a string allocator method takes the args: (vtable, len)
		 *
		 * bytes = offsetof (MonoString, chars) + ((len + 1) * 2)
		 *
		 * condition:
		 *
		 * bytes <= INT32_MAX - (SGEN_ALLOC_ALIGN - 1)
		 *
		 * therefore:
		 *
		 * offsetof (MonoString, chars) + ((len + 1) * 2) <= INT32_MAX - (SGEN_ALLOC_ALIGN - 1)
		 * len <= (INT32_MAX - (SGEN_ALLOC_ALIGN - 1) - offsetof (MonoString, chars)) / 2 - 1
		 */
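		/*
		 * Worked example (illustrative values only): on a 32-bit layout
		 * with offsetof (MonoString, chars) == 12 and SGEN_ALLOC_ALIGN == 8,
		 * the bound evaluates to
		 *
		 *   len <= (2147483647 - 7 - 12) / 2 - 1 = 1073741813
		 *
		 * so any length that passes the check below cannot overflow the
		 * 32-bit size computation even after alignment padding.
		 */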
		mono_mb_emit_ldarg (mb, 1);
		mono_mb_emit_icon (mb, (INT32_MAX - (SGEN_ALLOC_ALIGN - 1) - MONO_STRUCT_OFFSET (MonoString, chars)) / 2 - 1);
		pos = mono_mb_emit_short_branch (mb, MONO_CEE_BLE_UN_S);

		mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
		mono_mb_emit_byte (mb, CEE_MONO_NOT_TAKEN);
		mono_mb_emit_exception (mb, "OutOfMemoryException", NULL);
		mono_mb_patch_short_branch (mb, pos);

		mono_mb_emit_ldarg (mb, 1);
		mono_mb_emit_icon (mb, 1);
		mono_mb_emit_byte (mb, MONO_CEE_SHL);
		//WE manually fold the above + 2 here
		mono_mb_emit_icon (mb, MONO_STRUCT_OFFSET (MonoString, chars) + 2);
		mono_mb_emit_byte (mb, CEE_ADD);
		mono_mb_emit_stloc (mb, size_var);
	} else {
		g_assert_not_reached ();
	}

#ifdef MANAGED_ALLOCATOR_CAN_USE_CRITICAL_REGION
	EMIT_TLS_ACCESS_IN_CRITICAL_REGION_ADDR (mb, thread_var);
	mono_mb_emit_byte (mb, CEE_LDC_I4_1);
	mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
	mono_mb_emit_byte (mb, CEE_MONO_ATOMIC_STORE_I4);
	mono_mb_emit_i4 (mb, MONO_MEMORY_BARRIER_NONE);
#endif

	/* size += ALLOC_ALIGN - 1; */
	mono_mb_emit_ldloc (mb, size_var);
	mono_mb_emit_icon (mb, SGEN_ALLOC_ALIGN - 1);
	mono_mb_emit_byte (mb, CEE_ADD);
	/* size &= ~(ALLOC_ALIGN - 1); */
	mono_mb_emit_icon (mb, ~(SGEN_ALLOC_ALIGN - 1));
	mono_mb_emit_byte (mb, CEE_AND);
	mono_mb_emit_stloc (mb, size_var);

	/* if (size > MAX_SMALL_OBJ_SIZE) goto slowpath */
	if (atype != ATYPE_SMALL) {
		mono_mb_emit_ldloc (mb, size_var);
		mono_mb_emit_icon (mb, SGEN_MAX_SMALL_OBJ_SIZE);
		max_size_branch = mono_mb_emit_short_branch (mb, MONO_CEE_BGT_UN_S);
	}

	/*
	 * We need to modify tlab_next, but the JIT only supports reading, so we read
	 * another tls var holding its address instead.
	 */

	/* tlab_next_addr (local) = tlab_next_addr (TLS var) */
	tlab_next_addr_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
	EMIT_TLS_ACCESS_NEXT_ADDR (mb, thread_var);
	mono_mb_emit_stloc (mb, tlab_next_addr_var);

	/* p = (void**)tlab_next; */
	p_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
	mono_mb_emit_ldloc (mb, tlab_next_addr_var);
	mono_mb_emit_byte (mb, CEE_LDIND_I);
	mono_mb_emit_stloc (mb, p_var);

	/* new_next = (char*)p + size; */
	new_next_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
	mono_mb_emit_ldloc (mb, p_var);
	mono_mb_emit_ldloc (mb, size_var);
	mono_mb_emit_byte (mb, CEE_CONV_I);
	mono_mb_emit_byte (mb, CEE_ADD);
	mono_mb_emit_stloc (mb, new_next_var);

	/* if (G_LIKELY (new_next < tlab_temp_end)) */
	mono_mb_emit_ldloc (mb, new_next_var);
	EMIT_TLS_ACCESS_TEMP_END (mb, thread_var);
	slowpath_branch = mono_mb_emit_short_branch (mb, MONO_CEE_BLT_UN_S);

	/* Slowpath */
	if (atype != ATYPE_SMALL)
		mono_mb_patch_short_branch (mb, max_size_branch);

	mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
	mono_mb_emit_byte (mb, CEE_MONO_NOT_TAKEN);
	/*
	 * We are no longer in a critical section. We need to do this before calling
	 * to unmanaged land in order to avoid stw deadlocks since unmanaged code
	 * might take locks.
	 */
#ifdef MANAGED_ALLOCATOR_CAN_USE_CRITICAL_REGION
	EMIT_TLS_ACCESS_IN_CRITICAL_REGION_ADDR (mb, thread_var);
	mono_mb_emit_byte (mb, CEE_LDC_I4_0);
	mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
	mono_mb_emit_byte (mb, CEE_MONO_ATOMIC_STORE_I4);
	mono_mb_emit_i4 (mb, MONO_MEMORY_BARRIER_NONE);
#endif

	/* FIXME: mono_gc_alloc_obj takes a 'size_t' as an argument, not an int32 */
	mono_mb_emit_ldarg (mb, 0);
	mono_mb_emit_ldloc (mb, size_var);
	if (atype == ATYPE_NORMAL || atype == ATYPE_SMALL) {
		mono_mb_emit_icall (mb, mono_gc_alloc_obj);
	} else if (atype == ATYPE_VECTOR) {
		mono_mb_emit_ldarg (mb, 1);
		mono_mb_emit_icall (mb, mono_gc_alloc_vector);
	} else if (atype == ATYPE_STRING) {
		mono_mb_emit_ldarg (mb, 1);
		mono_mb_emit_icall (mb, mono_gc_alloc_string);
	} else {
		g_assert_not_reached ();
	}
	mono_mb_emit_byte (mb, CEE_RET);

	/* Fastpath */
	mono_mb_patch_short_branch (mb, slowpath_branch);

	/* FIXME: Memory barrier */

	/* tlab_next = new_next */
	mono_mb_emit_ldloc (mb, tlab_next_addr_var);
	mono_mb_emit_ldloc (mb, new_next_var);
	mono_mb_emit_byte (mb, CEE_STIND_I);

	/* *p = vtable; */
	mono_mb_emit_ldloc (mb, p_var);
	mono_mb_emit_ldarg (mb, 0);
	mono_mb_emit_byte (mb, CEE_STIND_I);

	if (atype == ATYPE_VECTOR) {
		/* arr->max_length = max_length; */
		mono_mb_emit_ldloc (mb, p_var);
		mono_mb_emit_ldflda (mb, MONO_STRUCT_OFFSET (MonoArray, max_length));
		mono_mb_emit_ldarg (mb, 1);
#ifdef MONO_BIG_ARRAYS
		mono_mb_emit_byte (mb, CEE_STIND_I);
#else
		mono_mb_emit_byte (mb, CEE_STIND_I4);
#endif
	} else if (atype == ATYPE_STRING) {
		/* need to set length and clear the last char */
		/* s->length = len; */
		mono_mb_emit_ldloc (mb, p_var);
		mono_mb_emit_icon (mb, MONO_STRUCT_OFFSET (MonoString, length));
		mono_mb_emit_byte (mb, MONO_CEE_ADD);
		mono_mb_emit_ldarg (mb, 1);
		mono_mb_emit_byte (mb, MONO_CEE_STIND_I4);
	}

#ifdef MANAGED_ALLOCATOR_CAN_USE_CRITICAL_REGION
	EMIT_TLS_ACCESS_IN_CRITICAL_REGION_ADDR (mb, thread_var);
	mono_mb_emit_byte (mb, CEE_LDC_I4_0);
	mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
	mono_mb_emit_byte (mb, CEE_MONO_ATOMIC_STORE_I4);
	mono_mb_emit_i4 (mb, MONO_MEMORY_BARRIER_REL);
#else
	mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
	mono_mb_emit_byte (mb, CEE_MONO_MEMORY_BARRIER);
	/*
	 * We must make sure both vtable and max_length are globally visible before returning to managed land.
	 */
	mono_mb_emit_i4 (mb, MONO_MEMORY_BARRIER_REL);
#endif

	/* return p */
	mono_mb_emit_ldloc (mb, p_var);

 done:
	mono_mb_emit_byte (mb, CEE_RET);

	info = mono_wrapper_info_create (mb, WRAPPER_SUBTYPE_NONE);
	info->d.alloc.gc_name = "sgen";
	info->d.alloc.alloc_type = atype;

	mb->init_locals = FALSE;

	res = mono_mb_create (mb, csig, 8, info);
	mono_mb_free (mb);

	return res;
}
int
mono_gc_get_aligned_size_for_allocator (int size)
{
	return SGEN_ALIGN_UP (size);
}
/*
 * Generate an allocator method implementing the fast path of mono_gc_alloc_obj ().
 * The signature of the called method is:
 * 	object allocate (MonoVTable *vtable)
 */
MonoMethod*
mono_gc_get_managed_allocator (MonoClass *klass, gboolean for_box, gboolean known_instance_size)
{
#ifdef MANAGED_ALLOCATION
	if (collect_before_allocs)
		return NULL;
	if (!mono_runtime_has_tls_get ())
		return NULL;
	if (klass->instance_size > tlab_size)
		return NULL;
	if (known_instance_size && ALIGN_TO (klass->instance_size, SGEN_ALLOC_ALIGN) >= SGEN_MAX_SMALL_OBJ_SIZE)
		return NULL;
	if (mono_class_has_finalizer (klass) || mono_class_is_marshalbyref (klass))
		return NULL;
	if (klass->rank)
		return NULL;
	if (mono_profiler_get_events () & MONO_PROFILE_ALLOCATIONS)
		return NULL;
	if (klass->byval_arg.type == MONO_TYPE_STRING)
		return mono_gc_get_managed_allocator_by_type (ATYPE_STRING, MANAGED_ALLOCATOR_REGULAR);
	/* Generic classes have dynamic field and can go above MAX_SMALL_OBJ_SIZE. */
	if (known_instance_size)
		return mono_gc_get_managed_allocator_by_type (ATYPE_SMALL, MANAGED_ALLOCATOR_REGULAR);
	else
		return mono_gc_get_managed_allocator_by_type (ATYPE_NORMAL, MANAGED_ALLOCATOR_REGULAR);
#else
	return NULL;
#endif
}
MonoMethod*
mono_gc_get_managed_array_allocator (MonoClass *klass)
{
#ifdef MANAGED_ALLOCATION
	if (klass->rank != 1)
		return NULL;
	if (!mono_runtime_has_tls_get ())
		return NULL;
	if (mono_profiler_get_events () & MONO_PROFILE_ALLOCATIONS)
		return NULL;
	if (has_per_allocation_action)
		return NULL;
	g_assert (!mono_class_has_finalizer (klass) && !mono_class_is_marshalbyref (klass));

	return mono_gc_get_managed_allocator_by_type (ATYPE_VECTOR, MANAGED_ALLOCATOR_REGULAR);
#else
	return NULL;
#endif
}
void
sgen_set_use_managed_allocator (gboolean flag)
{
	use_managed_allocator = flag;
}
MonoMethod*
mono_gc_get_managed_allocator_by_type (int atype, ManagedAllocatorVariant variant)
{
#ifdef MANAGED_ALLOCATION
	MonoMethod *res;
	MonoMethod **cache;

	if (variant == MANAGED_ALLOCATOR_REGULAR && !use_managed_allocator)
		return NULL;

	if (variant == MANAGED_ALLOCATOR_REGULAR && !mono_runtime_has_tls_get ())
		return NULL;

	switch (variant) {
	case MANAGED_ALLOCATOR_REGULAR: cache = alloc_method_cache; break;
	case MANAGED_ALLOCATOR_SLOW_PATH: cache = slowpath_alloc_method_cache; break;
	default: g_assert_not_reached (); break;
	}

	res = cache [atype];
	if (res)
		return res;

	res = create_allocator (atype, variant);
	LOCK_GC;
	if (cache [atype]) {
		/* Another thread built it in the meantime; discard ours. */
		mono_free_method (res);
		res = cache [atype];
	} else {
		mono_memory_barrier ();
		cache [atype] = res;
	}
	UNLOCK_GC;

	return res;
#else
	return NULL;
#endif
}
int
mono_gc_get_managed_allocator_types (void)
{
	return ATYPE_NUM;
}

gboolean
sgen_is_managed_allocator (MonoMethod *method)
{
	int i;

	for (i = 0; i < ATYPE_NUM; ++i)
		if (method == alloc_method_cache [i] || method == slowpath_alloc_method_cache [i])
			return TRUE;
	return FALSE;
}

gboolean
sgen_has_managed_allocator (void)
{
	int i;

	for (i = 0; i < ATYPE_NUM; ++i)
		if (alloc_method_cache [i] || slowpath_alloc_method_cache [i])
			return TRUE;
	return FALSE;
}
/*
 * Cardtable scanning
 */

#define MWORD_MASK (sizeof (mword) - 1)

static inline int
find_card_offset (mword card)
{
	/*XXX Use assembly as this generates some pretty bad code */
#if defined(__i386__) && defined(__GNUC__)
	return (__builtin_ffs (card) - 1) / 8;
#elif defined(__x86_64__) && defined(__GNUC__)
	return (__builtin_ffsll (card) - 1) / 8;
#elif defined(__s390x__)
	return (__builtin_ffsll (GUINT64_TO_LE (card)) - 1) / 8;
#else
	int i;
	guint8 *ptr = (guint8 *) &card;
	for (i = 0; i < sizeof (mword); ++i) {
		if (ptr [i])
			return i;
	}
	return 0;
#endif
}
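/*
 * Worked example for the fallback above, on a little-endian machine with
 * 8-byte mwords: for card = 0x0000000000010000 the first nonzero byte is
 * byte 2, and the intrinsic variants agree, since
 * __builtin_ffsll (card) = 17 and (17 - 1) / 8 = 2.
 */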
static guint8*
find_next_card (guint8 *card_data, guint8 *end)
{
	mword *cards, *cards_end;

	/* Scan byte-by-byte until card_data is mword aligned. */
	while ((((mword)card_data) & MWORD_MASK) && card_data < end) {
		if (*card_data)
			return card_data;
		card_data++;
	}

	if (card_data == end)
		return end;

	/* Scan one mword at a time. */
	cards = (mword*)card_data;
	cards_end = (mword*)((mword)end & ~MWORD_MASK);
	while (cards < cards_end) {
		mword card = *cards;
		if (card)
			return (guint8*)cards + find_card_offset (card);
		cards++;
	}

	/* Scan the unaligned tail byte-by-byte. */
	card_data = (guint8*)cards_end;
	while (card_data < end) {
		if (*card_data)
			return card_data;
		card_data++;
	}

	return end;
}
#define ARRAY_OBJ_INDEX(ptr,array,elem_size) (((char*)(ptr) - ((char*)(array) + G_STRUCT_OFFSET (MonoArray, vector))) / (elem_size))
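/*
 * Example of the index computation above: for a pointer 4096 bytes past
 * the start of the array's vector data and elem_size = 16, the macro
 * yields 4096 / 16 = 256, i.e. the index of the first element covered by
 * that address.
 */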
gboolean
sgen_client_cardtable_scan_object (GCObject *obj, mword block_obj_size, guint8 *cards, ScanCopyContext ctx)
{
	MonoVTable *vt = SGEN_LOAD_VTABLE (obj);
	MonoClass *klass = vt->klass;

	SGEN_ASSERT (0, SGEN_VTABLE_HAS_REFERENCES (vt), "Why would we ever call this on reference-free objects?");

	if (vt->rank) {
		MonoArray *arr = (MonoArray*)obj;
		guint8 *card_data, *card_base;
		guint8 *card_data_end;
		char *obj_start = (char *)sgen_card_table_align_pointer (obj);
		mword bounds_size;
		mword obj_size = sgen_mono_array_size (vt, arr, &bounds_size, sgen_vtable_get_descriptor (vt));
		/* We don't want to scan the bounds entries at the end of multidimensional arrays */
		char *obj_end = (char*)obj + obj_size - bounds_size;
		size_t card_count;
		size_t extra_idx = 0;

		mword desc = (mword)klass->element_class->gc_descr;
		int elem_size = mono_array_element_size (klass);

#ifdef SGEN_HAVE_OVERLAPPING_CARDS
		guint8 *overflow_scan_end = NULL;
#endif

#ifdef SGEN_OBJECT_LAYOUT_STATISTICS
		if (klass->element_class->valuetype)
			sgen_object_layout_scanned_vtype_array ();
		else
			sgen_object_layout_scanned_ref_array ();
#endif

		if (cards)
			card_data = cards;
		else
			card_data = sgen_card_table_get_card_scan_address ((mword)obj);

		card_base = card_data;
		card_count = sgen_card_table_number_of_cards_in_range ((mword)obj, obj_size);
		card_data_end = card_data + card_count;

#ifdef SGEN_HAVE_OVERLAPPING_CARDS
		/*Check for overflow and if so, setup to scan in two steps*/
		if (!cards && card_data_end >= SGEN_SHADOW_CARDTABLE_END) {
			overflow_scan_end = sgen_shadow_cardtable + (card_data_end - SGEN_SHADOW_CARDTABLE_END);
			card_data_end = SGEN_SHADOW_CARDTABLE_END;
		}

LOOP_HEAD:
#endif

		card_data = find_next_card (card_data, card_data_end);
		for (; card_data < card_data_end; card_data = find_next_card (card_data + 1, card_data_end)) {
			size_t index;
			size_t idx = (card_data - card_base) + extra_idx;
			char *start = (char*)(obj_start + idx * CARD_SIZE_IN_BYTES);
			char *card_end = start + CARD_SIZE_IN_BYTES;
			char *first_elem, *elem;

			HEAVY_STAT (++los_marked_cards);

			if (!cards)
				sgen_card_table_prepare_card_for_scanning (card_data);

			card_end = MIN (card_end, obj_end);

			if (start <= (char*)arr->vector)
				index = 0;
			else
				index = ARRAY_OBJ_INDEX (start, obj, elem_size);

			elem = first_elem = (char*)mono_array_addr_with_size_fast ((MonoArray*)obj, elem_size, index);
			if (klass->element_class->valuetype) {
				ScanVTypeFunc scan_vtype_func = ctx.ops->scan_vtype;

				for (; elem < card_end; elem += elem_size)
					scan_vtype_func (obj, elem, desc, ctx.queue BINARY_PROTOCOL_ARG (elem_size));
			} else {
				ScanPtrFieldFunc scan_ptr_field_func = ctx.ops->scan_ptr_field;

				HEAVY_STAT (++los_array_cards);
				for (; elem < card_end; elem += SIZEOF_VOID_P)
					scan_ptr_field_func (obj, (GCObject**)elem, ctx.queue);
			}

			binary_protocol_card_scan (first_elem, elem - first_elem);
		}

#ifdef SGEN_HAVE_OVERLAPPING_CARDS
		if (overflow_scan_end) {
			extra_idx = card_data - card_base;
			card_base = card_data = sgen_shadow_cardtable;
			card_data_end = overflow_scan_end;
			overflow_scan_end = NULL;
			goto LOOP_HEAD;
		}
#endif
		return TRUE;
	}

	return FALSE;
}
/*
 * Array and string allocation
 */

void*
mono_gc_alloc_vector (MonoVTable *vtable, size_t size, uintptr_t max_length)
{
	MonoArray *arr;

	if (!SGEN_CAN_ALIGN_UP (size))
		return NULL;

#ifndef DISABLE_CRITICAL_REGION
	ENTER_CRITICAL_REGION;
	arr = (MonoArray*)sgen_try_alloc_obj_nolock (vtable, size);
	if (arr) {
		/*This doesn't require fencing since EXIT_CRITICAL_REGION already does it for us*/
		arr->max_length = (mono_array_size_t)max_length;
		EXIT_CRITICAL_REGION;
		goto done;
	}
	EXIT_CRITICAL_REGION;
#endif

	LOCK_GC;

	arr = (MonoArray*)sgen_alloc_obj_nolock (vtable, size);
	if (G_UNLIKELY (!arr)) {
		UNLOCK_GC;
		return NULL;
	}

	arr->max_length = (mono_array_size_t)max_length;

	UNLOCK_GC;

 done:
	if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_ALLOCATIONS))
		mono_profiler_allocation (&arr->obj);

	SGEN_ASSERT (6, SGEN_ALIGN_UP (size) == SGEN_ALIGN_UP (sgen_client_par_object_get_size (vtable, (GCObject*)arr)), "Vector has incorrect size.");
	return arr;
}
void*
mono_gc_alloc_array (MonoVTable *vtable, size_t size, uintptr_t max_length, uintptr_t bounds_size)
{
	MonoArray *arr;
	MonoArrayBounds *bounds;

	if (!SGEN_CAN_ALIGN_UP (size))
		return NULL;

#ifndef DISABLE_CRITICAL_REGION
	ENTER_CRITICAL_REGION;
	arr = (MonoArray*)sgen_try_alloc_obj_nolock (vtable, size);
	if (arr) {
		/*This doesn't require fencing since EXIT_CRITICAL_REGION already does it for us*/
		arr->max_length = (mono_array_size_t)max_length;

		bounds = (MonoArrayBounds*)((char*)arr + size - bounds_size);
		arr->bounds = bounds;
		EXIT_CRITICAL_REGION;
		goto done;
	}
	EXIT_CRITICAL_REGION;
#endif

	LOCK_GC;

	arr = (MonoArray*)sgen_alloc_obj_nolock (vtable, size);
	if (G_UNLIKELY (!arr)) {
		UNLOCK_GC;
		return NULL;
	}

	arr->max_length = (mono_array_size_t)max_length;

	bounds = (MonoArrayBounds*)((char*)arr + size - bounds_size);
	arr->bounds = bounds;

	UNLOCK_GC;

 done:
	if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_ALLOCATIONS))
		mono_profiler_allocation (&arr->obj);

	SGEN_ASSERT (6, SGEN_ALIGN_UP (size) == SGEN_ALIGN_UP (sgen_client_par_object_get_size (vtable, (GCObject*)arr)), "Array has incorrect size.");
	return arr;
}
void*
mono_gc_alloc_string (MonoVTable *vtable, size_t size, gint32 len)
{
	MonoString *str;

	if (!SGEN_CAN_ALIGN_UP (size))
		return NULL;

#ifndef DISABLE_CRITICAL_REGION
	ENTER_CRITICAL_REGION;
	str = (MonoString*)sgen_try_alloc_obj_nolock (vtable, size);
	if (str) {
		/*This doesn't require fencing since EXIT_CRITICAL_REGION already does it for us*/
		str->length = len;
		EXIT_CRITICAL_REGION;
		goto done;
	}
	EXIT_CRITICAL_REGION;
#endif

	LOCK_GC;

	str = (MonoString*)sgen_alloc_obj_nolock (vtable, size);
	if (G_UNLIKELY (!str)) {
		UNLOCK_GC;
		return NULL;
	}

	str->length = len;

	UNLOCK_GC;

 done:
	if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_ALLOCATIONS))
		mono_profiler_allocation (&str->object);

	return str;
}
void
mono_gc_set_string_length (MonoString *str, gint32 new_length)
{
	mono_unichar2 *new_end = str->chars + new_length;

	/* zero the discarded string. This null-delimits the string and allows
	 * the space to be reclaimed by SGen. */

	if (nursery_canaries_enabled () && sgen_ptr_in_nursery (str)) {
		CHECK_CANARY_FOR_OBJECT ((GCObject*)str, TRUE);
		memset (new_end, 0, (str->length - new_length + 1) * sizeof (mono_unichar2) + CANARY_SIZE);
		memcpy (new_end + 1, CANARY_STRING, CANARY_SIZE);
	} else {
		memset (new_end, 0, (str->length - new_length + 1) * sizeof (mono_unichar2));
	}

	str->length = new_length;
}
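/*
 * Arithmetic example for the memset above (no canaries): shrinking a
 * string from length 6 to new_length 3 zeroes (6 - 3 + 1) * 2 = 8 bytes,
 * i.e. chars [3..6] (the three discarded characters plus the old
 * terminator), leaving the string null-terminated at its new length.
 */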
#define GC_ROOT_NUM 32
typedef struct {
	int count;		/* must be the first field */
	void *objects [GC_ROOT_NUM];
	int root_types [GC_ROOT_NUM];
	uintptr_t extra_info [GC_ROOT_NUM];
} GCRootReport;

static void
notify_gc_roots (GCRootReport *report)
{
	if (!report->count)
		return;
	mono_profiler_gc_roots (report->count, report->objects, report->root_types, report->extra_info);
	report->count = 0;
}

static void
add_profile_gc_root (GCRootReport *report, void *object, int rtype, uintptr_t extra_info)
{
	if (report->count == GC_ROOT_NUM)
		notify_gc_roots (report);
	report->objects [report->count] = object;
	report->root_types [report->count] = rtype;
	report->extra_info [report->count++] = (uintptr_t)SGEN_LOAD_VTABLE (object)->klass;
}
void
sgen_client_nursery_objects_pinned (void **definitely_pinned, int count)
{
	if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS) {
		GCRootReport report;
		int idx;
		report.count = 0;
		for (idx = 0; idx < count; ++idx)
			add_profile_gc_root (&report, definitely_pinned [idx], MONO_PROFILE_GC_ROOT_PINNING | MONO_PROFILE_GC_ROOT_MISC, 0);
		notify_gc_roots (&report);
	}
}
static void
report_finalizer_roots_from_queue (SgenPointerQueue *queue)
{
	GCRootReport report;
	size_t i;

	report.count = 0;
	for (i = 0; i < queue->next_slot; ++i) {
		void *obj = queue->data [i];
		if (!obj)
			continue;
		add_profile_gc_root (&report, obj, MONO_PROFILE_GC_ROOT_FINALIZER, 0);
	}
	notify_gc_roots (&report);
}

static void
report_finalizer_roots (SgenPointerQueue *fin_ready_queue, SgenPointerQueue *critical_fin_queue)
{
	report_finalizer_roots_from_queue (fin_ready_queue);
	report_finalizer_roots_from_queue (critical_fin_queue);
}
static GCRootReport *root_report;

static void
single_arg_report_root (MonoObject **obj, void *gc_data)
{
	if (*obj)
		add_profile_gc_root (root_report, *obj, MONO_PROFILE_GC_ROOT_OTHER, 0);
}

static void
precisely_report_roots_from (GCRootReport *report, void** start_root, void** end_root, mword desc)
{
	switch (desc & ROOT_DESC_TYPE_MASK) {
	case ROOT_DESC_BITMAP:
		desc >>= ROOT_DESC_TYPE_SHIFT;
		while (desc) {
			if ((desc & 1) && *start_root) {
				add_profile_gc_root (report, *start_root, MONO_PROFILE_GC_ROOT_OTHER, 0);
			}
			desc >>= 1;
			start_root++;
		}
		return;
	case ROOT_DESC_COMPLEX: {
		gsize *bitmap_data = (gsize *)sgen_get_complex_descriptor_bitmap (desc);
		gsize bwords = (*bitmap_data) - 1;
		void **start_run = start_root;
		bitmap_data++;
		while (bwords-- > 0) {
			gsize bmap = *bitmap_data++;
			void **objptr = start_run;
			while (bmap) {
				if ((bmap & 1) && *objptr) {
					add_profile_gc_root (report, *objptr, MONO_PROFILE_GC_ROOT_OTHER, 0);
				}
				bmap >>= 1;
				++objptr;
			}
			start_run += GC_BITS_PER_WORD;
		}
		break;
	}
	case ROOT_DESC_USER: {
		MonoGCRootMarkFunc marker = (MonoGCRootMarkFunc)sgen_get_user_descriptor_func (desc);
		root_report = report;
		marker ((MonoObject**)start_root, single_arg_report_root, NULL);
		break;
	}
	case ROOT_DESC_RUN_LEN:
		g_assert_not_reached ();
	default:
		g_assert_not_reached ();
	}
}
static void
report_registered_roots_by_type (int root_type)
{
	GCRootReport report;
	void **start_root;
	RootRecord *root;

	report.count = 0;
	SGEN_HASH_TABLE_FOREACH (&roots_hash [root_type], void **, start_root, RootRecord *, root) {
		SGEN_LOG (6, "Precise root scan %p-%p (desc: %p)", start_root, root->end_root, (void*)root->root_desc);
		precisely_report_roots_from (&report, start_root, (void**)root->end_root, root->root_desc);
	} SGEN_HASH_TABLE_FOREACH_END;
	notify_gc_roots (&report);
}

static void
report_registered_roots (void)
{
	report_registered_roots_by_type (ROOT_TYPE_NORMAL);
	report_registered_roots_by_type (ROOT_TYPE_WBARRIER);
}
void
sgen_client_collecting_minor (SgenPointerQueue *fin_ready_queue, SgenPointerQueue *critical_fin_queue)
{
	if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
		report_registered_roots ();
	if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
		report_finalizer_roots (fin_ready_queue, critical_fin_queue);
}

static GCRootReport major_root_report;
static gboolean profile_roots;

void
sgen_client_collecting_major_1 (void)
{
	profile_roots = mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS;
	memset (&major_root_report, 0, sizeof (GCRootReport));
}

void
sgen_client_pinned_los_object (GCObject *obj)
{
	if (profile_roots)
		add_profile_gc_root (&major_root_report, (char*)obj, MONO_PROFILE_GC_ROOT_PINNING | MONO_PROFILE_GC_ROOT_MISC, 0);
}

void
sgen_client_collecting_major_2 (void)
{
	if (profile_roots)
		notify_gc_roots (&major_root_report);

	if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
		report_registered_roots ();
}

void
sgen_client_collecting_major_3 (SgenPointerQueue *fin_ready_queue, SgenPointerQueue *critical_fin_queue)
{
	if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
		report_finalizer_roots (fin_ready_queue, critical_fin_queue);
}
2053 #define MOVED_OBJECTS_NUM 64
2054 static void *moved_objects
[MOVED_OBJECTS_NUM
];
2055 static int moved_objects_idx
= 0;
2057 static SgenPointerQueue moved_objects_queue
= SGEN_POINTER_QUEUE_INIT (INTERNAL_MEM_MOVED_OBJECT
);
2060 mono_sgen_register_moved_object (void *obj
, void *destination
)
2063 * This function can be called from SGen's worker threads. We want to try
2064 * and avoid exposing those threads to the profiler API, so queue up move
2065 * events and send them later when the main GC thread calls
2066 * mono_sgen_gc_event_moves ().
2068 * TODO: Once SGen has multiple worker threads, we need to switch to a
2069 * lock-free data structure for the queue as multiple threads will be
2070 * adding to it at the same time.
2072 if (sgen_thread_pool_is_thread_pool_thread (mono_native_thread_id_get ())) {
2073 sgen_pointer_queue_add (&moved_objects_queue
, obj
);
2074 sgen_pointer_queue_add (&moved_objects_queue
, destination
);
2076 if (moved_objects_idx
== MOVED_OBJECTS_NUM
) {
2077 mono_profiler_gc_moves (moved_objects
, moved_objects_idx
);
2078 moved_objects_idx
= 0;
2081 moved_objects
[moved_objects_idx
++] = obj
;
2082 moved_objects
[moved_objects_idx
++] = destination
;
void
mono_sgen_gc_event_moves (void)
{
	while (!sgen_pointer_queue_is_empty (&moved_objects_queue)) {
		void *dst = sgen_pointer_queue_pop (&moved_objects_queue);
		void *src = sgen_pointer_queue_pop (&moved_objects_queue);

		mono_sgen_register_moved_object (src, dst);
	}

	if (moved_objects_idx) {
		mono_profiler_gc_moves (moved_objects, moved_objects_idx);
		moved_objects_idx = 0;
	}
}
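
/*
 * Illustrative sketch, not built as part of this file: a profiler callback
 * consuming the (old address, new address) pairs flushed above via
 * mono_profiler_gc_moves ().  `example_gc_moves` is a hypothetical name;
 * installation would go through the profiler API's
 * mono_profiler_install_gc_moves ().
 */
#if 0
static void
example_gc_moves (MonoProfiler *prof, void **objects, int num)
{
	int i;
	/* Pairs are laid out back to back, matching moved_objects [] above. */
	for (i = 0; i < num; i += 2)
		g_print ("object %p moved to %p\n", objects [i], objects [i + 1]);
}
#endif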
#define REFS_SIZE 128
typedef struct {
	void *data;
	MonoGCReferences callback;
	int flags;
	int count;
	int called;
	MonoObject *refs [REFS_SIZE];
	uintptr_t offsets [REFS_SIZE];
} HeapWalkInfo;

#undef HANDLE_PTR
#define HANDLE_PTR(ptr,obj)	do {	\
		if (*(ptr)) {	\
			if (hwi->count == REFS_SIZE) {	\
				hwi->callback ((MonoObject*)start, mono_object_class (start), hwi->called? 0: size, hwi->count, hwi->refs, hwi->offsets, hwi->data);	\
				hwi->count = 0;	\
				hwi->called = 1;	\
			}	\
			hwi->offsets [hwi->count] = (char*)(ptr)-(char*)start;	\
			hwi->refs [hwi->count++] = *(ptr);	\
		}	\
	} while (0)
static void
collect_references (HeapWalkInfo *hwi, GCObject *obj, size_t size)
{
	char *start = (char*)obj;
	mword desc = sgen_obj_get_descriptor (obj);

#define SCAN_OBJECT_NOVTABLE
#include "sgen/sgen-scan-object.h"
}
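
/*
 * Note: the include above expands to a scan loop driven by `desc` that
 * invokes the HANDLE_PTR macro defined above for each reference slot of the
 * object; SCAN_OBJECT_NOVTABLE makes it use the caller-supplied `start` and
 * `desc` instead of loading them from the object's vtable.
 */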
static void
walk_references (GCObject *start, size_t size, void *data)
{
	HeapWalkInfo *hwi = (HeapWalkInfo *)data;
	hwi->called = 0;
	hwi->count = 0;
	collect_references (hwi, start, size);
	if (hwi->count || !hwi->called)
		hwi->callback (start, mono_object_class (start), hwi->called? 0: size, hwi->count, hwi->refs, hwi->offsets, hwi->data);
}
/**
 * mono_gc_walk_heap:
 * @flags: flags for future use
 * @callback: a function pointer called for each object in the heap
 * @data: a user data pointer that is passed to callback
 *
 * This function can be used to iterate over all the live objects in the heap:
 * for each object, @callback is invoked, providing info about the object's
 * location in memory, its class, its size and the objects it references.
 * For each referenced object its offset from the object address is
 * reported in the offsets array.
 * The object references may be buffered, so the callback may be invoked
 * multiple times for the same object: in all but the first call, the size
 * argument will be zero.
 * Note that this function can only be called in the #MONO_GC_EVENT_PRE_START_WORLD
 * profiler event handler.
 *
 * Returns: a non-zero value if the GC doesn't support heap walking
 */
int
mono_gc_walk_heap (int flags, MonoGCReferences callback, void *data)
{
	HeapWalkInfo hwi;

	hwi.flags = flags;
	hwi.callback = callback;
	hwi.data = data;

	sgen_clear_nursery_fragments ();
	sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data, walk_references, &hwi, FALSE, TRUE);

	major_collector.iterate_objects (ITERATE_OBJECTS_SWEEP_ALL, walk_references, &hwi);
	sgen_los_iterate_objects (walk_references, &hwi);

	return 0;
}
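
/*
 * A minimal usage sketch (not built here), assuming only the public
 * MonoGCReferences signature from metadata/mono-gc.h; the `example_*` names
 * are hypothetical.  Because of the REFS_SIZE buffering above, `size` is
 * non-zero only for the first callback per object.
 */
#if 0
static int
example_count_refs (MonoObject *obj, MonoClass *klass, uintptr_t size, uintptr_t num, MonoObject **refs, uintptr_t *offsets, void *data)
{
	size_t *total = (size_t *)data;
	*total += num;	/* accumulate outgoing references */
	return 0;
}

static void
example_walk_heap (void)
{
	size_t total = 0;
	/* Only valid inside a MONO_GC_EVENT_PRE_START_WORLD profiler handler. */
	mono_gc_walk_heap (0, example_count_refs, &total);
	g_print ("%zu references\n", total);
}
#endif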
void
mono_gc_set_gc_callbacks (MonoGCCallbacks *callbacks)
{
	gc_callbacks = *callbacks;
}

MonoGCCallbacks *
mono_gc_get_gc_callbacks ()
{
	return &gc_callbacks;
}
void
sgen_client_thread_register (SgenThreadInfo* info, void *stack_bottom_fallback)
{
	size_t stsize = 0;
	guint8 *staddr = NULL;

#ifndef HAVE_KW_THREAD
	g_assert (!mono_native_tls_get_value (thread_info_key));
	mono_native_tls_set_value (thread_info_key, info);
#else
	sgen_thread_info = info;
#endif

	info->client_info.skip = 0;
	info->client_info.stopped_ip = NULL;
	info->client_info.stopped_domain = NULL;

	info->client_info.stack_start = NULL;

#ifdef SGEN_POSIX_STW
	info->client_info.stop_count = -1;
	info->client_info.signal = 0;
#endif

	mono_thread_info_get_stack_bounds (&staddr, &stsize);

	if (staddr) {
		info->client_info.stack_start_limit = staddr;
		info->client_info.stack_end = staddr + stsize;
	} else {
		gsize stack_bottom = (gsize)stack_bottom_fallback;
		stack_bottom += 4095;
		stack_bottom &= ~4095;
		info->client_info.stack_end = (char*)stack_bottom;
	}

	memset (&info->client_info.ctx, 0, sizeof (MonoContext));

	if (mono_gc_get_gc_callbacks ()->thread_attach_func)
		info->client_info.runtime_data = mono_gc_get_gc_callbacks ()->thread_attach_func ();

	binary_protocol_thread_register ((gpointer)mono_thread_info_get_tid (info));

	SGEN_LOG (3, "registered thread %p (%p) stack end %p", info, (gpointer)mono_thread_info_get_tid (info), info->client_info.stack_end);

	info->client_info.info.handle_stack = mono_handle_stack_alloc ();
}
void
sgen_client_thread_unregister (SgenThreadInfo *p)
{
	MonoNativeThreadId tid;

#ifndef HAVE_KW_THREAD
	mono_native_tls_set_value (thread_info_key, NULL);
#else
	sgen_thread_info = NULL;
#endif

	tid = mono_thread_info_get_tid (p);

	if (p->client_info.info.runtime_thread)
		mono_threads_add_joinable_thread ((gpointer)tid);

	if (mono_gc_get_gc_callbacks ()->thread_detach_func) {
		mono_gc_get_gc_callbacks ()->thread_detach_func (p->client_info.runtime_data);
		p->client_info.runtime_data = NULL;
	}

	binary_protocol_thread_unregister ((gpointer)tid);
	SGEN_LOG (3, "unregister thread %p (%p)", p, (gpointer)tid);

	HandleStack *handles = (HandleStack*) p->client_info.info.handle_stack;
	p->client_info.info.handle_stack = NULL;
	mono_handle_stack_free (handles);
}
void
mono_gc_set_skip_thread (gboolean skip)
{
	SgenThreadInfo *info = mono_thread_info_current ();

	LOCK_GC;
	info->client_info.gc_disabled = skip;
	UNLOCK_GC;
}
static gboolean
is_critical_method (MonoMethod *method)
{
	return mono_runtime_is_critical_method (method) || sgen_is_critical_method (method);
}

static gboolean
thread_in_critical_region (SgenThreadInfo *info)
{
	return info->client_info.in_critical_region;
}

static void
sgen_thread_attach (SgenThreadInfo *info)
{
	if (mono_gc_get_gc_callbacks ()->thread_attach_func && !info->client_info.runtime_data)
		info->client_info.runtime_data = mono_gc_get_gc_callbacks ()->thread_attach_func ();
}
static void
sgen_thread_detach (SgenThreadInfo *p)
{
	/* If a delegate is passed to native code and invoked on a thread we don't
	 * know about, marshal will register it with mono_threads_attach_coop, but
	 * we have no way of knowing when that thread goes away. SGen has a TSD
	 * so we assume that if the domain is still registered, we can detach
	 * the thread.
	 */
	if (mono_thread_internal_current_is_attached ())
		mono_thread_detach_internal (mono_thread_internal_current ());
}

gboolean
mono_gc_register_thread (void *baseptr)
{
	return mono_thread_info_attach (baseptr) != NULL;
}
gboolean
mono_gc_is_gc_thread (void)
{
	gboolean result;
	LOCK_GC;
	result = mono_thread_info_current () != NULL;
	UNLOCK_GC;
	return result;
}

void
sgen_client_thread_register_worker (void)
{
	mono_thread_info_register_small_id ();
	mono_native_thread_set_name (mono_native_thread_id_get (), "SGen worker");
}

/* Variables holding start/end nursery so it won't have to be passed at every call */
static void *scan_area_arg_start, *scan_area_arg_end;
void
mono_gc_conservatively_scan_area (void *start, void *end)
{
	sgen_conservatively_pin_objects_from ((void **)start, (void **)end, scan_area_arg_start, scan_area_arg_end, PIN_TYPE_STACK);
}

void*
mono_gc_scan_object (void *obj, void *gc_data)
{
	ScanCopyContext *ctx = (ScanCopyContext *)gc_data;
	ctx->ops->copy_or_mark_object ((GCObject **)&obj, ctx->queue);
	return obj;
}
/*
 * Mark from thread stacks and registers.
 */
void
sgen_client_scan_thread_data (void *start_nursery, void *end_nursery, gboolean precise, ScanCopyContext ctx)
{
	scan_area_arg_start = start_nursery;
	scan_area_arg_end = end_nursery;

	FOREACH_THREAD (info) {
		int skip_reason = 0;
		void *aligned_stack_start;

		if (info->client_info.skip) {
			SGEN_LOG (3, "Skipping dead thread %p, range: %p-%p, size: %zd", info, info->client_info.stack_start, info->client_info.stack_end, (char*)info->client_info.stack_end - (char*)info->client_info.stack_start);
			skip_reason = 1;
		} else if (info->client_info.gc_disabled) {
			SGEN_LOG (3, "GC disabled for thread %p, range: %p-%p, size: %zd", info, info->client_info.stack_start, info->client_info.stack_end, (char*)info->client_info.stack_end - (char*)info->client_info.stack_start);
			skip_reason = 2;
		} else if (!mono_thread_info_is_live (info)) {
			SGEN_LOG (3, "Skipping non-running thread %p, range: %p-%p, size: %zd (state %x)", info, info->client_info.stack_start, info->client_info.stack_end, (char*)info->client_info.stack_end - (char*)info->client_info.stack_start, info->client_info.info.thread_state);
			skip_reason = 3;
		} else if (!info->client_info.stack_start) {
			SGEN_LOG (3, "Skipping starting or detaching thread %p", info);
			skip_reason = 4;
		}

		binary_protocol_scan_stack ((gpointer)mono_thread_info_get_tid (info), info->client_info.stack_start, info->client_info.stack_end, skip_reason);

		if (skip_reason)
			continue;

		g_assert (info->client_info.stack_start);
		g_assert (info->client_info.stack_end);

		aligned_stack_start = (void*)(mword) ALIGN_TO ((mword)info->client_info.stack_start, SIZEOF_VOID_P);
#ifdef HOST_WIN32
		/* Windows uses a guard page before the committed stack memory pages to detect when the
		   stack needs to be grown. If we suspend a thread just after a function prolog has
		   decremented the stack pointer to point into the guard page but before the thread has
		   been able to read or write to that page, starting the stack scan at aligned_stack_start
		   will raise a STATUS_GUARD_PAGE_VIOLATION and the process will crash. This code uses
		   VirtualQuery() to determine whether stack_start points into the guard page and then
		   updates aligned_stack_start to point at the next non-guard page. */
		MEMORY_BASIC_INFORMATION mem_info;
		SIZE_T result = VirtualQuery (info->client_info.stack_start, &mem_info, sizeof (mem_info));
		g_assert (result != 0);
		if (mem_info.Protect & PAGE_GUARD) {
			aligned_stack_start = ((char*) mem_info.BaseAddress) + mem_info.RegionSize;
		}
#endif

		g_assert (info->client_info.suspend_done);
		SGEN_LOG (3, "Scanning thread %p, range: %p-%p, size: %zd, pinned=%zd", info, info->client_info.stack_start, info->client_info.stack_end, (char*)info->client_info.stack_end - (char*)info->client_info.stack_start, sgen_get_pinned_count ());
		if (mono_gc_get_gc_callbacks ()->thread_mark_func && !conservative_stack_mark) {
			mono_gc_get_gc_callbacks ()->thread_mark_func (info->client_info.runtime_data, (guint8 *)aligned_stack_start, (guint8 *)info->client_info.stack_end, precise, &ctx);
		} else if (!precise) {
			if (!conservative_stack_mark) {
				fprintf (stderr, "Precise stack mark not supported - disabling.\n");
				conservative_stack_mark = TRUE;
			}
			//FIXME we should eventually use the new stack_mark from coop
			sgen_conservatively_pin_objects_from ((void **)aligned_stack_start, (void **)info->client_info.stack_end, start_nursery, end_nursery, PIN_TYPE_STACK);
		}

		if (!precise) {
			sgen_conservatively_pin_objects_from ((void**)&info->client_info.ctx, (void**)(&info->client_info.ctx + 1),
				start_nursery, end_nursery, PIN_TYPE_STACK);

			{
				// This is used on Coop GC for platforms where we cannot get the data for individual registers.
				// We force a spill of all registers into the stack and pass a chunk of data into sgen.
				//FIXME under coop, for now, what we need to ensure is that we scan any extra memory from info->client_info.stack_end to stack_mark
				MonoThreadUnwindState *state = &info->client_info.info.thread_saved_state [SELF_SUSPEND_STATE_INDEX];
				if (state && state->gc_stackdata) {
					sgen_conservatively_pin_objects_from ((void **)state->gc_stackdata, (void**)((char*)state->gc_stackdata + state->gc_stackdata_size),
						start_nursery, end_nursery, PIN_TYPE_STACK);
				}
			}
		}
		if (precise && info->client_info.info.handle_stack) {
			mono_handle_stack_scan ((HandleStack*)info->client_info.info.handle_stack, (GcScanFunc)ctx.ops->copy_or_mark_object, ctx.queue);
		}
	} FOREACH_THREAD_END
}
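
/*
 * Worked example for the ALIGN_TO rounding used above: with SIZEOF_VOID_P == 8,
 * ALIGN_TO (0x1003, 8) == (0x1003 + 7) & ~7 == 0x1008, while an already aligned
 * 0x1008 stays 0x1008, so aligned_stack_start never falls below stack_start.
 */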
/*
 * mono_gc_set_stack_end:
 *
 *   Set the end of the current thread's stack to STACK_END. The stack space between
 * STACK_END and the real end of the thread's stack will not be scanned during collections.
 */
void
mono_gc_set_stack_end (void *stack_end)
{
	SgenThreadInfo *info;

	LOCK_GC;
	info = mono_thread_info_current ();
	if (info) {
		SGEN_ASSERT (0, stack_end < info->client_info.stack_end, "Can only lower stack end");
		info->client_info.stack_end = stack_end;
	}
	UNLOCK_GC;
}
int
mono_gc_register_root (char *start, size_t size, MonoGCDescriptor descr, MonoGCRootSource source, const char *msg)
{
	return sgen_register_root (start, size, descr, descr ? ROOT_TYPE_NORMAL : ROOT_TYPE_PINNED, source, msg);
}

int
mono_gc_register_root_wbarrier (char *start, size_t size, MonoGCDescriptor descr, MonoGCRootSource source, const char *msg)
{
	return sgen_register_root (start, size, descr, ROOT_TYPE_WBARRIER, source, msg);
}

void
mono_gc_deregister_root (char* addr)
{
	sgen_deregister_root (addr);
}
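
/*
 * Embedding-style sketch (hypothetical `example_*` names, not built here):
 * registering a word of native memory as a GC root.  Passing a NULL
 * descriptor takes the ROOT_TYPE_PINNED path above, so the slot is scanned
 * conservatively.  MONO_ROOT_SOURCE_EXTERNAL is assumed from metadata/mono-gc.h.
 */
#if 0
static MonoObject *example_root;

static void
example_add_root (void)
{
	mono_gc_register_root ((char *)&example_root, sizeof (example_root),
			NULL, MONO_ROOT_SOURCE_EXTERNAL, "example native root");
}

static void
example_remove_root (void)
{
	mono_gc_deregister_root ((char *)&example_root);
}
#endif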
#ifndef HOST_WIN32
int
mono_gc_pthread_create (pthread_t *new_thread, const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg)
{
	return pthread_create (new_thread, attr, start_routine, arg);
}
#endif

void
sgen_client_total_allocated_heap_changed (size_t allocated_heap)
{
	mono_runtime_resource_check_limit (MONO_RESOURCE_GC_HEAP, allocated_heap);
}
gboolean
mono_gc_user_markers_supported (void)
{
	return TRUE;
}

gboolean
mono_object_is_alive (MonoObject* o)
{
	return TRUE;
}

int
mono_gc_get_generation (MonoObject *obj)
{
	if (sgen_ptr_in_nursery (obj))
		return 0;
	return 1;
}

const char *
mono_gc_get_gc_name (void)
{
	return "sgen";
}

char *
mono_gc_get_description (void)
{
#ifdef HAVE_CONC_GC_AS_DEFAULT
	return g_strdup ("sgen (concurrent by default)");
#else
	return g_strdup ("sgen");
#endif
}

void
mono_gc_set_desktop_mode (void)
{
}

gboolean
mono_gc_is_moving (void)
{
	return TRUE;
}

gboolean
mono_gc_is_disabled (void)
{
	return FALSE;
}

#ifdef HOST_WIN32
BOOL APIENTRY mono_gc_dllmain (HMODULE module_handle, DWORD reason, LPVOID reserved)
{
	return TRUE;
}
#endif

int
mono_gc_max_generation (void)
{
	return 1;
}

gboolean
mono_gc_precise_stack_mark_enabled (void)
{
	return !conservative_stack_mark;
}
void
mono_gc_collect (int generation)
{
	sgen_gc_collect (generation);
}

int
mono_gc_collection_count (int generation)
{
	return sgen_gc_collection_count (generation);
}

int64_t
mono_gc_get_used_size (void)
{
	return (int64_t)sgen_gc_get_used_size ();
}

int64_t
mono_gc_get_heap_size (void)
{
	return (int64_t)sgen_gc_get_total_heap_allocation ();
}

MonoGCDescriptor
mono_gc_make_root_descr_user (MonoGCRootMarkFunc marker)
{
	return sgen_make_user_root_descriptor (marker);
}

MonoGCDescriptor
mono_gc_make_descr_for_string (gsize *bitmap, int numbits)
{
	return SGEN_DESC_STRING;
}

void*
mono_gc_get_nursery (int *shift_bits, size_t *size)
{
	*size = sgen_nursery_size;
	*shift_bits = DEFAULT_NURSERY_BITS;
	return sgen_get_nursery_start ();
}

int
mono_gc_get_los_limit (void)
{
	return SGEN_MAX_SMALL_OBJ_SIZE;
}
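
/*
 * Sketch of how a consumer (e.g. the JIT) can use the values returned by
 * mono_gc_get_nursery () for a fast in-nursery test.  It assumes the nursery
 * is a power-of-two sized, size-aligned region, which is how SGen sets it up
 * by default; `example_ptr_in_nursery` is a hypothetical helper.
 */
#if 0
static gboolean
example_ptr_in_nursery (void *p)
{
	int shift_bits;
	size_t size;
	char *start = (char *)mono_gc_get_nursery (&shift_bits, &size);

	/* Masking off the low `shift_bits` bits recovers the region start. */
	return ((mword)p & ~(((mword)1 << shift_bits) - 1)) == (mword)start;
}
#endif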
gpointer
sgen_client_default_metadata (void)
{
	return mono_domain_get ();
}

gpointer
sgen_client_metadata_for_object (GCObject *obj)
{
	return mono_object_domain (obj);
}

/**
 * mono_gchandle_is_in_domain:
 * @gchandle: a GCHandle's handle.
 * @domain: An application domain.
 *
 * Returns: TRUE if the object wrapped by the @gchandle belongs to the specific @domain.
 */
gboolean
mono_gchandle_is_in_domain (guint32 gchandle, MonoDomain *domain)
{
	MonoDomain *gchandle_domain = (MonoDomain *)sgen_gchandle_get_metadata (gchandle);
	return domain->domain_id == gchandle_domain->domain_id;
}
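
/*
 * Minimal usage sketch (hypothetical `example_*` name, not built here) using
 * the public GC handle API: the handle's metadata records the owning domain,
 * which is what the comparison above consults.
 */
#if 0
static gboolean
example_handle_owned_by (MonoObject *obj, MonoDomain *domain)
{
	guint32 handle = mono_gchandle_new (obj, FALSE);
	gboolean owned = mono_gchandle_is_in_domain (handle, domain);

	mono_gchandle_free (handle);
	return owned;
}
#endif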
/**
 * mono_gchandle_free_domain:
 * @unloading: domain that is unloading
 *
 * Function used internally to cleanup any GC handle for objects belonging
 * to the specified domain during appdomain unload.
 */
void
mono_gchandle_free_domain (MonoDomain *unloading)
{
}

static gpointer
null_link_if_in_domain (gpointer hidden, GCHandleType handle_type, int max_generation, gpointer user)
{
	MonoDomain *unloading_domain = (MonoDomain *)user;
	MonoDomain *obj_domain;
	gboolean is_weak = MONO_GC_HANDLE_TYPE_IS_WEAK (handle_type);
	if (MONO_GC_HANDLE_IS_OBJECT_POINTER (hidden)) {
		MonoObject *obj = (MonoObject *)MONO_GC_REVEAL_POINTER (hidden, is_weak);
		obj_domain = mono_object_domain (obj);
	} else {
		obj_domain = (MonoDomain *)MONO_GC_REVEAL_POINTER (hidden, is_weak);
	}
	if (unloading_domain->domain_id == obj_domain->domain_id)
		return NULL;
	return hidden;
}
void
sgen_null_links_for_domain (MonoDomain *domain)
{
	guint type;
	for (type = HANDLE_TYPE_MIN; type < HANDLE_TYPE_MAX; ++type)
		sgen_gchandle_iterate ((GCHandleType)type, GENERATION_OLD, null_link_if_in_domain, domain);
}

void
mono_gchandle_set_target (guint32 gchandle, MonoObject *obj)
{
	sgen_gchandle_set_target (gchandle, obj);
}
void
sgen_client_gchandle_created (int handle_type, GCObject *obj, guint32 handle)
{
#ifndef DISABLE_PERFCOUNTERS
	mono_perfcounters->gc_num_handles++;
#endif
	mono_profiler_gc_handle (MONO_PROFILER_GC_HANDLE_CREATED, handle_type, handle, obj);
}

void
sgen_client_gchandle_destroyed (int handle_type, guint32 handle)
{
#ifndef DISABLE_PERFCOUNTERS
	mono_perfcounters->gc_num_handles--;
#endif
	mono_profiler_gc_handle (MONO_PROFILER_GC_HANDLE_DESTROYED, handle_type, handle, NULL);
}

void
sgen_client_ensure_weak_gchandles_accessible (void)
{
	/*
	 * During the second bridge processing step the world is
	 * running again. That step processes all weak links once
	 * more to null those that refer to dead objects. Before that
	 * is completed, those links must not be followed, so we
	 * conservatively wait for bridge processing when any weak
	 * link is dereferenced.
	 */
	/* FIXME: A GC can occur after this check fails, in which case we
	 * should wait for bridge processing but would fail to do so.
	 */
	if (G_UNLIKELY (bridge_processing_in_progress))
		mono_gc_wait_for_bridge_processing ();
}
void*
mono_gc_invoke_with_gc_lock (MonoGCLockedCallbackFunc func, void *data)
{
	void *result;
	LOCK_INTERRUPTION;
	result = func (data);
	UNLOCK_INTERRUPTION;
	return result;
}

void
mono_gc_register_altstack (gpointer stack, gint32 stack_size, gpointer altstack, gint32 altstack_size)
{
}
guint8*
mono_gc_get_card_table (int *shift_bits, gpointer *mask)
{
	return sgen_get_card_table_configuration (shift_bits, mask);
}

gboolean
mono_gc_card_table_nursery_check (void)
{
	return !sgen_get_major_collector ()->is_concurrent;
}
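
/*
 * Sketch of the card-marking sequence a generated write barrier performs
 * with the values returned above (hypothetical helper, not built here):
 * shift the slot address, optionally wrap with the mask, and dirty one byte.
 */
#if 0
static void
example_mark_card (gpointer *slot, gpointer value)
{
	int shift_bits;
	gpointer mask;
	guint8 *cards = mono_gc_get_card_table (&shift_bits, &mask);
	size_t offset = (size_t)slot >> shift_bits;

	if (mask)
		offset &= (size_t)mask;
	*slot = value;
	cards [offset] = 1;	/* any non-zero byte marks the card dirty */
}
#endif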
/* Negative value to remove */
void
mono_gc_add_memory_pressure (gint64 value)
{
	/* FIXME: Implement at some point? */
}
void
sgen_client_degraded_allocation (size_t size)
{
	static int last_major_gc_warned = -1;
	static int num_degraded = 0;

	if (last_major_gc_warned < (int)gc_stats.major_gc_count) {
		++num_degraded;
		if (num_degraded == 1 || num_degraded == 3)
			mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_GC, "Warning: Degraded allocation. Consider increasing nursery-size if the warning persists.");
		else if (num_degraded == 10)
			mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_GC, "Warning: Repeated degraded allocation. Consider increasing nursery-size.");
		last_major_gc_warned = gc_stats.major_gc_count;
	}
}
const char *
sgen_client_description_for_internal_mem_type (int type)
{
	switch (type) {
	case INTERNAL_MEM_EPHEMERON_LINK: return "ephemeron-link";
	case INTERNAL_MEM_MOVED_OBJECT: return "moved-object";
	default:
		return NULL;
	}
}

void
sgen_client_pre_collection_checks (void)
{
	if (sgen_mono_xdomain_checks) {
		sgen_clear_nursery_fragments ();
		sgen_check_for_xdomain_refs ();
	}
}
gboolean
sgen_client_vtable_is_inited (MonoVTable *vt)
{
	return vt->klass->inited;
}

const char*
sgen_client_vtable_get_namespace (MonoVTable *vt)
{
	return vt->klass->name_space;
}

const char*
sgen_client_vtable_get_name (MonoVTable *vt)
{
	return vt->klass->name;
}
void
sgen_client_init (void)
{
	int dummy;
	MonoThreadInfoCallbacks cb;

	cb.thread_register = sgen_thread_register;
	cb.thread_detach = sgen_thread_detach;
	cb.thread_unregister = sgen_thread_unregister;
	cb.thread_attach = sgen_thread_attach;
	cb.mono_method_is_critical = (gboolean (*)(void *))is_critical_method;
	cb.mono_thread_in_critical_region = thread_in_critical_region;

	mono_threads_init (&cb, sizeof (SgenThreadInfo));

	///* Keep this the default for now */
	/* Precise marking is broken on all supported targets. Disable until fixed. */
	conservative_stack_mark = TRUE;

	sgen_register_fixed_internal_mem_type (INTERNAL_MEM_EPHEMERON_LINK, sizeof (EphemeronLinkNode));

	mono_sgen_init_stw ();

#ifndef HAVE_KW_THREAD
	mono_native_tls_alloc (&thread_info_key, NULL);
#if defined(TARGET_OSX) || defined(TARGET_WIN32) || defined(TARGET_ANDROID) || defined(TARGET_IOS)
	/*
	 * CEE_MONO_TLS requires the tls offset, not the key, so the code below only works on darwin,
	 * where the two are the same.
	 */
	mono_tls_key_set_offset (TLS_KEY_SGEN_THREAD_INFO, thread_info_key);
#endif
#else
	{
		int tls_offset = -1;
		MONO_THREAD_VAR_OFFSET (sgen_thread_info, tls_offset);
		mono_tls_key_set_offset (TLS_KEY_SGEN_THREAD_INFO, tls_offset);
	}
#endif

	/*
	 * This needs to happen before any internal allocations because
	 * it inits the small id which is required for hazard pointer
	 * operations.
	 */
	sgen_os_init ();

	mono_gc_register_thread (&dummy);
}
gboolean
sgen_client_handle_gc_param (const char *opt)
{
	if (g_str_has_prefix (opt, "stack-mark=")) {
		opt = strchr (opt, '=') + 1;
		if (!strcmp (opt, "precise")) {
			conservative_stack_mark = FALSE;
		} else if (!strcmp (opt, "conservative")) {
			conservative_stack_mark = TRUE;
		} else {
			sgen_env_var_error (MONO_GC_PARAMS_NAME, conservative_stack_mark ? "Using `conservative`." : "Using `precise`.",
					"Invalid value `%s` for `stack-mark` option, possible values are: `precise`, `conservative`.", opt);
		}
	} else if (g_str_has_prefix (opt, "bridge-implementation=")) {
		opt = strchr (opt, '=') + 1;
		sgen_set_bridge_implementation (opt);
	} else if (g_str_has_prefix (opt, "toggleref-test")) {
		/* FIXME: This should probably be in MONO_GC_DEBUG. */
		sgen_register_test_toggleref_callback ();
	} else if (!sgen_bridge_handle_gc_param (opt)) {
		return FALSE;
	}

	return TRUE;
}
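
/*
 * Example: the options parsed above arrive through the MONO_GC_PARAMS
 * environment variable and can be combined, e.g.
 *
 *   MONO_GC_PARAMS=stack-mark=precise,bridge-implementation=tarjan mono app.exe
 */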
void
sgen_client_print_gc_params_usage (void)
{
	fprintf (stderr, " stack-mark=MARK-METHOD (where MARK-METHOD is 'precise' or 'conservative')\n");
}
gboolean
sgen_client_handle_gc_debug (const char *opt)
{
	if (!strcmp (opt, "xdomain-checks")) {
		sgen_mono_xdomain_checks = TRUE;
	} else if (!strcmp (opt, "do-not-finalize")) {
		mono_do_not_finalize = TRUE;
	} else if (g_str_has_prefix (opt, "do-not-finalize=")) {
		opt = strchr (opt, '=') + 1;
		mono_do_not_finalize = TRUE;
		mono_do_not_finalize_class_names = g_strsplit (opt, ",", 0);
	} else if (!strcmp (opt, "log-finalizers")) {
		log_finalizers = TRUE;
	} else if (!strcmp (opt, "no-managed-allocator")) {
		sgen_set_use_managed_allocator (FALSE);
	} else if (!sgen_bridge_handle_gc_debug (opt)) {
		return FALSE;
	}

	return TRUE;
}
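
/*
 * Example: these debug options arrive through the MONO_GC_DEBUG environment
 * variable and can be combined, e.g.
 *
 *   MONO_GC_DEBUG=xdomain-checks,do-not-finalize=MyNamespace.MyClass mono app.exe
 */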
void
sgen_client_print_gc_debug_usage (void)
{
	fprintf (stderr, " xdomain-checks\n");
	fprintf (stderr, " do-not-finalize\n");
	fprintf (stderr, " log-finalizers\n");
	fprintf (stderr, " no-managed-allocator\n");
	sgen_bridge_print_gc_debug_usage ();
}
gpointer
sgen_client_get_provenance (void)
{
#ifdef SGEN_OBJECT_PROVENANCE
	MonoGCCallbacks *cb = mono_gc_get_gc_callbacks ();
	gpointer (*get_provenance_func) (void);
	if (!cb)
		return NULL;
	get_provenance_func = cb->get_provenance_func;
	if (get_provenance_func)
		return get_provenance_func ();
	return NULL;
#else
	return NULL;
#endif
}

void
sgen_client_describe_invalid_pointer (GCObject *ptr)
{
	sgen_bridge_describe_pointer (ptr);
}
static gboolean gc_inited;

void
mono_gc_base_init (void)
{
	if (gc_inited)
		return;

	mono_counters_init ();

#ifdef HOST_WIN32
	mono_w32handle_init ();
#endif

#ifdef HEAVY_STATISTICS
	mono_counters_register ("los marked cards", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &los_marked_cards);
	mono_counters_register ("los array cards scanned ", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &los_array_cards);
	mono_counters_register ("los array remsets", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &los_array_remsets);

	mono_counters_register ("WBarrier set arrayref", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_wbarrier_set_arrayref);
	mono_counters_register ("WBarrier value copy", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_wbarrier_value_copy);
	mono_counters_register ("WBarrier object copy", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_wbarrier_object_copy);
#endif

	sgen_gc_init ();

	if (nursery_canaries_enabled ())
		sgen_set_use_managed_allocator (FALSE);

#if defined(HAVE_KW_THREAD)
	/* This can happen with using libmonosgen.so */
	if (mono_tls_key_get_offset (TLS_KEY_SGEN_THREAD_INFO) == -1)
		sgen_set_use_managed_allocator (FALSE);
#endif

	gc_inited = TRUE;
}
void
mono_gc_base_cleanup (void)
{
	sgen_thread_pool_shutdown ();

	// We should have consumed any outstanding moves.
	g_assert (sgen_pointer_queue_is_empty (&moved_objects_queue));
}

gboolean
mono_gc_is_null (void)
{
	return FALSE;
}