2 * sgen-fin-weak-hash.c: Finalizers and weak links.
5 * Paolo Molaro (lupus@ximian.com)
6 * Rodrigo Kumpera (kumpera@gmail.com)
8 * Copyright 2005-2011 Novell, Inc (http://www.novell.com)
9 * Copyright 2011 Xamarin Inc (http://www.xamarin.com)
10 * Copyright 2011 Xamarin, Inc.
11 * Copyright (C) 2012 Xamarin Inc
13 * This library is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU Library General Public
15 * License 2.0 as published by the Free Software Foundation;
17 * This library is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * Library General Public License for more details.
22 * You should have received a copy of the GNU Library General Public
23 * License 2.0 along with this library; if not, write to the Free
24 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
30 #include "metadata/sgen-gc.h"
31 #include "metadata/sgen-gray.h"
32 #include "metadata/sgen-protocol.h"
33 #include "utils/dtrace.h"
34 #include "utils/mono-counters.h"
/* Alias the nursery check to the shorter name used throughout this file. */
#define ptr_in_nursery sgen_ptr_in_nursery

typedef SgenGrayQueue GrayQueue;

/* Number of objects queued for finalization and not yet finalized. */
int num_ready_finalizers = 0;
/* When nonzero, finalization is disabled entirely. */
static int no_finalize = 0;

/* A disappearing link stores the pointed-to object in hidden (bit-inverted)
 * form; bit 0 of the hidden word is the "track" flag (see DISLINK_TRACK and
 * its use in sgen_null_link_in_range below). */
#define DISLINK_OBJECT(l) (REVEAL_POINTER (*(void**)(l)))
#define DISLINK_TRACK(l) ((~(size_t)(*(void**)(l))) & 1)

/*
 * The finalizable hash has the object as the key; the
 * disappearing_link hash has the link address as the key.
 */

/* Low pointer bit used to tag finalizable-hash keys (see BRIDGE_OBJECT_MARKED). */
#define TAG_MASK ((mword)0x1)
55 static inline MonoObject
*
56 tagged_object_get_object (MonoObject
*object
)
58 return (MonoObject
*)(((mword
)object
) & ~TAG_MASK
);
62 tagged_object_get_tag (MonoObject
*object
)
64 return ((mword
)object
) & TAG_MASK
;
67 static inline MonoObject
*
68 tagged_object_apply (void *object
, int tag_bits
)
70 return (MonoObject
*)((mword
)object
| (mword
)tag_bits
);
74 tagged_object_hash (MonoObject
*o
)
76 return mono_object_hash (tagged_object_get_object (o
));
80 tagged_object_equals (MonoObject
*a
, MonoObject
*b
)
82 return tagged_object_get_object (a
) == tagged_object_get_object (b
);
/*
 * Per-generation finalizable-object tables.  Keys are (possibly tagged)
 * object pointers; the `0` argument is presumably the per-entry data size
 * — confirm against SGEN_HASH_TABLE_INIT.
 */
static SgenHashTable minor_finalizable_hash = SGEN_HASH_TABLE_INIT (INTERNAL_MEM_FIN_TABLE, INTERNAL_MEM_FINALIZE_ENTRY, 0, (GHashFunc)tagged_object_hash, (GEqualFunc)tagged_object_equals);
static SgenHashTable major_finalizable_hash = SGEN_HASH_TABLE_INIT (INTERNAL_MEM_FIN_TABLE, INTERNAL_MEM_FINALIZE_ENTRY, 0, (GHashFunc)tagged_object_hash, (GEqualFunc)tagged_object_equals);
89 get_finalize_entry_hash_table (int generation
)
92 case GENERATION_NURSERY
: return &minor_finalizable_hash
;
93 case GENERATION_OLD
: return &major_finalizable_hash
;
94 default: g_assert_not_reached ();
98 #define BRIDGE_OBJECT_MARKED 0x1
100 /* LOCKING: requires that the GC lock is held */
102 sgen_mark_bridge_object (MonoObject
*obj
)
104 SgenHashTable
*hash_table
= get_finalize_entry_hash_table (ptr_in_nursery (obj
) ? GENERATION_NURSERY
: GENERATION_OLD
);
106 sgen_hash_table_set_key (hash_table
, obj
, tagged_object_apply (obj
, BRIDGE_OBJECT_MARKED
));
/* LOCKING: requires that the GC lock is held */
/*
 * Walk the finalizable table for `generation`, hand dead bridge objects to
 * the bridge processor, and promote minor-table entries whose copy left
 * the nursery.
 *
 * NOTE(review): lines appear lost in extraction — the `void` return type,
 * the declarations of `object`/`dummy`/`copy`, the `continue;` bodies of
 * the guard conditions below, and the closing brace (plus `continue;`) of
 * the promotion branch.  Restore from upstream before compiling.
 */
sgen_collect_bridge_objects (int generation, ScanCopyContext ctx)
	CopyOrMarkObjectFunc copy_func = ctx.copy_func;
	GrayQueue *queue = ctx.queue;
	SgenHashTable *hash_table = get_finalize_entry_hash_table (generation);

	SGEN_HASH_TABLE_FOREACH (hash_table, object, dummy) {
		int tag = tagged_object_get_tag (object);
		object = tagged_object_get_object (object);

		/* Bridge code told us to ignore this one */
		if (tag == BRIDGE_OBJECT_MARKED)

		/* Object is a bridge object and major heap says it's dead */
		if (major_collector.is_object_live ((char*)object))

		/* Nursery says the object is dead. */
		if (!sgen_gc_is_object_ready_for_finalization (object))

		/* Not a bridge object — leave it for ordinary finalization. */
		if (!sgen_is_bridge_object (object))

		/* Copy (and thereby keep alive) the object for bridge processing. */
		copy = (char*)object;
		copy_func ((void**)&copy, queue);

		sgen_bridge_register_finalized_object ((MonoObject *)copy);

		if (hash_table == &minor_finalizable_hash && !ptr_in_nursery (copy)) {
			/* remove from the list */
			SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);

			/* insert it into the major hash */
			sgen_hash_table_replace (&major_finalizable_hash, tagged_object_apply (copy, tag), NULL, NULL);

			SGEN_LOG (5, "Promoting finalization of object %p (%s) (was at %p) to major table", copy, sgen_safe_name (copy), object);

		/* Entry stays in this table: refresh the (possibly moved) key. */
		SGEN_LOG (5, "Updating object for finalization: %p (%s) (was at %p)", copy, sgen_safe_name (copy), object);
		SGEN_HASH_TABLE_FOREACH_SET_KEY (tagged_object_apply (copy, tag));
	} SGEN_HASH_TABLE_FOREACH_END;
/* LOCKING: requires that the GC lock is held */
/*
 * Scan the finalizable table for `generation`: queue objects that are dead
 * and ready for finalization (keeping them alive via `copy_func`), and
 * promote minor-table entries whose copy left the nursery.
 *
 * NOTE(review): lines appear lost in extraction — the `void` return type,
 * the declarations of `object`/`dummy`, the branching on `is_fin_ready`
 * (which is assigned but never tested in the surviving text), a
 * `continue;` in the promotion branch, and several closing braces.
 * Restore from upstream before compiling.
 */
sgen_finalize_in_range (int generation, ScanCopyContext ctx)
	CopyOrMarkObjectFunc copy_func = ctx.copy_func;
	GrayQueue *queue = ctx.queue;
	SgenHashTable *hash_table = get_finalize_entry_hash_table (generation);

	SGEN_HASH_TABLE_FOREACH (hash_table, object, dummy) {
		int tag = tagged_object_get_tag (object);
		object = tagged_object_get_object (object);
		if (!major_collector.is_object_live ((char*)object)) {
			gboolean is_fin_ready = sgen_gc_is_object_ready_for_finalization (object);
			MonoObject *copy = object;
			copy_func ((void**)&copy, queue);

			/* remove and put in fin_ready_list */
			SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
			num_ready_finalizers++;
			sgen_queue_finalization_entry (copy);
			/* Make it survive */
			SGEN_LOG (5, "Queueing object for finalization: %p (%s) (was at %p) (%d/%d)", copy, sgen_safe_name (copy), object, num_ready_finalizers, sgen_hash_table_num_entries (hash_table));

			if (hash_table == &minor_finalizable_hash && !ptr_in_nursery (copy)) {
				/* remove from the list */
				SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);

				/* insert it into the major hash */
				sgen_hash_table_replace (&major_finalizable_hash, tagged_object_apply (copy, tag), NULL, NULL);

				SGEN_LOG (5, "Promoting finalization of object %p (%s) (was at %p) to major table", copy, sgen_safe_name (copy), object);

			/* Entry stays in this table: refresh the (possibly moved) key. */
			SGEN_LOG (5, "Updating object for finalization: %p (%s) (was at %p)", copy, sgen_safe_name (copy), object);
			SGEN_HASH_TABLE_FOREACH_SET_KEY (tagged_object_apply (copy, tag));
	} SGEN_HASH_TABLE_FOREACH_END;
/* LOCKING: requires that the GC lock is held */
/*
 * Register or unregister `obj` for finalization in the table for
 * `generation`.  `user_data` may only be NULL or mono_gc_run_finalize
 * (asserted below); callers pass NULL to unregister.
 *
 * NOTE(review): the `static void` header, braces, and the
 * `if (user_data) { … } else { … }` branching between the replace and
 * remove calls appear to have been lost in extraction — as written, the
 * code would both add and remove the entry.  Restore from upstream.
 */
register_for_finalization (MonoObject *obj, void *user_data, int generation)
	SgenHashTable *hash_table = get_finalize_entry_hash_table (generation);

	g_assert (user_data == NULL || user_data == mono_gc_run_finalize);

	if (sgen_hash_table_replace (hash_table, obj, NULL, NULL))
		SGEN_LOG (5, "Added finalizer for object: %p (%s) (%d) to %s table", obj, obj->vtable->klass->name, hash_table->num_entries, sgen_generation_name (generation));
	if (sgen_hash_table_remove (hash_table, obj, NULL))
		SGEN_LOG (5, "Removed finalizer for object: %p (%s) (%d)", obj, obj->vtable->klass->name, hash_table->num_entries);
235 * We're using (mostly) non-locking staging queues for finalizers and weak links to speed
236 * up registering them. Otherwise we'd have to take the GC lock.
238 * The queues are arrays of `StageEntry`, plus a `next_entry` index. Threads add entries to
239 * the queue via `add_stage_entry()` in a linear fashion until it fills up, in which case
240 * `process_stage_entries()` is called to drain it. A garbage collection will also drain
241 * the queues via the same function. That implies that `add_stage_entry()`, since it
242 * doesn't take a lock, must be able to run concurrently with `process_stage_entries()`,
243 * though it doesn't have to make progress while the queue is drained. In fact, once it
244 * detects that the queue is being drained, it blocks until the draining is done.
246 * The protocol must guarantee that entries in the queue are causally ordered, otherwise two
247 * entries for the same location might get switched, resulting in the earlier one being
248 * committed and the later one ignored.
250 * `next_entry` is the index of the next entry to be filled, or `-1` if the queue is
251 * currently being drained. Each entry has a state:
253 * `STAGE_ENTRY_FREE`: The entry is free. Its data fields must be `NULL`.
255 * `STAGE_ENTRY_BUSY`: The entry is currently being filled in.
257 * `STAGE_ENTRY_USED`: The entry is completely filled in and must be processed in the next
260 * `STAGE_ENTRY_INVALID`: The entry was busy during queue draining and therefore
261 * invalidated. Entries that are `BUSY` can obviously not be processed during a drain, but
262 * we can't leave them in place because new entries might be inserted before them, including
263 * from the same thread, violating causality. An alternative would be not to reset
264 * `next_entry` to `0` after a drain, but to the index of the last `BUSY` entry plus one,
265 * but that can potentially waste the whole queue.
269 * | from | to | filler? | drainer? |
270 * +---------+---------+---------+----------+
271 * | FREE | BUSY | X | |
272 * | BUSY | FREE | X | |
273 * | BUSY | USED | X | |
274 * | BUSY | INVALID | | X |
275 * | USED | FREE | | X |
276 * | INVALID | FREE | X | |
278 * `next_entry` can be incremented either by the filler thread that set the corresponding
279 * entry to `BUSY`, or by another filler thread that's trying to get a `FREE` slot. If that
280 * other thread wasn't allowed to increment, it would block on the first filler thread.
282 * An entry's state, once it's set from `FREE` to `BUSY` by a filler thread, can only be
283 * changed by that same thread or by the drainer. The drainer can only set a `BUSY` entry
284 * to `INVALID`, so it needs to be set to `FREE` again by the original filler thread.
/* States of a staging-queue entry; see the transition table in the comment above. */
#define STAGE_ENTRY_FREE 0
#define STAGE_ENTRY_BUSY 1
#define STAGE_ENTRY_USED 2
#define STAGE_ENTRY_INVALID 3

/* NOTE(review): the StageEntry struct declaration is truncated here — only
 * the `state` field survived extraction; the `obj` and `user_data` fields
 * used by add_stage_entry()/process_stage_entries() are missing, as are
 * the `typedef struct { … } StageEntry;` wrapper lines.  Restore from
 * upstream. */
	volatile gint32 state;

#define NUM_FIN_STAGE_ENTRIES 1024

/* Staging queue for finalizer (un)registrations; see the protocol comment above. */
static volatile gint32 next_fin_stage_entry = 0;
static StageEntry fin_stage_entries [NUM_FIN_STAGE_ENTRIES];
/*
 * This is used to lock the stage when processing is forced, i.e. when it's
 * triggered by a garbage collection.  In that case, the world is already
 * stopped and there's only one thread operating on the queue.
 *
 * NOTE(review): the return type, braces, and body were lost in extraction.
 * Per the queue protocol above it should set `*next_entry` to -1 to mark
 * the queue as draining — confirm against upstream.
 */
lock_stage_for_processing (volatile gint32 *next_entry)
/*
 * When processing is triggered by an overflow, we don't want to take the GC lock
 * immediately, and then set `next_index` to `-1`, because another thread might have drained
 * the queue in the mean time.  Instead, we make sure the overflow is still there, we
 * atomically set `next_index`, and only once that happened do we take the GC lock.
 *
 * NOTE(review): the (presumably `static gboolean`) header, braces, and the
 * early `return FALSE;` body of the overflow re-check were lost in
 * extraction — restore from upstream.
 */
try_lock_stage_for_processing (int num_entries, volatile gint32 *next_entry)
	gint32 old = *next_entry;
	/* Someone already drained the queue: the overflow is gone. */
	if (old < num_entries)
	/* Atomically claim the queue for draining by setting the index to -1. */
	return InterlockedCompareExchange (next_entry, -1, old) == old;
/* LOCKING: requires that the GC lock is held */
/*
 * Drain the staging queue: invoke `process_func` on every USED entry, mark
 * BUSY entries INVALID (their filler will reset them to FREE), and reset
 * the queue for reuse.
 *
 * NOTE(review): lost in extraction — the (presumably `static void`) header,
 * the declarations of `i` and `state`, the `switch (state)` header with its
 * `continue`/`break` statements and braces, and the final reset of
 * `*next_entry` after the loop.  Restore from upstream.
 */
process_stage_entries (int num_entries, volatile gint32 *next_entry, StageEntry *entries, void (*process_func) (MonoObject *, void*, int))
	/*
	 * This can happen if after setting `next_index` to `-1` in
	 * `try_lock_stage_for_processing()`, a GC was triggered, which then drained the
	 * queue and reset `next_entry`.
	 *
	 * We have the GC lock now, so if it's still `-1`, we can't be interrupted by a GC.
	 */
	if (*next_entry != -1)

	for (i = 0; i < num_entries; ++i) {
		state = entries [i].state;

		case STAGE_ENTRY_FREE:
		case STAGE_ENTRY_INVALID:
		case STAGE_ENTRY_BUSY:
			/* BUSY -> INVALID */
			/*
			 * This must be done atomically, because the filler thread can set
			 * the entry to `USED`, in which case we must process it, so we must
			 * detect that eventuality.
			 */
			if (InterlockedCompareExchange (&entries [i].state, STAGE_ENTRY_INVALID, STAGE_ENTRY_BUSY) != STAGE_ENTRY_BUSY)
		case STAGE_ENTRY_USED:
			SGEN_ASSERT (0, FALSE, "Invalid stage entry state");

		/* USED entry: hand it to the processor, then clear and free it. */
		process_func (entries [i].obj, entries [i].user_data, i);

		entries [i].obj = NULL;
		entries [i].user_data = NULL;

		mono_memory_write_barrier ();

		/*
		 * This transition only happens here, so we don't have to do it atomically.
		 */
		entries [i].state = STAGE_ENTRY_FREE;

	mono_memory_write_barrier ();
#ifdef HEAVY_STATISTICS
/* Staging-queue event counters, registered with mono_counters in
 * sgen_init_fin_weak_hash() below.
 * NOTE(review): the matching #endif for this #ifdef was lost in extraction. */
static long long stat_overflow_abort = 0;
static long long stat_wait_for_processing = 0;
static long long stat_increment_other_thread = 0;
static long long stat_index_decremented = 0;
static long long stat_entry_invalidated = 0;
static long long stat_success = 0;
/*
 * Add (obj, user_data) to a staging queue — the filler side of the
 * lock-free protocol described above.  Returns the entry index on success
 * and -1 on overflow so the caller can drain and retry (see the `== -1`
 * loops at the call sites).
 *
 * NOTE(review): lost in extraction — the (presumably `static gint32`)
 * header, the retry label/loop that (re)reads `index = *next_entry`, the
 * `return -1;` on overflow, the back-off sleep in the wait loop, the
 * restart jumps after the recovery paths, the `return index;` on success,
 * and various braces.  Restore from upstream before compiling.
 */
add_stage_entry (int num_entries, volatile gint32 *next_entry, StageEntry *entries, MonoObject *obj, void *user_data)
	gint32 index, new_next_entry, old_next_entry;
	gint32 previous_state;

	/* Queue full: tell the caller to drain it. */
	if (index >= num_entries) {
		HEAVY_STAT (++stat_overflow_abort);

	/* Queue is being drained (index < 0): wait until that's done. */
	/*
	 * Backed-off waiting is way more efficient than even using a
	 * dedicated lock for this.
	 */
	while ((index = *next_entry) < 0) {
		/*
		 * This seems like a good value.  Determined by timing
		 * sgen-weakref-stress.exe.
		 */
		HEAVY_STAT (++stat_wait_for_processing);

	/* FREE -> BUSY: try to claim the slot at `index`. */
	if (entries [index].state != STAGE_ENTRY_FREE ||
			InterlockedCompareExchange (&entries [index].state, STAGE_ENTRY_BUSY, STAGE_ENTRY_FREE) != STAGE_ENTRY_FREE) {
		/*
		 * If we can't get the entry it must be because another thread got
		 * it first.  We don't want to wait for that thread to increment
		 * `next_entry`, so we try to do it ourselves.  Whether we succeed
		 * or not, we start over.
		 */
		if (*next_entry == index) {
			InterlockedCompareExchange (next_entry, index + 1, index);
			//g_print ("tried increment for other thread\n");
			HEAVY_STAT (++stat_increment_other_thread);

	/* state is BUSY now */
	mono_memory_write_barrier ();

	/*
	 * Incrementing `next_entry` must happen after setting the state to `BUSY`.
	 * If it were the other way around, it would be possible that after a filler
	 * incremented the index, other threads fill up the queue, the queue is
	 * drained, the original filler finally fills in the slot, but `next_entry`
	 * ends up at the start of the queue, and new entries are written in the
	 * queue in front of, not behind, the original filler's entry.
	 *
	 * We don't actually require that the CAS succeeds, but we do require that
	 * the value of `next_entry` is not lower than our index.  Since the drainer
	 * sets it to `-1`, that also takes care of the case that the drainer is
	 * draining the queue right now.
	 */
	old_next_entry = InterlockedCompareExchange (next_entry, index + 1, index);
	if (old_next_entry < index) {
		/* INVALID -> FREE */
		/*
		 * The state might still be `BUSY`, or the drainer could have set it
		 * to `INVALID`.  In either case, there's no point in CASing.  Set
		 * it to `FREE` and start over.
		 */
		entries [index].state = STAGE_ENTRY_FREE;
		HEAVY_STAT (++stat_index_decremented);

	SGEN_ASSERT (0, index >= 0 && index < num_entries, "Invalid index");

	/* Publish the payload before attempting BUSY -> USED. */
	entries [index].obj = obj;
	entries [index].user_data = user_data;

	mono_memory_write_barrier ();

	new_next_entry = *next_entry;

	mono_memory_read_barrier ();

	/*
	 * A `BUSY` entry will either still be `BUSY` or the drainer will have set it to
	 * `INVALID`.  In the former case, we set it to `USED` and we're finished.  In the
	 * latter case, we reset it to `FREE` and start over.
	 */
	previous_state = InterlockedCompareExchange (&entries [index].state, STAGE_ENTRY_USED, STAGE_ENTRY_BUSY);
	if (previous_state == STAGE_ENTRY_BUSY) {
		SGEN_ASSERT (0, new_next_entry >= index || new_next_entry < 0, "Invalid next entry index - as long as we're busy, other thread can only increment or invalidate it");
		HEAVY_STAT (++stat_success);

	SGEN_ASSERT (0, previous_state == STAGE_ENTRY_INVALID, "Invalid state transition - other thread can only make busy state invalid");

	/* The drainer invalidated our entry: clear the payload and free it. */
	entries [index].obj = NULL;
	entries [index].user_data = NULL;

	mono_memory_write_barrier ();

	/* INVALID -> FREE */
	entries [index].state = STAGE_ENTRY_FREE;

	HEAVY_STAT (++stat_entry_invalidated);
511 /* LOCKING: requires that the GC lock is held */
513 process_fin_stage_entry (MonoObject
*obj
, void *user_data
, int index
)
515 if (ptr_in_nursery (obj
))
516 register_for_finalization (obj
, user_data
, GENERATION_NURSERY
);
518 register_for_finalization (obj
, user_data
, GENERATION_OLD
);
521 /* LOCKING: requires that the GC lock is held */
523 sgen_process_fin_stage_entries (void)
525 lock_stage_for_processing (&next_fin_stage_entry
);
526 process_stage_entries (NUM_FIN_STAGE_ENTRIES
, &next_fin_stage_entry
, fin_stage_entries
, process_fin_stage_entry
);
/*
 * Public entry point: stage a finalizer registration for `obj`.  If the
 * staging queue overflows (add_stage_entry returns -1), try to claim and
 * drain it, then retry.
 *
 * NOTE(review): the `void` return type, braces, a loop-exit after a
 * successful drain, and (presumably) the GC-lock acquisition around the
 * drain were lost in extraction — restore from upstream.
 */
mono_gc_register_for_finalization (MonoObject *obj, void *user_data)
	while (add_stage_entry (NUM_FIN_STAGE_ENTRIES, &next_fin_stage_entry, fin_stage_entries, obj, user_data) == -1) {
		if (try_lock_stage_for_processing (NUM_FIN_STAGE_ENTRIES, &next_fin_stage_entry)) {
			process_stage_entries (NUM_FIN_STAGE_ENTRIES, &next_fin_stage_entry, fin_stage_entries, process_fin_stage_entry);
/* LOCKING: requires that the GC lock is held */
/*
 * Move up to `out_size` finalizable objects belonging to `domain` from
 * `hash_table` into `out_array`, removing them from the table, and return
 * the count stored.
 *
 * NOTE(review): the (presumably `static int`) header, the declarations of
 * `object`/`dummy`/`count`, the early `return 0;`, the loop exit when the
 * array fills, and the final `return count;` were lost in extraction —
 * restore from upstream.
 */
finalizers_for_domain (MonoDomain *domain, MonoObject **out_array, int out_size,
		SgenHashTable *hash_table)
	/* Nothing to do if finalization is disabled or the output buffer is unusable. */
	if (no_finalize || !out_size || !out_array)

	SGEN_HASH_TABLE_FOREACH (hash_table, object, dummy) {
		object = tagged_object_get_object (object);

		if (mono_object_domain (object) == domain) {
			/* remove and put in out_array */
			SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
			out_array [count++] = object;
			SGEN_LOG (5, "Collecting object for finalization: %p (%s) (%d/%d)", object, sgen_safe_name (object), num_ready_finalizers, sgen_hash_table_num_entries (hash_table));
			if (count == out_size)
	} SGEN_HASH_TABLE_FOREACH_END;
570 * mono_gc_finalizers_for_domain:
571 * @domain: the unloading appdomain
572 * @out_array: output array
573 * @out_size: size of output array
575 * Store inside @out_array up to @out_size objects that belong to the unloading
576 * appdomain @domain. Returns the number of stored items. Can be called repeatedly
577 * until it returns 0.
578 * The items are removed from the finalizer data structure, so the caller is supposed
580 * @out_array should be on the stack to allow the GC to know the objects are still alive.
/*
 * NOTE(review): the `int` return type, braces, the declaration of
 * `result`, the final `return result;`, and (presumably) the GC lock
 * acquisition/release were lost in extraction — restore from upstream.
 */
mono_gc_finalizers_for_domain (MonoDomain *domain, MonoObject **out_array, int out_size)
	/* Apply pending staged registrations first so the tables are current. */
	sgen_process_fin_stage_entries ();
	/* Drain the nursery table first, then top up from the major table. */
	result = finalizers_for_domain (domain, out_array, out_size, &minor_finalizable_hash);
	if (result < out_size) {
		result += finalizers_for_domain (domain, out_array + result, out_size - result,
				&major_finalizable_hash);
/*
 * Per-generation disappearing-link tables, keyed by the link address.
 * The NULL equality function presumably means plain pointer equality —
 * confirm against SGEN_HASH_TABLE_INIT.
 */
static SgenHashTable minor_disappearing_link_hash = SGEN_HASH_TABLE_INIT (INTERNAL_MEM_DISLINK_TABLE, INTERNAL_MEM_DISLINK, 0, mono_aligned_addr_hash, NULL);
static SgenHashTable major_disappearing_link_hash = SGEN_HASH_TABLE_INIT (INTERNAL_MEM_DISLINK_TABLE, INTERNAL_MEM_DISLINK, 0, mono_aligned_addr_hash, NULL);
602 static SgenHashTable
*
603 get_dislink_hash_table (int generation
)
605 switch (generation
) {
606 case GENERATION_NURSERY
: return &minor_disappearing_link_hash
;
607 case GENERATION_OLD
: return &major_disappearing_link_hash
;
608 default: g_assert_not_reached ();
/* LOCKING: assumes the GC lock is held */
/*
 * Register (obj != NULL) or unregister (obj == NULL, as used by
 * process_dislink_stage_entry) the weak link at `link` in the table for
 * `generation`.
 *
 * NOTE(review): the (presumably `static void`) header, braces, and the
 * `if (!obj) { …; return; }` structure separating the remove path from the
 * add path were lost in extraction — as written, the code would remove and
 * then re-add the link.  Restore from upstream.
 */
add_or_remove_disappearing_link (MonoObject *obj, void **link, int generation)
	SgenHashTable *hash_table = get_dislink_hash_table (generation);

	if (sgen_hash_table_remove (hash_table, link, NULL)) {
		SGEN_LOG (5, "Removed dislink %p (%d) from %s table",
				link, hash_table->num_entries, sgen_generation_name (generation));

	sgen_hash_table_replace (hash_table, link, NULL, NULL);
	SGEN_LOG (5, "Added dislink for object: %p (%s) at %p to %s table",
			obj, obj->vtable->klass->name, link, sgen_generation_name (generation));
/* LOCKING: requires that the GC lock is held */
/*
 * Process the disappearing links for `generation`: null links whose target
 * is dead and ready for finalization, update links whose target moved, and
 * migrate minor-table links whose target left the nursery.  Tracking links
 * and plain links are handled in different passes relative to finalization
 * (see the `track != before_finalization` guard and the comment on it).
 *
 * NOTE(review): lost in extraction — the `void` return type, the
 * declarations of `link`/`dummy`/`object`/`track`/`copy`, the `if (!*link)`
 * guard around the "externally nullified" skip, the `if (!object)` guard
 * around the "hidden null object" skip, the `*link = NULL;` on the
 * nullify path, `continue;` statements, `else` introductions of the
 * survivor-update path, and several braces.  Restore from upstream.
 */
sgen_null_link_in_range (int generation, gboolean before_finalization, ScanCopyContext ctx)
	CopyOrMarkObjectFunc copy_func = ctx.copy_func;
	GrayQueue *queue = ctx.queue;
	SgenHashTable *hash = get_dislink_hash_table (generation);

	SGEN_HASH_TABLE_FOREACH (hash, link, dummy) {
		/*
		 * We null a weak link before unregistering it, so it's possible that a thread is
		 * suspended right in between setting the content to null and staging the unregister.
		 *
		 * The rest of this code cannot handle null links as DISLINK_OBJECT (NULL) produces an invalid address.
		 *
		 * We should simply skip the entry as the staged removal will take place during the next GC.
		 */
		SGEN_LOG (5, "Dislink %p was externally nullified", link);

		track = DISLINK_TRACK (link);
		/*
		 * Tracked references are processed after
		 * finalization handling whereas standard weak
		 * references are processed before.  If an
		 * object is still not marked after finalization
		 * handling it means that it either doesn't have
		 * a finalizer or the finalizer has already run,
		 * so we must null a tracking reference.
		 */
		if (track != before_finalization) {
			object = DISLINK_OBJECT (link);
			/*
			 * We should guard against a null object being hidden.  This can sometimes happen.
			 */
			SGEN_LOG (5, "Dislink %p with a hidden null object", link);

			if (!major_collector.is_object_live (object)) {
				if (sgen_gc_is_object_ready_for_finalization (object)) {
					/* Target is dead and finalizable: null the link and drop the entry. */
					binary_protocol_dislink_update (link, NULL, 0, 0);
					SGEN_LOG (5, "Dislink nullified at %p to GCed object %p", link, object);
					SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);

				/* Target survives: keep it alive / learn its new location. */
				copy_func ((void**)&copy, queue);

				/*
				 * Update pointer if it's moved.  If the object
				 * has been moved out of the nursery, we need to
				 * remove the link from the minor hash table and
				 * move it to the major one.
				 *
				 * FIXME: what if an object is moved earlier?
				 */
				if (hash == &minor_disappearing_link_hash && !ptr_in_nursery (copy)) {
					SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);

					*link = HIDE_POINTER (copy, track);
					add_or_remove_disappearing_link ((MonoObject *)copy, link, GENERATION_OLD);
					binary_protocol_dislink_update (link, copy, track, 0);

					SGEN_LOG (5, "Upgraded dislink at %p to major because object %p moved to %p", link, object, copy);

				/* Same table: just rewrite the hidden pointer in place. */
				*link = HIDE_POINTER (copy, track);
				binary_protocol_dislink_update (link, copy, track, 0);

				SGEN_LOG (5, "Updated dislink at %p to %p", link, DISLINK_OBJECT (link));
	} SGEN_HASH_TABLE_FOREACH_END;
/* LOCKING: requires that the GC lock is held */
/*
 * Null (and usually drop) disappearing links in `generation` whose hidden
 * object has a cleared vtable — presumably objects of the unloading
 * `domain`; note `domain` itself is not referenced in the surviving text,
 * so confirm against upstream.
 *
 * NOTE(review): the `void` return type, the declarations of
 * `link`/`dummy`, the `*link = NULL;`, the branch that sets `free` to
 * FALSE for the "not freed" case, and several braces were lost in
 * extraction — restore from upstream.
 */
sgen_null_links_for_domain (MonoDomain *domain, int generation)
	SgenHashTable *hash = get_dislink_hash_table (generation);
	SGEN_HASH_TABLE_FOREACH (hash, link, dummy) {
		char *object = DISLINK_OBJECT (link);
		if (*link && object && !((MonoObject *)object)->vtable) {
			gboolean free = TRUE;

			binary_protocol_dislink_update (link, NULL, 0, 0);
			/*
			 * This can happen if finalizers are not ran, i.e. Environment.Exit ()
			 * is called from finalizer like in finalizer-abort.cs.
			 */
			SGEN_LOG (5, "Disappearing link %p not freed", link);

			SGEN_HASH_TABLE_FOREACH_REMOVE (free);

	} SGEN_HASH_TABLE_FOREACH_END;
/* LOCKING: requires that the GC lock is held */
/*
 * Null and remove every disappearing link in `generation` whose target the
 * given predicate reports as dead.
 *
 * NOTE(review): the `void` return type, the declarations of
 * `link`/`dummy`/`is_alive`, the `*link = NULL;`, the `if (!is_alive)`
 * guard around the nullification, and several braces were lost in
 * extraction — restore from upstream.
 */
sgen_null_links_with_predicate (int generation, WeakLinkAlivePredicateFunc predicate, void *data)
	SgenHashTable *hash = get_dislink_hash_table (generation);
	SGEN_HASH_TABLE_FOREACH (hash, link, dummy) {
		char *object = DISLINK_OBJECT (link);

		is_alive = predicate ((MonoObject *)object, data);

		binary_protocol_dislink_update (link, NULL, 0, 0);
		SGEN_LOG (5, "Dislink nullified by predicate at %p to GCed object %p", link, object);
		SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
	} SGEN_HASH_TABLE_FOREACH_END;
/*
 * Remove from the `generation` finalizable table every object belonging to
 * the unloading `domain`, without queueing its finalizer (only the table
 * entry is dropped).
 *
 * NOTE(review): the `void` return type, braces, and the declarations of
 * `object`/`dummy` were lost in extraction — restore from upstream.
 */
sgen_remove_finalizers_for_domain (MonoDomain *domain, int generation)
	SgenHashTable *hash_table = get_finalize_entry_hash_table (generation);

	SGEN_HASH_TABLE_FOREACH (hash_table, object, dummy) {
		object = tagged_object_get_object (object);

		if (mono_object_domain (object) == domain) {
			SGEN_LOG (5, "Unregistering finalizer for object: %p (%s)", object, sgen_safe_name (object));

			SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
	} SGEN_HASH_TABLE_FOREACH_END;
/* LOCKING: requires that the GC lock is held */
/*
 * Drain callback for the dislink staging queue.  A NULL `obj` means
 * unregister the link from both tables; otherwise register it in the table
 * matching the object's current location.
 *
 * NOTE(review): the (presumably `static void`) header, the
 * `void **link = _link;` local, the `if (!obj) { …; return; }` structure
 * separating the unregister path from the register path, the `else`
 * between the two registrations, and possibly a guard around the protocol
 * call were lost in extraction — restore from upstream.
 */
process_dislink_stage_entry (MonoObject *obj, void *_link, int index)
	binary_protocol_dislink_process_staged (link, obj, index);

	/* Unregister: drop the link from whichever table holds it. */
	add_or_remove_disappearing_link (NULL, link, GENERATION_NURSERY);
	add_or_remove_disappearing_link (NULL, link, GENERATION_OLD);

	/* Register in the table for the object's current generation. */
	if (ptr_in_nursery (obj))
		add_or_remove_disappearing_link (obj, link, GENERATION_NURSERY);
	add_or_remove_disappearing_link (obj, link, GENERATION_OLD);
#define NUM_DISLINK_STAGE_ENTRIES 1024

/* Staging queue for disappearing-link (un)registrations; same protocol as
 * the finalizer staging queue above. */
static volatile gint32 next_dislink_stage_entry = 0;
static StageEntry dislink_stage_entries [NUM_DISLINK_STAGE_ENTRIES];
817 /* LOCKING: requires that the GC lock is held */
819 sgen_process_dislink_stage_entries (void)
821 lock_stage_for_processing (&next_dislink_stage_entry
);
822 process_stage_entries (NUM_DISLINK_STAGE_ENTRIES
, &next_dislink_stage_entry
, dislink_stage_entries
, process_dislink_stage_entry
);
/*
 * Register, update or remove the weak link at `link`, pointing it (in
 * hidden form) at `obj`.  During a GC (`in_gc`) the change is applied to
 * the tables immediately; otherwise it is staged, draining the queue on
 * overflow.
 *
 * NOTE(review): the `void` return type, braces, the `index` declaration,
 * the `if (in_gc) … else …` branching between the immediate and staged
 * paths, the tail of the dtrace-probe argument list, and a loop-exit after
 * a successful drain were lost in extraction.  The second immediate-update
 * sequence at the bottom appears to belong to a compiled-out (#if/#else)
 * alternative path — confirm against upstream.
 */
sgen_register_disappearing_link (MonoObject *obj, void **link, gboolean track, gboolean in_gc)
	/* Fire the dtrace weak-update probe if enabled. */
	if (MONO_GC_WEAK_UPDATE_ENABLED ()) {
		MonoVTable *vt = obj ? (MonoVTable *)SGEN_LOAD_VTABLE (obj) : NULL;
		MONO_GC_WEAK_UPDATE ((mword)link,
				*link ? (mword)DISLINK_OBJECT (link) : (mword)0,
				obj ? (mword)sgen_safe_object_get_size (obj) : (mword)0,
				obj ? vt->klass->name_space : NULL,
				obj ? vt->klass->name : NULL,

	/* Store the hidden (bit-inverted, track-tagged) pointer into the link. */
	*link = HIDE_POINTER (obj, track);

	/* In-GC path: apply the change to the tables immediately. */
	binary_protocol_dislink_update (link, obj, track, 0);
	process_dislink_stage_entry (obj, link, -1);

	/* Mutator path: stage the update, draining the queue on overflow. */
	binary_protocol_dislink_update (link, obj, track, 1);
	while ((index = add_stage_entry (NUM_DISLINK_STAGE_ENTRIES, &next_dislink_stage_entry, dislink_stage_entries, obj, link)) == -1) {
		if (try_lock_stage_for_processing (NUM_DISLINK_STAGE_ENTRIES, &next_dislink_stage_entry)) {
			process_stage_entries (NUM_DISLINK_STAGE_ENTRIES, &next_dislink_stage_entry, dislink_stage_entries, process_dislink_stage_entry);

	binary_protocol_dislink_update_staged (link, obj, track, index);

	/* (Alternative immediate-update path — see NOTE above.) */
	binary_protocol_dislink_update (link, obj, track, 0);
	process_dislink_stage_entry (obj, link, -1);
/*
 * One-time module initialization: register the HEAVY_STATISTICS counters
 * with the mono_counters machinery.
 *
 * NOTE(review): the `void` return type, braces, and the `#endif` closing
 * the HEAVY_STATISTICS block were lost in extraction; the final `#endif`
 * below closes a HAVE_SGEN_GC guard whose opening `#ifdef` is not visible
 * in this chunk.
 */
sgen_init_fin_weak_hash (void)
#ifdef HEAVY_STATISTICS
	mono_counters_register ("FinWeak Successes", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_success);
	mono_counters_register ("FinWeak Overflow aborts", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_overflow_abort);
	mono_counters_register ("FinWeak Wait for processing", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_wait_for_processing);
	mono_counters_register ("FinWeak Increment other thread", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_increment_other_thread);
	mono_counters_register ("FinWeak Index decremented", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_index_decremented);
	mono_counters_register ("FinWeak Entry invalidated", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_entry_invalidated);
#endif /* HAVE_SGEN_GC */