/*
 * sgen-fin-weak-hash.c: Finalizers and weak links.
 *
 * Author:
 * 	Paolo Molaro (lupus@ximian.com)
 * 	Rodrigo Kumpera (kumpera@gmail.com)
 *
 * Copyright 2005-2011 Novell, Inc (http://www.novell.com)
 * Copyright 2011 Xamarin Inc (http://www.xamarin.com)
 * Copyright 2011 Xamarin, Inc.
 * Copyright (C) 2012 Xamarin Inc
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License 2.0 as published by the Free Software Foundation;
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Library General Public License for more details.
 *
 * You should have received a copy of the GNU Library General Public
 * License 2.0 along with this library; if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include "config.h"

#ifdef HAVE_SGEN_GC

#include "metadata/sgen-gc.h"
#include "metadata/sgen-gray.h"
#include "metadata/sgen-protocol.h"
#include "utils/dtrace.h"
#include "utils/mono-counters.h"

#define ptr_in_nursery sgen_ptr_in_nursery

typedef SgenGrayQueue GrayQueue;

int num_ready_finalizers = 0;
static int no_finalize = 0;
#define DISLINK_OBJECT(l)	(REVEAL_POINTER (*(void**)(l)))
#define DISLINK_TRACK(l)	((~(size_t)(*(void**)(l))) & 1)
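/*
 * Illustrative sketch of the encoding, assuming HIDE_POINTER/REVEAL_POINTER
 * complement the word and strip the low tag bits (which is consistent with
 * DISLINK_TRACK above): a registered link slot holds the bitwise complement
 * of the object pointer, with the track flag folded into the lowest bit, so
 * an ordinary scan of the slot never sees the object address:
 *
 *   stored = ~((size_t)obj | (track ? 1 : 0));  // what HIDE_POINTER produces
 *   track  = (~stored) & 1;                     // DISLINK_TRACK
 *   obj    = REVEAL_POINTER (stored);           // DISLINK_OBJECT; obj is
 *                                               // aligned, so the track bit
 *                                               // is not part of the address
 */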
/*
 * The finalizable hash has the object as the key; the
 * disappearing_link hash has the link address as key.
 */

#define TAG_MASK ((mword)0x1)
static inline MonoObject*
tagged_object_get_object (MonoObject *object)
{
	return (MonoObject*)(((mword)object) & ~TAG_MASK);
}

static inline int
tagged_object_get_tag (MonoObject *object)
{
	return ((mword)object) & TAG_MASK;
}

static inline MonoObject*
tagged_object_apply (void *object, int tag_bits)
{
	return (MonoObject*)((mword)object | (mword)tag_bits);
}

static int
tagged_object_hash (MonoObject *o)
{
	return mono_object_hash (tagged_object_get_object (o));
}

static gboolean
tagged_object_equals (MonoObject *a, MonoObject *b)
{
	return tagged_object_get_object (a) == tagged_object_get_object (b);
}
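/*
 * Illustrative round trip (assuming objects are at least 2-byte aligned, so
 * bit 0 is free to carry the tag):
 *
 *   MonoObject *tagged = tagged_object_apply (obj, 1);
 *   g_assert (tagged_object_get_object (tagged) == obj);
 *   g_assert (tagged_object_get_tag (tagged) == 1);
 *
 * The hash and equality functions strip the tag first, so a tagged and an
 * untagged reference to the same object land in the same hash bucket.
 */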
static SgenHashTable minor_finalizable_hash = SGEN_HASH_TABLE_INIT (INTERNAL_MEM_FIN_TABLE, INTERNAL_MEM_FINALIZE_ENTRY, 0, (GHashFunc)tagged_object_hash, (GEqualFunc)tagged_object_equals);
static SgenHashTable major_finalizable_hash = SGEN_HASH_TABLE_INIT (INTERNAL_MEM_FIN_TABLE, INTERNAL_MEM_FINALIZE_ENTRY, 0, (GHashFunc)tagged_object_hash, (GEqualFunc)tagged_object_equals);

static SgenHashTable*
get_finalize_entry_hash_table (int generation)
{
	switch (generation) {
	case GENERATION_NURSERY: return &minor_finalizable_hash;
	case GENERATION_OLD: return &major_finalizable_hash;
	default: g_assert_not_reached ();
	}
}
#define BRIDGE_OBJECT_MARKED 0x1

/* LOCKING: requires that the GC lock is held */
void
sgen_mark_bridge_object (MonoObject *obj)
{
	SgenHashTable *hash_table = get_finalize_entry_hash_table (ptr_in_nursery (obj) ? GENERATION_NURSERY : GENERATION_OLD);

	sgen_hash_table_set_key (hash_table, obj, tagged_object_apply (obj, BRIDGE_OBJECT_MARKED));
}
/* LOCKING: requires that the GC lock is held */
void
sgen_collect_bridge_objects (int generation, ScanCopyContext ctx)
{
	CopyOrMarkObjectFunc copy_func = ctx.copy_func;
	GrayQueue *queue = ctx.queue;
	SgenHashTable *hash_table = get_finalize_entry_hash_table (generation);
	MonoObject *object;
	gpointer dummy;
	char *copy;

	if (no_finalize)
		return;

	SGEN_HASH_TABLE_FOREACH (hash_table, object, dummy) {
		int tag = tagged_object_get_tag (object);
		object = tagged_object_get_object (object);

		/* Bridge code told us to ignore this one */
		if (tag == BRIDGE_OBJECT_MARKED)
			continue;

		/* Skip the object if the major collector still considers it live */
		if (major_collector.is_object_live ((char*)object))
			continue;

		/* Skip the object if the nursery still considers it alive */
		if (!sgen_gc_is_object_ready_for_finalization (object))
			continue;

		if (!sgen_is_bridge_object (object))
			continue;

		copy = (char*)object;
		copy_func ((void**)&copy, queue);

		sgen_bridge_register_finalized_object ((MonoObject*)copy);

		if (hash_table == &minor_finalizable_hash && !ptr_in_nursery (copy)) {
			/* remove from the list */
			SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);

			/* insert it into the major hash */
			sgen_hash_table_replace (&major_finalizable_hash, tagged_object_apply (copy, tag), NULL, NULL);

			SGEN_LOG (5, "Promoting finalization of object %p (%s) (was at %p) to major table", copy, sgen_safe_name (copy), object);

			continue;
		} else {
			/* update pointer */
			SGEN_LOG (5, "Updating object for finalization: %p (%s) (was at %p)", copy, sgen_safe_name (copy), object);
			SGEN_HASH_TABLE_FOREACH_SET_KEY (tagged_object_apply (copy, tag));
		}
	} SGEN_HASH_TABLE_FOREACH_END;
}
/* LOCKING: requires that the GC lock is held */
void
sgen_finalize_in_range (int generation, ScanCopyContext ctx)
{
	CopyOrMarkObjectFunc copy_func = ctx.copy_func;
	GrayQueue *queue = ctx.queue;
	SgenHashTable *hash_table = get_finalize_entry_hash_table (generation);
	MonoObject *object;
	gpointer dummy;

	if (no_finalize)
		return;
	SGEN_HASH_TABLE_FOREACH (hash_table, object, dummy) {
		int tag = tagged_object_get_tag (object);
		object = tagged_object_get_object (object);
		if (!major_collector.is_object_live ((char*)object)) {
			gboolean is_fin_ready = sgen_gc_is_object_ready_for_finalization (object);
			MonoObject *copy = object;
			copy_func ((void**)&copy, queue);
			if (is_fin_ready) {
				/* remove and put in fin_ready_list */
				SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
				num_ready_finalizers++;
				sgen_queue_finalization_entry (copy);
				/* Make it survive */
				SGEN_LOG (5, "Queueing object for finalization: %p (%s) (was at %p) (%d/%d)", copy, sgen_safe_name (copy), object, num_ready_finalizers, sgen_hash_table_num_entries (hash_table));
				continue;
			} else {
				if (hash_table == &minor_finalizable_hash && !ptr_in_nursery (copy)) {
					/* remove from the list */
					SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);

					/* insert it into the major hash */
					sgen_hash_table_replace (&major_finalizable_hash, tagged_object_apply (copy, tag), NULL, NULL);

					SGEN_LOG (5, "Promoting finalization of object %p (%s) (was at %p) to major table", copy, sgen_safe_name (copy), object);

					continue;
				} else {
					/* update pointer */
					SGEN_LOG (5, "Updating object for finalization: %p (%s) (was at %p)", copy, sgen_safe_name (copy), object);
					SGEN_HASH_TABLE_FOREACH_SET_KEY (tagged_object_apply (copy, tag));
				}
			}
		}
	} SGEN_HASH_TABLE_FOREACH_END;
}
/* LOCKING: requires that the GC lock is held */
static void
register_for_finalization (MonoObject *obj, void *user_data, int generation)
{
	SgenHashTable *hash_table = get_finalize_entry_hash_table (generation);

	if (no_finalize)
		return;

	g_assert (user_data == NULL || user_data == mono_gc_run_finalize);

	if (user_data) {
		if (sgen_hash_table_replace (hash_table, obj, NULL, NULL))
			SGEN_LOG (5, "Added finalizer for object: %p (%s) (%d) to %s table", obj, obj->vtable->klass->name, hash_table->num_entries, sgen_generation_name (generation));
	} else {
		if (sgen_hash_table_remove (hash_table, obj, NULL))
			SGEN_LOG (5, "Removed finalizer for object: %p (%s) (%d)", obj, obj->vtable->klass->name, hash_table->num_entries);
	}
}
/*
 * We're using (mostly) non-locking staging queues for finalizers and weak links to speed
 * up registering them. Otherwise we'd have to take the GC lock.
 *
 * The queues are arrays of `StageEntry`, plus a `next_entry` index. Threads add entries to
 * the queue via `add_stage_entry()` in a linear fashion until it fills up, in which case
 * `process_stage_entries()` is called to drain it. A garbage collection will also drain
 * the queues via the same function. That implies that `add_stage_entry()`, since it
 * doesn't take a lock, must be able to run concurrently with `process_stage_entries()`,
 * though it doesn't have to make progress while the queue is drained. In fact, once it
 * detects that the queue is being drained, it blocks until the draining is done.
 *
 * The protocol must guarantee that entries in the queue are causally ordered, otherwise two
 * entries for the same location might get switched, resulting in the earlier one being
 * committed and the later one ignored.
 *
 * `next_entry` is the index of the next entry to be filled, or `-1` if the queue is
 * currently being drained. Each entry has a state:
 *
 * `STAGE_ENTRY_FREE`: The entry is free. Its data fields must be `NULL`.
 *
 * `STAGE_ENTRY_BUSY`: The entry is currently being filled in.
 *
 * `STAGE_ENTRY_USED`: The entry is completely filled in and must be processed in the next
 * draining round.
 *
 * `STAGE_ENTRY_INVALID`: The entry was busy during queue draining and therefore
 * invalidated. Entries that are `BUSY` can obviously not be processed during a drain, but
 * we can't leave them in place because new entries might be inserted before them, including
 * from the same thread, violating causality. An alternative would be not to reset
 * `next_entry` to `0` after a drain, but to the index of the last `BUSY` entry plus one,
 * but that can potentially waste the whole queue.
 *
 * State transitions:
 *
 * | from    | to      | filler? | drainer? |
 * +---------+---------+---------+----------+
 * | FREE    | BUSY    | X       |          |
 * | BUSY    | FREE    | X       |          |
 * | BUSY    | USED    | X       |          |
 * | BUSY    | INVALID |         | X        |
 * | USED    | FREE    |         | X        |
 * | INVALID | FREE    | X       |          |
 *
 * `next_entry` can be incremented either by the filler thread that set the corresponding
 * entry to `BUSY`, or by another filler thread that's trying to get a `FREE` slot. If that
 * other thread wasn't allowed to increment, it would block on the first filler thread.
 *
 * An entry's state, once it's set from `FREE` to `BUSY` by a filler thread, can only be
 * changed by that same thread or by the drainer. The drainer can only set a `BUSY` entry
 * to `INVALID`, so it needs to be set to `FREE` again by the original filler thread.
 */
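/*
 * Illustrative interleaving (a sketch, not an exhaustive argument): filler
 * thread F and drainer thread D racing on entry 0.
 *
 *   F: FREE -> BUSY on entries [0]     (claims the slot)
 *   F: CAS next_entry 0 -> 1
 *   D: lock_stage_for_processing ()    (next_entry = -1)
 *   D: BUSY -> INVALID on entries [0]  (can't process a half-filled entry)
 *   F: BUSY -> USED CAS fails          (sees INVALID)
 *   F: INVALID -> FREE, then starts over with a fresh slot
 *
 * This corresponds to the `INVALID` path at the end of `add_stage_entry()`
 * below.
 */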
#define STAGE_ENTRY_FREE	0
#define STAGE_ENTRY_BUSY	1
#define STAGE_ENTRY_USED	2
#define STAGE_ENTRY_INVALID	3

typedef struct {
	volatile gint32 state;
	MonoObject *obj;
	void *user_data;
} StageEntry;

#define NUM_FIN_STAGE_ENTRIES	1024

static volatile gint32 next_fin_stage_entry = 0;
static StageEntry fin_stage_entries [NUM_FIN_STAGE_ENTRIES];
/*
 * This is used to lock the stage when processing is forced, i.e. when it's triggered by a
 * garbage collection. In that case, the world is already stopped and there's only one
 * thread operating on the queue.
 */
static void
lock_stage_for_processing (volatile gint32 *next_entry)
{
	*next_entry = -1;
}
/*
 * When processing is triggered by an overflow, we don't want to take the GC lock
 * immediately, and then set `next_entry` to `-1`, because another thread might have drained
 * the queue in the meantime. Instead, we make sure the overflow is still there, we
 * atomically set `next_entry`, and only once that happened do we take the GC lock.
 */
static gboolean
try_lock_stage_for_processing (int num_entries, volatile gint32 *next_entry)
{
	gint32 old = *next_entry;
	if (old < num_entries)
		return FALSE;
	return InterlockedCompareExchange (next_entry, -1, old) == old;
}
/* LOCKING: requires that the GC lock is held */
static void
process_stage_entries (int num_entries, volatile gint32 *next_entry, StageEntry *entries, void (*process_func) (MonoObject*, void*, int))
{
	int i;

	/*
	 * This can happen if after setting `next_entry` to `-1` in
	 * `try_lock_stage_for_processing()`, a GC was triggered, which then drained the
	 * queue and reset `next_entry`.
	 *
	 * We have the GC lock now, so if it's still `-1`, we can't be interrupted by a GC.
	 */
	if (*next_entry != -1)
		return;

	for (i = 0; i < num_entries; ++i) {
		gint32 state;

	retry:
		state = entries [i].state;

		switch (state) {
		case STAGE_ENTRY_FREE:
		case STAGE_ENTRY_INVALID:
			continue;
		case STAGE_ENTRY_BUSY:
			/* BUSY -> INVALID */
			/*
			 * This must be done atomically, because the filler thread can set
			 * the entry to `USED`, in which case we must process it, so we must
			 * detect that eventuality.
			 */
			if (InterlockedCompareExchange (&entries [i].state, STAGE_ENTRY_INVALID, STAGE_ENTRY_BUSY) != STAGE_ENTRY_BUSY)
				goto retry;
			continue;
		case STAGE_ENTRY_USED:
			break;
		default:
			SGEN_ASSERT (0, FALSE, "Invalid stage entry state");
			break;
		}

		/* state is USED */

		process_func (entries [i].obj, entries [i].user_data, i);

		entries [i].obj = NULL;
		entries [i].user_data = NULL;

		mono_memory_write_barrier ();

		/* USED -> FREE */
		/*
		 * This transition only happens here, so we don't have to do it atomically.
		 */
		entries [i].state = STAGE_ENTRY_FREE;
	}

	mono_memory_write_barrier ();

	*next_entry = 0;
}
#ifdef HEAVY_STATISTICS
static long long stat_overflow_abort = 0;
static long long stat_wait_for_processing = 0;
static long long stat_increment_other_thread = 0;
static long long stat_index_decremented = 0;
static long long stat_entry_invalidated = 0;
static long long stat_success = 0;
#endif
static int
add_stage_entry (int num_entries, volatile gint32 *next_entry, StageEntry *entries, MonoObject *obj, void *user_data)
{
	gint32 index, new_next_entry, old_next_entry;
	gint32 previous_state;

 retry:
	for (;;) {
		index = *next_entry;
		if (index >= num_entries) {
			HEAVY_STAT (++stat_overflow_abort);
			return -1;
		}
		if (index < 0) {
			/*
			 * Backed-off waiting is way more efficient than even using a
			 * dedicated lock for this.
			 */
			while ((index = *next_entry) < 0) {
				/*
				 * This seems like a good value. Determined by timing
				 * sgen-weakref-stress.exe.
				 */
				g_usleep (200);
				HEAVY_STAT (++stat_wait_for_processing);
			}
			continue;
		}
		/* FREE -> BUSY */
		if (entries [index].state != STAGE_ENTRY_FREE ||
				InterlockedCompareExchange (&entries [index].state, STAGE_ENTRY_BUSY, STAGE_ENTRY_FREE) != STAGE_ENTRY_FREE) {
			/*
			 * If we can't get the entry it must be because another thread got
			 * it first. We don't want to wait for that thread to increment
			 * `next_entry`, so we try to do it ourselves. Whether we succeed
			 * or not, we start over.
			 */
			if (*next_entry == index) {
				InterlockedCompareExchange (next_entry, index + 1, index);
				//g_print ("tried increment for other thread\n");
				HEAVY_STAT (++stat_increment_other_thread);
			}
			continue;
		}
		/* state is BUSY now */
		mono_memory_write_barrier ();
		/*
		 * Incrementing `next_entry` must happen after setting the state to `BUSY`.
		 * If it were the other way around, it would be possible that after a filler
		 * incremented the index, other threads fill up the queue, the queue is
		 * drained, the original filler finally fills in the slot, but `next_entry`
		 * ends up at the start of the queue, and new entries are written in the
		 * queue in front of, not behind, the original filler's entry.
		 *
		 * We don't actually require that the CAS succeeds, but we do require that
		 * the value of `next_entry` is not lower than our index. Since the drainer
		 * sets it to `-1`, that also takes care of the case that the drainer is
		 * currently running.
		 */
		old_next_entry = InterlockedCompareExchange (next_entry, index + 1, index);
		if (old_next_entry < index) {
			/* BUSY -> FREE */
			/*
			 * The state might still be `BUSY`, or the drainer could have set it
			 * to `INVALID`. In either case, there's no point in CASing. Set
			 * it to `FREE` and start over.
			 */
			entries [index].state = STAGE_ENTRY_FREE;
			HEAVY_STAT (++stat_index_decremented);
			continue;
		}
		break;
	}

	SGEN_ASSERT (0, index >= 0 && index < num_entries, "Invalid index");

	entries [index].obj = obj;
	entries [index].user_data = user_data;

	mono_memory_write_barrier ();

	new_next_entry = *next_entry;
	mono_memory_read_barrier ();
	/* BUSY -> USED */
	/*
	 * A `BUSY` entry will either still be `BUSY` or the drainer will have set it to
	 * `INVALID`. In the former case, we set it to `USED` and we're finished. In the
	 * latter case, we reset it to `FREE` and start over.
	 */
	previous_state = InterlockedCompareExchange (&entries [index].state, STAGE_ENTRY_USED, STAGE_ENTRY_BUSY);
	if (previous_state == STAGE_ENTRY_BUSY) {
		SGEN_ASSERT (0, new_next_entry >= index || new_next_entry < 0, "Invalid next entry index - as long as we're busy, other thread can only increment or invalidate it");
		HEAVY_STAT (++stat_success);
		return index;
	}

	SGEN_ASSERT (0, previous_state == STAGE_ENTRY_INVALID, "Invalid state transition - other thread can only make busy state invalid");
	entries [index].obj = NULL;
	entries [index].user_data = NULL;
	mono_memory_write_barrier ();
	/* INVALID -> FREE */
	entries [index].state = STAGE_ENTRY_FREE;

	HEAVY_STAT (++stat_entry_invalidated);

	goto retry;
}
/* LOCKING: requires that the GC lock is held */
static void
process_fin_stage_entry (MonoObject *obj, void *user_data, int index)
{
	if (ptr_in_nursery (obj))
		register_for_finalization (obj, user_data, GENERATION_NURSERY);
	else
		register_for_finalization (obj, user_data, GENERATION_OLD);
}
/* LOCKING: requires that the GC lock is held */
void
sgen_process_fin_stage_entries (void)
{
	lock_stage_for_processing (&next_fin_stage_entry);
	process_stage_entries (NUM_FIN_STAGE_ENTRIES, &next_fin_stage_entry, fin_stage_entries, process_fin_stage_entry);
}
void
mono_gc_register_for_finalization (MonoObject *obj, void *user_data)
{
	while (add_stage_entry (NUM_FIN_STAGE_ENTRIES, &next_fin_stage_entry, fin_stage_entries, obj, user_data) == -1) {
		if (try_lock_stage_for_processing (NUM_FIN_STAGE_ENTRIES, &next_fin_stage_entry)) {
			LOCK_GC;
			process_stage_entries (NUM_FIN_STAGE_ENTRIES, &next_fin_stage_entry, fin_stage_entries, process_fin_stage_entry);
			UNLOCK_GC;
		}
	}
}
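/*
 * Usage sketch (illustrative; see the assertion in register_for_finalization ()):
 * user_data is either mono_gc_run_finalize, to register the object for
 * finalization, or NULL, to cancel a registration:
 *
 *   mono_gc_register_for_finalization (obj, mono_gc_run_finalize);
 *   mono_gc_register_for_finalization (obj, NULL);
 */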
/* LOCKING: requires that the GC lock is held */
static int
finalizers_for_domain (MonoDomain *domain, MonoObject **out_array, int out_size,
	SgenHashTable *hash_table)
{
	MonoObject *object;
	gpointer dummy;
	int count;

	if (no_finalize || !out_size || !out_array)
		return 0;
	count = 0;
	SGEN_HASH_TABLE_FOREACH (hash_table, object, dummy) {
		object = tagged_object_get_object (object);

		if (mono_object_domain (object) == domain) {
			/* remove and put in out_array */
			SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
			out_array [count ++] = object;
			SGEN_LOG (5, "Collecting object for finalization: %p (%s) (%d/%d)", object, sgen_safe_name (object), num_ready_finalizers, sgen_hash_table_num_entries (hash_table));
			if (count == out_size)
				return count;
			continue;
		}
	} SGEN_HASH_TABLE_FOREACH_END;
	return count;
}
/**
 * mono_gc_finalizers_for_domain:
 * @domain: the unloading appdomain
 * @out_array: output array
 * @out_size: size of output array
 *
 * Store inside @out_array up to @out_size objects that belong to the unloading
 * appdomain @domain. Returns the number of stored items. Can be called repeatedly
 * until it returns 0.
 * The items are removed from the finalizer data structure, so the caller is supposed
 * to finalize them.
 * @out_array should be on the stack to allow the GC to know the objects are still alive.
 */
int
mono_gc_finalizers_for_domain (MonoDomain *domain, MonoObject **out_array, int out_size)
{
	int result;

	LOCK_GC;
	sgen_process_fin_stage_entries ();
	result = finalizers_for_domain (domain, out_array, out_size, &minor_finalizable_hash);
	if (result < out_size) {
		result += finalizers_for_domain (domain, out_array + result, out_size - result,
			&major_finalizable_hash);
	}
	UNLOCK_GC;

	return result;
}
static SgenHashTable minor_disappearing_link_hash = SGEN_HASH_TABLE_INIT (INTERNAL_MEM_DISLINK_TABLE, INTERNAL_MEM_DISLINK, 0, mono_aligned_addr_hash, NULL);
static SgenHashTable major_disappearing_link_hash = SGEN_HASH_TABLE_INIT (INTERNAL_MEM_DISLINK_TABLE, INTERNAL_MEM_DISLINK, 0, mono_aligned_addr_hash, NULL);

static SgenHashTable*
get_dislink_hash_table (int generation)
{
	switch (generation) {
	case GENERATION_NURSERY: return &minor_disappearing_link_hash;
	case GENERATION_OLD: return &major_disappearing_link_hash;
	default: g_assert_not_reached ();
	}
}
/* LOCKING: assumes the GC lock is held */
static void
add_or_remove_disappearing_link (MonoObject *obj, void **link, int generation)
{
	SgenHashTable *hash_table = get_dislink_hash_table (generation);

	if (!obj) {
		if (sgen_hash_table_remove (hash_table, link, NULL)) {
			SGEN_LOG (5, "Removed dislink %p (%d) from %s table",
					link, hash_table->num_entries, sgen_generation_name (generation));
		}
		return;
	}

	sgen_hash_table_replace (hash_table, link, NULL, NULL);
	SGEN_LOG (5, "Added dislink for object: %p (%s) at %p to %s table",
			obj, obj->vtable->klass->name, link, sgen_generation_name (generation));
}
/* LOCKING: requires that the GC lock is held */
void
sgen_null_link_in_range (int generation, gboolean before_finalization, ScanCopyContext ctx)
{
	CopyOrMarkObjectFunc copy_func = ctx.copy_func;
	GrayQueue *queue = ctx.queue;
	void **link;
	gpointer dummy;
	SgenHashTable *hash = get_dislink_hash_table (generation);

	SGEN_HASH_TABLE_FOREACH (hash, link, dummy) {
		char *object;
		gboolean track;

		/*
		We null a weak link before unregistering it, so it's possible that a thread is
		suspended right in between setting the content to null and staging the unregister.

		The rest of this code cannot handle null links as DISLINK_OBJECT (NULL) produces an invalid address.

		We should simply skip the entry as the staged removal will take place during the next GC.
		*/
		if (!*link) {
			SGEN_LOG (5, "Dislink %p was externally nullified", link);
			continue;
		}

		track = DISLINK_TRACK (link);
		/*
		 * Tracked references are processed after
		 * finalization handling whereas standard weak
		 * references are processed before. If an
		 * object is still not marked after finalization
		 * handling it means that it either doesn't have
		 * a finalizer or the finalizer has already run,
		 * so we must null a tracking reference.
		 */
		if (track != before_finalization) {
			object = DISLINK_OBJECT (link);
			/*
			We should guard against a null object being hidden. This can sometimes happen.
			*/
			if (!object) {
				SGEN_LOG (5, "Dislink %p with a hidden null object", link);
				continue;
			}

			if (!major_collector.is_object_live (object)) {
				if (sgen_gc_is_object_ready_for_finalization (object)) {
					*link = NULL;
					binary_protocol_dislink_update (link, NULL, 0, 0);
					SGEN_LOG (5, "Dislink nullified at %p to GCed object %p", link, object);
					SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
					continue;
				} else {
					char *copy = object;
					copy_func ((void**)&copy, queue);

					/* Update pointer if it's moved. If the object
					 * has been moved out of the nursery, we need to
					 * move the link from the minor hash table to
					 * the major one.
					 *
					 * FIXME: what if an object is moved earlier?
					 */
					if (hash == &minor_disappearing_link_hash && !ptr_in_nursery (copy)) {
						SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);

						g_assert (copy);
						*link = HIDE_POINTER (copy, track);
						add_or_remove_disappearing_link ((MonoObject*)copy, link, GENERATION_OLD);
						binary_protocol_dislink_update (link, copy, track, 0);

						SGEN_LOG (5, "Upgraded dislink at %p to major because object %p moved to %p", link, object, copy);

						continue;
					} else {
						*link = HIDE_POINTER (copy, track);
						binary_protocol_dislink_update (link, copy, track, 0);
						SGEN_LOG (5, "Updated dislink at %p to %p", link, DISLINK_OBJECT (link));
					}
				}
			}
		}
	} SGEN_HASH_TABLE_FOREACH_END;
}
/* LOCKING: requires that the GC lock is held */
void
sgen_null_links_for_domain (MonoDomain *domain, int generation)
{
	void **link;
	gpointer dummy;
	SgenHashTable *hash = get_dislink_hash_table (generation);

	SGEN_HASH_TABLE_FOREACH (hash, link, dummy) {
		char *object = DISLINK_OBJECT (link);
		if (*link && object && !((MonoObject*)object)->vtable) {
			gboolean free = TRUE;

			if (*link) {
				*link = NULL;
				binary_protocol_dislink_update (link, NULL, 0, 0);
				free = FALSE;
				/*
				 * This can happen if finalizers are not run, e.g. when
				 * Environment.Exit () is called from a finalizer, as in
				 * finalizer-abort.cs.
				 */
				SGEN_LOG (5, "Disappearing link %p not freed", link);
			}

			SGEN_HASH_TABLE_FOREACH_REMOVE (free);

			continue;
		}
	} SGEN_HASH_TABLE_FOREACH_END;
}
/* LOCKING: requires that the GC lock is held */
void
sgen_null_links_with_predicate (int generation, WeakLinkAlivePredicateFunc predicate, void *data)
{
	void **link;
	gpointer dummy;
	SgenHashTable *hash = get_dislink_hash_table (generation);

	SGEN_HASH_TABLE_FOREACH (hash, link, dummy) {
		char *object = DISLINK_OBJECT (link);
		mono_bool is_alive;

		if (!*link)
			continue;
		is_alive = predicate ((MonoObject*)object, data);

		if (!is_alive) {
			*link = NULL;
			binary_protocol_dislink_update (link, NULL, 0, 0);
			SGEN_LOG (5, "Dislink nullified by predicate at %p to GCed object %p", link, object);
			SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
			continue;
		}
	} SGEN_HASH_TABLE_FOREACH_END;
}
void
sgen_remove_finalizers_for_domain (MonoDomain *domain, int generation)
{
	SgenHashTable *hash_table = get_finalize_entry_hash_table (generation);
	MonoObject *object;
	gpointer dummy;

	SGEN_HASH_TABLE_FOREACH (hash_table, object, dummy) {
		object = tagged_object_get_object (object);

		if (mono_object_domain (object) == domain) {
			SGEN_LOG (5, "Unregistering finalizer for object: %p (%s)", object, sgen_safe_name (object));

			SGEN_HASH_TABLE_FOREACH_REMOVE (TRUE);
			continue;
		}
	} SGEN_HASH_TABLE_FOREACH_END;
}
/* LOCKING: requires that the GC lock is held */
static void
process_dislink_stage_entry (MonoObject *obj, void *_link, int index)
{
	void **link = _link;

	if (index >= 0)
		binary_protocol_dislink_process_staged (link, obj, index);

	add_or_remove_disappearing_link (NULL, link, GENERATION_NURSERY);
	add_or_remove_disappearing_link (NULL, link, GENERATION_OLD);
	if (obj) {
		if (ptr_in_nursery (obj))
			add_or_remove_disappearing_link (obj, link, GENERATION_NURSERY);
		else
			add_or_remove_disappearing_link (obj, link, GENERATION_OLD);
	}
}
#define NUM_DISLINK_STAGE_ENTRIES	1024

static volatile gint32 next_dislink_stage_entry = 0;
static StageEntry dislink_stage_entries [NUM_DISLINK_STAGE_ENTRIES];

/* LOCKING: requires that the GC lock is held */
void
sgen_process_dislink_stage_entries (void)
{
	lock_stage_for_processing (&next_dislink_stage_entry);
	process_stage_entries (NUM_DISLINK_STAGE_ENTRIES, &next_dislink_stage_entry, dislink_stage_entries, process_dislink_stage_entry);
}
void
sgen_register_disappearing_link (MonoObject *obj, void **link, gboolean track, gboolean in_gc)
{

#ifdef ENABLE_DTRACE
	if (MONO_GC_WEAK_UPDATE_ENABLED ()) {
		MonoVTable *vt = obj ? (MonoVTable*)SGEN_LOAD_VTABLE (obj) : NULL;
		MONO_GC_WEAK_UPDATE ((mword)link,
				*link ? (mword)DISLINK_OBJECT (link) : (mword)0,
				(mword)obj,
				obj ? (mword)sgen_safe_object_get_size (obj) : (mword)0,
				obj ? vt->klass->name_space : NULL,
				obj ? vt->klass->name : NULL,
				track ? 1 : 0);
	}
#endif

	if (obj)
		*link = HIDE_POINTER (obj, track);
	else
		*link = NULL;

#if 1
	if (in_gc) {
		binary_protocol_dislink_update (link, obj, track, 0);
		process_dislink_stage_entry (obj, link, -1);
	} else {
		int index;
		binary_protocol_dislink_update (link, obj, track, 1);
		while ((index = add_stage_entry (NUM_DISLINK_STAGE_ENTRIES, &next_dislink_stage_entry, dislink_stage_entries, obj, link)) == -1) {
			if (try_lock_stage_for_processing (NUM_DISLINK_STAGE_ENTRIES, &next_dislink_stage_entry)) {
				LOCK_GC;
				process_stage_entries (NUM_DISLINK_STAGE_ENTRIES, &next_dislink_stage_entry, dislink_stage_entries, process_dislink_stage_entry);
				UNLOCK_GC;
			}
		}
		binary_protocol_dislink_update_staged (link, obj, track, index);
	}
#else
	if (!in_gc)
		LOCK_GC;
	binary_protocol_dislink_update (link, obj, track, 0);
	process_dislink_stage_entry (obj, link, -1);
	if (!in_gc)
		UNLOCK_GC;
#endif
}
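/*
 * Usage sketch (illustrative): registering stores a hidden pointer in *link
 * and stages the hash-table bookkeeping; passing obj == NULL clears the slot
 * and stages the removal instead:
 *
 *   sgen_register_disappearing_link (obj, link, TRUE, FALSE);   // track resurrection
 *   sgen_register_disappearing_link (NULL, link, FALSE, FALSE); // unregister
 */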
void
sgen_init_fin_weak_hash (void)
{
#ifdef HEAVY_STATISTICS
	mono_counters_register ("FinWeak Successes", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_success);
	mono_counters_register ("FinWeak Overflow aborts", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_overflow_abort);
	mono_counters_register ("FinWeak Wait for processing", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_wait_for_processing);
	mono_counters_register ("FinWeak Increment other thread", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_increment_other_thread);
	mono_counters_register ("FinWeak Index decremented", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_index_decremented);
	mono_counters_register ("FinWeak Entry invalidated", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_entry_invalidated);
#endif
}

#endif /* HAVE_SGEN_GC */