2 * sgen-gc.c: Simple generational GC.
5 * Paolo Molaro (lupus@ximian.com)
7 * Copyright 2005-2010 Novell, Inc (http://www.novell.com)
9 * Thread start/stop adapted from Boehm's GC:
10 * Copyright (c) 1994 by Xerox Corporation. All rights reserved.
11 * Copyright (c) 1996 by Silicon Graphics. All rights reserved.
12 * Copyright (c) 1998 by Fergus Henderson. All rights reserved.
13 * Copyright (c) 2000-2004 by Hewlett-Packard Company. All rights reserved.
15 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
16 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
18 * Permission is hereby granted to use or copy this program
19 * for any purpose, provided the above notices are retained on all copies.
20 * Permission to modify the code and to distribute modified code is granted,
21 * provided the above notices are retained, and a notice that the code was
22 * modified is included with the above copyright notice.
25 * Copyright 2001-2003 Ximian, Inc
26 * Copyright 2003-2010 Novell, Inc.
28 * Permission is hereby granted, free of charge, to any person obtaining
29 * a copy of this software and associated documentation files (the
30 * "Software"), to deal in the Software without restriction, including
31 * without limitation the rights to use, copy, modify, merge, publish,
32 * distribute, sublicense, and/or sell copies of the Software, and to
33 * permit persons to whom the Software is furnished to do so, subject to
34 * the following conditions:
36 * The above copyright notice and this permission notice shall be
37 * included in all copies or substantial portions of the Software.
39 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
40 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
41 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
42 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
43 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
44 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
45 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
48 * Important: allocation always provides zeroed memory; having to do
49 * a memset after allocation is deadly for performance.
50 * Memory usage at startup is currently as follows:
52 * 64 KB internal space
54 * We should provide a small memory config with half the sizes
56 * We currently try to make as few mono assumptions as possible:
57 * 1) 2-word header with no GC pointers in it (first vtable, second to store the
59 * 2) gc descriptor is the second word in the vtable (first word in the class)
60 * 3) 8 byte alignment is the minimum and enough (not true for special structures (SIMD), FIXME)
61 * 4) there is a function to get an object's size and the number of
62 * elements in an array.
63 * 5) we know the special way bounds are allocated for complex arrays
64 * 6) we know about proxies and how to treat them when domains are unloaded
66 * Always try to keep stack usage to a minimum: no recursive behaviour
67 * and no large stack allocs.
69 * General description.
70 * Objects are initially allocated in a nursery using a fast bump-pointer technique.
71 * When the nursery is full we start a nursery collection: this is performed with a
73 * When the old generation is full we start a copying GC of the old generation as well:
74 * this will be changed to mark&sweep with copying when fragmentation becomes too severe
75 * in the future. Maybe we'll even do both during the same collection like IMMIX.
77 * The things that complicate this description are:
78 * *) pinned objects: we can't move them so we need to keep track of them
79 * *) no precise info of the thread stacks and registers: we need to be able to
80 * quickly find the objects that may be referenced conservatively and pin them
81 * (this makes the first issues more important)
82 * *) large objects are too expensive to be dealt with using copying GC: we handle them
83 * with mark/sweep during major collections
84 * *) some objects need to not move even if they are small (interned strings, Type handles):
85 * we use mark/sweep for them, too: they are not allocated in the nursery, but inside
86 * PinnedChunks regions
92 *) we could have a function pointer in MonoClass to implement
93 customized write barriers for value types
95 *) investigate the stuff needed to advance a thread to a GC-safe
96 point (single-stepping, read from unmapped memory etc) and implement it.
97 This would enable us to inline allocations and write barriers, for example,
98 or at least parts of them, like the write barrier checks.
99 We may need this also for handling precise info on stacks, even simple things
100 as having uninitialized data on the stack and having to wait for the prolog
101 to zero it. Not an issue for the last frame that we scan conservatively.
102 We could always not trust the value in the slots anyway.
104 *) modify the jit to save info about references in stack locations:
105 this can be done just for locals as a start, so that at least
106 part of the stack is handled precisely.
108 *) test/fix endianness issues
110 *) Implement a card table as the write barrier instead of remembered
111 sets? Card tables are not easy to implement with our current
112 memory layout. We have several different kinds of major heap
113 objects: Small objects in regular blocks, small objects in pinned
114 chunks and LOS objects. If we just have a pointer we have no way
115 to tell which kind of object it points into, therefore we cannot
116 know where its card table is. The least we have to do to make
117 this happen is to get rid of write barriers for indirect stores.
120 *) Get rid of write barriers for indirect stores. We can do this by
121 telling the GC to wbarrier-register an object once we do an ldloca
122 or ldelema on it, and to unregister it once it's not used anymore
123 (it can only travel downwards on the stack). The problem with
124 unregistering is that it needs to happen eventually no matter
125 what, even if exceptions are thrown, the thread aborts, etc.
126 Rodrigo suggested that we could do only the registering part and
127 let the collector find out (pessimistically) when it's safe to
128 unregister, namely when the stack pointer of the thread that
129 registered the object is higher than it was when the registering
130 happened. This might make for a good first implementation to get
131 some data on performance.
133 *) Some sort of blacklist support? Blacklisting is a concept from the
134 Boehm GC: if during a conservative scan we find pointers to an
135 area which we might use as heap, we mark that area as unusable, so
136 pointer retention by random pinning pointers is reduced.
138 *) experiment with max small object size (very small right now - 2kb,
139 because it's tied to the max freelist size)
141 *) add an option to mmap the whole heap in one chunk: it makes for many
142 simplifications in the checks (put the nursery at the top and just use a single
143 check for inclusion/exclusion): the issue this has is that on 32 bit systems it's
144 not flexible (too much of the address space may be used by default or we can't
145 increase the heap as needed) and we'd need a race-free mechanism to return memory
146 back to the system (mprotect(PROT_NONE) will still keep the memory allocated if it
147 was written to, munmap is needed, but the following mmap may not find the same segment
150 *) memzero the major fragments after restarting the world and optionally a smaller
153 *) investigate having fragment zeroing threads
155 *) separate locks for finalization and other minor stuff to reduce
158 *) try a different copying order to improve memory locality
160 *) a thread abort after a store but before the write barrier will
161 prevent the write barrier from executing
163 *) specialized dynamically generated markers/copiers
165 *) Dynamically adjust TLAB size to the number of threads. If we have
166 too many threads that do allocation, we might need smaller TLABs,
167 and we might get better performance with larger TLABs if we only
168 have a handful of threads. We could sum up the space left in all
169 assigned TLABs and if that's more than some percentage of the
170 nursery size, reduce the TLAB size.
172 *) Explore placing unreachable objects on unused nursery memory.
173 Instead of memset'ing a region to zero, place an int[] covering it.
174 A good place to start is add_nursery_frag. The tricky thing here is
175 placing those objects atomically outside of a collection.
185 #include <semaphore.h>
194 #define _XOPEN_SOURCE
196 #include "metadata/metadata-internals.h"
197 #include "metadata/class-internals.h"
198 #include "metadata/gc-internal.h"
199 #include "metadata/object-internals.h"
200 #include "metadata/threads.h"
201 #include "metadata/sgen-gc.h"
202 #include "metadata/sgen-archdep.h"
203 #include "metadata/mono-gc.h"
204 #include "metadata/method-builder.h"
205 #include "metadata/profiler-private.h"
206 #include "metadata/monitor.h"
207 #include "metadata/threadpool-internals.h"
208 #include "metadata/mempool-internals.h"
209 #include "metadata/marshal.h"
210 #include "utils/mono-mmap.h"
211 #include "utils/mono-time.h"
212 #include "utils/mono-semaphore.h"
213 #include "utils/mono-counters.h"
214 #include "utils/mono-proclib.h"
216 #include <mono/utils/memcheck.h>
218 #if defined(__MACH__)
219 #include "utils/mach-support.h"
222 #define OPDEF(a,b,c,d,e,f,g,h,i,j) \
226 #include "mono/cil/opcode.def"
232 #undef pthread_create
234 #undef pthread_detach
237 * ######################################################################
238 * ######## Types and constants used by the GC.
239 * ######################################################################
242 static int gc_initialized
= 0;
243 /* If set, do a minor collection before every allocation */
244 static gboolean collect_before_allocs
= FALSE
;
245 /* If set, do a heap consistency check before each minor collection */
246 static gboolean consistency_check_at_minor_collection
= FALSE
;
247 /* If set, check that there are no references to the domain left at domain unload */
248 static gboolean xdomain_checks
= FALSE
;
249 /* If not null, dump the heap after each collection into this file */
250 static FILE *heap_dump_file
= NULL
;
251 /* If set, mark stacks conservatively, even if precise marking is possible */
252 static gboolean conservative_stack_mark
= TRUE
;
253 /* If set, do a plausibility check on the scan_starts before and after
255 static gboolean do_scan_starts_check
= FALSE
;
257 #ifdef HEAVY_STATISTICS
258 static long long stat_objects_alloced
= 0;
259 static long long stat_bytes_alloced
= 0;
260 long long stat_objects_alloced_degraded
= 0;
261 long long stat_bytes_alloced_degraded
= 0;
262 static long long stat_bytes_alloced_los
= 0;
264 long long stat_copy_object_called_nursery
= 0;
265 long long stat_objects_copied_nursery
= 0;
266 long long stat_copy_object_called_major
= 0;
267 long long stat_objects_copied_major
= 0;
269 long long stat_scan_object_called_nursery
= 0;
270 long long stat_scan_object_called_major
= 0;
272 long long stat_nursery_copy_object_failed_from_space
= 0;
273 long long stat_nursery_copy_object_failed_forwarded
= 0;
274 long long stat_nursery_copy_object_failed_pinned
= 0;
276 static long long stat_store_remsets
= 0;
277 static long long stat_store_remsets_unique
= 0;
278 static long long stat_saved_remsets_1
= 0;
279 static long long stat_saved_remsets_2
= 0;
280 static long long stat_global_remsets_added
= 0;
281 static long long stat_global_remsets_readded
= 0;
282 static long long stat_global_remsets_processed
= 0;
283 static long long stat_global_remsets_discarded
= 0;
285 static long long stat_wasted_fragments_used
= 0;
286 static long long stat_wasted_fragments_bytes
= 0;
288 static int stat_wbarrier_set_field
= 0;
289 static int stat_wbarrier_set_arrayref
= 0;
290 static int stat_wbarrier_arrayref_copy
= 0;
291 static int stat_wbarrier_generic_store
= 0;
292 static int stat_wbarrier_generic_store_remset
= 0;
293 static int stat_wbarrier_set_root
= 0;
294 static int stat_wbarrier_value_copy
= 0;
295 static int stat_wbarrier_object_copy
= 0;
298 static long long time_minor_pre_collection_fragment_clear
= 0;
299 static long long time_minor_pinning
= 0;
300 static long long time_minor_scan_remsets
= 0;
301 static long long time_minor_scan_pinned
= 0;
302 static long long time_minor_scan_registered_roots
= 0;
303 static long long time_minor_scan_thread_data
= 0;
304 static long long time_minor_finish_gray_stack
= 0;
305 static long long time_minor_fragment_creation
= 0;
307 static long long time_major_pre_collection_fragment_clear
= 0;
308 static long long time_major_pinning
= 0;
309 static long long time_major_scan_pinned
= 0;
310 static long long time_major_scan_registered_roots
= 0;
311 static long long time_major_scan_thread_data
= 0;
312 static long long time_major_scan_alloc_pinned
= 0;
313 static long long time_major_scan_finalized
= 0;
314 static long long time_major_scan_big_objects
= 0;
315 static long long time_major_finish_gray_stack
= 0;
316 static long long time_major_free_bigobjs
= 0;
317 static long long time_major_los_sweep
= 0;
318 static long long time_major_sweep
= 0;
319 static long long time_major_fragment_creation
= 0;
321 #define DEBUG(level,a) do {if (G_UNLIKELY ((level) <= SGEN_MAX_DEBUG_LEVEL && (level) <= gc_debug_level)) a;} while (0)
323 static int gc_debug_level
= 0;
324 static FILE* gc_debug_file
;
328 mono_gc_flush_info (void)
330 fflush (gc_debug_file);
335 * Define this to allow the user to change the nursery size by
336 * specifying its value in the MONO_GC_PARAMS environment
337 * variable. See mono_gc_base_init for details.
339 #define USER_CONFIG 1
/* Timing helpers: timestamps are 100ns ticks (mono_100ns_ticks), so
 * dividing a tick delta by 10 yields microseconds. */
#define TV_DECLARE(name) gint64 name
#define TV_GETTIME(tv) tv = mono_100ns_ticks ()
/* Arguments are parenthesized so compound expressions expand correctly
 * (CERT PRE01-C); the old body `end-start` mis-binds for args like `a - b`. */
#define TV_ELAPSED(start,end) ((int)(((end) - (start)) / 10))
#define TV_ELAPSED_MS(start,end) ((TV_ELAPSED((start),(end)) + 500) / 1000)

/* Round `val` up to a multiple of `align` (align must be a power of two).
 * `val` is parenthesized before the cast so `(guint64)val` cannot bind to
 * only part of a compound expression. */
#define ALIGN_TO(val,align) ((((guint64)(val)) + ((align) - 1)) & ~((align) - 1))
348 /* The method used to clear the nursery */
349 /* Clearing at nursery collections is the safest, but has bad interactions with caches.
350 * Clearing at TLAB creation is much faster, but more complex and it might expose hard
355 CLEAR_AT_TLAB_CREATION
356 } NurseryClearPolicy
;
358 static NurseryClearPolicy nursery_clear_policy
= CLEAR_AT_TLAB_CREATION
;
361 * The young generation is divided into fragments. This is because
362 * we can hand one fragment to a thread for lock-less fast alloc and
363 * because the young generation ends up fragmented anyway by pinned objects.
364 * Once a collection is done, a list of fragments is created. When doing
365 * thread local alloc we use smallish nurseries so we allow new threads to
366 * allocate memory from gen0 without triggering a collection. Threads that
367 * are found to allocate lots of memory are given bigger fragments. This
368 * should make the finalizer thread use little nursery memory after a while.
369 * We should start assigning threads very small fragments: if there are many
370 * threads the nursery will be full of reserved space that the threads may not
371 * use at all, slowing down allocation speed.
372 * Thread local allocation is done from areas of memory Hotspot calls Thread Local
373 * Allocation Buffers (TLABs).
375 typedef struct _Fragment Fragment
;
379 char *fragment_start
;
380 char *fragment_limit
; /* the current soft limit for allocation */
384 /* the runtime can register areas of memory as roots: we keep two lists of roots,
385 * a pinned root set for conservatively scanned roots and a normal one for
386 * precisely scanned roots (currently implemented as a single list).
388 typedef struct _RootRecord RootRecord
;
397 * We're never actually using the first element. It's always set to
398 * NULL to simplify the elimination of consecutive duplicate
401 #define STORE_REMSET_BUFFER_SIZE 1024
403 typedef struct _GenericStoreRememberedSet GenericStoreRememberedSet
;
404 struct _GenericStoreRememberedSet
{
405 GenericStoreRememberedSet
*next
;
406 /* We need one entry less because the first entry of store
407 remset buffers is always a dummy and we don't copy it. */
408 gpointer data
[STORE_REMSET_BUFFER_SIZE
- 1];
411 /* we have 4 possible values in the low 2 bits */
413 REMSET_LOCATION
, /* just a pointer to the exact location */
414 REMSET_RANGE
, /* range of pointer fields */
415 REMSET_OBJECT
, /* mark all the object for scanning */
416 REMSET_VTYPE
, /* a valuetype array described by a gc descriptor and a count */
417 REMSET_TYPE_MASK
= 0x3
420 #ifdef HAVE_KW_THREAD
421 static __thread RememberedSet
*remembered_set MONO_TLS_FAST
;
423 static pthread_key_t remembered_set_key
;
424 static RememberedSet
*global_remset
;
425 static RememberedSet
*freed_thread_remsets
;
426 static GenericStoreRememberedSet
*generic_store_remsets
= NULL
;
428 /*A two slots cache for recently inserted remsets */
429 static gpointer global_remset_cache
[2];
431 /* FIXME: later choose a size that takes into account the RememberedSet struct
432 * and doesn't waste any alloc padding space.
434 #define DEFAULT_REMSET_SIZE 1024
435 static RememberedSet
* alloc_remset (int size
, gpointer id
);
437 #define object_is_forwarded SGEN_OBJECT_IS_FORWARDED
438 #define object_is_pinned SGEN_OBJECT_IS_PINNED
439 #define pin_object SGEN_PIN_OBJECT
440 #define unpin_object SGEN_UNPIN_OBJECT
442 #define ptr_in_nursery(p) (SGEN_PTR_IN_NURSERY ((p), DEFAULT_NURSERY_BITS, nursery_start, nursery_real_end))
444 #define LOAD_VTABLE SGEN_LOAD_VTABLE
447 safe_name (void* obj
)
449 MonoVTable
*vt
= (MonoVTable
*)LOAD_VTABLE (obj
);
450 return vt
->klass
->name
;
453 #define safe_object_get_size mono_sgen_safe_object_get_size
456 * ######################################################################
457 * ######## Global data.
458 * ######################################################################
460 static LOCK_DECLARE (gc_mutex
);
461 static int gc_disabled
= 0;
462 static int num_minor_gcs
= 0;
463 static int num_major_gcs
= 0;
467 /* good sizes are 512KB-1MB: larger ones increase a lot memzeroing time */
468 #define DEFAULT_NURSERY_SIZE (default_nursery_size)
469 static int default_nursery_size
= (1 << 22);
470 #ifdef SGEN_ALIGN_NURSERY
471 /* The number of trailing 0 bits in DEFAULT_NURSERY_SIZE */
472 #define DEFAULT_NURSERY_BITS (default_nursery_bits)
473 static int default_nursery_bits
= 22;
478 #define DEFAULT_NURSERY_SIZE (4*1024*1024)
479 #ifdef SGEN_ALIGN_NURSERY
480 #define DEFAULT_NURSERY_BITS 22
485 #ifndef SGEN_ALIGN_NURSERY
486 #define DEFAULT_NURSERY_BITS -1
489 #define MIN_MINOR_COLLECTION_ALLOWANCE (DEFAULT_NURSERY_SIZE * 4)
491 #define SCAN_START_SIZE SGEN_SCAN_START_SIZE
493 /* the minimum size of a fragment that we consider useful for allocation */
494 #define FRAGMENT_MIN_SIZE (512)
496 static mword pagesize
= 4096;
497 static mword nursery_size
;
498 static int degraded_mode
= 0;
500 static mword total_alloc
= 0;
501 /* use this to tune when to do a major/minor collection */
502 static mword memory_pressure
= 0;
503 static mword minor_collection_allowance
;
504 static int minor_collection_sections_alloced
= 0;
506 static GCMemSection
*nursery_section
= NULL
;
507 static mword lowest_heap_address
= ~(mword
)0;
508 static mword highest_heap_address
= 0;
510 static LOCK_DECLARE (interruption_mutex
);
511 static LOCK_DECLARE (global_remset_mutex
);
513 #define LOCK_GLOBAL_REMSET pthread_mutex_lock (&global_remset_mutex)
514 #define UNLOCK_GLOBAL_REMSET pthread_mutex_unlock (&global_remset_mutex)
516 typedef struct _FinalizeEntry FinalizeEntry
;
517 struct _FinalizeEntry
{
522 typedef struct _FinalizeEntryHashTable FinalizeEntryHashTable
;
523 struct _FinalizeEntryHashTable
{
524 FinalizeEntry
**table
;
529 typedef struct _DisappearingLink DisappearingLink
;
530 struct _DisappearingLink
{
531 DisappearingLink
*next
;
535 typedef struct _DisappearingLinkHashTable DisappearingLinkHashTable
;
536 struct _DisappearingLinkHashTable
{
537 DisappearingLink
**table
;
542 typedef struct _EphemeronLinkNode EphemeronLinkNode
;
544 struct _EphemeronLinkNode
{
545 EphemeronLinkNode
*next
;
560 int current_collection_generation
= -1;
563 * The link pointer is hidden by negating each bit. We use the lowest
564 * bit of the link (before negation) to store whether it needs
565 * resurrection tracking.
567 #define HIDE_POINTER(p,t) ((gpointer)(~((gulong)(p)|((t)?1:0))))
568 #define REVEAL_POINTER(p) ((gpointer)((~(gulong)(p))&~3L))
570 #define DISLINK_OBJECT(d) (REVEAL_POINTER (*(d)->link))
571 #define DISLINK_TRACK(d) ((~(gulong)(*(d)->link)) & 1)
574 * The finalizable hash has the object as the key, the
575 * disappearing_link hash, has the link address as key.
577 static FinalizeEntryHashTable minor_finalizable_hash
;
578 static FinalizeEntryHashTable major_finalizable_hash
;
579 /* objects that are ready to be finalized */
580 static FinalizeEntry
*fin_ready_list
= NULL
;
581 static FinalizeEntry
*critical_fin_list
= NULL
;
583 static DisappearingLinkHashTable minor_disappearing_link_hash
;
584 static DisappearingLinkHashTable major_disappearing_link_hash
;
586 static EphemeronLinkNode
*ephemeron_list
;
588 static int num_ready_finalizers
= 0;
589 static int no_finalize
= 0;
592 ROOT_TYPE_NORMAL
= 0, /* "normal" roots */
593 ROOT_TYPE_PINNED
= 1, /* roots without a GC descriptor */
594 ROOT_TYPE_WBARRIER
= 2, /* roots with a write barrier */
598 /* registered roots: the key to the hash is the root start address */
600 * Different kinds of roots are kept separate to speed up pin_from_roots () for example.
602 static RootRecord
**roots_hash
[ROOT_TYPE_NUM
] = { NULL
, NULL
};
603 static int roots_hash_size
[ROOT_TYPE_NUM
] = { 0, 0, 0 };
604 static mword roots_size
= 0; /* amount of memory in the root set */
605 static int num_roots_entries
[ROOT_TYPE_NUM
] = { 0, 0, 0 };
608 * The current allocation cursors
609 * We allocate objects in the nursery.
610 * The nursery is the area between nursery_start and nursery_real_end.
611 * Allocation is done from a Thread Local Allocation Buffer (TLAB). TLABs are allocated
612 * from nursery fragments.
613 * tlab_next is the pointer to the space inside the TLAB where the next object will
615 * tlab_temp_end is the pointer to the end of the temporary space reserved for
616 * the allocation: it allows us to set the scan starts at reasonable intervals.
617 * tlab_real_end points to the end of the TLAB.
618 * nursery_frag_real_end points to the end of the currently used nursery fragment.
619 * nursery_first_pinned_start points to the start of the first pinned object in the nursery
620 * nursery_last_pinned_end points to the end of the last pinned object in the nursery
621 * At the next allocation, the area of the nursery where objects can be present is
622 * between MIN(nursery_first_pinned_start, first_fragment_start) and
623 * MAX(nursery_last_pinned_end, nursery_frag_real_end)
625 static char *nursery_start
= NULL
;
627 #ifdef HAVE_KW_THREAD
628 #define TLAB_ACCESS_INIT
629 #define TLAB_START tlab_start
630 #define TLAB_NEXT tlab_next
631 #define TLAB_TEMP_END tlab_temp_end
632 #define TLAB_REAL_END tlab_real_end
633 #define REMEMBERED_SET remembered_set
634 #define STORE_REMSET_BUFFER store_remset_buffer
635 #define STORE_REMSET_BUFFER_INDEX store_remset_buffer_index
636 #define IN_CRITICAL_REGION thread_info->in_critical_region
638 static pthread_key_t thread_info_key
;
639 #define TLAB_ACCESS_INIT SgenThreadInfo *__thread_info__ = pthread_getspecific (thread_info_key)
640 #define TLAB_START (__thread_info__->tlab_start)
641 #define TLAB_NEXT (__thread_info__->tlab_next)
642 #define TLAB_TEMP_END (__thread_info__->tlab_temp_end)
643 #define TLAB_REAL_END (__thread_info__->tlab_real_end)
644 #define REMEMBERED_SET (__thread_info__->remset)
645 #define STORE_REMSET_BUFFER (__thread_info__->store_remset_buffer)
646 #define STORE_REMSET_BUFFER_INDEX (__thread_info__->store_remset_buffer_index)
647 #define IN_CRITICAL_REGION (__thread_info__->in_critical_region)
650 /* we use the memory barrier only to prevent compiler reordering (a memory constraint may be enough) */
651 #define ENTER_CRITICAL_REGION do {IN_CRITICAL_REGION = 1;mono_memory_barrier ();} while (0)
652 #define EXIT_CRITICAL_REGION do {IN_CRITICAL_REGION = 0;mono_memory_barrier ();} while (0)
655 * FIXME: What is faster, a TLS variable pointing to a structure, or separate TLS
656 * variables for next+temp_end ?
658 #ifdef HAVE_KW_THREAD
659 static __thread SgenThreadInfo
*thread_info
;
660 static __thread
char *tlab_start
;
661 static __thread
char *tlab_next
;
662 static __thread
char *tlab_temp_end
;
663 static __thread
char *tlab_real_end
;
664 static __thread gpointer
*store_remset_buffer
;
665 static __thread
long store_remset_buffer_index
;
666 /* Used by the managed allocator/wbarrier */
667 static __thread
char **tlab_next_addr
;
668 static __thread
char *stack_end
;
669 static __thread
long *store_remset_buffer_index_addr
;
671 static char *nursery_next
= NULL
;
672 static char *nursery_frag_real_end
= NULL
;
673 static char *nursery_real_end
= NULL
;
674 static char *nursery_last_pinned_end
= NULL
;
676 /* The size of a TLAB */
677 /* The bigger the value, the less often we have to go to the slow path to allocate a new
678 * one, but the more space is wasted by threads not allocating much memory.
680 * FIXME: Make this self-tuning for each thread.
682 static guint32 tlab_size
= (1024 * 4);
684 /*How much space is tolerable to be wasted from the current fragment when allocating a new TLAB*/
685 #define MAX_NURSERY_TLAB_WASTE 512
687 /* fragments that are free and ready to be used for allocation */
688 static Fragment
*nursery_fragments
= NULL
;
689 /* freeelist of fragment structures */
690 static Fragment
*fragment_freelist
= NULL
;
692 #define MAX_SMALL_OBJ_SIZE SGEN_MAX_SMALL_OBJ_SIZE
694 /* Functions supplied by the runtime to be called by the GC */
695 static MonoGCCallbacks gc_callbacks
;
697 #define ALLOC_ALIGN SGEN_ALLOC_ALIGN
698 #define ALLOC_ALIGN_BITS SGEN_ALLOC_ALIGN_BITS
700 #define ALIGN_UP SGEN_ALIGN_UP
702 #define MOVED_OBJECTS_NUM 64
703 static void *moved_objects
[MOVED_OBJECTS_NUM
];
704 static int moved_objects_idx
= 0;
707 * ######################################################################
708 * ######## Macros and function declarations.
709 * ######################################################################
712 #define ADDR_IN_HEAP_BOUNDARIES(addr) ((p) >= lowest_heap_address && (p) < highest_heap_address)
715 align_pointer (void *ptr
)
717 mword p
= (mword
)ptr
;
718 p
+= sizeof (gpointer
) - 1;
719 p
&= ~ (sizeof (gpointer
) - 1);
723 typedef SgenGrayQueue GrayQueue
;
725 typedef void (*CopyOrMarkObjectFunc
) (void**, GrayQueue
*);
726 typedef char* (*ScanObjectFunc
) (char*, GrayQueue
*);
728 /* forward declarations */
729 static int stop_world (void);
730 static int restart_world (void);
731 static void scan_thread_data (void *start_nursery
, void *end_nursery
, gboolean precise
);
732 static void scan_from_remsets (void *start_nursery
, void *end_nursery
, GrayQueue
*queue
);
733 static void scan_from_registered_roots (CopyOrMarkObjectFunc copy_func
, char *addr_start
, char *addr_end
, int root_type
, GrayQueue
*queue
);
734 static void scan_finalizer_entries (CopyOrMarkObjectFunc copy_func
, FinalizeEntry
*list
, GrayQueue
*queue
);
735 static void find_pinning_ref_from_thread (char *obj
, size_t size
);
736 static void update_current_thread_stack (void *start
);
737 static void finalize_in_range (CopyOrMarkObjectFunc copy_func
, char *start
, char *end
, int generation
, GrayQueue
*queue
);
738 static void add_or_remove_disappearing_link (MonoObject
*obj
, void **link
, gboolean track
, int generation
);
739 static void null_link_in_range (CopyOrMarkObjectFunc copy_func
, char *start
, char *end
, int generation
, GrayQueue
*queue
);
740 static void null_links_for_domain (MonoDomain
*domain
, int generation
);
741 static gboolean
search_fragment_for_size (size_t size
);
742 static int search_fragment_for_size_range (size_t desired_size
, size_t minimum_size
);
743 static void clear_nursery_fragments (char *next
);
744 static void pin_from_roots (void *start_nursery
, void *end_nursery
);
745 static int pin_objects_from_addresses (GCMemSection
*section
, void **start
, void **end
, void *start_nursery
, void *end_nursery
, GrayQueue
*queue
);
746 static void optimize_pin_queue (int start_slot
);
747 static void clear_remsets (void);
748 static void clear_tlabs (void);
749 static void sort_addresses (void **array
, int size
);
750 static void drain_gray_stack (GrayQueue
*queue
);
751 static void finish_gray_stack (char *start_addr
, char *end_addr
, int generation
, GrayQueue
*queue
);
752 static gboolean
need_major_collection (void);
753 static void major_collection (const char *reason
);
755 static void mono_gc_register_disappearing_link (MonoObject
*obj
, void **link
, gboolean track
);
757 void describe_ptr (char *ptr
);
758 void check_object (char *start
);
760 static void check_consistency (void);
761 static void check_major_refs (void);
762 static void check_scan_starts (void);
763 static void check_for_xdomain_refs (void);
764 static void dump_heap (const char *type
, int num
, const char *reason
);
766 void mono_gc_scan_for_specific_ref (MonoObject
*key
);
768 static void init_stats (void);
770 static int mark_ephemerons_in_range (CopyOrMarkObjectFunc copy_func
, char *start
, char *end
, GrayQueue
*queue
);
771 static void clear_unreachable_ephemerons (CopyOrMarkObjectFunc copy_func
, char *start
, char *end
, GrayQueue
*queue
);
772 static void null_ephemerons_for_domain (MonoDomain
*domain
);
774 SgenMajorCollector major
;
776 #include "sgen-protocol.c"
777 #include "sgen-pinning.c"
778 #include "sgen-pinning-stats.c"
779 #include "sgen-gray.c"
780 #include "sgen-workers.c"
781 #include "sgen-los.c"
783 /* Root bitmap descriptors are simpler: the lower three bits describe the type
784 * and we either have 30/62 bitmap bits or nibble-based run-length,
785 * or a complex descriptor, or a user defined marker function.
788 ROOT_DESC_CONSERVATIVE
, /* 0, so matches NULL value */
793 ROOT_DESC_TYPE_MASK
= 0x7,
794 ROOT_DESC_TYPE_SHIFT
= 3,
797 #define MAKE_ROOT_DESC(type,val) ((type) | ((val) << ROOT_DESC_TYPE_SHIFT))
799 #define MAX_USER_DESCRIPTORS 16
801 static gsize
* complex_descriptors
= NULL
;
802 static int complex_descriptors_size
= 0;
803 static int complex_descriptors_next
= 0;
804 static MonoGCRootMarkFunc user_descriptors
[MAX_USER_DESCRIPTORS
];
805 static int user_descriptors_next
= 0;
808 alloc_complex_descriptor (gsize
*bitmap
, int numbits
)
812 numbits
= ALIGN_TO (numbits
, GC_BITS_PER_WORD
);
813 nwords
= numbits
/ GC_BITS_PER_WORD
+ 1;
816 res
= complex_descriptors_next
;
817 /* linear search, so we don't have duplicates with domain load/unload
818 * this should not be performance critical or we'd have bigger issues
819 * (the number and size of complex descriptors should be small).
821 for (i
= 0; i
< complex_descriptors_next
; ) {
822 if (complex_descriptors
[i
] == nwords
) {
824 for (j
= 0; j
< nwords
- 1; ++j
) {
825 if (complex_descriptors
[i
+ 1 + j
] != bitmap
[j
]) {
835 i
+= complex_descriptors
[i
];
837 if (complex_descriptors_next
+ nwords
> complex_descriptors_size
) {
838 int new_size
= complex_descriptors_size
* 2 + nwords
;
839 complex_descriptors
= g_realloc (complex_descriptors
, new_size
* sizeof (gsize
));
840 complex_descriptors_size
= new_size
;
842 DEBUG (6, fprintf (gc_debug_file
, "Complex descriptor %d, size: %d (total desc memory: %d)\n", res
, nwords
, complex_descriptors_size
));
843 complex_descriptors_next
+= nwords
;
844 complex_descriptors
[res
] = nwords
;
845 for (i
= 0; i
< nwords
- 1; ++i
) {
846 complex_descriptors
[res
+ 1 + i
] = bitmap
[i
];
847 DEBUG (6, fprintf (gc_debug_file
, "\tvalue: %p\n", (void*)complex_descriptors
[res
+ 1 + i
]));
854 mono_sgen_get_complex_descriptor (GCVTable
*vt
)
856 return complex_descriptors
+ (vt
->desc
>> LOW_TYPE_BITS
);
860 * Descriptor builders.
863 mono_gc_make_descr_for_string (gsize
*bitmap
, int numbits
)
865 return (void*) DESC_TYPE_RUN_LENGTH
;
869 mono_gc_make_descr_for_object (gsize
*bitmap
, int numbits
, size_t obj_size
)
871 int first_set
= -1, num_set
= 0, last_set
= -1, i
;
873 size_t stored_size
= obj_size
;
874 for (i
= 0; i
< numbits
; ++i
) {
875 if (bitmap
[i
/ GC_BITS_PER_WORD
] & ((gsize
)1 << (i
% GC_BITS_PER_WORD
))) {
883 * We don't encode the size of types that don't contain
884 * references because they might not be aligned, i.e. the
885 * bottom two bits might be set, which would clash with the
886 * bits we need to encode the descriptor type. Since we don't
887 * use the encoded size to skip objects, other than for
888 * processing remsets, in which case only the positions of
889 * references are relevant, this is not a problem.
892 return (void*)DESC_TYPE_RUN_LENGTH
;
893 g_assert (!(stored_size
& 0x3));
894 if (stored_size
<= MAX_SMALL_OBJ_SIZE
) {
895 /* check run-length encoding first: one byte offset, one byte number of pointers
896 * on 64 bit archs, we can have 3 runs, just one on 32.
897 * It may be better to use nibbles.
900 desc
= DESC_TYPE_RUN_LENGTH
| (stored_size
<< 1);
901 DEBUG (6, fprintf (gc_debug_file
, "Ptrfree descriptor %p, size: %zd\n", (void*)desc
, stored_size
));
903 } else if (first_set
< 256 && num_set
< 256 && (first_set
+ num_set
== last_set
+ 1)) {
904 desc
= DESC_TYPE_RUN_LENGTH
| (stored_size
<< 1) | (first_set
<< 16) | (num_set
<< 24);
905 DEBUG (6, fprintf (gc_debug_file
, "Runlen descriptor %p, size: %zd, first set: %d, num set: %d\n", (void*)desc
, stored_size
, first_set
, num_set
));
908 /* we know the 2-word header is ptr-free */
909 if (last_set
< SMALL_BITMAP_SIZE
+ OBJECT_HEADER_WORDS
) {
910 desc
= DESC_TYPE_SMALL_BITMAP
| (stored_size
<< 1) | ((*bitmap
>> OBJECT_HEADER_WORDS
) << SMALL_BITMAP_SHIFT
);
911 DEBUG (6, fprintf (gc_debug_file
, "Smallbitmap descriptor %p, size: %zd, last set: %d\n", (void*)desc
, stored_size
, last_set
));
915 /* we know the 2-word header is ptr-free */
916 if (last_set
< LARGE_BITMAP_SIZE
+ OBJECT_HEADER_WORDS
) {
917 desc
= DESC_TYPE_LARGE_BITMAP
| ((*bitmap
>> OBJECT_HEADER_WORDS
) << LOW_TYPE_BITS
);
918 DEBUG (6, fprintf (gc_debug_file
, "Largebitmap descriptor %p, size: %zd, last set: %d\n", (void*)desc
, stored_size
, last_set
));
921 /* it's a complex object ... */
922 desc
= DESC_TYPE_COMPLEX
| (alloc_complex_descriptor (bitmap
, last_set
+ 1) << LOW_TYPE_BITS
);
926 /* If the array holds references, numbits == 1 and the first bit is set in elem_bitmap */
928 mono_gc_make_descr_for_array (int vector
, gsize
*elem_bitmap
, int numbits
, size_t elem_size
)
930 int first_set
= -1, num_set
= 0, last_set
= -1, i
;
931 mword desc
= vector
? DESC_TYPE_VECTOR
: DESC_TYPE_ARRAY
;
932 for (i
= 0; i
< numbits
; ++i
) {
933 if (elem_bitmap
[i
/ GC_BITS_PER_WORD
] & ((gsize
)1 << (i
% GC_BITS_PER_WORD
))) {
940 /* See comment at the definition of DESC_TYPE_RUN_LENGTH. */
942 return (void*)DESC_TYPE_RUN_LENGTH
;
943 if (elem_size
<= MAX_ELEMENT_SIZE
) {
944 desc
|= elem_size
<< VECTOR_ELSIZE_SHIFT
;
946 return (void*)(desc
| VECTOR_SUBTYPE_PTRFREE
);
948 /* Note: we also handle structs with just ref fields */
949 if (num_set
* sizeof (gpointer
) == elem_size
) {
950 return (void*)(desc
| VECTOR_SUBTYPE_REFS
| ((gssize
)(-1) << 16));
952 /* FIXME: try run-len first */
953 /* Note: we can't skip the object header here, because it's not present */
954 if (last_set
<= SMALL_BITMAP_SIZE
) {
955 return (void*)(desc
| VECTOR_SUBTYPE_BITMAP
| (*elem_bitmap
<< 16));
958 /* it's an array of complex structs ... */
959 desc
= DESC_TYPE_COMPLEX_ARR
;
960 desc
|= alloc_complex_descriptor (elem_bitmap
, last_set
+ 1) << LOW_TYPE_BITS
;
964 /* Return the bitmap encoded by a descriptor */
966 mono_gc_get_bitmap_for_descr (void *descr
, int *numbits
)
968 mword d
= (mword
)descr
;
972 case DESC_TYPE_RUN_LENGTH
: {
973 int first_set
= (d
>> 16) & 0xff;
974 int num_set
= (d
>> 24) & 0xff;
977 bitmap
= g_new0 (gsize
, (first_set
+ num_set
+ 7) / 8);
979 for (i
= first_set
; i
< first_set
+ num_set
; ++i
)
980 bitmap
[i
/ GC_BITS_PER_WORD
] |= ((gsize
)1 << (i
% GC_BITS_PER_WORD
));
982 *numbits
= first_set
+ num_set
;
986 case DESC_TYPE_SMALL_BITMAP
:
987 bitmap
= g_new0 (gsize
, 1);
989 bitmap
[0] = (d
>> SMALL_BITMAP_SHIFT
) << OBJECT_HEADER_WORDS
;
991 *numbits
= GC_BITS_PER_WORD
;
995 g_assert_not_reached ();
1000 is_xdomain_ref_allowed (gpointer
*ptr
, char *obj
, MonoDomain
*domain
)
1002 MonoObject
*o
= (MonoObject
*)(obj
);
1003 MonoObject
*ref
= (MonoObject
*)*(ptr
);
1004 int offset
= (char*)(ptr
) - (char*)o
;
1006 if (o
->vtable
->klass
== mono_defaults
.thread_class
&& offset
== G_STRUCT_OFFSET (MonoThread
, internal_thread
))
1008 if (o
->vtable
->klass
== mono_defaults
.internal_thread_class
&& offset
== G_STRUCT_OFFSET (MonoInternalThread
, current_appcontext
))
1010 if (mono_class_has_parent (o
->vtable
->klass
, mono_defaults
.real_proxy_class
) &&
1011 offset
== G_STRUCT_OFFSET (MonoRealProxy
, unwrapped_server
))
1013 /* Thread.cached_culture_info */
1014 if (!strcmp (ref
->vtable
->klass
->name_space
, "System.Globalization") &&
1015 !strcmp (ref
->vtable
->klass
->name
, "CultureInfo") &&
1016 !strcmp(o
->vtable
->klass
->name_space
, "System") &&
1017 !strcmp(o
->vtable
->klass
->name
, "Object[]"))
1020 * at System.IO.MemoryStream.InternalConstructor (byte[],int,int,bool,bool) [0x0004d] in /home/schani/Work/novell/trunk/mcs/class/corlib/System.IO/MemoryStream.cs:121
1021 * at System.IO.MemoryStream..ctor (byte[]) [0x00017] in /home/schani/Work/novell/trunk/mcs/class/corlib/System.IO/MemoryStream.cs:81
1022 * at (wrapper remoting-invoke-with-check) System.IO.MemoryStream..ctor (byte[]) <IL 0x00020, 0xffffffff>
1023 * at System.Runtime.Remoting.Messaging.CADMethodCallMessage.GetArguments () [0x0000d] in /home/schani/Work/novell/trunk/mcs/class/corlib/System.Runtime.Remoting.Messaging/CADMessages.cs:327
1024 * at System.Runtime.Remoting.Messaging.MethodCall..ctor (System.Runtime.Remoting.Messaging.CADMethodCallMessage) [0x00017] in /home/schani/Work/novell/trunk/mcs/class/corlib/System.Runtime.Remoting.Messaging/MethodCall.cs:87
1025 * at System.AppDomain.ProcessMessageInDomain (byte[],System.Runtime.Remoting.Messaging.CADMethodCallMessage,byte[]&,System.Runtime.Remoting.Messaging.CADMethodReturnMessage&) [0x00018] in /home/schani/Work/novell/trunk/mcs/class/corlib/System/AppDomain.cs:1213
1026 * at (wrapper remoting-invoke-with-check) System.AppDomain.ProcessMessageInDomain (byte[],System.Runtime.Remoting.Messaging.CADMethodCallMessage,byte[]&,System.Runtime.Remoting.Messaging.CADMethodReturnMessage&) <IL 0x0003d, 0xffffffff>
1027 * at System.Runtime.Remoting.Channels.CrossAppDomainSink.ProcessMessageInDomain (byte[],System.Runtime.Remoting.Messaging.CADMethodCallMessage) [0x00008] in /home/schani/Work/novell/trunk/mcs/class/corlib/System.Runtime.Remoting.Channels/CrossAppDomainChannel.cs:198
1028 * at (wrapper runtime-invoke) object.runtime_invoke_CrossAppDomainSink/ProcessMessageRes_object_object (object,intptr,intptr,intptr) <IL 0x0004c, 0xffffffff>
1030 if (!strcmp (ref
->vtable
->klass
->name_space
, "System") &&
1031 !strcmp (ref
->vtable
->klass
->name
, "Byte[]") &&
1032 !strcmp (o
->vtable
->klass
->name_space
, "System.IO") &&
1033 !strcmp (o
->vtable
->klass
->name
, "MemoryStream"))
1035 /* append_job() in threadpool.c */
1036 if (!strcmp (ref
->vtable
->klass
->name_space
, "System.Runtime.Remoting.Messaging") &&
1037 !strcmp (ref
->vtable
->klass
->name
, "AsyncResult") &&
1038 !strcmp (o
->vtable
->klass
->name_space
, "System") &&
1039 !strcmp (o
->vtable
->klass
->name
, "Object[]") &&
1040 mono_thread_pool_is_queue_array ((MonoArray
*) o
))
1046 check_reference_for_xdomain (gpointer
*ptr
, char *obj
, MonoDomain
*domain
)
1048 MonoObject
*o
= (MonoObject
*)(obj
);
1049 MonoObject
*ref
= (MonoObject
*)*(ptr
);
1050 int offset
= (char*)(ptr
) - (char*)o
;
1052 MonoClassField
*field
;
1055 if (!ref
|| ref
->vtable
->domain
== domain
)
1057 if (is_xdomain_ref_allowed (ptr
, obj
, domain
))
1061 for (class = o
->vtable
->klass
; class; class = class->parent
) {
1064 for (i
= 0; i
< class->field
.count
; ++i
) {
1065 if (class->fields
[i
].offset
== offset
) {
1066 field
= &class->fields
[i
];
1074 if (ref
->vtable
->klass
== mono_defaults
.string_class
)
1075 str
= mono_string_to_utf8 ((MonoString
*)ref
);
1078 g_print ("xdomain reference in %p (%s.%s) at offset %d (%s) to %p (%s.%s) (%s) - pointed to by:\n",
1079 o
, o
->vtable
->klass
->name_space
, o
->vtable
->klass
->name
,
1080 offset
, field
? field
->name
: "",
1081 ref
, ref
->vtable
->klass
->name_space
, ref
->vtable
->klass
->name
, str
? str
: "");
1082 mono_gc_scan_for_specific_ref (o
);
1088 #define HANDLE_PTR(ptr,obj) check_reference_for_xdomain ((ptr), (obj), domain)
1091 scan_object_for_xdomain_refs (char *start
, mword size
, void *data
)
1093 MonoDomain
*domain
= ((MonoObject
*)start
)->vtable
->domain
;
1095 #include "sgen-scan-object.h"
1099 #define HANDLE_PTR(ptr,obj) do { \
1100 if ((MonoObject*)*(ptr) == key) { \
1101 g_print ("found ref to %p in object %p (%s) at offset %td\n", \
1102 key, (obj), safe_name ((obj)), ((char*)(ptr) - (char*)(obj))); \
1107 scan_object_for_specific_ref (char *start
, MonoObject
*key
)
1109 #include "sgen-scan-object.h"
1113 mono_sgen_scan_area_with_callback (char *start
, char *end
, IterateObjectCallbackFunc callback
, void *data
)
1115 while (start
< end
) {
1117 if (!*(void**)start
) {
1118 start
+= sizeof (void*); /* should be ALLOC_ALIGN, really */
1122 size
= ALIGN_UP (safe_object_get_size ((MonoObject
*) start
));
1124 callback (start
, size
, data
);
1131 scan_object_for_specific_ref_callback (char *obj
, size_t size
, MonoObject
*key
)
1133 scan_object_for_specific_ref (obj
, key
);
1137 check_root_obj_specific_ref (RootRecord
*root
, MonoObject
*key
, MonoObject
*obj
)
1141 g_print ("found ref to %p in root record %p\n", key
, root
);
1144 static MonoObject
*check_key
= NULL
;
1145 static RootRecord
*check_root
= NULL
;
1148 check_root_obj_specific_ref_from_marker (void **obj
)
1150 check_root_obj_specific_ref (check_root
, check_key
, *obj
);
1154 scan_roots_for_specific_ref (MonoObject
*key
, int root_type
)
1159 for (i
= 0; i
< roots_hash_size
[root_type
]; ++i
) {
1160 for (root
= roots_hash
[root_type
][i
]; root
; root
= root
->next
) {
1161 void **start_root
= (void**)root
->start_root
;
1162 mword desc
= root
->root_desc
;
1166 switch (desc
& ROOT_DESC_TYPE_MASK
) {
1167 case ROOT_DESC_BITMAP
:
1168 desc
>>= ROOT_DESC_TYPE_SHIFT
;
1171 check_root_obj_specific_ref (root
, key
, *start_root
);
1176 case ROOT_DESC_COMPLEX
: {
1177 gsize
*bitmap_data
= complex_descriptors
+ (desc
>> ROOT_DESC_TYPE_SHIFT
);
1178 int bwords
= (*bitmap_data
) - 1;
1179 void **start_run
= start_root
;
1181 while (bwords
-- > 0) {
1182 gsize bmap
= *bitmap_data
++;
1183 void **objptr
= start_run
;
1186 check_root_obj_specific_ref (root
, key
, *objptr
);
1190 start_run
+= GC_BITS_PER_WORD
;
1194 case ROOT_DESC_USER
: {
1195 MonoGCRootMarkFunc marker
= user_descriptors
[desc
>> ROOT_DESC_TYPE_SHIFT
];
1196 marker (start_root
, check_root_obj_specific_ref_from_marker
);
1199 case ROOT_DESC_RUN_LEN
:
1200 g_assert_not_reached ();
1202 g_assert_not_reached ();
1211 mono_gc_scan_for_specific_ref (MonoObject
*key
)
1217 mono_sgen_scan_area_with_callback (nursery_section
->data
, nursery_section
->end_data
,
1218 (IterateObjectCallbackFunc
)scan_object_for_specific_ref_callback
, key
);
1220 major
.iterate_objects (TRUE
, TRUE
, (IterateObjectCallbackFunc
)scan_object_for_specific_ref_callback
, key
);
1222 for (bigobj
= los_object_list
; bigobj
; bigobj
= bigobj
->next
)
1223 scan_object_for_specific_ref (bigobj
->data
, key
);
1225 scan_roots_for_specific_ref (key
, ROOT_TYPE_NORMAL
);
1226 scan_roots_for_specific_ref (key
, ROOT_TYPE_WBARRIER
);
1228 for (i
= 0; i
< roots_hash_size
[ROOT_TYPE_PINNED
]; ++i
) {
1229 for (root
= roots_hash
[ROOT_TYPE_PINNED
][i
]; root
; root
= root
->next
) {
1230 void **ptr
= (void**)root
->start_root
;
1232 while (ptr
< (void**)root
->end_root
) {
1233 check_root_obj_specific_ref (root
, *ptr
, key
);
1240 /* Clear all remaining nursery fragments */
1242 clear_nursery_fragments (char *next
)
1245 if (nursery_clear_policy
== CLEAR_AT_TLAB_CREATION
) {
1246 g_assert (next
<= nursery_frag_real_end
);
1247 memset (next
, 0, nursery_frag_real_end
- next
);
1248 for (frag
= nursery_fragments
; frag
; frag
= frag
->next
) {
1249 memset (frag
->fragment_start
, 0, frag
->fragment_end
- frag
->fragment_start
);
1255 need_remove_object_for_domain (char *start
, MonoDomain
*domain
)
1257 if (mono_object_domain (start
) == domain
) {
1258 DEBUG (4, fprintf (gc_debug_file
, "Need to cleanup object %p\n", start
));
1259 binary_protocol_cleanup (start
, (gpointer
)LOAD_VTABLE (start
), safe_object_get_size ((MonoObject
*)start
));
1266 process_object_for_domain_clearing (char *start
, MonoDomain
*domain
)
1268 GCVTable
*vt
= (GCVTable
*)LOAD_VTABLE (start
);
1269 if (vt
->klass
== mono_defaults
.internal_thread_class
)
1270 g_assert (mono_object_domain (start
) == mono_get_root_domain ());
1271 /* The object could be a proxy for an object in the domain
1273 if (mono_class_has_parent (vt
->klass
, mono_defaults
.real_proxy_class
)) {
1274 MonoObject
*server
= ((MonoRealProxy
*)start
)->unwrapped_server
;
1276 /* The server could already have been zeroed out, so
1277 we need to check for that, too. */
1278 if (server
&& (!LOAD_VTABLE (server
) || mono_object_domain (server
) == domain
)) {
1279 DEBUG (4, fprintf (gc_debug_file
, "Cleaning up remote pointer in %p to object %p\n",
1281 ((MonoRealProxy
*)start
)->unwrapped_server
= NULL
;
1286 static MonoDomain
*check_domain
= NULL
;
1289 check_obj_not_in_domain (void **o
)
1291 g_assert (((MonoObject
*)(*o
))->vtable
->domain
!= check_domain
);
1295 scan_for_registered_roots_in_domain (MonoDomain
*domain
, int root_type
)
1299 check_domain
= domain
;
1300 for (i
= 0; i
< roots_hash_size
[root_type
]; ++i
) {
1301 for (root
= roots_hash
[root_type
][i
]; root
; root
= root
->next
) {
1302 void **start_root
= (void**)root
->start_root
;
1303 mword desc
= root
->root_desc
;
1305 /* The MonoDomain struct is allowed to hold
1306 references to objects in its own domain. */
1307 if (start_root
== (void**)domain
)
1310 switch (desc
& ROOT_DESC_TYPE_MASK
) {
1311 case ROOT_DESC_BITMAP
:
1312 desc
>>= ROOT_DESC_TYPE_SHIFT
;
1314 if ((desc
& 1) && *start_root
)
1315 check_obj_not_in_domain (*start_root
);
1320 case ROOT_DESC_COMPLEX
: {
1321 gsize
*bitmap_data
= complex_descriptors
+ (desc
>> ROOT_DESC_TYPE_SHIFT
);
1322 int bwords
= (*bitmap_data
) - 1;
1323 void **start_run
= start_root
;
1325 while (bwords
-- > 0) {
1326 gsize bmap
= *bitmap_data
++;
1327 void **objptr
= start_run
;
1329 if ((bmap
& 1) && *objptr
)
1330 check_obj_not_in_domain (*objptr
);
1334 start_run
+= GC_BITS_PER_WORD
;
1338 case ROOT_DESC_USER
: {
1339 MonoGCRootMarkFunc marker
= user_descriptors
[desc
>> ROOT_DESC_TYPE_SHIFT
];
1340 marker (start_root
, check_obj_not_in_domain
);
1343 case ROOT_DESC_RUN_LEN
:
1344 g_assert_not_reached ();
1346 g_assert_not_reached ();
1350 check_domain
= NULL
;
1354 check_for_xdomain_refs (void)
1358 mono_sgen_scan_area_with_callback (nursery_section
->data
, nursery_section
->end_data
,
1359 (IterateObjectCallbackFunc
)scan_object_for_xdomain_refs
, NULL
);
1361 major
.iterate_objects (TRUE
, TRUE
, (IterateObjectCallbackFunc
)scan_object_for_xdomain_refs
, NULL
);
1363 for (bigobj
= los_object_list
; bigobj
; bigobj
= bigobj
->next
)
1364 scan_object_for_xdomain_refs (bigobj
->data
, bigobj
->size
, NULL
);
1368 clear_domain_process_object (char *obj
, MonoDomain
*domain
)
1372 process_object_for_domain_clearing (obj
, domain
);
1373 remove
= need_remove_object_for_domain (obj
, domain
);
1375 if (remove
&& ((MonoObject
*)obj
)->synchronisation
) {
1376 void **dislink
= mono_monitor_get_object_monitor_weak_link ((MonoObject
*)obj
);
1378 mono_gc_register_disappearing_link (NULL
, dislink
, FALSE
);
1385 clear_domain_process_minor_object_callback (char *obj
, size_t size
, MonoDomain
*domain
)
1387 if (clear_domain_process_object (obj
, domain
))
1388 memset (obj
, 0, size
);
1392 clear_domain_process_major_object_callback (char *obj
, size_t size
, MonoDomain
*domain
)
1394 clear_domain_process_object (obj
, domain
);
1398 clear_domain_free_major_non_pinned_object_callback (char *obj
, size_t size
, MonoDomain
*domain
)
1400 if (need_remove_object_for_domain (obj
, domain
))
1401 major
.free_non_pinned_object (obj
, size
);
1405 clear_domain_free_major_pinned_object_callback (char *obj
, size_t size
, MonoDomain
*domain
)
1407 if (need_remove_object_for_domain (obj
, domain
))
1408 major
.free_pinned_object (obj
, size
);
1412 * When appdomains are unloaded we can easily remove objects that have finalizers,
1413 * but all the others could still be present in random places on the heap.
1414 * We need a sweep to get rid of them even though it's going to be costly
1416 * The reason we need to remove them is because we access the vtable and class
1417 * structures to know the object size and the reference bitmap: once the domain is
1418 * unloaded they point to random memory.
1421 mono_gc_clear_domain (MonoDomain
* domain
)
1423 LOSObject
*bigobj
, *prev
;
1428 clear_nursery_fragments (nursery_next
);
1430 if (xdomain_checks
&& domain
!= mono_get_root_domain ()) {
1431 scan_for_registered_roots_in_domain (domain
, ROOT_TYPE_NORMAL
);
1432 scan_for_registered_roots_in_domain (domain
, ROOT_TYPE_WBARRIER
);
1433 check_for_xdomain_refs ();
1436 mono_sgen_scan_area_with_callback (nursery_section
->data
, nursery_section
->end_data
,
1437 (IterateObjectCallbackFunc
)clear_domain_process_minor_object_callback
, domain
);
1439 /*Ephemerons and dislinks must be processed before LOS since they might end up pointing
1440 to memory returned to the OS.*/
1441 null_ephemerons_for_domain (domain
);
1443 for (i
= GENERATION_NURSERY
; i
< GENERATION_MAX
; ++i
)
1444 null_links_for_domain (domain
, i
);
1446 /* We need two passes over major and large objects because
1447 freeing such objects might give their memory back to the OS
1448 (in the case of large objects) or obliterate its vtable
1449 (pinned objects with major-copying or pinned and non-pinned
1450 objects with major-mark&sweep), but we might need to
1451 dereference a pointer from an object to another object if
1452 the first object is a proxy. */
1453 major
.iterate_objects (TRUE
, TRUE
, (IterateObjectCallbackFunc
)clear_domain_process_major_object_callback
, domain
);
1454 for (bigobj
= los_object_list
; bigobj
; bigobj
= bigobj
->next
)
1455 clear_domain_process_object (bigobj
->data
, domain
);
1458 for (bigobj
= los_object_list
; bigobj
;) {
1459 if (need_remove_object_for_domain (bigobj
->data
, domain
)) {
1460 LOSObject
*to_free
= bigobj
;
1462 prev
->next
= bigobj
->next
;
1464 los_object_list
= bigobj
->next
;
1465 bigobj
= bigobj
->next
;
1466 DEBUG (4, fprintf (gc_debug_file
, "Freeing large object %p\n",
1468 free_large_object (to_free
);
1472 bigobj
= bigobj
->next
;
1474 major
.iterate_objects (TRUE
, FALSE
, (IterateObjectCallbackFunc
)clear_domain_free_major_non_pinned_object_callback
, domain
);
1475 major
.iterate_objects (FALSE
, TRUE
, (IterateObjectCallbackFunc
)clear_domain_free_major_pinned_object_callback
, domain
);
1481 global_remset_cache_clear (void)
1483 memset (global_remset_cache
, 0, sizeof (global_remset_cache
));
1487 * Tries to check if a given remset location was already added to the global remset.
1490 * A 2-entry LRU cache of recently seen remset locations.
1492 * It's hand-coded instead of done using loops to reduce the number of memory references on cache hit.
1494 * Returns TRUE if the element was added.
1497 global_remset_location_was_not_added (gpointer ptr
)
1500 gpointer first
= global_remset_cache
[0], second
;
1502 HEAVY_STAT (++stat_global_remsets_discarded
);
1506 second
= global_remset_cache
[1];
1508 if (second
== ptr
) {
1509 /*Move the second to the front*/
1510 global_remset_cache
[0] = second
;
1511 global_remset_cache
[1] = first
;
1513 HEAVY_STAT (++stat_global_remsets_discarded
);
1517 global_remset_cache
[0] = second
;
1518 global_remset_cache
[1] = ptr
;
1523 * mono_sgen_add_to_global_remset:
1525 * The global remset contains locations which point into newspace after
1526 * a minor collection. This can happen if the objects they point to are pinned.
1528 * LOCKING: If called from a parallel collector, the global remset
1529 * lock must be held. For serial collectors that is not necessary.
1532 mono_sgen_add_to_global_remset (gpointer ptr
)
1535 gboolean lock
= current_collection_generation
== GENERATION_OLD
&& major
.is_parallel
;
1537 g_assert (!ptr_in_nursery (ptr
) && ptr_in_nursery (*(gpointer
*)ptr
));
1542 if (!global_remset_location_was_not_added (ptr
))
1545 DEBUG (8, fprintf (gc_debug_file
, "Adding global remset for %p\n", ptr
));
1546 binary_protocol_global_remset (ptr
, *(gpointer
*)ptr
, (gpointer
)LOAD_VTABLE (*(gpointer
*)ptr
));
1548 HEAVY_STAT (++stat_global_remsets_added
);
1551 * FIXME: If an object remains pinned, we need to add it at every minor collection.
1552 * To avoid uncontrolled growth of the global remset, only add each pointer once.
1554 if (global_remset
->store_next
+ 3 < global_remset
->end_set
) {
1555 *(global_remset
->store_next
++) = (mword
)ptr
;
1558 rs
= alloc_remset (global_remset
->end_set
- global_remset
->data
, NULL
);
1559 rs
->next
= global_remset
;
1561 *(global_remset
->store_next
++) = (mword
)ptr
;
1564 int global_rs_size
= 0;
1566 for (rs
= global_remset
; rs
; rs
= rs
->next
) {
1567 global_rs_size
+= rs
->store_next
- rs
->data
;
1569 DEBUG (4, fprintf (gc_debug_file
, "Global remset now has size %d\n", global_rs_size
));
1574 UNLOCK_GLOBAL_REMSET
;
1580 * Scan objects in the gray stack until the stack is empty. This should be called
1581 * frequently after each object is copied, to achieve better locality and cache
1585 drain_gray_stack (GrayQueue
*queue
)
1589 if (current_collection_generation
== GENERATION_NURSERY
) {
1591 GRAY_OBJECT_DEQUEUE (queue
, obj
);
1594 DEBUG (9, fprintf (gc_debug_file
, "Precise gray object scan %p (%s)\n", obj
, safe_name (obj
)));
1595 major
.minor_scan_object (obj
, queue
);
1598 if (major
.is_parallel
&& queue
== &workers_distribute_gray_queue
)
1602 GRAY_OBJECT_DEQUEUE (queue
, obj
);
1605 DEBUG (9, fprintf (gc_debug_file
, "Precise gray object scan %p (%s)\n", obj
, safe_name (obj
)));
1606 major
.major_scan_object (obj
, queue
);
1612 * Addresses from start to end are already sorted. This function finds
1613 * the object header for each address and pins the object. The
1614 * addresses must be inside the passed section. The (start of the)
1615 * address array is overwritten with the addresses of the actually
1616 * pinned objects. Return the number of pinned objects.
1619 pin_objects_from_addresses (GCMemSection
*section
, void **start
, void **end
, void *start_nursery
, void *end_nursery
, GrayQueue
*queue
)
1624 void *last_obj
= NULL
;
1625 size_t last_obj_size
= 0;
1628 void **definitely_pinned
= start
;
1629 while (start
< end
) {
1631 /* the range check should be redundant */
1632 if (addr
!= last
&& addr
>= start_nursery
&& addr
< end_nursery
) {
1633 DEBUG (5, fprintf (gc_debug_file
, "Considering pinning addr %p\n", addr
));
1634 /* multiple pointers to the same object */
1635 if (addr
>= last_obj
&& (char*)addr
< (char*)last_obj
+ last_obj_size
) {
1639 idx
= ((char*)addr
- (char*)section
->data
) / SCAN_START_SIZE
;
1640 g_assert (idx
< section
->num_scan_start
);
1641 search_start
= (void*)section
->scan_starts
[idx
];
1642 if (!search_start
|| search_start
> addr
) {
1645 search_start
= section
->scan_starts
[idx
];
1646 if (search_start
&& search_start
<= addr
)
1649 if (!search_start
|| search_start
> addr
)
1650 search_start
= start_nursery
;
1652 if (search_start
< last_obj
)
1653 search_start
= (char*)last_obj
+ last_obj_size
;
1654 /* now addr should be in an object a short distance from search_start
1655 * Note that search_start must point to zeroed mem or point to an object.
1658 if (!*(void**)search_start
) {
1659 search_start
= (void*)ALIGN_UP ((mword
)search_start
+ sizeof (gpointer
));
1662 last_obj
= search_start
;
1663 last_obj_size
= ALIGN_UP (safe_object_get_size ((MonoObject
*)search_start
));
1664 DEBUG (8, fprintf (gc_debug_file
, "Pinned try match %p (%s), size %zd\n", last_obj
, safe_name (last_obj
), last_obj_size
));
1665 if (addr
>= search_start
&& (char*)addr
< (char*)last_obj
+ last_obj_size
) {
1666 DEBUG (4, fprintf (gc_debug_file
, "Pinned object %p, vtable %p (%s), count %d\n", search_start
, *(void**)search_start
, safe_name (search_start
), count
));
1667 binary_protocol_pin (search_start
, (gpointer
)LOAD_VTABLE (search_start
), safe_object_get_size (search_start
));
1668 pin_object (search_start
);
1669 GRAY_OBJECT_ENQUEUE (queue
, search_start
);
1671 mono_sgen_pin_stats_register_object (search_start
, last_obj_size
);
1672 definitely_pinned
[count
] = search_start
;
1676 /* skip to the next object */
1677 search_start
= (void*)((char*)search_start
+ last_obj_size
);
1678 } while (search_start
<= addr
);
1679 /* we either pinned the correct object or we ignored the addr because
1680 * it points to unused zeroed memory.
1686 //printf ("effective pinned: %d (at the end: %d)\n", count, (char*)end_nursery - (char*)last);
/*
 * Resolve this section's conservative pin-queue entries into actual
 * pinned objects: pin_objects_from_addresses() compacts the queue in
 * place and returns the surviving entry count, which we store back.
 * The queue start is then cleared.
 * NOTE(review): braces and the declaration of reduced_to were dropped
 * by extraction.
 */
mono_sgen_pin_objects_in_section (GCMemSection *section, GrayQueue *queue)
	int num_entries = section->pin_queue_num_entries;
	void **start = section->pin_queue_start;
	reduced_to = pin_objects_from_addresses (section, start, start + num_entries,
		section->data, section->next_data, queue);
	section->pin_queue_num_entries = reduced_to;
	section->pin_queue_start = NULL;
/* Sort the addresses in array in increasing order.
 * Done using a by-the book heap sort. Which has decent and stable performance, is pretty cache efficient.
 * NOTE(review): extraction dropped the heapify scaffolding (child/root
 * declarations, tmp declarations, loop braces, break statements); only
 * the surviving tokens are shown below.
 */
sort_addresses (void **array, int size)
	/* Phase 1: sift each element up to build a max-heap. */
	for (i = 1; i < size; ++i) {
		/* NOTE(review): child initialization / inner loop opener dropped. */
		int parent = (child - 1) / 2;
		if (array [parent] >= array [child])
		/* NOTE(review): break dropped here; swap parent/child below. */
		tmp = array [parent];
		array [parent] = array [child];
		array [child] = tmp;
	/* Phase 2: repeatedly pop the max to the end and sift down. */
	for (i = size - 1; i > 0; --i) {
		/* NOTE(review): end/root/tmp setup lines dropped here. */
		array [i] = array [0];
		while (root * 2 + 1 <= end) {
			int child = root * 2 + 1;
			/* pick the larger of the two children */
			if (child < end && array [child] < array [child + 1])
			if (array [root] >= array [child])
			/* NOTE(review): child++ and break lines dropped above. */
			array [root] = array [child];
			array [child] = tmp;
/*
 * Debug helper: print every [first, next) range between consecutive
 * pin-queue entries, covering the nursery.  Compiled but normally
 * unreferenced (G_GNUC_UNUSED).
 * NOTE(review): extraction dropped braces, loop-local declarations and
 * the `first = next`-style advance between iterations.
 */
static G_GNUC_UNUSED void
print_nursery_gaps (void* start_nursery, void *end_nursery)
	gpointer first = start_nursery;
	for (i = 0; i < next_pin_slot; ++i) {
		next = pin_queue [i];
		fprintf (gc_debug_file, "Nursery range: %p-%p, size: %td\n", first, next, (char*)next-(char*)first);
	/* NOTE(review): lines advancing first past the pinned object dropped;
	 * final print covers the tail range up to end_nursery. */
	fprintf (gc_debug_file, "Nursery range: %p-%p, size: %td\n", first, next, (char*)next-(char*)first);
/* reduce the info in the pin queue, removing duplicate pointers and sorting them */
/*
 * Sorts pin_queue[start_slot..next_pin_slot) and collapses runs of
 * equal addresses, shrinking next_pin_slot accordingly.
 * NOTE(review): extraction dropped the opening brace, the early-out for
 * an empty range, and the outer compaction loop around the
 * `while (*start == *cur ...)` scan.
 */
optimize_pin_queue (int start_slot)
	void **start, **cur, **end;
	/* sort and uniq pin_queue: we just sort and we let the rest discard multiple values */
	/* it may be better to keep ranges of pinned memory instead of individually pinning objects */
	DEBUG (5, fprintf (gc_debug_file, "Sorting pin queue, size: %d\n", next_pin_slot));
	if ((next_pin_slot - start_slot) > 1)
		sort_addresses (pin_queue + start_slot, next_pin_slot - start_slot);
	start = cur = pin_queue + start_slot;
	end = pin_queue + next_pin_slot;
	/* skip over a run of duplicates of *start */
	while (*start == *cur && cur < end)
	/* NOTE(review): compaction body (*start++ = *cur++ style) dropped. */
	next_pin_slot = start - pin_queue;
	DEBUG (5, fprintf (gc_debug_file, "Pin queue reduced to size: %d\n", next_pin_slot));
	//DEBUG (6, print_nursery_gaps (start_nursery, end_nursery));
/*
 * Scan the memory between start and end and queue values which could be pointers
 * to the area between start_nursery and end_nursery for later consideration.
 * Typically used for thread stacks.
 * NOTE(review): return type, braces and the count bookkeeping were
 * dropped by extraction.
 */
conservatively_pin_objects_from (void **start, void **end, void *start_nursery, void *end_nursery, int pin_type)
	while (start < end) {
		if (*start >= start_nursery && *start < end_nursery) {
			/*
			 * *start can point to the middle of an object
			 * note: should we handle pointing at the end of an object?
			 * pinning in C# code disallows pointing at the end of an object
			 * but there is some small chance that an optimizing C compiler
			 * may keep the only reference to an object by pointing
			 * at the end of it. We ignore this small chance for now.
			 * Pointers to the end of an object are indistinguishable
			 * from pointers to the start of the next object in memory
			 * so if we allow that we'd need to pin two objects...
			 * We queue the pointer in an array, the
			 * array will then be sorted and uniqued. This way
			 * we can coalesce several pinning pointers and it should
			 * be faster since we'd do a memory scan with increasing
			 * addresses. Note: we can align the address to the allocation
			 * alignment, so the unique process is more effective.
			 */
			mword addr = (mword)*start;
			/* round down to allocation alignment so dedup works better */
			addr &= ~(ALLOC_ALIGN - 1);
			if (addr >= (mword)start_nursery && addr < (mword)end_nursery)
				pin_stage_ptr ((void*)addr);
			/* NOTE(review): pin_stats call presumably gated on a stats
			 * flag in a dropped line. */
			pin_stats_register_address ((char*)addr, pin_type);
			DEBUG (6, if (count) fprintf (gc_debug_file, "Pinning address %p\n", (void*)addr));
	/* NOTE(review): start++ advance and closing braces dropped. */
	DEBUG (7, if (count) fprintf (gc_debug_file, "found %d potential pinned heap pointers\n", count));
/*
 * Debugging function: find in the conservative roots where @obj is being pinned.
 * Scans the index-0 roots hash (conservative roots have a NULL
 * root_desc) word by word, then falls through to the thread stacks via
 * find_pinning_ref_from_thread().
 * NOTE(review): return type line, local declarations (i, root) and
 * closing braces were dropped by extraction.
 */
static G_GNUC_UNUSED void
find_pinning_reference (char *obj, size_t size)
	char *endobj = obj + size;
	for (i = 0; i < roots_hash_size [0]; ++i) {
		for (root = roots_hash [0][i]; root; root = root->next) {
			/* if desc is non-null it has precise info */
			if (!root->root_desc) {
				char ** start = (char**)root->start_root;
				while (start < (char**)root->end_root) {
					if (*start >= obj && *start < endobj) {
						DEBUG (0, fprintf (gc_debug_file, "Object %p referenced in pinned roots %p-%p (at %p in record %p)\n", obj, root->start_root, root->end_root, start, root));
	/* NOTE(review): start++ and closing braces dropped here. */
	find_pinning_ref_from_thread (obj, size);
/*
 * The first thing we do in a collection is to identify pinned objects.
 * This function considers all the areas of memory that need to be
 * conservatively scanned.
 * NOTE(review): return type, braces and loop-variable declarations were
 * dropped by extraction.
 */
pin_from_roots (void *start_nursery, void *end_nursery)
	DEBUG (2, fprintf (gc_debug_file, "Scanning pinned roots (%d bytes, %d/%d entries)\n", (int)roots_size, num_roots_entries [ROOT_TYPE_NORMAL], num_roots_entries [ROOT_TYPE_PINNED]));
	/* objects pinned from the API are inside these roots */
	for (i = 0; i < roots_hash_size [ROOT_TYPE_PINNED]; ++i) {
		for (root = roots_hash [ROOT_TYPE_PINNED][i]; root; root = root->next) {
			DEBUG (6, fprintf (gc_debug_file, "Pinned roots %p-%p\n", root->start_root, root->end_root));
			conservatively_pin_objects_from ((void**)root->start_root, (void**)root->end_root, start_nursery, end_nursery, PIN_TYPE_OTHER);
	/* now deal with the thread stacks
	 * in the future we should be able to conservatively scan only:
	 * *) the cpu registers
	 * *) the unmanaged stack frames
	 * *) the _last_ managed stack frame
	 * *) pointers slots in managed frames
	 */
	scan_thread_data (start_nursery, end_nursery, FALSE);

	/* flush the staged pin pointers into the real pin queue */
	evacuate_pin_staging_area ();
/*
 * Scratch globals used to pass the current copy/mark function and gray
 * queue to single_arg_user_copy_or_mark(), whose one-argument signature
 * is dictated by the MonoGCRootMarkFunc callback.  They are set and
 * cleared around the marker call in precisely_scan_objects_from(), so
 * this mechanism is not thread-safe (as that function itself notes).
 */
static CopyOrMarkObjectFunc user_copy_or_mark_func;
static GrayQueue *user_copy_or_mark_queue;
/*
 * Single-argument trampoline handed to user root markers: forwards to
 * the copy/mark function and queue stashed in the globals above.
 * NOTE(review): return type and braces dropped by extraction.
 */
single_arg_user_copy_or_mark (void **obj)
	user_copy_or_mark_func (obj, user_copy_or_mark_queue);
/*
 * The memory area from start_root to end_root contains pointers to objects.
 * Their position is precisely described by @desc (this means that the pointer
 * can be either NULL or the pointer to the start of an object).
 * This functions copies them to to_space updates them.
 *
 * This function is not thread-safe!
 *
 * NOTE(review): extraction dropped the opening brace, the per-word
 * loops around the bitmap tests, the bitmap-shift/advance lines, and
 * the break statements; gaps are flagged inline.
 */
precisely_scan_objects_from (CopyOrMarkObjectFunc copy_func, void** start_root, void** end_root, char* n_start, char *n_end, mword desc, GrayQueue *queue)
	switch (desc & ROOT_DESC_TYPE_MASK) {
	case ROOT_DESC_BITMAP:
		/* simple bitmap: one bit per pointer-sized slot */
		desc >>= ROOT_DESC_TYPE_SHIFT;
		/* NOTE(review): the while loop over slots was dropped here. */
		if ((desc & 1) && *start_root) {
			copy_func (start_root, queue);
			DEBUG (9, fprintf (gc_debug_file, "Overwrote root at %p with %p\n", start_root, *start_root));
			drain_gray_stack (queue);
	/* NOTE(review): desc >>= 1 / start_root++ / break lines dropped. */
	case ROOT_DESC_COMPLEX: {
		/* out-of-line bitmap stored in complex_descriptors; first word
		 * is the length */
		gsize *bitmap_data = complex_descriptors + (desc >> ROOT_DESC_TYPE_SHIFT);
		int bwords = (*bitmap_data) - 1;
		void **start_run = start_root;
		while (bwords-- > 0) {
			gsize bmap = *bitmap_data++;
			void **objptr = start_run;
			/* NOTE(review): inner while over bmap bits dropped here. */
			if ((bmap & 1) && *objptr) {
				copy_func (objptr, queue);
				DEBUG (9, fprintf (gc_debug_file, "Overwrote root at %p with %p\n", objptr, *objptr));
				drain_gray_stack (queue);
			/* NOTE(review): bmap >>= 1 / objptr++ lines dropped here. */
			start_run += GC_BITS_PER_WORD;
	case ROOT_DESC_USER: {
		/* user-supplied marker; see single_arg_user_copy_or_mark */
		MonoGCRootMarkFunc marker = user_descriptors [desc >> ROOT_DESC_TYPE_SHIFT];
		user_copy_or_mark_func = copy_func;
		user_copy_or_mark_queue = queue;
		marker (start_root, single_arg_user_copy_or_mark);
		user_copy_or_mark_func = NULL;
		user_copy_or_mark_queue = NULL;
	case ROOT_DESC_RUN_LEN:
		g_assert_not_reached ();
	/* default: unknown descriptor type */
	g_assert_not_reached ();
/*
 * Lock-free widening of [lowest_heap_address, highest_heap_address] to
 * include [low, high].  Each bound is updated with a CAS retry loop:
 * re-read the current bound, and retry while the CAS observes a
 * concurrent change.
 * NOTE(review): extraction dropped the return type, braces, the `old`
 * declaration, the `do {` openers and the early-exit comparisons
 * (presumably `if (low >= old) break;` / `if (high <= old) break;`
 * — TODO confirm against the full source).
 */
mono_sgen_update_heap_boundaries (mword low, mword high)
	old = lowest_heap_address;
	} while (SGEN_CAS_PTR ((gpointer*)&lowest_heap_address, (gpointer)low, (gpointer)old) != (gpointer)old);
	old = highest_heap_address;
	} while (SGEN_CAS_PTR ((gpointer*)&highest_heap_address, (gpointer)high, (gpointer)old) != (gpointer)old);
/*
 * Pop a Fragment from the freelist, or allocate a fresh one from the
 * internal allocator when the freelist is empty.
 * NOTE(review): return type, braces, the freelist NULL check and the
 * return statements were dropped by extraction.
 */
alloc_fragment (void)
	Fragment *frag = fragment_freelist;
	fragment_freelist = frag->next;
	/* freelist empty: fall back to the internal allocator */
	frag = mono_sgen_alloc_internal (INTERNAL_MEM_FRAGMENT);
/* size must be a power of 2 */
/*
 * Over-allocate by `alignment` bytes, round the base up to the
 * requested alignment, and return the slack on both sides to the OS.
 * NOTE(review): return type, braces, the `aligned` declaration and the
 * NULL-check/return lines were dropped by extraction.
 */
mono_sgen_alloc_os_memory_aligned (mword size, mword alignment, gboolean activate)
	/* Allocate twice the memory to be able to put the block on an aligned address */
	char *mem = mono_sgen_alloc_os_memory (size + alignment, activate);
	/* round up to the next multiple of alignment */
	aligned = (char*)((mword)(mem + (alignment - 1)) & ~(alignment - 1));
	g_assert (aligned >= mem && aligned + size <= mem + size + alignment && !((mword)aligned & (alignment - 1)));
	/* give back the unused head... */
	mono_sgen_free_os_memory (mem, aligned - mem);
	/* ...and the unused tail, if any */
	if (aligned + size < mem + size + alignment)
		mono_sgen_free_os_memory (aligned + size, (mem + size + alignment) - (aligned + size));
/*
 * Allocate and setup the data structures needed to be able to allocate objects
 * in the nursery. The nursery is stored in nursery_section.
 * NOTE(review): extraction dropped the return type, braces, several
 * local declarations (data, alloc_size, scan_starts, frag), the early
 * return when nursery_section already exists, and the #else/#endif of
 * the SGEN_ALIGN_NURSERY conditional; gaps are flagged inline.
 */
alloc_nursery (void)
	GCMemSection *section;
	/* idempotent: bail out if the nursery was already set up */
	if (nursery_section)
	DEBUG (2, fprintf (gc_debug_file, "Allocating nursery size: %lu\n", (unsigned long)nursery_size));
	/* later we will alloc a larger area for the nursery but only activate
	 * what we need. The rest will be used as expansion if we have too many pinned
	 * objects in the existing nursery.
	 */
	/* FIXME: handle OOM */
	section = mono_sgen_alloc_internal (INTERNAL_MEM_SECTION);
	g_assert (nursery_size == DEFAULT_NURSERY_SIZE);
	alloc_size = nursery_size;
#ifdef SGEN_ALIGN_NURSERY
	/* aligned nursery lets ptr_in_nursery() use a mask test */
	data = major.alloc_heap (alloc_size, alloc_size, DEFAULT_NURSERY_BITS);
	/* NOTE(review): #else dropped here — unaligned variant follows. */
	data = major.alloc_heap (alloc_size, 0, DEFAULT_NURSERY_BITS);
	/* NOTE(review): #endif dropped here. */
	nursery_start = data;
	nursery_real_end = nursery_start + nursery_size;
	mono_sgen_update_heap_boundaries ((mword)nursery_start, (mword)nursery_real_end);
	nursery_next = nursery_start;
	DEBUG (4, fprintf (gc_debug_file, "Expanding nursery size (%p-%p): %lu, total: %lu\n", data, data + alloc_size, (unsigned long)nursery_size, (unsigned long)total_alloc));
	section->data = section->next_data = data;
	section->size = alloc_size;
	section->end_data = nursery_real_end;
	/* one scan-start slot per SCAN_START_SIZE bytes, rounded up */
	scan_starts = (alloc_size + SCAN_START_SIZE - 1) / SCAN_START_SIZE;
	section->scan_starts = mono_sgen_alloc_internal_dynamic (sizeof (char*) * scan_starts, INTERNAL_MEM_SCAN_STARTS);
	section->num_scan_start = scan_starts;
	section->block.role = MEMORY_ROLE_GEN0;
	section->block.next = NULL;
	nursery_section = section;
	/* Setup the single first large fragment */
	frag = alloc_fragment ();
	frag->fragment_start = nursery_start;
	frag->fragment_limit = nursery_start;
	frag->fragment_end = nursery_real_end;
	nursery_frag_real_end = nursery_real_end;
	/* FIXME: frag here is lost */
/*
 * Run copy_func over the object slot of every entry in a finalizer
 * list, so fin-ready objects (and what they reference) survive and the
 * entries' object pointers are updated to the new locations.
 * NOTE(review): return type, braces and the `fin` declaration were
 * dropped by extraction.
 */
scan_finalizer_entries (CopyOrMarkObjectFunc copy_func, FinalizeEntry *list, GrayQueue *queue)
	for (fin = list; fin; fin = fin->next) {
		/* NOTE(review): a guard line (likely skipping NULL objects)
		 * appears to have been dropped here. */
		DEBUG (5, fprintf (gc_debug_file, "Scan of fin ready object: %p (%s)\n", fin->object, safe_name (fin->object)));
		copy_func (&fin->object, queue);
/* Running total of bytes handed to reusable nursery fragments by
 * add_nursery_frag(). */
static mword fragment_total = 0;
/*
 * We found a fragment of free memory in the nursery: memzero it and if
 * it is big enough, add it to the list of fragments that can be used for
 * allocation.
 * NOTE(review): return type, braces, the `fragment` declaration and the
 * `} else {` of the small-fragment branch were dropped by extraction.
 */
add_nursery_frag (size_t frag_size, char* frag_start, char* frag_end)
	DEBUG (4, fprintf (gc_debug_file, "Found empty fragment: %p-%p, size: %zd\n", frag_start, frag_end, frag_size));
	binary_protocol_empty (frag_start, frag_size);
	/* memsetting just the first chunk start is bound to provide better cache locality */
	if (nursery_clear_policy == CLEAR_AT_GC)
		memset (frag_start, 0, frag_size);
	/* Not worth dealing with smaller fragments: need to tune */
	if (frag_size >= FRAGMENT_MIN_SIZE) {
		fragment = alloc_fragment ();
		fragment->fragment_start = frag_start;
		fragment->fragment_limit = frag_start;
		fragment->fragment_end = frag_end;
		/* push onto the nursery fragment list */
		fragment->next = nursery_fragments;
		nursery_fragments = fragment;
		fragment_total += frag_size;
	/* NOTE(review): `} else {` dropped here — the memset below is the
	 * small-fragment branch. */
	/* Clear unused fragments, pinning depends on this */
	/*TODO place an int[] here instead of the memset if size justify it*/
	memset (frag_start, 0, frag_size);
2116 generation_name (int generation
)
2118 switch (generation
) {
2119 case GENERATION_NURSERY
: return "nursery";
2120 case GENERATION_OLD
: return "old";
2121 default: g_assert_not_reached ();
2125 static DisappearingLinkHashTable
*
2126 get_dislink_hash_table (int generation
)
2128 switch (generation
) {
2129 case GENERATION_NURSERY
: return &minor_disappearing_link_hash
;
2130 case GENERATION_OLD
: return &major_disappearing_link_hash
;
2131 default: g_assert_not_reached ();
2135 static FinalizeEntryHashTable
*
2136 get_finalize_entry_hash_table (int generation
)
2138 switch (generation
) {
2139 case GENERATION_NURSERY
: return &minor_finalizable_hash
;
2140 case GENERATION_OLD
: return &major_finalizable_hash
;
2141 default: g_assert_not_reached ();
/*
 * Drain the gray stack to completion for the given generation, then
 * process ephemerons, finalizable objects and disappearing links, each
 * of which can resurrect objects and therefore re-fills and re-drains
 * the gray stack in a loop until a fixpoint is reached.
 * NOTE(review): extraction dropped the return type, braces, timing
 * declarations (atv/btv), TV_GETTIME calls, the `do {` openers of the
 * ephemeron and finalizer fixpoint loops, and the fin_ready
 * declaration; gaps are flagged inline.
 */
finish_gray_stack (char *start_addr, char *end_addr, int generation, GrayQueue *queue)
	int ephemeron_rounds = 0;
	/* nursery collections copy; major collections copy-or-mark */
	CopyOrMarkObjectFunc copy_func = current_collection_generation == GENERATION_NURSERY ? major.copy_object : major.copy_or_mark_object;
	/*
	 * We copied all the reachable objects. Now it's the time to copy
	 * the objects that were not referenced by the roots, but by the copied objects.
	 * we built a stack of objects pointed to by gray_start: they are
	 * additional roots and we may add more items as we go.
	 * We loop until gray_start == gray_objects which means no more objects have
	 * been added. Note this is iterative: no recursion is involved.
	 * We need to walk the LO list as well in search of marked big objects
	 * (use a flag since this is needed only on major collections). We need to loop
	 * here as well, so keep a counter of marked LO (increasing it in copy_object).
	 * To achieve better cache locality and cache usage, we drain the gray stack
	 * frequently, after each object is copied, and just finish the work here.
	 */
	drain_gray_stack (queue);
	DEBUG (2, fprintf (gc_debug_file, "%s generation done\n", generation_name (generation)));
	/* walk the finalization queue and move also the objects that need to be
	 * finalized: use the finalized objects as new roots so the objects they depend
	 * on are also not reclaimed. As with the roots above, only objects in the nursery
	 * are marked/copied.
	 * We need a loop here, since objects ready for finalizers may reference other objects
	 * that are fin-ready. Speedup with a flag?
	 */
	/* NOTE(review): outer finalizer `do {` opener dropped here. */
	/*
	 * Walk the ephemeron tables marking all values with reachable keys. This must be completely done
	 * before processing finalizable objects to avoid finalizing reachable values.
	 *
	 * It must be done inside the finalizaters loop since objects must not be removed from CWT tables
	 * while they are been finalized.
	 */
	int done_with_ephemerons = 0;
	/* NOTE(review): ephemeron `do {` opener dropped here. */
	done_with_ephemerons = mark_ephemerons_in_range (copy_func, start_addr, end_addr, queue);
	drain_gray_stack (queue);
	/* NOTE(review): ++ephemeron_rounds line presumably dropped here. */
	} while (!done_with_ephemerons);
	/* snapshot the count so we can detect newly fin-ready objects */
	fin_ready = num_ready_finalizers;
	finalize_in_range (copy_func, start_addr, end_addr, generation, queue);
	if (generation == GENERATION_OLD)
		finalize_in_range (copy_func, nursery_start, nursery_real_end, GENERATION_NURSERY, queue);
	/* drain the new stack that might have been created */
	DEBUG (6, fprintf (gc_debug_file, "Precise scan of gray area post fin\n"));
	drain_gray_stack (queue);
	} while (fin_ready != num_ready_finalizers);
	/*
	 * Clear ephemeron pairs with unreachable keys.
	 * We pass the copy func so we can figure out if an array was promoted or not.
	 */
	clear_unreachable_ephemerons (copy_func, start_addr, end_addr, queue);
	DEBUG (2, fprintf (gc_debug_file, "Finalize queue handling scan for %s generation: %d usecs %d ephemeron roundss\n", generation_name (generation), TV_ELAPSED (atv, btv), ephemeron_rounds));
	/*
	 * handle disappearing links
	 * Note we do this after checking the finalization queue because if an object
	 * survives (at least long enough to be finalized) we don't clear the link.
	 * This also deals with a possible issue with the monitor reclamation: with the Boehm
	 * GC a finalized object my lose the monitor because it is cleared before the finalizer is
	 * called.
	 */
	/* NOTE(review): `for (;;) {` style loop opener presumably dropped here. */
	g_assert (gray_object_queue_is_empty (queue));
	null_link_in_range (copy_func, start_addr, end_addr, generation, queue);
	if (generation == GENERATION_OLD)
		null_link_in_range (copy_func, start_addr, end_addr, GENERATION_NURSERY, queue);
	/* nothing was resurrected by link processing: we are done */
	if (gray_object_queue_is_empty (queue))
	drain_gray_stack (queue);
	g_assert (gray_object_queue_is_empty (queue));
/*
 * Consistency check: every non-NULL scan-start slot of the section must
 * point at a plausible object (size between sizeof(MonoObject) and
 * MAX_SMALL_OBJ_SIZE).
 * NOTE(review): return type, braces and the declaration of `i` were
 * dropped by extraction.
 */
mono_sgen_check_section_scan_starts (GCMemSection *section)
	for (i = 0; i < section->num_scan_start; ++i) {
		if (section->scan_starts [i]) {
			guint size = safe_object_get_size ((MonoObject*) section->scan_starts [i]);
			g_assert (size >= sizeof (MonoObject) && size <= MAX_SMALL_OBJ_SIZE);
/*
 * Run the scan-start consistency checks on the nursery and on the major
 * heap, but only when the do_scan_starts_check debug option is enabled.
 * NOTE(review): extraction dropped the return type, braces and the line
 * between the guard and the first call (presumably `return;` — without
 * it the guard is a no-op; confirm against the full source).
 */
check_scan_starts (void)
	if (!do_scan_starts_check)
	mono_sgen_check_section_scan_starts (nursery_section);
	major.check_scan_starts ();
/* NOTE(review): not referenced anywhere in the visible portion of the
 * file; presumably read/updated by code outside this chunk. */
static int last_num_pinned = 0;
/*
 * Rebuild the nursery free-fragment list after a collection: recycle
 * the old Fragment records, then walk the sorted pinned-object array,
 * turning each gap between consecutive pinned objects (and the tail up
 * to nursery_real_end) into a fragment via add_nursery_frag().  Also
 * unpins each object and repopulates the nursery scan-starts table.
 * NOTE(review): extraction dropped the return type, braces, the
 * declarations of i/frag_size and the degraded-allocation handling in
 * the fully-pinned case; gaps are flagged inline.
 */
build_nursery_fragments (void **start, int num_entries)
	char *frag_start, *frag_end;
	/* return the old fragment records to the freelist */
	while (nursery_fragments) {
		Fragment *next = nursery_fragments->next;
		nursery_fragments->next = fragment_freelist;
		fragment_freelist = nursery_fragments;
		nursery_fragments = next;
	frag_start = nursery_start;
	/* clear scan starts */
	memset (nursery_section->scan_starts, 0, nursery_section->num_scan_start * sizeof (gpointer));
	for (i = 0; i < num_entries; ++i) {
		frag_end = start [i];
		/* remove the pin bit from pinned objects */
		unpin_object (frag_end);
		nursery_section->scan_starts [((char*)frag_end - (char*)nursery_section->data)/SCAN_START_SIZE] = frag_end;
		frag_size = frag_end - frag_start;
		/* NOTE(review): guard (frag_size non-zero?) dropped before this call. */
		add_nursery_frag (frag_size, frag_start, frag_end);
		/* next gap starts right after this pinned object */
		frag_size = ALIGN_UP (safe_object_get_size ((MonoObject*)start [i]));
		frag_start = (char*)start [i] + frag_size;
	nursery_last_pinned_end = frag_start;
	/* the tail fragment, from the last pinned object to the end */
	frag_end = nursery_real_end;
	frag_size = frag_end - frag_start;
	/* NOTE(review): guard dropped before this call as above. */
	add_nursery_frag (frag_size, frag_start, frag_end);
	if (!nursery_fragments) {
		DEBUG (1, fprintf (gc_debug_file, "Nursery fully pinned (%d)\n", num_entries));
		for (i = 0; i < num_entries; ++i) {
			DEBUG (3, fprintf (gc_debug_file, "Bastard pinning obj %p (%s), size: %d\n", start [i], safe_name (start [i]), safe_object_get_size (start [i])));
	nursery_next = nursery_frag_real_end = NULL;
	/* Clear TLABs for all threads */
/*
 * Precisely scan every registered root of the given type (its root_desc
 * describes the pointer layout) via precisely_scan_objects_from(),
 * restricted to objects in [addr_start, addr_end).
 * NOTE(review): return type, braces and declarations of i/root were
 * dropped by extraction.
 */
scan_from_registered_roots (CopyOrMarkObjectFunc copy_func, char *addr_start, char *addr_end, int root_type, GrayQueue *queue)
	for (i = 0; i < roots_hash_size [root_type]; ++i) {
		for (root = roots_hash [root_type][i]; root; root = root->next) {
			DEBUG (6, fprintf (gc_debug_file, "Precise root scan %p-%p (desc: %p)\n", root->start_root, root->end_root, (void*)root->root_desc));
			precisely_scan_objects_from (copy_func, (void**)root->start_root, (void**)root->end_root, addr_start, addr_end, root->root_desc, queue);
2316 mono_sgen_dump_occupied (char *start
, char *end
, char *section_start
)
2318 fprintf (heap_dump_file
, "<occupied offset=\"%td\" size=\"%td\"/>\n", start
- section_start
, end
- start
);
/*
 * Dump one heap section as XML: walk it object by object, coalescing
 * runs of live objects into <occupied> ranges and emitting an <object>
 * element per object (offset, class, size).  Zeroed words are skipped a
 * pointer at a time.
 * NOTE(review): extraction dropped the return type, braces, the
 * declarations of vt/size, the occ_start bookkeeping branches and the
 * tail of the <object> fprintf; gaps are flagged inline.
 */
mono_sgen_dump_section (GCMemSection *section, const char *type)
	char *start = section->data;
	char *end = section->data + section->size;
	char *occ_start = NULL;
	char *old_start = NULL; /* just for debugging */
	fprintf (heap_dump_file, "<section type=\"%s\" size=\"%lu\">\n", type, (unsigned long)section->size);
	while (start < end) {
		/* NOTE(review): lines dropped here (likely vt/size decls). */
		if (!*(void**)start) {
			/* end of a run of live objects: flush the occupied range */
			/* NOTE(review): `if (occ_start)` guard presumably dropped. */
			mono_sgen_dump_occupied (occ_start, start, section->data);
			/* NOTE(review): occ_start = NULL reset presumably dropped. */
			start += sizeof (void*); /* should be ALLOC_ALIGN, really */
		/* NOTE(review): continue/else and occ_start assignment dropped. */
		g_assert (start < section->next_data);
		vt = (GCVTable*)LOAD_VTABLE (start);
		size = ALIGN_UP (safe_object_get_size ((MonoObject*) start));
		/* NOTE(review): do_heap_dump guard presumably dropped here. */
		fprintf (heap_dump_file, "<object offset=\"%d\" class=\"%s.%s\" size=\"%d\"/>\n",
			start - section->data,
			vt->klass->name_space, vt->klass->name,
		/* NOTE(review): size argument and start += size advance dropped. */
	/* flush the trailing occupied range, if any */
	mono_sgen_dump_occupied (occ_start, start, section->data);
	fprintf (heap_dump_file, "</section>\n");
/*
 * Emit one <object> XML element for the heap dump: class name
 * (sanitized), size, and optionally its location (nursery / major /
 * LOS).
 * NOTE(review): extraction dropped the return type, braces, the i/j
 * declarations and loop increment, and the "major"/"LOS" location
 * branches; gaps are flagged inline.
 */
dump_object (MonoObject *obj, gboolean dump_location)
	static char class_name [1024];
	MonoClass *class = mono_object_class (obj);
	/*
	 * Python's XML parser is too stupid to parse angle brackets
	 * in strings, so we just ignore them;
	 */
	/* copy the class name, dropping <, > and " characters */
	while (class->name [i] && j < sizeof (class_name) - 1) {
		if (!strchr ("<>\"", class->name [i]))
			class_name [j++] = class->name [i];
	/* NOTE(review): ++i and NUL-termination lines dropped here. */
	g_assert (j < sizeof (class_name));
	fprintf (heap_dump_file, "<object class=\"%s.%s\" size=\"%d\"",
		class->name_space, class_name,
		safe_object_get_size (obj));
	if (dump_location) {
		const char *location;
		if (ptr_in_nursery (obj))
			location = "nursery";
		else if (safe_object_get_size (obj) <= MAX_SMALL_OBJ_SIZE)
		/* NOTE(review): "major" / "LOS" assignments dropped here. */
		fprintf (heap_dump_file, " location=\"%s\"", location);
	fprintf (heap_dump_file, "/>\n");
/*
 * Write one <collection> record to the heap dump file: header with
 * type/number/reason, mempool and internal memory usage, pinned-byte
 * counters, the pinned-object list, the nursery section, the major
 * heap sections and the large-object space.
 * NOTE(review): extraction dropped the return type, braces, the
 * declarations of list/bigobj and the `if (reason)` guard before the
 * reason attribute; gaps are flagged inline.
 */
dump_heap (const char *type, int num, const char *reason)
	fprintf (heap_dump_file, "<collection type=\"%s\" num=\"%d\"", type, num);
	/* NOTE(review): `if (reason)` guard presumably dropped here. */
	fprintf (heap_dump_file, " reason=\"%s\"", reason);
	fprintf (heap_dump_file, ">\n");
	fprintf (heap_dump_file, "<other-mem-usage type=\"mempools\" size=\"%ld\"/>\n", mono_mempool_get_bytes_allocated ());
	mono_sgen_dump_internal_mem_usage (heap_dump_file);
	fprintf (heap_dump_file, "<pinned type=\"stack\" bytes=\"%zu\"/>\n", pinned_byte_counts [PIN_TYPE_STACK]);
	/* fprintf (heap_dump_file, "<pinned type=\"static-data\" bytes=\"%d\"/>\n", pinned_byte_counts [PIN_TYPE_STATIC_DATA]); */
	fprintf (heap_dump_file, "<pinned type=\"other\" bytes=\"%zu\"/>\n", pinned_byte_counts [PIN_TYPE_OTHER]);
	fprintf (heap_dump_file, "<pinned-objects>\n");
	for (list = pinned_objects; list; list = list->next)
		dump_object (list->obj, TRUE);
	fprintf (heap_dump_file, "</pinned-objects>\n");
	mono_sgen_dump_section (nursery_section, "nursery");
	major.dump_heap (heap_dump_file);
	fprintf (heap_dump_file, "<los>\n");
	for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
		dump_object ((MonoObject*)bigobj->data, FALSE);
	fprintf (heap_dump_file, "</los>\n");
	fprintf (heap_dump_file, "</collection>\n");
2441 mono_sgen_register_moved_object (void *obj
, void *destination
)
2443 g_assert (mono_profiler_events
& MONO_PROFILE_GC_MOVES
);
2445 /* FIXME: handle this for parallel collector */
2446 g_assert (!major
.is_parallel
);
2448 if (moved_objects_idx
== MOVED_OBJECTS_NUM
) {
2449 mono_profiler_gc_moves (moved_objects
, moved_objects_idx
);
2450 moved_objects_idx
= 0;
2452 moved_objects
[moved_objects_idx
++] = obj
;
2453 moved_objects
[moved_objects_idx
++] = destination
;
/*
 * NOTE(review): this span is the interior of a statistics-initialization
 * function whose signature line was dropped by extraction (the
 * `static gboolean inited` guard below is function-local state used for
 * run-once semantics).  It registers all GC timing/statistics counters
 * with the Mono counters subsystem.
 */
static gboolean inited = FALSE;
/* NOTE(review): `if (inited) return;` guard lines presumably dropped here. */
/* --- minor (nursery) collection phase timers --- */
mono_counters_register ("Minor fragment clear", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_minor_pre_collection_fragment_clear);
mono_counters_register ("Minor pinning", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_minor_pinning);
mono_counters_register ("Minor scan remsets", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_minor_scan_remsets);
mono_counters_register ("Minor scan pinned", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_minor_scan_pinned);
mono_counters_register ("Minor scan registered roots", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_minor_scan_registered_roots);
mono_counters_register ("Minor scan thread data", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_minor_scan_thread_data);
mono_counters_register ("Minor finish gray stack", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_minor_finish_gray_stack);
mono_counters_register ("Minor fragment creation", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_minor_fragment_creation);
/* --- major collection phase timers --- */
mono_counters_register ("Major fragment clear", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_pre_collection_fragment_clear);
mono_counters_register ("Major pinning", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_pinning);
mono_counters_register ("Major scan pinned", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_scan_pinned);
mono_counters_register ("Major scan registered roots", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_scan_registered_roots);
mono_counters_register ("Major scan thread data", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_scan_thread_data);
mono_counters_register ("Major scan alloc_pinned", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_scan_alloc_pinned);
mono_counters_register ("Major scan finalized", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_scan_finalized);
mono_counters_register ("Major scan big objects", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_scan_big_objects);
mono_counters_register ("Major finish gray stack", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_finish_gray_stack);
mono_counters_register ("Major free big objects", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_free_bigobjs);
mono_counters_register ("Major LOS sweep", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_los_sweep);
mono_counters_register ("Major sweep", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_sweep);
mono_counters_register ("Major fragment creation", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_fragment_creation);
/* --- heavy statistics: write barriers, allocation, copying --- */
#ifdef HEAVY_STATISTICS
mono_counters_register ("WBarrier set field", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_set_field);
mono_counters_register ("WBarrier set arrayref", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_set_arrayref);
mono_counters_register ("WBarrier arrayref copy", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_arrayref_copy);
mono_counters_register ("WBarrier generic store called", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_generic_store);
mono_counters_register ("WBarrier generic store stored", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_generic_store_remset);
mono_counters_register ("WBarrier set root", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_set_root);
mono_counters_register ("WBarrier value copy", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_value_copy);
mono_counters_register ("WBarrier object copy", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_object_copy);
mono_counters_register ("# objects allocated", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_objects_alloced);
mono_counters_register ("bytes allocated", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_bytes_alloced);
mono_counters_register ("# objects allocated degraded", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_objects_alloced_degraded);
mono_counters_register ("bytes allocated degraded", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_bytes_alloced_degraded);
mono_counters_register ("bytes allocated in LOS", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_bytes_alloced_los);
mono_counters_register ("# copy_object() called (nursery)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_copy_object_called_nursery);
mono_counters_register ("# objects copied (nursery)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_objects_copied_nursery);
mono_counters_register ("# copy_object() called (major)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_copy_object_called_major);
mono_counters_register ("# objects copied (major)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_objects_copied_major);
mono_counters_register ("# scan_object() called (nursery)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_scan_object_called_nursery);
mono_counters_register ("# scan_object() called (major)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_scan_object_called_major);
mono_counters_register ("# nursery copy_object() failed from space", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_nursery_copy_object_failed_from_space);
mono_counters_register ("# nursery copy_object() failed forwarded", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_nursery_copy_object_failed_forwarded);
mono_counters_register ("# nursery copy_object() failed pinned", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_nursery_copy_object_failed_pinned);
mono_counters_register ("# wasted fragments used", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_wasted_fragments_used);
mono_counters_register ("bytes in wasted fragments", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_wasted_fragments_bytes);
mono_counters_register ("Store remsets", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_store_remsets);
mono_counters_register ("Unique store remsets", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_store_remsets_unique);
mono_counters_register ("Saved remsets 1", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_saved_remsets_1);
mono_counters_register ("Saved remsets 2", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_saved_remsets_2);
mono_counters_register ("Global remsets added", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_global_remsets_added);
mono_counters_register ("Global remsets re-added", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_global_remsets_readded);
mono_counters_register ("Global remsets processed", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_global_remsets_processed);
mono_counters_register ("Global remsets discarded", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_global_remsets_discarded);
/* NOTE(review): #endif, `inited = TRUE;` and closing brace dropped by extraction. */
/*
 * Heuristic: a major collection is needed once the memory allocated by
 * minor collections (promoted sections plus LOS growth since the last
 * major) exceeds the current allowance.
 * NOTE(review): return type (presumably gboolean) and braces were
 * dropped by extraction.
 */
need_major_collection (void)
	/* LOS growth since the last recorded usage (never negative) */
	mword los_alloced = los_memory_usage - MIN (last_los_memory_usage, los_memory_usage);
	return minor_collection_sections_alloced * major.section_size + los_alloced > minor_collection_allowance;
/*
 * collect_nursery:
 * Collect objects in the nursery.  Returns whether to trigger a major
 * collection afterwards (i.e. need_major_collection ()).
 *
 * Caller must have stopped the world already (see the comment at the
 * TV_GETTIME below).
 *
 * NOTE(review): this chunk elides several original lines — the atv/btv
 * timer declarations and the TV_GETTIME() calls that bracket each
 * time_minor_* accumulation — they are restored only implicitly here;
 * verify against upstream sgen-gc.c before relying on exact timing flow.
 */
static gboolean
collect_nursery (size_t requested_size)
{
	size_t max_garbage_amount;
	char *orig_nursery_next;
	TV_DECLARE (all_atv);
	TV_DECLARE (all_btv);

	current_collection_generation = GENERATION_NURSERY;

	binary_protocol_collection (GENERATION_NURSERY);
	check_scan_starts ();

	orig_nursery_next = nursery_next;
	/* never scan below the last pinned end */
	nursery_next = MAX (nursery_next, nursery_last_pinned_end);
	/* FIXME: optimize later to use the higher address where an object can be present */
	nursery_next = MAX (nursery_next, nursery_real_end);

	DEBUG (1, fprintf (gc_debug_file, "Start nursery collection %d %p-%p, size: %d\n", num_minor_gcs, nursery_start, nursery_next, (int)(nursery_next - nursery_start)));
	max_garbage_amount = nursery_next - nursery_start;
	g_assert (nursery_section->size >= max_garbage_amount);

	/* world must be stopped already */
	TV_GETTIME (all_atv);

	/* Pinning depends on this */
	clear_nursery_fragments (orig_nursery_next);

	time_minor_pre_collection_fragment_clear += TV_ELAPSED_MS (atv, btv);

	check_for_xdomain_refs ();

	nursery_section->next_data = nursery_next;

	major.start_nursery_collection ();

	gray_object_queue_init (&gray_queue, mono_sgen_get_unmanaged_allocator ());

	mono_stats.minor_gc_count ++;

	global_remset_cache_clear ();

	/* pin from pinned handles */
	pin_from_roots (nursery_start, nursery_next);
	/* identify pinned objects */
	optimize_pin_queue (0);
	next_pin_slot = pin_objects_from_addresses (nursery_section, pin_queue, pin_queue + next_pin_slot, nursery_start, nursery_next, &gray_queue);
	nursery_section->pin_queue_start = pin_queue;
	nursery_section->pin_queue_num_entries = next_pin_slot;

	time_minor_pinning += TV_ELAPSED_MS (btv, atv);
	DEBUG (2, fprintf (gc_debug_file, "Finding pinned pointers: %d in %d usecs\n", next_pin_slot, TV_ELAPSED (btv, atv)));
	DEBUG (4, fprintf (gc_debug_file, "Start scan with %d pinned objects\n", next_pin_slot));

	if (consistency_check_at_minor_collection)
		check_consistency ();

	/*
	 * walk all the roots and copy the young objects to the old generation,
	 * starting from to_space
	 */
	scan_from_remsets (nursery_start, nursery_next, &gray_queue);
	/* we don't have complete write barrier yet, so we scan all the old generation sections */
	time_minor_scan_remsets += TV_ELAPSED_MS (atv, btv);
	DEBUG (2, fprintf (gc_debug_file, "Old generation scan: %d usecs\n", TV_ELAPSED (atv, btv)));

	drain_gray_stack (&gray_queue);

	time_minor_scan_pinned += TV_ELAPSED_MS (btv, atv);
	/* registered roots, this includes static fields */
	scan_from_registered_roots (major.copy_object, nursery_start, nursery_next, ROOT_TYPE_NORMAL, &gray_queue);
	scan_from_registered_roots (major.copy_object, nursery_start, nursery_next, ROOT_TYPE_WBARRIER, &gray_queue);
	time_minor_scan_registered_roots += TV_ELAPSED_MS (atv, btv);
	/* thread stacks and registers */
	scan_thread_data (nursery_start, nursery_next, TRUE);
	time_minor_scan_thread_data += TV_ELAPSED_MS (btv, atv);
	finish_gray_stack (nursery_start, nursery_next, GENERATION_NURSERY, &gray_queue);
	time_minor_finish_gray_stack += TV_ELAPSED_MS (btv, atv);

	/* walk the pin_queue, build up the fragment list of free memory, unmark
	 * pinned objects as we go, memzero() the empty fragments so they are ready for the
	 * next allocations.
	 */
	build_nursery_fragments (pin_queue, next_pin_slot);
	time_minor_fragment_creation += TV_ELAPSED_MS (atv, btv);
	DEBUG (2, fprintf (gc_debug_file, "Fragment creation: %d usecs, %lu bytes available\n", TV_ELAPSED (atv, btv), (unsigned long)fragment_total));

	if (consistency_check_at_minor_collection)
		check_major_refs ();

	major.finish_nursery_collection ();

	TV_GETTIME (all_btv);
	mono_stats.minor_gc_time_usecs += TV_ELAPSED (all_atv, all_btv);

	dump_heap ("minor", num_minor_gcs - 1, NULL);

	/* prepare the pin queue for the next collection */
	last_num_pinned = next_pin_slot;

	if (fin_ready_list || critical_fin_list) {
		DEBUG (4, fprintf (gc_debug_file, "Finalizer-thread wakeup: ready %d\n", num_ready_finalizers));
		mono_gc_finalize_notify ();
	}

	g_assert (gray_object_queue_is_empty (&gray_queue));

	check_scan_starts ();

	binary_protocol_flush_buffers ();

	current_collection_generation = -1;

	return need_major_collection ();
}
/*
 * major_do_collection:
 * Perform a full (old-generation) collection: pin from all roots across
 * the whole heap, mark/copy via the major collector, sweep the LOS list,
 * rebuild nursery fragments, and recompute the adaptive
 * minor_collection_allowance from how much was actually reclaimed.
 *
 * Caller must have stopped the world already.
 *
 * NOTE(review): this chunk elides several original lines (atv/btv timer
 * declarations and TV_GETTIME calls, `prevbo = NULL`, the `to_free`
 * temporary and else-branches of the LOS sweep, `next_pin_slot = 0`,
 * the LOS/major sweep calls); restored conservatively — verify against
 * upstream sgen-gc.c.
 */
static void
major_do_collection (const char *reason)
{
	LOSObject *bigobj, *prevbo;
	TV_DECLARE (all_atv);
	TV_DECLARE (all_btv);
	/* FIXME: only use these values for the precise scan
	 * note that to_space pointers should be excluded anyway...
	 */
	char *heap_start = NULL;
	char *heap_end = (char*)-1;
	int old_num_major_sections = major.get_num_major_sections ();
	int num_major_sections, num_major_sections_saved, save_target, allowance_target;
	mword los_memory_saved, los_memory_alloced, old_los_memory_usage;

	/*
	 * A domain could have been freed, resulting in
	 * los_memory_usage being less than last_los_memory_usage.
	 */
	los_memory_alloced = los_memory_usage - MIN (last_los_memory_usage, los_memory_usage);
	old_los_memory_usage = los_memory_usage;

	//count_ref_nonref_objs ();
	//consistency_check ();

	binary_protocol_collection (GENERATION_OLD);
	check_scan_starts ();
	gray_object_queue_init (&gray_queue, mono_sgen_get_unmanaged_allocator ());
	if (major.is_parallel)
		gray_object_queue_init (&workers_distribute_gray_queue, mono_sgen_get_unmanaged_allocator ());

	DEBUG (1, fprintf (gc_debug_file, "Start major collection %d\n", num_major_gcs));

	mono_stats.major_gc_count ++;

	/* world must be stopped already */
	TV_GETTIME (all_atv);

	/* Pinning depends on this */
	clear_nursery_fragments (nursery_next);

	time_major_pre_collection_fragment_clear += TV_ELAPSED_MS (atv, btv);

	check_for_xdomain_refs ();

	nursery_section->next_data = nursery_real_end;
	/* we should also coalesce scanning from sections close to each other
	 * and deal with pointers outside of the sections later.
	 */
	/* The remsets are not useful for a major collection */
	global_remset_cache_clear ();

	DEBUG (6, fprintf (gc_debug_file, "Collecting pinned addresses\n"));
	pin_from_roots ((void*)lowest_heap_address, (void*)highest_heap_address);
	optimize_pin_queue (0);

	/*
	 * pin_queue now contains all candidate pointers, sorted and
	 * uniqued. We must do two passes now to figure out which
	 * objects are pinned.
	 *
	 * The first is to find within the pin_queue the area for each
	 * section. This requires that the pin_queue be sorted. We
	 * also process the LOS objects and pinned chunks here.
	 *
	 * The second, destructive, pass is to reduce the section
	 * areas to pointers to the actually pinned objects.
	 */
	DEBUG (6, fprintf (gc_debug_file, "Pinning from sections\n"));
	/* first pass for the sections */
	mono_sgen_find_section_pin_queue_start_end (nursery_section);
	major.find_pin_queue_start_ends (WORKERS_DISTRIBUTE_GRAY_QUEUE);
	/* identify possible pointers to the insize of large objects */
	DEBUG (6, fprintf (gc_debug_file, "Pinning from large objects\n"));
	for (bigobj = los_object_list; bigobj; bigobj = bigobj->next) {
		if (mono_sgen_find_optimized_pin_queue_area (bigobj->data, (char*)bigobj->data + bigobj->size, &dummy)) {
			pin_object (bigobj->data);
			/* FIXME: only enqueue if object has references */
			GRAY_OBJECT_ENQUEUE (WORKERS_DISTRIBUTE_GRAY_QUEUE, bigobj->data);
			mono_sgen_pin_stats_register_object ((char*) bigobj->data, safe_object_get_size ((MonoObject*) bigobj->data));
			DEBUG (6, fprintf (gc_debug_file, "Marked large object %p (%s) size: %lu from roots\n", bigobj->data, safe_name (bigobj->data), (unsigned long)bigobj->size));
		}
	}
	/* second pass for the sections */
	mono_sgen_pin_objects_in_section (nursery_section, WORKERS_DISTRIBUTE_GRAY_QUEUE);
	major.pin_objects (WORKERS_DISTRIBUTE_GRAY_QUEUE);

	time_major_pinning += TV_ELAPSED_MS (atv, btv);
	DEBUG (2, fprintf (gc_debug_file, "Finding pinned pointers: %d in %d usecs\n", next_pin_slot, TV_ELAPSED (atv, btv)));
	DEBUG (4, fprintf (gc_debug_file, "Start scan with %d pinned objects\n", next_pin_slot));

	major.init_to_space ();

	workers_start_all_workers (1);

	time_major_scan_pinned += TV_ELAPSED_MS (btv, atv);

	/* registered roots, this includes static fields */
	scan_from_registered_roots (major.copy_or_mark_object, heap_start, heap_end, ROOT_TYPE_NORMAL, WORKERS_DISTRIBUTE_GRAY_QUEUE);
	scan_from_registered_roots (major.copy_or_mark_object, heap_start, heap_end, ROOT_TYPE_WBARRIER, WORKERS_DISTRIBUTE_GRAY_QUEUE);
	time_major_scan_registered_roots += TV_ELAPSED_MS (atv, btv);

	/* FIXME: This is the wrong place for this, because it does
	   pinning */
	scan_thread_data (heap_start, heap_end, TRUE);
	time_major_scan_thread_data += TV_ELAPSED_MS (btv, atv);

	time_major_scan_alloc_pinned += TV_ELAPSED_MS (atv, btv);

	/* scan the list of objects ready for finalization */
	scan_finalizer_entries (major.copy_or_mark_object, fin_ready_list, WORKERS_DISTRIBUTE_GRAY_QUEUE);
	scan_finalizer_entries (major.copy_or_mark_object, critical_fin_list, WORKERS_DISTRIBUTE_GRAY_QUEUE);
	time_major_scan_finalized += TV_ELAPSED_MS (btv, atv);
	DEBUG (2, fprintf (gc_debug_file, "Root scan: %d usecs\n", TV_ELAPSED (btv, atv)));

	time_major_scan_big_objects += TV_ELAPSED_MS (atv, btv);

	if (major.is_parallel) {
		/* FIXME: don't do busy waiting here! */
		while (!gray_object_queue_is_empty (WORKERS_DISTRIBUTE_GRAY_QUEUE))
			workers_distribute_gray_queue_sections ();
		workers_change_num_working (-1);
	}

	if (major.is_parallel)
		g_assert (gray_object_queue_is_empty (&gray_queue));

	/* all the objects in the heap */
	finish_gray_stack (heap_start, heap_end, GENERATION_OLD, &gray_queue);
	time_major_finish_gray_stack += TV_ELAPSED_MS (btv, atv);

	/* sweep the big objects list */
	prevbo = NULL;
	for (bigobj = los_object_list; bigobj;) {
		if (object_is_pinned (bigobj->data)) {
			unpin_object (bigobj->data);
		} else {
			LOSObject *to_free;
			/* not referenced anywhere, so we can free it */
			if (prevbo)
				prevbo->next = bigobj->next;
			else
				los_object_list = bigobj->next;
			to_free = bigobj;
			bigobj = bigobj->next;
			free_large_object (to_free);
			continue;
		}
		prevbo = bigobj;
		bigobj = bigobj->next;
	}

	time_major_free_bigobjs += TV_ELAPSED_MS (atv, btv);

	time_major_los_sweep += TV_ELAPSED_MS (btv, atv);

	time_major_sweep += TV_ELAPSED_MS (atv, btv);

	/* walk the pin_queue, build up the fragment list of free memory, unmark
	 * pinned objects as we go, memzero() the empty fragments so they are ready for the
	 * next allocations.
	 */
	build_nursery_fragments (nursery_section->pin_queue_start, nursery_section->pin_queue_num_entries);

	time_major_fragment_creation += TV_ELAPSED_MS (btv, atv);

	TV_GETTIME (all_btv);
	mono_stats.major_gc_time_usecs += TV_ELAPSED (all_atv, all_btv);

	dump_heap ("major", num_major_gcs - 1, reason);

	/* prepare the pin queue for the next collection */
	if (fin_ready_list || critical_fin_list) {
		DEBUG (4, fprintf (gc_debug_file, "Finalizer-thread wakeup: ready %d\n", num_ready_finalizers));
		mono_gc_finalize_notify ();
	}

	g_assert (gray_object_queue_is_empty (&gray_queue));

	num_major_sections = major.get_num_major_sections ();

	num_major_sections_saved = MAX (old_num_major_sections - num_major_sections, 0);
	/* at least 1 so the ratio below never divides by zero */
	los_memory_saved = MAX (old_los_memory_usage - los_memory_usage, 1);

	/* aim to reclaim half of the live set in the next major cycle */
	save_target = ((num_major_sections * major.section_size) + los_memory_saved) / 2;

	/*
	 * We aim to allow the allocation of as many sections as is
	 * necessary to reclaim save_target sections in the next
	 * collection. We assume the collection pattern won't change.
	 * In the last cycle, we had num_major_sections_saved for
	 * minor_collection_sections_alloced. Assuming things won't
	 * change, this must be the same ratio as save_target for
	 * allowance_target, i.e.
	 *
	 *    num_major_sections_saved            save_target
	 * --------------------------------- == ----------------
	 * minor_collection_sections_alloced    allowance_target
	 */
	allowance_target = (mword)((double)save_target * (double)(minor_collection_sections_alloced * major.section_size + los_memory_alloced) / (double)(num_major_sections_saved * major.section_size + los_memory_saved));

	minor_collection_allowance = MAX (MIN (allowance_target, num_major_sections * major.section_size + los_memory_usage), MIN_MINOR_COLLECTION_ALLOWANCE);

	minor_collection_sections_alloced = 0;
	last_los_memory_usage = los_memory_usage;

	major.finish_major_collection ();

	check_scan_starts ();

	binary_protocol_flush_buffers ();

	//consistency_check ();
}
/*
 * major_collection:
 * Entry point for a major collection.  Honors the MONO_GC_NO_MAJOR
 * environment variable (for debugging) by degrading to a nursery
 * collection instead.  Sets current_collection_generation around the
 * actual work so allocation/copy helpers know which generation is
 * being collected.
 * NOTE(review): the early `return` after the MONO_GC_NO_MAJOR nursery
 * collection is elided in this chunk; restored — verify upstream.
 */
static void
major_collection (const char *reason)
{
	if (g_getenv ("MONO_GC_NO_MAJOR")) {
		collect_nursery (0);
		return;
	}

	current_collection_generation = GENERATION_OLD;
	major_do_collection (reason);
	current_collection_generation = -1;
}
/*
 * When deciding if it's better to collect or to expand, keep track
 * of how much garbage was reclaimed with the last collection: if it's too
 * little, expand.
 * This is called when we could not allocate a small object.
 *
 * NOTE(review): the stop_world()/restart_world() calls and the
 * `degraded_mode = 1` fallback are elided in this chunk; verify
 * against upstream sgen-gc.c.
 */
static void __attribute__((noinline))
minor_collect_or_expand_inner (size_t size)
{
	int do_minor_collection = 1;

	g_assert (nursery_section);
	if (do_minor_collection) {
		/* a minor collection that exceeds the allowance triggers a major one */
		if (collect_nursery (size))
			major_collection ("minor overflow");
		DEBUG (2, fprintf (gc_debug_file, "Heap size: %lu, LOS size: %lu\n", (unsigned long)total_alloc, (unsigned long)los_memory_usage));
		/* this also sets the proper pointers for the next allocation */
		if (!search_fragment_for_size (size)) {
			int i;
			/* TypeBuilder and MonoMethod are killing mcs with fragmentation */
			DEBUG (1, fprintf (gc_debug_file, "nursery collection didn't find enough room for %zd alloc (%d pinned)\n", size, last_num_pinned));
			for (i = 0; i < last_num_pinned; ++i) {
				DEBUG (3, fprintf (gc_debug_file, "Bastard pinning obj %p (%s), size: %d\n", pin_queue [i], safe_name (pin_queue [i]), safe_object_get_size (pin_queue [i])));
			}
		}
	}
	//report_internal_mem_usage ();
}
/*
 * ######################################################################
 * ######## Memory allocation from the OS
 * ######################################################################
 * This section of code deals with getting memory from the OS and
 * allocating memory for GC-internal data structures.
 * Internal memory can be handled with a freelist for small objects.
 */

/*
 * report_internal_mem_usage:
 * Debugging helper: print internal and pinned memory usage to stdout.
 * Kept with G_GNUC_UNUSED since it is only called from commented-out
 * diagnostics code.
 */
G_GNUC_UNUSED static void
report_internal_mem_usage (void)
{
	printf ("Internal memory usage:\n");
	mono_sgen_report_internal_mem_usage ();
	printf ("Pinned memory usage:\n");
	major.report_pinned_memory_usage ();
}
/*
 * Allocate a big chunk of memory from the OS (usually 64KB to several megabytes).
 * This must not require any lock.
 *
 * Rounds @size up to a multiple of the OS page size and accounts it in
 * total_alloc.  @activate selects read/write protection vs. reserved
 * (PROT_NONE-style) mapping.
 * NOTE(review): the `void *ptr` declaration and `return ptr` are elided
 * in this chunk; restored — verify upstream.
 */
void*
mono_sgen_alloc_os_memory (size_t size, int activate)
{
	void *ptr;
	unsigned long prot_flags = activate? MONO_MMAP_READ|MONO_MMAP_WRITE: MONO_MMAP_NONE;

	prot_flags |= MONO_MMAP_PRIVATE | MONO_MMAP_ANON;
	/* round up to the OS page size (pagesize is a power of two) */
	size += pagesize - 1;
	size &= ~(pagesize - 1);
	ptr = mono_valloc (0, size, prot_flags);
	total_alloc += size;
	return ptr;
}
/*
 * Free the memory returned by mono_sgen_alloc_os_memory (), returning it to the OS.
 *
 * @size is rounded up to the page size the same way the allocator did,
 * so the total_alloc accounting stays balanced.
 */
void
mono_sgen_free_os_memory (void *addr, size_t size)
{
	mono_vfree (addr, size);

	size += pagesize - 1;
	size &= ~(pagesize - 1);
	total_alloc -= size;
}
/*
 * ######################################################################
 * ######## Object allocation
 * ######################################################################
 * This section of code deals with allocating memory for objects.
 * There are several ways:
 * *) allocate large objects
 * *) allocate normal objects
 * *) fast lock-free allocation
 * *) allocation of pinned objects
 */

/*
 * setup_fragment:
 * Unlink @frag (whose predecessor in nursery_fragments is @prev, or NULL
 * if it is the head) from the fragment list, make it the current
 * allocation window (nursery_next / nursery_frag_real_end), and return
 * the node to the fragment freelist.
 * NOTE(review): the if/else around the unlink is elided in this chunk;
 * restored — verify upstream.
 */
static void
setup_fragment (Fragment *frag, Fragment *prev, size_t size)
{
	/* remove from the list */
	if (prev)
		prev->next = frag->next;
	else
		nursery_fragments = frag->next;
	nursery_next = frag->fragment_start;
	nursery_frag_real_end = frag->fragment_end;

	DEBUG (4, fprintf (gc_debug_file, "Using nursery fragment %p-%p, size: %td (req: %zd)\n", nursery_next, nursery_frag_real_end, nursery_frag_real_end - nursery_next, size));
	frag->next = fragment_freelist;
	fragment_freelist = frag;
}
/* check if we have a suitable fragment in nursery_fragments to be able to allocate
 * an object of size @size
 * Return FALSE if not found (which means we need a collection)
 *
 * NOTE(review): `prev` initialization, its advance in the loop, and the
 * TRUE/FALSE returns are elided in this chunk; restored — verify upstream.
 */
static gboolean
search_fragment_for_size (size_t size)
{
	Fragment *frag, *prev;
	DEBUG (4, fprintf (gc_debug_file, "Searching nursery fragment %p, size: %zd\n", nursery_frag_real_end, size));

	if (nursery_frag_real_end > nursery_next && nursery_clear_policy == CLEAR_AT_TLAB_CREATION)
		/* Clear the remaining space, pinning depends on this */
		memset (nursery_next, 0, nursery_frag_real_end - nursery_next);

	prev = NULL;
	for (frag = nursery_fragments; frag; frag = frag->next) {
		if (size <= (frag->fragment_end - frag->fragment_start)) {
			setup_fragment (frag, prev, size);
			return TRUE;
		}
		prev = frag;
	}
	return FALSE;
}
/*
 * Same as search_fragment_for_size but if search for @desired_size fails, try to satisfy @minimum_size.
 * This improves nursery usage.
 *
 * Returns the size actually made available (desired_size, the fallback
 * fragment's size, or 0 on failure).  min_prev uses GINT_TO_POINTER(-1)
 * as a "no fallback found" sentinel because NULL is a valid predecessor
 * (meaning the list head).
 * NOTE(review): `prev` bookkeeping, the min_prev assignment, and the
 * returns are elided in this chunk; restored — verify upstream.
 */
static int
search_fragment_for_size_range (size_t desired_size, size_t minimum_size)
{
	Fragment *frag, *prev, *min_prev;
	DEBUG (4, fprintf (gc_debug_file, "Searching nursery fragment %p, desired size: %zd minimum size %zd\n", nursery_frag_real_end, desired_size, minimum_size));

	if (nursery_frag_real_end > nursery_next && nursery_clear_policy == CLEAR_AT_TLAB_CREATION)
		/* Clear the remaining space, pinning depends on this */
		memset (nursery_next, 0, nursery_frag_real_end - nursery_next);

	min_prev = GINT_TO_POINTER (-1);
	prev = NULL;

	for (frag = nursery_fragments; frag; frag = frag->next) {
		int frag_size = frag->fragment_end - frag->fragment_start;
		if (desired_size <= frag_size) {
			setup_fragment (frag, prev, desired_size);
			return desired_size;
		}
		if (minimum_size <= frag_size)
			min_prev = prev;
		prev = frag;
	}

	if (min_prev != GINT_TO_POINTER (-1)) {
		int frag_size;
		/* min_prev == NULL means the fallback fragment is the list head */
		if (min_prev)
			frag = min_prev->next;
		else
			frag = nursery_fragments;

		frag_size = frag->fragment_end - frag->fragment_start;
		HEAVY_STAT (++stat_wasted_fragments_used);
		HEAVY_STAT (stat_wasted_fragments_bytes += frag_size);

		setup_fragment (frag, min_prev, minimum_size);
		return frag_size;
	}

	return 0;
}
/*
 * alloc_degraded:
 * Allocate @size bytes for @vtable directly in the major heap because
 * the nursery is exhausted.  May trigger a major collection first if the
 * allowance was exceeded.  degraded_mode accumulates the bytes allocated
 * this way so the fast path knows when to force a real collection.
 * NOTE(review): the stop_world()/restart_world() calls around the
 * collection are elided in this chunk; verify against upstream.
 */
static void*
alloc_degraded (MonoVTable *vtable, size_t size)
{
	if (need_major_collection ()) {
		major_collection ("degraded overflow");
	}

	degraded_mode += size;
	return major.alloc_degraded (vtable, size);
}
/*
 * Provide a variant that takes just the vtable for small fixed-size objects.
 * The aligned size is already computed and stored in vt->gc_descr.
 * Note: every SCAN_START_SIZE or so we are given the chance to do some special
 * processing. We can keep track of where objects start, for example,
 * so when we scan the thread stacks for pinned objects, we can start
 * a search for the pinned object in SCAN_START_SIZE chunks.
 *
 * Caller must hold the GC lock (see the long comment below about why the
 * lock is needed even for the fast path).
 *
 * NOTE(review): this chunk elides numerous original lines (the p/new_next
 * declarations, TLAB_ACCESS_INIT, `*p = vtable` stores, `return p`
 * statements, and the else-branch glue).  The control flow below is a
 * conservative reconstruction — verify against upstream sgen-gc.c.
 */
static void*
mono_gc_alloc_obj_nolock (MonoVTable *vtable, size_t size)
{
	/* FIXME: handle OOM */
	void **p;
	char *new_next;
	TLAB_ACCESS_INIT;

	HEAVY_STAT (++stat_objects_alloced);
	if (size <= MAX_SMALL_OBJ_SIZE)
		HEAVY_STAT (stat_bytes_alloced += size);
	else
		HEAVY_STAT (stat_bytes_alloced_los += size);

	size = ALIGN_UP (size);

	g_assert (vtable->gc_descr);

	if (G_UNLIKELY (collect_before_allocs)) {
		if (nursery_section) {
			collect_nursery (0);
			if (!degraded_mode && !search_fragment_for_size (size)) {
				/* a fresh nursery must have room for a small object */
				g_assert_not_reached ();
			}
		}
	}

	/*
	 * We must already have the lock here instead of after the
	 * fast path because we might be interrupted in the fast path
	 * (after confirming that new_next < TLAB_TEMP_END) by the GC,
	 * and we'll end up allocating an object in a fragment which
	 * no longer belongs to us.
	 *
	 * The managed allocator does not do this, but it's treated
	 * specially by the world-stopping code.
	 */

	if (size > MAX_SMALL_OBJ_SIZE) {
		p = alloc_large_inner (vtable, size);
	} else {
		/* tlab_next and tlab_temp_end are TLS vars so accessing them might be expensive */

		p = (void**)TLAB_NEXT;
		/* FIXME: handle overflow */
		new_next = (char*)p + size;
		TLAB_NEXT = new_next;

		if (G_LIKELY (new_next < TLAB_TEMP_END)) {
			/* Fast path: the bump allocation fit inside the TLAB */

			/*
			 * FIXME: We might need a memory barrier here so the change to tlab_next is
			 * visible before the vtable store.
			 */

			DEBUG (6, fprintf (gc_debug_file, "Allocated object %p, vtable: %p (%s), size: %zd\n", p, vtable, vtable->klass->name, size));
			binary_protocol_alloc (p, vtable, size);
			g_assert (*p == NULL);
			*p = vtable;

			g_assert (TLAB_NEXT == new_next);

			return p;
		}

		/* there are two cases: the object is too big or we run out of space in the TLAB */
		/* we also reach here when the thread does its first allocation after a minor
		 * collection, since the tlab_ variables are initialized to NULL.
		 * there can be another case (from ORP), if we cooperate with the runtime a bit:
		 * objects that need finalizers can have the high bit set in their size
		 * so the above check fails and we can readily add the object to the queue.
		 * This avoids taking again the GC lock when registering, but this is moot when
		 * doing thread-local allocation, so it may not be a good idea.
		 */
		g_assert (TLAB_NEXT == new_next);
		if (TLAB_NEXT >= TLAB_REAL_END) {
			/*
			 * Run out of space in the TLAB. When this happens, some amount of space
			 * remains in the TLAB, but not enough to satisfy the current allocation
			 * request. Currently, we retire the TLAB in all cases, later we could
			 * keep it if the remaining space is above a treshold, and satisfy the
			 * allocation directly from the nursery.
			 */

			/* when running in degraded mode, we continue allocing that way
			 * for a while, to decrease the number of useless nursery collections.
			 */
			if (degraded_mode && degraded_mode < DEFAULT_NURSERY_SIZE) {
				p = alloc_degraded (vtable, size);
				binary_protocol_alloc_degraded (p, vtable, size);
				return p;
			}

			/*FIXME This codepath is current deadcode since tlab_size > MAX_SMALL_OBJ_SIZE*/
			if (size > tlab_size) {
				/* Allocate directly from the nursery */
				if (nursery_next + size >= nursery_frag_real_end) {
					if (!search_fragment_for_size (size)) {
						minor_collect_or_expand_inner (size);
						if (degraded_mode) {
							p = alloc_degraded (vtable, size);
							binary_protocol_alloc_degraded (p, vtable, size);
							return p;
						}
					}
				}

				p = (void*)nursery_next;
				nursery_next += size;
				if (nursery_next > nursery_frag_real_end) {
					/* must not happen: the fragment was just validated above */
					g_assert_not_reached ();
				}

				if (nursery_clear_policy == CLEAR_AT_TLAB_CREATION)
					memset (p, 0, size);
			} else {
				int alloc_size = tlab_size;
				int available_in_nursery = nursery_frag_real_end - nursery_next;

				DEBUG (3, fprintf (gc_debug_file, "Retire TLAB: %p-%p [%ld]\n", TLAB_START, TLAB_REAL_END, (long)(TLAB_REAL_END - TLAB_NEXT - size)));

				if (alloc_size >= available_in_nursery) {
					if (available_in_nursery > MAX_NURSERY_TLAB_WASTE && available_in_nursery > size) {
						/* steal the tail of the current fragment rather than waste it */
						alloc_size = available_in_nursery;
					} else {
						alloc_size = search_fragment_for_size_range (tlab_size, size);
						if (!alloc_size) {
							alloc_size = tlab_size;
							minor_collect_or_expand_inner (tlab_size);
							if (degraded_mode) {
								p = alloc_degraded (vtable, size);
								binary_protocol_alloc_degraded (p, vtable, size);
								return p;
							}
						}
					}
				}

				/* Allocate a new TLAB from the current nursery fragment */
				TLAB_START = nursery_next;
				nursery_next += alloc_size;
				TLAB_NEXT = TLAB_START;
				TLAB_REAL_END = TLAB_START + alloc_size;
				TLAB_TEMP_END = TLAB_START + MIN (SCAN_START_SIZE, alloc_size);

				if (nursery_clear_policy == CLEAR_AT_TLAB_CREATION)
					memset (TLAB_START, 0, alloc_size);

				/* Allocate from the TLAB */
				p = (void*)TLAB_NEXT;
				TLAB_NEXT += size;
				g_assert (TLAB_NEXT <= TLAB_REAL_END);

				nursery_section->scan_starts [((char*)p - (char*)nursery_section->data)/SCAN_START_SIZE] = (char*)p;
			}
		} else {
			/* Reached tlab_temp_end */

			/* record the scan start so we can find pinned objects more easily */
			nursery_section->scan_starts [((char*)p - (char*)nursery_section->data)/SCAN_START_SIZE] = (char*)p;
			/* we just bump tlab_temp_end as well */
			TLAB_TEMP_END = MIN (TLAB_REAL_END, TLAB_NEXT + SCAN_START_SIZE);
			DEBUG (5, fprintf (gc_debug_file, "Expanding local alloc: %p-%p\n", TLAB_NEXT, TLAB_TEMP_END));
		}
	}

	DEBUG (6, fprintf (gc_debug_file, "Allocated object %p, vtable: %p (%s), size: %zd\n", p, vtable, vtable->klass->name, size));
	binary_protocol_alloc (p, vtable, size);
	*p = vtable;

	return p;
}
/*
 * mono_gc_try_alloc_obj_nolock:
 * Fast-path-only allocation attempt: bump-allocate from the current TLAB
 * if the object is small and fits before tlab_temp_end; otherwise return
 * NULL so the caller falls back to the locked slow path
 * (mono_gc_alloc_obj_nolock).  Intended to run inside a critical region
 * instead of under the GC lock.
 * NOTE(review): the p/new_next declarations, `*p = vtable`, and the
 * return statements are elided in this chunk; restored — verify upstream.
 */
static void*
mono_gc_try_alloc_obj_nolock (MonoVTable *vtable, size_t size)
{
	void **p;
	char *new_next;
	TLAB_ACCESS_INIT;

	size = ALIGN_UP (size);

	g_assert (vtable->gc_descr);
	if (size <= MAX_SMALL_OBJ_SIZE) {
		/* tlab_next and tlab_temp_end are TLS vars so accessing them might be expensive */

		p = (void**)TLAB_NEXT;
		/* FIXME: handle overflow */
		new_next = (char*)p + size;
		TLAB_NEXT = new_next;

		if (G_LIKELY (new_next < TLAB_TEMP_END)) {
			/* Fast path */

			/*
			 * FIXME: We might need a memory barrier here so the change to tlab_next is
			 * visible before the vtable store.
			 */

			HEAVY_STAT (++stat_objects_alloced);
			HEAVY_STAT (stat_bytes_alloced += size);

			DEBUG (6, fprintf (gc_debug_file, "Allocated object %p, vtable: %p (%s), size: %zd\n", p, vtable, vtable->klass->name, size));
			binary_protocol_alloc (p, vtable, size);
			g_assert (*p == NULL);
			*p = vtable;

			g_assert (TLAB_NEXT == new_next);

			return p;
		}
	}
	return NULL;
}
/*
 * mono_gc_alloc_obj:
 * Public object allocation entry point.  First tries the lock-free
 * critical-region fast path; on failure falls back to the GC-locked
 * slow path.
 * NOTE(review): the res declaration, the `if (res)` early return, and
 * the LOCK_GC/UNLOCK_GC pair are elided in this chunk; restored —
 * verify upstream.
 */
void*
mono_gc_alloc_obj (MonoVTable *vtable, size_t size)
{
	void *res;
#ifndef DISABLE_CRITICAL_REGION
	TLAB_ACCESS_INIT;
	ENTER_CRITICAL_REGION;
	res = mono_gc_try_alloc_obj_nolock (vtable, size);
	if (res) {
		EXIT_CRITICAL_REGION;
		return res;
	}
	EXIT_CRITICAL_REGION;
#endif
	LOCK_GC;
	res = mono_gc_alloc_obj_nolock (vtable, size);
	UNLOCK_GC;
	return res;
}
/*
 * mono_gc_alloc_vector:
 * Allocate a single-dimension, zero-based array and set its max_length.
 * Same fast-path/slow-path structure as mono_gc_alloc_obj.
 * NOTE(review): the arr declaration, early return, and LOCK_GC/UNLOCK_GC
 * pair are elided in this chunk; restored — verify upstream.
 */
void*
mono_gc_alloc_vector (MonoVTable *vtable, size_t size, uintptr_t max_length)
{
	MonoArray *arr;
#ifndef DISABLE_CRITICAL_REGION
	TLAB_ACCESS_INIT;
	ENTER_CRITICAL_REGION;
	arr = mono_gc_try_alloc_obj_nolock (vtable, size);
	if (arr) {
		arr->max_length = max_length;
		EXIT_CRITICAL_REGION;
		return arr;
	}
	EXIT_CRITICAL_REGION;
#endif

	LOCK_GC;

	arr = mono_gc_alloc_obj_nolock (vtable, size);
	arr->max_length = max_length;

	UNLOCK_GC;

	return arr;
}
/*
 * mono_gc_alloc_array:
 * Allocate a multi-dimensional (bounded) array.  The bounds descriptor
 * is carved out of the tail @bounds_size bytes of the same allocation,
 * so no separate GC object is needed for it.
 * NOTE(review): the LOCK_GC/UNLOCK_GC pair and the return are elided in
 * this chunk; restored — verify upstream.
 */
void*
mono_gc_alloc_array (MonoVTable *vtable, size_t size, uintptr_t max_length, uintptr_t bounds_size)
{
	MonoArray *arr;
	MonoArrayBounds *bounds;

	LOCK_GC;

	arr = mono_gc_alloc_obj_nolock (vtable, size);
	arr->max_length = max_length;

	/* the bounds live in the tail of the array's own allocation */
	bounds = (MonoArrayBounds*)((char*)arr + size - bounds_size);
	arr->bounds = bounds;

	UNLOCK_GC;

	return arr;
}
/*
 * mono_gc_alloc_string:
 * Allocate a MonoString of @len characters (total object size @size).
 * Same fast-path/slow-path structure as mono_gc_alloc_obj.
 * NOTE(review): the str declaration, the `str->length = len` stores,
 * the early return, and the LOCK_GC/UNLOCK_GC pair are elided in this
 * chunk; restored — verify upstream.
 */
void*
mono_gc_alloc_string (MonoVTable *vtable, size_t size, gint32 len)
{
	MonoString *str;
#ifndef DISABLE_CRITICAL_REGION
	TLAB_ACCESS_INIT;
	ENTER_CRITICAL_REGION;
	str = mono_gc_try_alloc_obj_nolock (vtable, size);
	if (str) {
		str->length = len;
		EXIT_CRITICAL_REGION;
		return str;
	}
	EXIT_CRITICAL_REGION;
#endif

	LOCK_GC;

	str = mono_gc_alloc_obj_nolock (vtable, size);
	str->length = len;

	UNLOCK_GC;

	return str;
}
/*
 * To be used for interned strings and possibly MonoThread, reflection handles.
 * We may want to explicitly free these objects.
 *
 * Large objects go through the LOS allocator (always pinned); small ones
 * through the major collector's pinned allocator.
 * NOTE(review): the p declaration, `*p = vtable`, the else-branch glue,
 * the LOCK_GC/UNLOCK_GC pair, and the return are elided in this chunk;
 * restored — verify upstream.
 */
void*
mono_gc_alloc_pinned_obj (MonoVTable *vtable, size_t size)
{
	/* FIXME: handle OOM */
	void **p;
	size = ALIGN_UP (size);
	LOCK_GC;
	if (size > MAX_SMALL_OBJ_SIZE) {
		/* large objects are always pinned anyway */
		p = alloc_large_inner (vtable, size);
	} else {
		DEBUG (9, g_assert (vtable->klass->inited));
		p = major.alloc_small_pinned_obj (size, vtable->klass->has_references);
	}
	DEBUG (6, fprintf (gc_debug_file, "Allocated pinned object %p, vtable: %p (%s), size: %zd\n", p, vtable, vtable->klass->name, size));
	binary_protocol_alloc_pinned (p, vtable, size);
	*p = vtable;
	UNLOCK_GC;
	return p;
}
/*
 * ######################################################################
 * ######## Finalization support
 * ######################################################################
 */

/*
 * this is valid for the nursery: if the object has been forwarded it means it's
 * still referenced from a root. If it is pinned it's still alive as well.
 * Return TRUE if @obj is ready to be finalized.
 */
#define object_is_fin_ready(obj) (!object_is_pinned (obj) && !object_is_forwarded (obj))
/*
 * is_critical_finalizer:
 * Return TRUE if @entry's object derives from
 * System.Runtime.ConstrainedExecution.CriticalFinalizerObject, in which
 * case its finalizer must run after ordinary finalizers (it goes on
 * critical_fin_list instead of fin_ready_list).
 * NOTE(review): the obj/class declarations and the FALSE return are
 * elided in this chunk; restored — verify upstream.
 */
static gboolean
is_critical_finalizer (FinalizeEntry *entry)
{
	MonoObject *obj;
	MonoClass *class;

	/* corlib may not define the type at all (e.g. older profiles) */
	if (!mono_defaults.critical_finalizer_object)
		return FALSE;

	obj = entry->object;
	class = ((MonoVTable*)LOAD_VTABLE (obj))->klass;

	return mono_class_has_parent (class, mono_defaults.critical_finalizer_object);
}
/*
 * queue_finalization_entry:
 * Push @entry onto the appropriate ready-for-finalization list:
 * critical finalizers are kept separate so they run after the
 * ordinary ones.
 */
static void
queue_finalization_entry (FinalizeEntry *entry) {
	if (is_critical_finalizer (entry)) {
		entry->next = critical_fin_list;
		critical_fin_list = entry;
	} else {
		entry->next = fin_ready_list;
		fin_ready_list = entry;
	}
}
/* LOCKING: requires that the GC lock is held */
/*
 * rehash_fin_table:
 * Grow @hash_table to a prime-sized bucket array based on the number of
 * registered entries, relinking every chained FinalizeEntry into its new
 * bucket, then free the old bucket array.
 * NOTE(review): the i/hash declarations and the `next = entry->next`
 * step of the chain walk are elided in this chunk; restored — verify
 * upstream.
 */
static void
rehash_fin_table (FinalizeEntryHashTable *hash_table)
{
	FinalizeEntry **finalizable_hash = hash_table->table;
	mword finalizable_hash_size = hash_table->size;
	int i;
	unsigned int hash;
	FinalizeEntry **new_hash;
	FinalizeEntry *entry, *next;
	int new_size = g_spaced_primes_closest (hash_table->num_registered);

	new_hash = mono_sgen_alloc_internal_dynamic (new_size * sizeof (FinalizeEntry*), INTERNAL_MEM_FIN_TABLE);
	for (i = 0; i < finalizable_hash_size; ++i) {
		for (entry = finalizable_hash [i]; entry; entry = next) {
			hash = mono_object_hash (entry->object) % new_size;
			/* save the link before we clobber it below */
			next = entry->next;
			entry->next = new_hash [hash];
			new_hash [hash] = entry;
		}
	}
	mono_sgen_free_internal_dynamic (finalizable_hash, finalizable_hash_size * sizeof (FinalizeEntry*), INTERNAL_MEM_FIN_TABLE);
	hash_table->table = new_hash;
	hash_table->size = new_size;
}
/* LOCKING: requires that the GC lock is held */
/*
 * rehash_fin_table_if_necessary:
 * Grow the finalizer hash table once the load factor reaches 2
 * (entries >= 2 * buckets).
 */
static void
rehash_fin_table_if_necessary (FinalizeEntryHashTable *hash_table)
{
	if (hash_table->num_registered >= hash_table->size * 2)
		rehash_fin_table (hash_table);
}
/* LOCKING: requires that the GC lock is held */
/*
 * finalize_in_range:
 * Walk the finalizable hash table for @generation and, for every
 * registered object inside [start, end) that the major collector does
 * not consider live:
 *   - if it is ready (neither pinned nor forwarded), unlink it, copy it
 *     so it survives for the finalizer run, and queue it on the ready
 *     list;
 *   - otherwise update the stored pointer to the object's new location,
 *     migrating the entry from the minor to the major finalizer table
 *     when the object was promoted out of the nursery.
 *
 * NOTE(review): this chunk elides the loop scaffolding (i declaration,
 * `prev` bookkeeping, `next = entry->next` saves, if/else unlink
 * branches, `continue`s).  The control flow below is a conservative
 * reconstruction — verify against upstream sgen-gc.c.
 */
static void
finalize_in_range (CopyOrMarkObjectFunc copy_func, char *start, char *end, int generation, GrayQueue *queue)
{
	FinalizeEntryHashTable *hash_table = get_finalize_entry_hash_table (generation);
	FinalizeEntry *entry, *prev;
	int i;
	FinalizeEntry **finalizable_hash = hash_table->table;
	mword finalizable_hash_size = hash_table->size;

	for (i = 0; i < finalizable_hash_size; ++i) {
		prev = NULL;
		for (entry = finalizable_hash [i]; entry;) {
			if ((char*)entry->object >= start && (char*)entry->object < end && !major.is_object_live (entry->object)) {
				gboolean is_fin_ready = object_is_fin_ready (entry->object);
				char *copy = entry->object;
				copy_func ((void**)&copy, queue);
				if (is_fin_ready) {
					char *from;
					FinalizeEntry *next;
					/* remove and put in fin_ready_list */
					if (prev)
						prev->next = entry->next;
					else
						finalizable_hash [i] = entry->next;
					next = entry->next;
					num_ready_finalizers++;
					hash_table->num_registered--;
					queue_finalization_entry (entry);
					/* Make it survive */
					from = entry->object;
					entry->object = copy;
					DEBUG (5, fprintf (gc_debug_file, "Queueing object for finalization: %p (%s) (was at %p) (%d/%d)\n", entry->object, safe_name (entry->object), from, num_ready_finalizers, hash_table->num_registered));
					entry = next;
					continue;
				} else {
					char *from = entry->object;
					if (hash_table == &minor_finalizable_hash && !ptr_in_nursery (copy)) {
						FinalizeEntry *next = entry->next;
						unsigned int major_hash;
						/* remove from the list */
						if (prev)
							prev->next = entry->next;
						else
							finalizable_hash [i] = entry->next;
						hash_table->num_registered--;

						entry->object = copy;

						/* insert it into the major hash */
						rehash_fin_table_if_necessary (&major_finalizable_hash);
						major_hash = mono_object_hash ((MonoObject*) copy) %
							major_finalizable_hash.size;
						entry->next = major_finalizable_hash.table [major_hash];
						major_finalizable_hash.table [major_hash] = entry;
						major_finalizable_hash.num_registered++;

						DEBUG (5, fprintf (gc_debug_file, "Promoting finalization of object %p (%s) (was at %p) to major table\n", copy, safe_name (copy), from));

						entry = next;
						continue;
					} else {
						/* update pointer */
						DEBUG (5, fprintf (gc_debug_file, "Updating object for finalization: %p (%s) (was at %p)\n", entry->object, safe_name (entry->object), from));
						entry->object = copy;
					}
				}
			}
			prev = entry;
			entry = entry->next;
		}
	}
}
3633 object_is_reachable (char *object
, char *start
, char *end
)
3635 /*This happens for non nursery objects during minor collections. We just treat all objects as alive.*/
3636 if (object
< start
|| object
>= end
)
3638 return !object_is_fin_ready (object
) || major
.is_object_live (object
);
3641 /* LOCKING: requires that the GC lock is held */
3643 null_ephemerons_for_domain (MonoDomain
*domain
)
3645 EphemeronLinkNode
*current
= ephemeron_list
, *prev
= NULL
;
3648 MonoObject
*object
= (MonoObject
*)current
->array
;
3650 if (object
&& !object
->vtable
) {
3651 EphemeronLinkNode
*tmp
= current
;
3654 prev
->next
= current
->next
;
3656 ephemeron_list
= current
->next
;
3658 current
= current
->next
;
3659 mono_sgen_free_internal (tmp
, INTERNAL_MEM_EPHEMERON_LINK
);
3662 current
= current
->next
;
3667 /* LOCKING: requires that the GC lock is held */
3669 clear_unreachable_ephemerons (CopyOrMarkObjectFunc copy_func
, char *start
, char *end
, GrayQueue
*queue
)
3671 int was_in_nursery
, was_promoted
;
3672 EphemeronLinkNode
*current
= ephemeron_list
, *prev
= NULL
;
3674 Ephemeron
*cur
, *array_end
;
3678 char *object
= current
->array
;
3680 if (!object_is_reachable (object
, start
, end
)) {
3681 EphemeronLinkNode
*tmp
= current
;
3683 DEBUG (5, fprintf (gc_debug_file
, "Dead Ephemeron array at %p\n", object
));
3686 prev
->next
= current
->next
;
3688 ephemeron_list
= current
->next
;
3690 current
= current
->next
;
3691 mono_sgen_free_internal (tmp
, INTERNAL_MEM_EPHEMERON_LINK
);
3696 was_in_nursery
= ptr_in_nursery (object
);
3697 copy_func ((void**)&object
, queue
);
3698 current
->array
= object
;
3700 /*The array was promoted, add global remsets for key/values left behind in nursery.*/
3701 was_promoted
= was_in_nursery
&& !ptr_in_nursery (object
);
3703 DEBUG (5, fprintf (gc_debug_file
, "Clearing unreachable entries for ephemeron array at %p\n", object
));
3705 array
= (MonoArray
*)object
;
3706 cur
= mono_array_addr (array
, Ephemeron
, 0);
3707 array_end
= cur
+ mono_array_length_fast (array
);
3708 tombstone
= (char*)((MonoVTable
*)LOAD_VTABLE (object
))->domain
->ephemeron_tombstone
;
3710 for (; cur
< array_end
; ++cur
) {
3711 char *key
= (char*)cur
->key
;
3713 if (!key
|| key
== tombstone
)
3716 DEBUG (5, fprintf (gc_debug_file
, "[%td] key %p (%s) value %p (%s)\n", cur
- mono_array_addr (array
, Ephemeron
, 0),
3717 key
, object_is_reachable (key
, start
, end
) ? "reachable" : "unreachable",
3718 cur
->value
, cur
->value
&& object_is_reachable (cur
->value
, start
, end
) ? "reachable" : "unreachable"));
3720 if (!object_is_reachable (key
, start
, end
)) {
3721 cur
->key
= tombstone
;
3727 if (ptr_in_nursery (key
)) {/*key was not promoted*/
3728 DEBUG (5, fprintf (gc_debug_file
, "\tAdded remset to key %p\n", key
));
3729 mono_sgen_add_to_global_remset (&cur
->key
);
3731 if (ptr_in_nursery (cur
->value
)) {/*value was not promoted*/
3732 DEBUG (5, fprintf (gc_debug_file
, "\tAdded remset to value %p\n", cur
->value
));
3733 mono_sgen_add_to_global_remset (&cur
->value
);
3738 current
= current
->next
;
3742 /* LOCKING: requires that the GC lock is held */
3744 mark_ephemerons_in_range (CopyOrMarkObjectFunc copy_func
, char *start
, char *end
, GrayQueue
*queue
)
3746 int nothing_marked
= 1;
3747 EphemeronLinkNode
*current
= ephemeron_list
;
3749 Ephemeron
*cur
, *array_end
;
3752 for (current
= ephemeron_list
; current
; current
= current
->next
) {
3753 char *object
= current
->array
;
3754 DEBUG (5, fprintf (gc_debug_file
, "Ephemeron array at %p\n", object
));
3756 /*We ignore arrays in old gen during minor collections since all objects are promoted by the remset machinery.*/
3757 if (object
< start
|| object
>= end
)
3760 /*It has to be alive*/
3761 if (!object_is_reachable (object
, start
, end
)) {
3762 DEBUG (5, fprintf (gc_debug_file
, "\tnot reachable\n"));
3766 copy_func ((void**)&object
, queue
);
3768 array
= (MonoArray
*)object
;
3769 cur
= mono_array_addr (array
, Ephemeron
, 0);
3770 array_end
= cur
+ mono_array_length_fast (array
);
3771 tombstone
= (char*)((MonoVTable
*)LOAD_VTABLE (object
))->domain
->ephemeron_tombstone
;
3773 for (; cur
< array_end
; ++cur
) {
3774 char *key
= cur
->key
;
3776 if (!key
|| key
== tombstone
)
3779 DEBUG (5, fprintf (gc_debug_file
, "[%td] key %p (%s) value %p (%s)\n", cur
- mono_array_addr (array
, Ephemeron
, 0),
3780 key
, object_is_reachable (key
, start
, end
) ? "reachable" : "unreachable",
3781 cur
->value
, cur
->value
&& object_is_reachable (cur
->value
, start
, end
) ? "reachable" : "unreachable"));
3783 if (object_is_reachable (key
, start
, end
)) {
3784 char *value
= cur
->value
;
3786 copy_func ((void**)&cur
->key
, queue
);
3788 if (!object_is_reachable (value
, start
, end
))
3790 copy_func ((void**)&cur
->value
, queue
);
3796 DEBUG (5, fprintf (gc_debug_file
, "Ephemeron run finished. Is it done %d\n", nothing_marked
));
3797 return nothing_marked
;
3800 /* LOCKING: requires that the GC lock is held */
3802 null_link_in_range (CopyOrMarkObjectFunc copy_func
, char *start
, char *end
, int generation
, GrayQueue
*queue
)
3804 DisappearingLinkHashTable
*hash
= get_dislink_hash_table (generation
);
3805 DisappearingLink
**disappearing_link_hash
= hash
->table
;
3806 int disappearing_link_hash_size
= hash
->size
;
3807 DisappearingLink
*entry
, *prev
;
3809 if (!hash
->num_links
)
3811 for (i
= 0; i
< disappearing_link_hash_size
; ++i
) {
3813 for (entry
= disappearing_link_hash
[i
]; entry
;) {
3814 char *object
= DISLINK_OBJECT (entry
);
3815 if (object
>= start
&& object
< end
&& !major
.is_object_live (object
)) {
3816 gboolean track
= DISLINK_TRACK (entry
);
3817 if (!track
&& object_is_fin_ready (object
)) {
3818 void **p
= entry
->link
;
3819 DisappearingLink
*old
;
3821 /* remove from list */
3823 prev
->next
= entry
->next
;
3825 disappearing_link_hash
[i
] = entry
->next
;
3826 DEBUG (5, fprintf (gc_debug_file
, "Dislink nullified at %p to GCed object %p\n", p
, object
));
3828 mono_sgen_free_internal (entry
, INTERNAL_MEM_DISLINK
);
3833 char *copy
= object
;
3834 copy_func ((void**)©
, queue
);
3836 /* Update pointer if it's moved. If the object
3837 * has been moved out of the nursery, we need to
3838 * remove the link from the minor hash table to
3841 * FIXME: what if an object is moved earlier?
3844 if (hash
== &minor_disappearing_link_hash
&& !ptr_in_nursery (copy
)) {
3845 void **link
= entry
->link
;
3846 DisappearingLink
*old
;
3847 /* remove from list */
3849 prev
->next
= entry
->next
;
3851 disappearing_link_hash
[i
] = entry
->next
;
3853 mono_sgen_free_internal (entry
, INTERNAL_MEM_DISLINK
);
3857 add_or_remove_disappearing_link ((MonoObject
*)copy
, link
,
3858 track
, GENERATION_OLD
);
3860 DEBUG (5, fprintf (gc_debug_file
, "Upgraded dislink at %p to major because object %p moved to %p\n", link
, object
, copy
));
3864 /* We set the track resurrection bit to
3865 * FALSE if the object is to be finalized
3866 * so that the object can be collected in
3867 * the next cycle (i.e. after it was
3870 *entry
->link
= HIDE_POINTER (copy
,
3871 object_is_fin_ready (object
) ? FALSE
: track
);
3872 DEBUG (5, fprintf (gc_debug_file
, "Updated dislink at %p to %p\n", entry
->link
, DISLINK_OBJECT (entry
)));
3877 entry
= entry
->next
;
3882 /* LOCKING: requires that the GC lock is held */
3884 null_links_for_domain (MonoDomain
*domain
, int generation
)
3886 DisappearingLinkHashTable
*hash
= get_dislink_hash_table (generation
);
3887 DisappearingLink
**disappearing_link_hash
= hash
->table
;
3888 int disappearing_link_hash_size
= hash
->size
;
3889 DisappearingLink
*entry
, *prev
;
3891 for (i
= 0; i
< disappearing_link_hash_size
; ++i
) {
3893 for (entry
= disappearing_link_hash
[i
]; entry
; ) {
3894 char *object
= DISLINK_OBJECT (entry
);
3895 if (object
&& !((MonoObject
*)object
)->vtable
) {
3896 DisappearingLink
*next
= entry
->next
;
3901 disappearing_link_hash
[i
] = next
;
3903 if (*(entry
->link
)) {
3904 *(entry
->link
) = NULL
;
3905 g_warning ("Disappearing link %p not freed", entry
->link
);
3907 mono_sgen_free_internal (entry
, INTERNAL_MEM_DISLINK
);
3914 entry
= entry
->next
;
3919 /* LOCKING: requires that the GC lock is held */
3921 finalizers_for_domain (MonoDomain
*domain
, MonoObject
**out_array
, int out_size
,
3922 FinalizeEntryHashTable
*hash_table
)
3924 FinalizeEntry
**finalizable_hash
= hash_table
->table
;
3925 mword finalizable_hash_size
= hash_table
->size
;
3926 FinalizeEntry
*entry
, *prev
;
3929 if (no_finalize
|| !out_size
|| !out_array
)
3932 for (i
= 0; i
< finalizable_hash_size
; ++i
) {
3934 for (entry
= finalizable_hash
[i
]; entry
;) {
3935 if (mono_object_domain (entry
->object
) == domain
) {
3936 FinalizeEntry
*next
;
3937 /* remove and put in out_array */
3939 prev
->next
= entry
->next
;
3941 finalizable_hash
[i
] = entry
->next
;
3943 hash_table
->num_registered
--;
3944 out_array
[count
++] = entry
->object
;
3945 DEBUG (5, fprintf (gc_debug_file
, "Collecting object for finalization: %p (%s) (%d/%d)\n", entry
->object
, safe_name (entry
->object
), num_ready_finalizers
, hash_table
->num_registered
));
3947 if (count
== out_size
)
3952 entry
= entry
->next
;
3959 * mono_gc_finalizers_for_domain:
3960 * @domain: the unloading appdomain
3961 * @out_array: output array
3962 * @out_size: size of output array
3964 * Store inside @out_array up to @out_size objects that belong to the unloading
3965 * appdomain @domain. Returns the number of stored items. Can be called repeteadly
3966 * until it returns 0.
3967 * The items are removed from the finalizer data structure, so the caller is supposed
3969 * @out_array should be on the stack to allow the GC to know the objects are still alive.
3972 mono_gc_finalizers_for_domain (MonoDomain
*domain
, MonoObject
**out_array
, int out_size
)
3977 result
= finalizers_for_domain (domain
, out_array
, out_size
, &minor_finalizable_hash
);
3978 if (result
< out_size
) {
3979 result
+= finalizers_for_domain (domain
, out_array
+ result
, out_size
- result
,
3980 &major_finalizable_hash
);
3988 register_for_finalization (MonoObject
*obj
, void *user_data
, int generation
)
3990 FinalizeEntryHashTable
*hash_table
= get_finalize_entry_hash_table (generation
);
3991 FinalizeEntry
**finalizable_hash
;
3992 mword finalizable_hash_size
;
3993 FinalizeEntry
*entry
, *prev
;
3997 g_assert (user_data
== NULL
|| user_data
== mono_gc_run_finalize
);
3998 hash
= mono_object_hash (obj
);
4000 rehash_fin_table_if_necessary (hash_table
);
4001 finalizable_hash
= hash_table
->table
;
4002 finalizable_hash_size
= hash_table
->size
;
4003 hash
%= finalizable_hash_size
;
4005 for (entry
= finalizable_hash
[hash
]; entry
; entry
= entry
->next
) {
4006 if (entry
->object
== obj
) {
4008 /* remove from the list */
4010 prev
->next
= entry
->next
;
4012 finalizable_hash
[hash
] = entry
->next
;
4013 hash_table
->num_registered
--;
4014 DEBUG (5, fprintf (gc_debug_file
, "Removed finalizer %p for object: %p (%s) (%d)\n", entry
, obj
, obj
->vtable
->klass
->name
, hash_table
->num_registered
));
4015 mono_sgen_free_internal (entry
, INTERNAL_MEM_FINALIZE_ENTRY
);
4023 /* request to deregister, but already out of the list */
4027 entry
= mono_sgen_alloc_internal (INTERNAL_MEM_FINALIZE_ENTRY
);
4028 entry
->object
= obj
;
4029 entry
->next
= finalizable_hash
[hash
];
4030 finalizable_hash
[hash
] = entry
;
4031 hash_table
->num_registered
++;
4032 DEBUG (5, fprintf (gc_debug_file
, "Added finalizer %p for object: %p (%s) (%d) to %s table\n", entry
, obj
, obj
->vtable
->klass
->name
, hash_table
->num_registered
, generation_name (generation
)));
4037 mono_gc_register_for_finalization (MonoObject
*obj
, void *user_data
)
4039 if (ptr_in_nursery (obj
))
4040 register_for_finalization (obj
, user_data
, GENERATION_NURSERY
);
4042 register_for_finalization (obj
, user_data
, GENERATION_OLD
);
4046 rehash_dislink (DisappearingLinkHashTable
*hash_table
)
4048 DisappearingLink
**disappearing_link_hash
= hash_table
->table
;
4049 int disappearing_link_hash_size
= hash_table
->size
;
4052 DisappearingLink
**new_hash
;
4053 DisappearingLink
*entry
, *next
;
4054 int new_size
= g_spaced_primes_closest (hash_table
->num_links
);
4056 new_hash
= mono_sgen_alloc_internal_dynamic (new_size
* sizeof (DisappearingLink
*), INTERNAL_MEM_DISLINK_TABLE
);
4057 for (i
= 0; i
< disappearing_link_hash_size
; ++i
) {
4058 for (entry
= disappearing_link_hash
[i
]; entry
; entry
= next
) {
4059 hash
= mono_aligned_addr_hash (entry
->link
) % new_size
;
4061 entry
->next
= new_hash
[hash
];
4062 new_hash
[hash
] = entry
;
4065 mono_sgen_free_internal_dynamic (disappearing_link_hash
,
4066 disappearing_link_hash_size
* sizeof (DisappearingLink
*), INTERNAL_MEM_DISLINK_TABLE
);
4067 hash_table
->table
= new_hash
;
4068 hash_table
->size
= new_size
;
4071 /* LOCKING: assumes the GC lock is held */
4073 add_or_remove_disappearing_link (MonoObject
*obj
, void **link
, gboolean track
, int generation
)
4075 DisappearingLinkHashTable
*hash_table
= get_dislink_hash_table (generation
);
4076 DisappearingLink
*entry
, *prev
;
4078 DisappearingLink
**disappearing_link_hash
= hash_table
->table
;
4079 int disappearing_link_hash_size
= hash_table
->size
;
4081 if (hash_table
->num_links
>= disappearing_link_hash_size
* 2) {
4082 rehash_dislink (hash_table
);
4083 disappearing_link_hash
= hash_table
->table
;
4084 disappearing_link_hash_size
= hash_table
->size
;
4086 /* FIXME: add check that link is not in the heap */
4087 hash
= mono_aligned_addr_hash (link
) % disappearing_link_hash_size
;
4088 entry
= disappearing_link_hash
[hash
];
4090 for (; entry
; entry
= entry
->next
) {
4091 /* link already added */
4092 if (link
== entry
->link
) {
4093 /* NULL obj means remove */
4096 prev
->next
= entry
->next
;
4098 disappearing_link_hash
[hash
] = entry
->next
;
4099 hash_table
->num_links
--;
4100 DEBUG (5, fprintf (gc_debug_file
, "Removed dislink %p (%d) from %s table\n", entry
, hash_table
->num_links
, generation_name (generation
)));
4101 mono_sgen_free_internal (entry
, INTERNAL_MEM_DISLINK
);
4104 *link
= HIDE_POINTER (obj
, track
); /* we allow the change of object */
4112 entry
= mono_sgen_alloc_internal (INTERNAL_MEM_DISLINK
);
4113 *link
= HIDE_POINTER (obj
, track
);
4115 entry
->next
= disappearing_link_hash
[hash
];
4116 disappearing_link_hash
[hash
] = entry
;
4117 hash_table
->num_links
++;
4118 DEBUG (5, fprintf (gc_debug_file
, "Added dislink %p for object: %p (%s) at %p to %s table\n", entry
, obj
, obj
->vtable
->klass
->name
, link
, generation_name (generation
)));
4121 /* LOCKING: assumes the GC lock is held */
4123 mono_gc_register_disappearing_link (MonoObject
*obj
, void **link
, gboolean track
)
4125 add_or_remove_disappearing_link (NULL
, link
, FALSE
, GENERATION_NURSERY
);
4126 add_or_remove_disappearing_link (NULL
, link
, FALSE
, GENERATION_OLD
);
4128 if (ptr_in_nursery (obj
))
4129 add_or_remove_disappearing_link (obj
, link
, track
, GENERATION_NURSERY
);
4131 add_or_remove_disappearing_link (obj
, link
, track
, GENERATION_OLD
);
4136 mono_gc_invoke_finalizers (void)
4138 FinalizeEntry
*entry
= NULL
;
4139 gboolean entry_is_critical
= FALSE
;
4142 /* FIXME: batch to reduce lock contention */
4143 while (fin_ready_list
|| critical_fin_list
) {
4147 FinalizeEntry
**list
= entry_is_critical
? &critical_fin_list
: &fin_ready_list
;
4149 /* We have finalized entry in the last
4150 interation, now we need to remove it from
4153 *list
= entry
->next
;
4155 FinalizeEntry
*e
= *list
;
4156 while (e
->next
!= entry
)
4158 e
->next
= entry
->next
;
4160 mono_sgen_free_internal (entry
, INTERNAL_MEM_FINALIZE_ENTRY
);
4164 /* Now look for the first non-null entry. */
4165 for (entry
= fin_ready_list
; entry
&& !entry
->object
; entry
= entry
->next
)
4168 entry_is_critical
= FALSE
;
4170 entry_is_critical
= TRUE
;
4171 for (entry
= critical_fin_list
; entry
&& !entry
->object
; entry
= entry
->next
)
4176 g_assert (entry
->object
);
4177 num_ready_finalizers
--;
4178 obj
= entry
->object
;
4179 entry
->object
= NULL
;
4180 DEBUG (7, fprintf (gc_debug_file
, "Finalizing object %p (%s)\n", obj
, safe_name (obj
)));
4188 g_assert (entry
->object
== NULL
);
4190 /* the object is on the stack so it is pinned */
4191 /*g_print ("Calling finalizer for object: %p (%s)\n", entry->object, safe_name (entry->object));*/
4192 mono_gc_run_finalize (obj
, NULL
);
4199 mono_gc_pending_finalizers (void)
4201 return fin_ready_list
|| critical_fin_list
;
4204 /* Negative value to remove */
4206 mono_gc_add_memory_pressure (gint64 value
)
4208 /* FIXME: Use interlocked functions */
4210 memory_pressure
+= value
;
4215 mono_sgen_register_major_sections_alloced (int num_sections
)
4217 minor_collection_sections_alloced
+= num_sections
;
4221 mono_sgen_get_minor_collection_allowance (void)
4223 return minor_collection_allowance
;
4227 * ######################################################################
4228 * ######## registered roots support
4229 * ######################################################################
4233 rehash_roots (gboolean pinned
)
4237 RootRecord
**new_hash
;
4238 RootRecord
*entry
, *next
;
4241 new_size
= g_spaced_primes_closest (num_roots_entries
[pinned
]);
4242 new_hash
= mono_sgen_alloc_internal_dynamic (new_size
* sizeof (RootRecord
*), INTERNAL_MEM_ROOTS_TABLE
);
4243 for (i
= 0; i
< roots_hash_size
[pinned
]; ++i
) {
4244 for (entry
= roots_hash
[pinned
][i
]; entry
; entry
= next
) {
4245 hash
= mono_aligned_addr_hash (entry
->start_root
) % new_size
;
4247 entry
->next
= new_hash
[hash
];
4248 new_hash
[hash
] = entry
;
4251 mono_sgen_free_internal_dynamic (roots_hash
[pinned
], roots_hash_size
[pinned
] * sizeof (RootRecord
*), INTERNAL_MEM_ROOTS_TABLE
);
4252 roots_hash
[pinned
] = new_hash
;
4253 roots_hash_size
[pinned
] = new_size
;
4257 find_root (int root_type
, char *start
, guint32 addr_hash
)
4259 RootRecord
*new_root
;
4261 guint32 hash
= addr_hash
% roots_hash_size
[root_type
];
4262 for (new_root
= roots_hash
[root_type
][hash
]; new_root
; new_root
= new_root
->next
) {
4263 /* we allow changing the size and the descriptor (for thread statics etc) */
4264 if (new_root
->start_root
== start
) {
4273 * We do not coalesce roots.
4276 mono_gc_register_root_inner (char *start
, size_t size
, void *descr
, int root_type
)
4278 RootRecord
*new_root
;
4279 unsigned int hash
, addr_hash
= mono_aligned_addr_hash (start
);
4282 for (i
= 0; i
< ROOT_TYPE_NUM
; ++i
) {
4283 if (num_roots_entries
[i
] >= roots_hash_size
[i
] * 2)
4286 for (i
= 0; i
< ROOT_TYPE_NUM
; ++i
) {
4287 new_root
= find_root (i
, start
, addr_hash
);
4288 /* we allow changing the size and the descriptor (for thread statics etc) */
4290 size_t old_size
= new_root
->end_root
- new_root
->start_root
;
4291 new_root
->end_root
= new_root
->start_root
+ size
;
4292 g_assert (((new_root
->root_desc
!= 0) && (descr
!= NULL
)) ||
4293 ((new_root
->root_desc
== 0) && (descr
== NULL
)));
4294 new_root
->root_desc
= (mword
)descr
;
4296 roots_size
-= old_size
;
4301 new_root
= mono_sgen_alloc_internal (INTERNAL_MEM_ROOT_RECORD
);
4303 new_root
->start_root
= start
;
4304 new_root
->end_root
= new_root
->start_root
+ size
;
4305 new_root
->root_desc
= (mword
)descr
;
4307 hash
= addr_hash
% roots_hash_size
[root_type
];
4308 num_roots_entries
[root_type
]++;
4309 new_root
->next
= roots_hash
[root_type
] [hash
];
4310 roots_hash
[root_type
][hash
] = new_root
;
4311 DEBUG (3, fprintf (gc_debug_file
, "Added root %p for range: %p-%p, descr: %p (%d/%d bytes)\n", new_root
, new_root
->start_root
, new_root
->end_root
, descr
, (int)size
, (int)roots_size
));
4321 mono_gc_register_root (char *start
, size_t size
, void *descr
)
4323 return mono_gc_register_root_inner (start
, size
, descr
, descr
? ROOT_TYPE_NORMAL
: ROOT_TYPE_PINNED
);
4327 mono_gc_register_root_wbarrier (char *start
, size_t size
, void *descr
)
4329 return mono_gc_register_root_inner (start
, size
, descr
, ROOT_TYPE_WBARRIER
);
4333 mono_gc_deregister_root (char* addr
)
4335 RootRecord
*tmp
, *prev
;
4336 unsigned int hash
, addr_hash
= mono_aligned_addr_hash (addr
);
4340 for (root_type
= 0; root_type
< ROOT_TYPE_NUM
; ++root_type
) {
4341 hash
= addr_hash
% roots_hash_size
[root_type
];
4342 tmp
= roots_hash
[root_type
][hash
];
4345 if (tmp
->start_root
== (char*)addr
) {
4347 prev
->next
= tmp
->next
;
4349 roots_hash
[root_type
][hash
] = tmp
->next
;
4350 roots_size
-= (tmp
->end_root
- tmp
->start_root
);
4351 num_roots_entries
[root_type
]--;
4352 DEBUG (3, fprintf (gc_debug_file
, "Removed root %p for range: %p-%p\n", tmp
, tmp
->start_root
, tmp
->end_root
));
4353 mono_sgen_free_internal (tmp
, INTERNAL_MEM_ROOT_RECORD
);
4364 * ######################################################################
4365 * ######## Thread handling (stop/start code)
4366 * ######################################################################
4369 /* FIXME: handle large/small config */
4370 #define HASH_PTHREAD_T(id) (((unsigned int)(id) >> 4) * 2654435761u)
4372 static SgenThreadInfo
* thread_table
[THREAD_HASH_SIZE
];
4374 #if USE_SIGNAL_BASED_START_STOP_WORLD
4376 static MonoSemType suspend_ack_semaphore
;
4377 static MonoSemType
*suspend_ack_semaphore_ptr
;
4378 static unsigned int global_stop_count
= 0;
4380 static sigset_t suspend_signal_mask
;
4381 static mword cur_thread_regs
[ARCH_NUM_REGS
] = {0};
4383 /* LOCKING: assumes the GC lock is held */
4385 mono_sgen_get_thread_table (void)
4387 return thread_table
;
4391 mono_sgen_thread_info_lookup (ARCH_THREAD_TYPE id
)
4393 unsigned int hash
= HASH_PTHREAD_T (id
) % THREAD_HASH_SIZE
;
4394 SgenThreadInfo
*info
;
4396 info
= thread_table
[hash
];
4397 while (info
&& !ARCH_THREAD_EQUALS (info
->id
, id
)) {
4404 update_current_thread_stack (void *start
)
4406 void *ptr
= cur_thread_regs
;
4407 SgenThreadInfo
*info
= mono_sgen_thread_info_lookup (ARCH_GET_THREAD ());
4409 info
->stack_start
= align_pointer (&ptr
);
4410 g_assert (info
->stack_start
>= info
->stack_start_limit
&& info
->stack_start
< info
->stack_end
);
4411 ARCH_STORE_REGS (ptr
);
4412 info
->stopped_regs
= ptr
;
4413 if (gc_callbacks
.thread_suspend_func
)
4414 gc_callbacks
.thread_suspend_func (info
->runtime_data
, NULL
);
4418 * Define this and use the "xdomain-checks" MONO_GC_DEBUG option to
4419 * have cross-domain checks in the write barrier.
4421 //#define XDOMAIN_CHECKS_IN_WBARRIER
4423 #ifndef SGEN_BINARY_PROTOCOL
4424 #ifndef HEAVY_STATISTICS
4425 #define MANAGED_ALLOCATION
4426 #ifndef XDOMAIN_CHECKS_IN_WBARRIER
4427 #define MANAGED_WBARRIER
4433 is_ip_in_managed_allocator (MonoDomain
*domain
, gpointer ip
);
4436 mono_sgen_wait_for_suspend_ack (int count
)
4440 for (i
= 0; i
< count
; ++i
) {
4441 while ((result
= MONO_SEM_WAIT (suspend_ack_semaphore_ptr
)) != 0) {
4442 if (errno
!= EINTR
) {
4443 g_error ("sem_wait ()");
4450 restart_threads_until_none_in_managed_allocator (void)
4452 SgenThreadInfo
*info
;
4453 int i
, result
, num_threads_died
= 0;
4454 int sleep_duration
= -1;
4457 int restart_count
= 0, restarted_count
= 0;
4458 /* restart all threads that stopped in the
4460 for (i
= 0; i
< THREAD_HASH_SIZE
; ++i
) {
4461 for (info
= thread_table
[i
]; info
; info
= info
->next
) {
4464 if (!info
->stack_start
|| info
->in_critical_region
||
4465 is_ip_in_managed_allocator (info
->stopped_domain
, info
->stopped_ip
)) {
4466 binary_protocol_thread_restart ((gpointer
)info
->id
);
4467 #if defined(__MACH__) && MONO_MACH_ARCH_SUPPORTED
4468 result
= thread_resume (pthread_mach_thread_np (info
->id
));
4470 result
= pthread_kill (info
->id
, restart_signal_num
);
4478 /* we set the stopped_ip to
4479 NULL for threads which
4480 we're not restarting so
4481 that we can easily identify
4483 info
->stopped_ip
= NULL
;
4484 info
->stopped_domain
= NULL
;
4488 /* if no threads were restarted, we're done */
4489 if (restart_count
== 0)
4492 #if defined(__MACH__) && MONO_MACH_ARCH_SUPPORTED
4493 /* mach thread_resume is synchronous so we dont need to wait for them */
4495 /* wait for the threads to signal their restart */
4496 mono_sgen_wait_for_suspend_ack (restart_count
);
4499 if (sleep_duration
< 0) {
4503 g_usleep (sleep_duration
);
4504 sleep_duration
+= 10;
4507 /* stop them again */
4508 for (i
= 0; i
< THREAD_HASH_SIZE
; ++i
) {
4509 for (info
= thread_table
[i
]; info
; info
= info
->next
) {
4510 if (info
->skip
|| info
->stopped_ip
== NULL
)
4512 #if defined(__MACH__) && MONO_MACH_ARCH_SUPPORTED
4513 result
= thread_suspend (pthread_mach_thread_np (info
->id
));
4515 result
= pthread_kill (info
->id
, suspend_signal_num
);
4524 /* some threads might have died */
4525 num_threads_died
+= restart_count
- restarted_count
;
4526 #if defined(__MACH__) && MONO_MACH_ARCH_SUPPORTED
4527 /* mach thread_resume is synchronous so we dont need to wait for them */
4529 /* wait for the threads to signal their suspension
4531 mono_sgen_wait_for_suspend_ack (restart_count
);
4535 return num_threads_died
;
4538 /* LOCKING: assumes the GC lock is held (by the stopping thread) */
4540 suspend_handler (int sig
, siginfo_t
*siginfo
, void *context
)
4542 SgenThreadInfo
*info
;
4545 int old_errno
= errno
;
4546 gpointer regs
[ARCH_NUM_REGS
];
4547 gpointer stack_start
;
4549 id
= pthread_self ();
4550 info
= mono_sgen_thread_info_lookup (id
);
4551 info
->stopped_domain
= mono_domain_get ();
4552 info
->stopped_ip
= (gpointer
) ARCH_SIGCTX_IP (context
);
4553 stop_count
= global_stop_count
;
4554 /* duplicate signal */
4555 if (0 && info
->stop_count
== stop_count
) {
4559 #ifdef HAVE_KW_THREAD
4560 /* update the remset info in the thread data structure */
4561 info
->remset
= remembered_set
;
4563 stack_start
= (char*) ARCH_SIGCTX_SP (context
) - REDZONE_SIZE
;
4564 /* If stack_start is not within the limits, then don't set it
4565 in info and we will be restarted. */
4566 if (stack_start
>= info
->stack_start_limit
&& info
->stack_start
<= info
->stack_end
) {
4567 info
->stack_start
= stack_start
;
4569 ARCH_COPY_SIGCTX_REGS (regs
, context
);
4570 info
->stopped_regs
= regs
;
4572 g_assert (!info
->stack_start
);
4575 /* Notify the JIT */
4576 if (gc_callbacks
.thread_suspend_func
)
4577 gc_callbacks
.thread_suspend_func (info
->runtime_data
, context
);
4579 DEBUG (4, fprintf (gc_debug_file
, "Posting suspend_ack_semaphore for suspend from %p %p\n", info
, (gpointer
)ARCH_GET_THREAD ()));
4580 /* notify the waiting thread */
4581 MONO_SEM_POST (suspend_ack_semaphore_ptr
);
4582 info
->stop_count
= stop_count
;
4584 /* wait until we receive the restart signal */
4587 sigsuspend (&suspend_signal_mask
);
4588 } while (info
->signal
!= restart_signal_num
);
4590 DEBUG (4, fprintf (gc_debug_file
, "Posting suspend_ack_semaphore for resume from %p %p\n", info
, (gpointer
)ARCH_GET_THREAD ()));
4591 /* notify the waiting thread */
4592 MONO_SEM_POST (suspend_ack_semaphore_ptr
);
4598 restart_handler (int sig
)
4600 SgenThreadInfo
*info
;
4601 int old_errno
= errno
;
4603 info
= mono_sgen_thread_info_lookup (pthread_self ());
4604 info
->signal
= restart_signal_num
;
4605 DEBUG (4, fprintf (gc_debug_file
, "Restart handler in %p %p\n", info
, (gpointer
)ARCH_GET_THREAD ()));
4611 acquire_gc_locks (void)
4617 release_gc_locks (void)
4619 UNLOCK_INTERRUPTION
;
4622 static TV_DECLARE (stop_world_time
);
4623 static unsigned long max_pause_usec
= 0;
4625 /* LOCKING: assumes the GC lock is held */
4631 acquire_gc_locks ();
4633 update_current_thread_stack (&count
);
4635 global_stop_count
++;
4636 DEBUG (3, fprintf (gc_debug_file
, "stopping world n %d from %p %p\n", global_stop_count
, mono_sgen_thread_info_lookup (ARCH_GET_THREAD ()), (gpointer
)ARCH_GET_THREAD ()));
4637 TV_GETTIME (stop_world_time
);
4638 count
= mono_sgen_thread_handshake (suspend_signal_num
);
4639 count
-= restart_threads_until_none_in_managed_allocator ();
4640 g_assert (count
>= 0);
4641 DEBUG (3, fprintf (gc_debug_file
, "world stopped %d thread(s)\n", count
));
4645 /* LOCKING: assumes the GC lock is held */
4647 restart_world (void)
4650 SgenThreadInfo
*info
;
4651 TV_DECLARE (end_sw
);
4654 /* notify the profiler of the leftovers */
4655 if (G_UNLIKELY (mono_profiler_events
& MONO_PROFILE_GC_MOVES
)) {
4656 if (moved_objects_idx
) {
4657 mono_profiler_gc_moves (moved_objects
, moved_objects_idx
);
4658 moved_objects_idx
= 0;
4661 for (i
= 0; i
< THREAD_HASH_SIZE
; ++i
) {
4662 for (info
= thread_table
[i
]; info
; info
= info
->next
) {
4663 info
->stack_start
= NULL
;
4664 info
->stopped_regs
= NULL
;
4668 release_gc_locks ();
4670 count
= mono_sgen_thread_handshake (restart_signal_num
);
4671 TV_GETTIME (end_sw
);
4672 usec
= TV_ELAPSED (stop_world_time
, end_sw
);
4673 max_pause_usec
= MAX (usec
, max_pause_usec
);
4674 DEBUG (2, fprintf (gc_debug_file
, "restarted %d thread(s) (pause time: %d usec, max: %d)\n", count
, (int)usec
, (int)max_pause_usec
));
4678 #endif /* USE_SIGNAL_BASED_START_STOP_WORLD */
4681 mono_gc_set_gc_callbacks (MonoGCCallbacks
*callbacks
)
4683 gc_callbacks
= *callbacks
;
4687 mono_gc_get_gc_callbacks ()
4689 return &gc_callbacks
;
4692 /* Variables holding start/end nursery so it won't have to be passed at every call */
4693 static void *scan_area_arg_start
, *scan_area_arg_end
;
4696 mono_gc_conservatively_scan_area (void *start
, void *end
)
4698 conservatively_pin_objects_from (start
, end
, scan_area_arg_start
, scan_area_arg_end
, PIN_TYPE_STACK
);
4702 mono_gc_scan_object (void *obj
)
4704 g_assert_not_reached ();
4705 if (current_collection_generation
== GENERATION_NURSERY
)
4706 major
.copy_object (&obj
, &gray_queue
);
4708 major
.copy_or_mark_object (&obj
, &gray_queue
);
4713 * Mark from thread stacks and registers.
4716 scan_thread_data (void *start_nursery
, void *end_nursery
, gboolean precise
)
4719 SgenThreadInfo
*info
;
4721 scan_area_arg_start
= start_nursery
;
4722 scan_area_arg_end
= end_nursery
;
4724 for (i
= 0; i
< THREAD_HASH_SIZE
; ++i
) {
4725 for (info
= thread_table
[i
]; info
; info
= info
->next
) {
4727 DEBUG (3, fprintf (gc_debug_file
, "Skipping dead thread %p, range: %p-%p, size: %td\n", info
, info
->stack_start
, info
->stack_end
, (char*)info
->stack_end
- (char*)info
->stack_start
));
4730 DEBUG (3, fprintf (gc_debug_file
, "Scanning thread %p, range: %p-%p, size: %td, pinned=%d\n", info
, info
->stack_start
, info
->stack_end
, (char*)info
->stack_end
- (char*)info
->stack_start
, next_pin_slot
));
4731 if (gc_callbacks
.thread_mark_func
&& !conservative_stack_mark
)
4732 gc_callbacks
.thread_mark_func (info
->runtime_data
, info
->stack_start
, info
->stack_end
, precise
);
4734 conservatively_pin_objects_from (info
->stack_start
, info
->stack_end
, start_nursery
, end_nursery
, PIN_TYPE_STACK
);
4737 conservatively_pin_objects_from (info
->stopped_regs
, info
->stopped_regs
+ ARCH_NUM_REGS
,
4738 start_nursery
, end_nursery
, PIN_TYPE_STACK
);
/*
 * Debugging aid: report which thread stack slot (if any) holds a pointer
 * into the object [obj, obj+size).  Scans every word of every registered
 * thread's stack and prints matches to gc_debug_file.
 */
4744 find_pinning_ref_from_thread (char *obj
, size_t size
)
4747 SgenThreadInfo
*info
;
4748 char *endobj
= obj
+ size
;
4750 for (i
= 0; i
< THREAD_HASH_SIZE
; ++i
) {
4751 for (info
= thread_table
[i
]; info
; info
= info
->next
) {
4752 char **start
= (char**)info
->stack_start
;
4755 while (start
< (char**)info
->stack_end
) {
4756 if (*start
>= obj
&& *start
< endobj
) {
4757 DEBUG (0, fprintf (gc_debug_file
, "Object %p referenced in thread %p (id %p) at %p, stack: %p-%p\n", obj
, info
, (gpointer
)info
->id
, start
, info
->stack_start
, info
->stack_end
));
4762 /* FIXME: check info->stopped_regs */
/*
 * Return whether ptr lies on the current thread's stack.  Uses the address
 * of a local as the current stack top and the registered stack_end as the
 * bottom (assumes a downward-growing stack, consistent with the FIXME in
 * gc_register_current_thread below).
 */
4768 ptr_on_stack (void *ptr
)
4770 gpointer stack_start
= &stack_start
;
4771 SgenThreadInfo
*info
= mono_sgen_thread_info_lookup (ARCH_GET_THREAD ());
4773 if (ptr
>= stack_start
&& ptr
< (gpointer
)info
->stack_end
)
/*
 * Process one remembered-set entry at p.  The entry's low bits
 * (REMSET_TYPE_MASK) select the kind: a single location, a range
 * (pointer + count), a whole object, or a value type (pointer + GC
 * descriptor + count).  Objects pointed to from outside the nursery are
 * copied/promoted via the major collector; for non-global remsets,
 * locations still pointing into the nursery after copying (i.e. pinned
 * targets) are re-registered in the global remset.
 * NOTE(review): the return statement (presumably the address of the next
 * entry) and several case labels/break statements are missing from this
 * extraction; callers treat the return value as the next entry pointer.
 */
4779 handle_remset (mword
*p
, void *start_nursery
, void *end_nursery
, gboolean global
, GrayQueue
*queue
)
4786 HEAVY_STAT (++stat_global_remsets_processed
);
4788 /* FIXME: exclude stack locations */
4789 switch ((*p
) & REMSET_TYPE_MASK
) {
4790 case REMSET_LOCATION
:
4792 //__builtin_prefetch (ptr);
4793 if (((void*)ptr
< start_nursery
|| (void*)ptr
>= end_nursery
)) {
4794 gpointer old
= *ptr
;
4795 major
.copy_object (ptr
, queue
);
4796 DEBUG (9, fprintf (gc_debug_file
, "Overwrote remset at %p with %p\n", ptr
, *ptr
));
4798 binary_protocol_ptr_update (ptr
, old
, *ptr
, (gpointer
)LOAD_VTABLE (*ptr
), safe_object_get_size (*ptr
));
4799 if (!global
&& *ptr
>= start_nursery
&& *ptr
< end_nursery
) {
4801 * If the object is pinned, each reference to it from nonpinned objects
4802 * becomes part of the global remset, which can grow very large.
4804 DEBUG (9, fprintf (gc_debug_file
, "Add to global remset because of pinning %p (%p %s)\n", ptr
, *ptr
, safe_name (*ptr
)));
4805 mono_sgen_add_to_global_remset (ptr
);
4808 DEBUG (9, fprintf (gc_debug_file
, "Skipping remset at %p holding %p\n", ptr
, *ptr
));
/* Range entry: consecutive locations starting at ptr, count stored in the following word. */
4812 ptr
= (void**)(*p
& ~REMSET_TYPE_MASK
);
4813 if (((void*)ptr
>= start_nursery
&& (void*)ptr
< end_nursery
))
4816 while (count
-- > 0) {
4817 major
.copy_object (ptr
, queue
);
4818 DEBUG (9, fprintf (gc_debug_file
, "Overwrote remset at %p with %p (count: %d)\n", ptr
, *ptr
, (int)count
));
4819 if (!global
&& *ptr
>= start_nursery
&& *ptr
< end_nursery
)
4820 mono_sgen_add_to_global_remset (ptr
);
/* Object entry: scan the whole object for nursery references. */
4825 ptr
= (void**)(*p
& ~REMSET_TYPE_MASK
);
4826 if (((void*)ptr
>= start_nursery
&& (void*)ptr
< end_nursery
))
4828 major
.minor_scan_object ((char*)ptr
, queue
);
4830 case REMSET_VTYPE
: {
4831 ptr
= (void**)(*p
& ~REMSET_TYPE_MASK
);
4832 if (((void*)ptr
>= start_nursery
&& (void*)ptr
< end_nursery
))
4837 ptr
= (void**) major
.minor_scan_vtype ((char*)ptr
, desc
, start_nursery
, end_nursery
, queue
);
4841 g_assert_not_reached ();
4846 #ifdef HEAVY_STATISTICS
/*
 * Statistics helper (HEAVY_STATISTICS builds only): copy the location
 * entries of one remembered set into the array at *bumper, counting
 * repeats of the last one/two addresses seen (stat_saved_remsets_1/2).
 * Presumably returns the advanced bumper — the return is missing from
 * this extraction, but callers assign its result back to bumper.
 */
4848 collect_store_remsets (RememberedSet
*remset
, mword
*bumper
)
4850 mword
*p
= remset
->data
;
4855 while (p
< remset
->store_next
) {
4856 switch ((*p
) & REMSET_TYPE_MASK
) {
4857 case REMSET_LOCATION
:
4860 ++stat_saved_remsets_1
;
4862 if (*p
== last1
|| *p
== last2
) {
4863 ++stat_saved_remsets_2
;
4880 g_assert_not_reached ();
/*
 * NOTE(review): the function header is missing from this extraction; this
 * is the body of a HEAVY_STATISTICS helper that totals all remembered-set
 * entries (per-thread, freed-thread and global), gathers them into one
 * dynamically allocated array via collect_store_remsets, sorts it, and
 * accumulates total and unique counts into stat_store_remsets and
 * stat_store_remsets_unique.
 */
4890 RememberedSet
*remset
;
4892 SgenThreadInfo
*info
;
4894 mword
*addresses
, *bumper
, *p
, *r
;
/* First pass: compute the total number of remset words to allocate for. */
4896 for (i
= 0; i
< THREAD_HASH_SIZE
; ++i
) {
4897 for (info
= thread_table
[i
]; info
; info
= info
->next
) {
4898 for (remset
= info
->remset
; remset
; remset
= remset
->next
)
4899 size
+= remset
->store_next
- remset
->data
;
4902 for (remset
= freed_thread_remsets
; remset
; remset
= remset
->next
)
4903 size
+= remset
->store_next
- remset
->data
;
4904 for (remset
= global_remset
; remset
; remset
= remset
->next
)
4905 size
+= remset
->store_next
- remset
->data
;
4907 bumper
= addresses
= mono_sgen_alloc_internal_dynamic (sizeof (mword
) * size
, INTERNAL_MEM_STATISTICS
);
/* Second pass: collect the entries into the array. */
4909 for (i
= 0; i
< THREAD_HASH_SIZE
; ++i
) {
4910 for (info
= thread_table
[i
]; info
; info
= info
->next
) {
4911 for (remset
= info
->remset
; remset
; remset
= remset
->next
)
4912 bumper
= collect_store_remsets (remset
, bumper
);
4915 for (remset
= global_remset
; remset
; remset
= remset
->next
)
4916 bumper
= collect_store_remsets (remset
, bumper
);
4917 for (remset
= freed_thread_remsets
; remset
; remset
= remset
->next
)
4918 bumper
= collect_store_remsets (remset
, bumper
);
4920 g_assert (bumper
<= addresses
+ size
);
4922 stat_store_remsets
+= bumper
- addresses
;
/* Sort so duplicates become adjacent; the (partially elided) loop below counts uniques. */
4924 sort_addresses ((void**)addresses
, bumper
- addresses
);
4927 while (r
< bumper
) {
4933 stat_store_remsets_unique
+= p
- addresses
;
4935 mono_sgen_free_internal_dynamic (addresses
, sizeof (mword
) * size
, INTERNAL_MEM_STATISTICS
);
/*
 * Reset a thread's store-remset buffer: zero its index and clear the
 * buffer contents (STORE_REMSET_BUFFER_SIZE gpointer slots).
 */
4940 clear_thread_store_remset_buffer (SgenThreadInfo
*info
)
4942 *info
->store_remset_buffer_index_addr
= 0;
4943 memset (*info
->store_remset_buffer_addr
, 0, sizeof (gpointer
) * STORE_REMSET_BUFFER_SIZE
);
/*
 * Size in bytes of a RememberedSet allocation: header plus its full data
 * capacity (end_set - data slots), matching alloc_remset below.
 */
4947 remset_byte_size (RememberedSet
*remset
)
4949 return sizeof (RememberedSet
) + (remset
->end_set
- remset
->data
) * sizeof (gpointer
);
/*
 * Drain every remembered set during a nursery collection: the global
 * remset, the generic store remsets, each live thread's remset chain and
 * store buffer, and remsets left behind by dead threads.  Entries are
 * processed through handle_remset; consumed sets are truncated/freed.
 * Global remset entries whose location still points into the nursery
 * after processing (pinned targets) are compacted back in place so they
 * survive to the next collection.
 */
4953 scan_from_remsets (void *start_nursery
, void *end_nursery
, GrayQueue
*queue
)
4956 SgenThreadInfo
*info
;
4957 RememberedSet
*remset
;
4958 GenericStoreRememberedSet
*store_remset
;
4959 mword
*p
, *next_p
, *store_pos
;
4961 #ifdef HEAVY_STATISTICS
4965 /* the global one */
4966 for (remset
= global_remset
; remset
; remset
= remset
->next
) {
4967 DEBUG (4, fprintf (gc_debug_file
, "Scanning global remset range: %p-%p, size: %td\n", remset
->data
, remset
->store_next
, remset
->store_next
- remset
->data
));
4968 store_pos
= remset
->data
;
4969 for (p
= remset
->data
; p
< remset
->store_next
; p
= next_p
) {
4970 void **ptr
= (void**)p
[0];
4972 /*Ignore previously processed remset.*/
4973 if (!global_remset_location_was_not_added (ptr
)) {
4978 next_p
= handle_remset (p
, start_nursery
, end_nursery
, TRUE
, queue
);
4981 * Clear global remsets of locations which no longer point to the
4982 * nursery. Otherwise, they could grow indefinitely between major
4985 * Since all global remsets are location remsets, we don't need to unmask the pointer.
4987 if (ptr_in_nursery (*ptr
)) {
4988 *store_pos
++ = p
[0];
4989 HEAVY_STAT (++stat_global_remsets_readded
);
4993 /* Truncate the remset */
4994 remset
->store_next
= store_pos
;
4997 /* the generic store ones */
4998 store_remset
= generic_store_remsets
;
4999 while (store_remset
) {
5000 GenericStoreRememberedSet
*next
= store_remset
->next
;
5002 for (i
= 0; i
< STORE_REMSET_BUFFER_SIZE
- 1; ++i
) {
5003 gpointer addr
= store_remset
->data
[i
];
5005 handle_remset ((mword
*)&addr
, start_nursery
, end_nursery
, FALSE
, queue
);
5008 mono_sgen_free_internal (store_remset
, INTERNAL_MEM_STORE_REMSET
);
5010 store_remset
= next
;
5012 generic_store_remsets
= NULL
;
5014 /* the per-thread ones */
5015 for (i
= 0; i
< THREAD_HASH_SIZE
; ++i
) {
5016 for (info
= thread_table
[i
]; info
; info
= info
->next
) {
5017 RememberedSet
*next
;
5019 for (remset
= info
->remset
; remset
; remset
= next
) {
5020 DEBUG (4, fprintf (gc_debug_file
, "Scanning remset for thread %p, range: %p-%p, size: %td\n", info
, remset
->data
, remset
->store_next
, remset
->store_next
- remset
->data
));
5021 for (p
= remset
->data
; p
< remset
->store_next
;) {
5022 p
= handle_remset (p
, start_nursery
, end_nursery
, FALSE
, queue
);
/* Reset the set; only the head set (info->remset) is kept, overflow sets are freed. */
5024 remset
->store_next
= remset
->data
;
5025 next
= remset
->next
;
5026 remset
->next
= NULL
;
5027 if (remset
!= info
->remset
) {
5028 DEBUG (4, fprintf (gc_debug_file
, "Freed remset at %p\n", remset
->data
));
5029 mono_sgen_free_internal_dynamic (remset
, remset_byte_size (remset
), INTERNAL_MEM_REMSET
);
/* Buffer slot 0 is skipped — entries start at index 1 (see find_in_remsets, which also uses j + 1). */
5032 for (j
= 0; j
< *info
->store_remset_buffer_index_addr
; ++j
)
5033 handle_remset ((mword
*)*info
->store_remset_buffer_addr
+ j
+ 1, start_nursery
, end_nursery
, FALSE
, queue
);
5034 clear_thread_store_remset_buffer (info
);
5038 /* the freed thread ones */
5039 while (freed_thread_remsets
) {
5040 RememberedSet
*next
;
5041 remset
= freed_thread_remsets
;
5042 DEBUG (4, fprintf (gc_debug_file
, "Scanning remset for freed thread, range: %p-%p, size: %td\n", remset
->data
, remset
->store_next
, remset
->store_next
- remset
->data
));
5043 for (p
= remset
->data
; p
< remset
->store_next
;) {
5044 p
= handle_remset (p
, start_nursery
, end_nursery
, FALSE
, queue
);
5046 next
= remset
->next
;
5047 DEBUG (4, fprintf (gc_debug_file
, "Freed remset at %p\n", remset
->data
));
5048 mono_sgen_free_internal_dynamic (remset
, remset_byte_size (remset
), INTERNAL_MEM_REMSET
);
5049 freed_thread_remsets
= next
;
5054 * Clear the info in the remembered sets: we're doing a major collection, so
5055 * the per-thread ones are not needed and the global ones will be reconstructed
/*
 * Empty all remembered sets without processing them (major collections
 * rescan everything anyway).  Head sets (global_remset, each
 * info->remset) are reset in place; overflow and freed-thread sets are
 * released via mono_sgen_free_internal_dynamic.
 */
5059 clear_remsets (void)
5062 SgenThreadInfo
*info
;
5063 RememberedSet
*remset
, *next
;
5065 /* the global list */
5066 for (remset
= global_remset
; remset
; remset
= next
) {
5067 remset
->store_next
= remset
->data
;
5068 next
= remset
->next
;
5069 remset
->next
= NULL
;
5070 if (remset
!= global_remset
) {
5071 DEBUG (4, fprintf (gc_debug_file
, "Freed remset at %p\n", remset
->data
));
5072 mono_sgen_free_internal_dynamic (remset
, remset_byte_size (remset
), INTERNAL_MEM_REMSET
);
5075 /* the generic store ones */
5076 while (generic_store_remsets
) {
5077 GenericStoreRememberedSet
*gs_next
= generic_store_remsets
->next
;
5078 mono_sgen_free_internal (generic_store_remsets
, INTERNAL_MEM_STORE_REMSET
);
5079 generic_store_remsets
= gs_next
;
5081 /* the per-thread ones */
5082 for (i
= 0; i
< THREAD_HASH_SIZE
; ++i
) {
5083 for (info
= thread_table
[i
]; info
; info
= info
->next
) {
5084 for (remset
= info
->remset
; remset
; remset
= next
) {
5085 remset
->store_next
= remset
->data
;
5086 next
= remset
->next
;
5087 remset
->next
= NULL
;
5088 if (remset
!= info
->remset
) {
5089 DEBUG (3, fprintf (gc_debug_file
, "Freed remset at %p\n", remset
->data
));
5090 mono_sgen_free_internal_dynamic (remset
, remset_byte_size (remset
), INTERNAL_MEM_REMSET
);
5093 clear_thread_store_remset_buffer (info
);
5097 /* the freed thread ones */
5098 while (freed_thread_remsets
) {
5099 next
= freed_thread_remsets
->next
;
5100 DEBUG (4, fprintf (gc_debug_file
, "Freed remset at %p\n", freed_thread_remsets
->data
));
5101 mono_sgen_free_internal_dynamic (freed_thread_remsets
, remset_byte_size (freed_thread_remsets
), INTERNAL_MEM_REMSET
);
5102 freed_thread_remsets
= next
;
5107 * Clear the thread local TLAB variables for all threads.
/* NOTE(review): the function signature line is missing from this extraction. */
5112 SgenThreadInfo
*info
;
5115 for (i
= 0; i
< THREAD_HASH_SIZE
; ++i
) {
5116 for (info
= thread_table
[i
]; info
; info
= info
->next
) {
5117 /* A new TLAB will be allocated when the thread does its first allocation */
5118 *info
->tlab_start_addr
= NULL
;
5119 *info
->tlab_next_addr
= NULL
;
5120 *info
->tlab_temp_end_addr
= NULL
;
5121 *info
->tlab_real_end_addr
= NULL
;
5126 /* LOCKING: assumes the GC lock is held */
/*
 * Register the calling thread with the collector: allocate and zero its
 * SgenThreadInfo, wire up the TLAB/store-buffer indirection pointers,
 * determine the stack bounds (pthread attr APIs where available, else a
 * page-aligned guess from the caller-supplied stack address), insert it
 * into thread_table, and give it an initial remembered set.  Returns the
 * new info (per the SgenThreadInfo* declaration).
 * NOTE(review): info is allocated with raw malloc and its result is not
 * checked here — any NULL check may be in lines missing from this
 * extraction; confirm before relying on it.
 */
5127 static SgenThreadInfo
*
5128 gc_register_current_thread (void *addr
)
5131 SgenThreadInfo
* info
= malloc (sizeof (SgenThreadInfo
));
5132 #ifndef HAVE_KW_THREAD
5133 SgenThreadInfo
*__thread_info__
= info
;
5139 memset (info
, 0, sizeof (SgenThreadInfo
));
5140 #ifndef HAVE_KW_THREAD
5141 info
->tlab_start
= info
->tlab_next
= info
->tlab_temp_end
= info
->tlab_real_end
= NULL
;
5143 g_assert (!pthread_getspecific (thread_info_key
));
5144 pthread_setspecific (thread_info_key
, info
);
5149 info
->id
= ARCH_GET_THREAD ();
5150 info
->stop_count
= -1;
5153 info
->stack_start
= NULL
;
/* The *_addr fields point at the thread-local variables so other threads can reach them. */
5154 info
->tlab_start_addr
= &TLAB_START
;
5155 info
->tlab_next_addr
= &TLAB_NEXT
;
5156 info
->tlab_temp_end_addr
= &TLAB_TEMP_END
;
5157 info
->tlab_real_end_addr
= &TLAB_REAL_END
;
5158 info
->store_remset_buffer_addr
= &STORE_REMSET_BUFFER
;
5159 info
->store_remset_buffer_index_addr
= &STORE_REMSET_BUFFER_INDEX
;
5160 info
->stopped_ip
= NULL
;
5161 info
->stopped_domain
= NULL
;
5162 info
->stopped_regs
= NULL
;
5164 binary_protocol_thread_register ((gpointer
)info
->id
);
5166 #ifdef HAVE_KW_THREAD
5167 tlab_next_addr
= &tlab_next
;
5168 store_remset_buffer_index_addr
= &store_remset_buffer_index
;
5171 /* try to get it with attributes first */
5172 #if defined(HAVE_PTHREAD_GETATTR_NP) && defined(HAVE_PTHREAD_ATTR_GETSTACK)
5176 pthread_attr_t attr
;
5177 pthread_getattr_np (pthread_self (), &attr
);
5178 pthread_attr_getstack (&attr
, &sstart
, &size
);
5179 info
->stack_start_limit
= sstart
;
5180 info
->stack_end
= (char*)sstart
+ size
;
5181 pthread_attr_destroy (&attr
);
5183 #elif defined(HAVE_PTHREAD_GET_STACKSIZE_NP) && defined(HAVE_PTHREAD_GET_STACKADDR_NP)
5184 info
->stack_end
= (char*)pthread_get_stackaddr_np (pthread_self ());
5185 info
->stack_start_limit
= (char*)info
->stack_end
- pthread_get_stacksize_np (pthread_self ());
5188 /* FIXME: we assume the stack grows down */
5189 gsize stack_bottom
= (gsize
)addr
;
/* Round the caller-provided address up to a 4K page boundary as the stack bottom. */
5190 stack_bottom
+= 4095;
5191 stack_bottom
&= ~4095;
5192 info
->stack_end
= (char*)stack_bottom
;
5196 #ifdef HAVE_KW_THREAD
5197 stack_end
= info
->stack_end
;
5200 /* hash into the table */
5201 hash
= HASH_PTHREAD_T (info
->id
) % THREAD_HASH_SIZE
;
5202 info
->next
= thread_table
[hash
];
5203 thread_table
[hash
] = info
;
5205 info
->remset
= alloc_remset (DEFAULT_REMSET_SIZE
, info
);
5206 pthread_setspecific (remembered_set_key
, info
->remset
);
5207 #ifdef HAVE_KW_THREAD
5208 remembered_set
= info
->remset
;
5211 STORE_REMSET_BUFFER
= mono_sgen_alloc_internal (INTERNAL_MEM_STORE_REMSET
);
5212 STORE_REMSET_BUFFER_INDEX
= 0;
5214 DEBUG (3, fprintf (gc_debug_file
, "registered thread %p (%p) (hash: %d)\n", info
, (gpointer
)info
->id
, hash
));
5216 if (gc_callbacks
.thread_attach_func
)
5217 info
->runtime_data
= gc_callbacks
.thread_attach_func ();
/*
 * Snapshot a thread's store-remset buffer into a GenericStoreRememberedSet
 * and prepend it to the global generic_store_remsets list.  Slot 0 of the
 * buffer is skipped (copy starts at buffer + 1), consistent with the
 * one-based indexing used elsewhere for these buffers.
 */
5223 add_generic_store_remset_from_buffer (gpointer
*buffer
)
5225 GenericStoreRememberedSet
*remset
= mono_sgen_alloc_internal (INTERNAL_MEM_STORE_REMSET
);
5226 memcpy (remset
->data
, buffer
+ 1, sizeof (gpointer
) * (STORE_REMSET_BUFFER_SIZE
- 1));
5227 remset
->next
= generic_store_remsets
;
5228 generic_store_remsets
= remset
;
/*
 * Remove the calling thread from thread_table.  Its remembered sets are
 * not discarded: they are appended to freed_thread_remsets so a later
 * nursery collection still processes their entries; any pending store
 * buffer entries are preserved as a generic store remset first.
 */
5232 unregister_current_thread (void)
5235 SgenThreadInfo
*prev
= NULL
;
5237 RememberedSet
*rset
;
5238 ARCH_THREAD_TYPE id
= ARCH_GET_THREAD ();
5240 binary_protocol_thread_unregister ((gpointer
)id
);
5242 hash
= HASH_PTHREAD_T (id
) % THREAD_HASH_SIZE
;
5243 p
= thread_table
[hash
];
5245 DEBUG (3, fprintf (gc_debug_file
, "unregister thread %p (%p)\n", p
, (gpointer
)p
->id
));
/* Walk the hash chain to find this thread's entry (chain-advance lines elided in this extraction). */
5246 while (!ARCH_THREAD_EQUALS (p
->id
, id
)) {
5251 thread_table
[hash
] = p
->next
;
5253 prev
->next
= p
->next
;
/* Splice the thread's remset chain onto freed_thread_remsets. */
5256 if (freed_thread_remsets
) {
5257 for (rset
= p
->remset
; rset
->next
; rset
= rset
->next
)
5259 rset
->next
= freed_thread_remsets
;
5260 freed_thread_remsets
= p
->remset
;
5262 freed_thread_remsets
= p
->remset
;
5265 if (*p
->store_remset_buffer_index_addr
)
5266 add_generic_store_remset_from_buffer (*p
->store_remset_buffer_addr
);
5267 mono_sgen_free_internal (*p
->store_remset_buffer_addr
, INTERNAL_MEM_STORE_REMSET
);
/*
 * pthread key destructor: asserts the domain has already been detached,
 * then unregisters the thread from the GC.
 */
5272 unregister_thread (void *k
)
5274 g_assert (!mono_domain_get ());
5276 unregister_current_thread ();
/*
 * Public entry point: ensure the current thread is registered with the
 * GC, registering it with baseptr as the stack-bottom hint if the lookup
 * finds nothing.  Returns non-zero on success (info != NULL).
 */
5281 mono_gc_register_thread (void *baseptr
)
5283 SgenThreadInfo
*info
;
5287 info
= mono_sgen_thread_info_lookup (ARCH_GET_THREAD ());
5289 info
= gc_register_current_thread (baseptr
);
5291 return info
!= NULL
;
5294 #if USE_PTHREAD_INTERCEPT
/* Bundle passed from mono_gc_pthread_create to the new thread's trampoline. */
5297 void *(*start_routine
) (void *);
5300 MonoSemType registered
;
5301 } SgenThreadStartInfo
;
/*
 * Thread trampoline installed by mono_gc_pthread_create: registers the
 * new thread with the GC, signals the creator via the semaphore, runs
 * the user's start routine, and unregisters on the way out.
 */
5304 gc_start_thread (void *arg
)
5306 SgenThreadStartInfo
*start_info
= arg
;
5307 SgenThreadInfo
* info
;
/* Copy out what we need before posting — the creator frees start_info after the post. */
5308 void *t_arg
= start_info
->arg
;
5309 void *(*start_func
) (void*) = start_info
->start_routine
;
5314 info
= gc_register_current_thread (&result
);
5316 post_result
= MONO_SEM_POST (&(start_info
->registered
));
5317 g_assert (!post_result
);
5318 result
= start_func (t_arg
);
5319 g_assert (!mono_domain_get ());
5321 * this is done by the pthread key dtor
5323 unregister_current_thread ();
/*
 * pthread_create interposer: wraps the user's start routine in
 * gc_start_thread so the child registers with the GC, and blocks until
 * the child has signalled registration (retrying MONO_SEM_WAIT on
 * interruption).
 * NOTE(review): the malloc result is used without a visible NULL check,
 * and the matching free of start_info is not visible in this extraction.
 */
5331 mono_gc_pthread_create (pthread_t
*new_thread
, const pthread_attr_t
*attr
, void *(*start_routine
)(void *), void *arg
)
5333 SgenThreadStartInfo
*start_info
;
5336 start_info
= malloc (sizeof (SgenThreadStartInfo
));
5339 MONO_SEM_INIT (&(start_info
->registered
), 0);
5340 start_info
->arg
= arg
;
5341 start_info
->start_routine
= start_routine
;
5343 result
= pthread_create (new_thread
, attr
, gc_start_thread
, start_info
);
5345 while (MONO_SEM_WAIT (&(start_info
->registered
)) != 0) {
5346 /*if (EINTR != errno) ABORT("sem_wait failed"); */
5349 MONO_SEM_DESTROY (&(start_info
->registered
));
/* Thin pass-throughs kept so all pthread entry points funnel through the GC. */
5355 mono_gc_pthread_join (pthread_t thread
, void **retval
)
5357 return pthread_join (thread
, retval
);
5361 mono_gc_pthread_detach (pthread_t thread
)
5363 return pthread_detach (thread
);
5369 * ######################################################################
5370 * ######## Write barriers
5371 * ######################################################################
5374 static RememberedSet
*
5375 alloc_remset (int size
, gpointer id
) {
5376 RememberedSet
* res
= mono_sgen_alloc_internal_dynamic (sizeof (RememberedSet
) + (size
* sizeof (gpointer
)), INTERNAL_MEM_REMSET
);
5377 res
->store_next
= res
->data
;
5378 res
->end_set
= res
->data
+ size
;
5380 DEBUG (4, fprintf (gc_debug_file
, "Allocated remset size %d at %p for %p\n", size
, res
->data
, id
));
5385 * Note: the write barriers first do the needed GC work and then do the actual store:
5386 * this way the value is visible to the conservative GC scan after the write barrier
5387 * itself. If a GC interrupts the barrier in the middle, value will be kept alive by
5388 * the conservative scan, otherwise by the remembered set scan.
/*
 * Write barrier for object field stores: record field_ptr in the current
 * thread's remembered set (growing it by chaining a new set of the same
 * capacity when full), then perform the store.  Stores into the nursery
 * need no remset entry and are done directly.
 */
5391 mono_gc_wbarrier_set_field (MonoObject
*obj
, gpointer field_ptr
, MonoObject
* value
)
5395 HEAVY_STAT (++stat_wbarrier_set_field
);
5396 if (ptr_in_nursery (field_ptr
)) {
5397 *(void**)field_ptr
= value
;
5400 DEBUG (8, fprintf (gc_debug_file
, "Adding remset at %p\n", field_ptr
));
5402 rs
= REMEMBERED_SET
;
5403 if (rs
->store_next
< rs
->end_set
) {
5404 *(rs
->store_next
++) = (mword
)field_ptr
;
5405 *(void**)field_ptr
= value
;
/* Slow path: current set full — chain a fresh one of equal capacity and retry the append. */
5409 rs
= alloc_remset (rs
->end_set
- rs
->data
, (void*)1);
5410 rs
->next
= REMEMBERED_SET
;
5411 REMEMBERED_SET
= rs
;
5412 #ifdef HAVE_KW_THREAD
5413 mono_sgen_thread_info_lookup (ARCH_GET_THREAD ())->remset
= rs
;
5415 *(rs
->store_next
++) = (mword
)field_ptr
;
5416 *(void**)field_ptr
= value
;
/*
 * Write barrier for array element stores: identical structure to
 * mono_gc_wbarrier_set_field — record slot_ptr in the thread remset
 * (chaining a new set when full), then store value.
 */
5421 mono_gc_wbarrier_set_arrayref (MonoArray
*arr
, gpointer slot_ptr
, MonoObject
* value
)
5425 HEAVY_STAT (++stat_wbarrier_set_arrayref
);
5426 if (ptr_in_nursery (slot_ptr
)) {
5427 *(void**)slot_ptr
= value
;
5430 DEBUG (8, fprintf (gc_debug_file
, "Adding remset at %p\n", slot_ptr
));
5432 rs
= REMEMBERED_SET
;
5433 if (rs
->store_next
< rs
->end_set
) {
5434 *(rs
->store_next
++) = (mword
)slot_ptr
;
5435 *(void**)slot_ptr
= value
;
/* Slow path: chain a fresh set of equal capacity, then append and store. */
5439 rs
= alloc_remset (rs
->end_set
- rs
->data
, (void*)1);
5440 rs
->next
= REMEMBERED_SET
;
5441 REMEMBERED_SET
= rs
;
5442 #ifdef HAVE_KW_THREAD
5443 mono_sgen_thread_info_lookup (ARCH_GET_THREAD ())->remset
= rs
;
5445 *(rs
->store_next
++) = (mword
)slot_ptr
;
5446 *(void**)slot_ptr
= value
;
/*
 * Write barrier for bulk array-reference copies: performs the memmove
 * first, then records a REMSET_RANGE entry (tagged destination pointer
 * followed by the element count) unless the destination is in the
 * nursery.  Needs two free slots; chains a new set when fewer remain.
 */
5451 mono_gc_wbarrier_arrayref_copy (gpointer dest_ptr
, gpointer src_ptr
, int count
)
5455 HEAVY_STAT (++stat_wbarrier_arrayref_copy
);
5457 memmove (dest_ptr
, src_ptr
, count
* sizeof (gpointer
));
5458 if (ptr_in_nursery (dest_ptr
)) {
5462 rs
= REMEMBERED_SET
;
5463 DEBUG (8, fprintf (gc_debug_file
, "Adding remset at %p, %d\n", dest_ptr
, count
));
5464 if (rs
->store_next
+ 1 < rs
->end_set
) {
5465 *(rs
->store_next
++) = (mword
)dest_ptr
| REMSET_RANGE
;
5466 *(rs
->store_next
++) = count
;
5470 rs
= alloc_remset (rs
->end_set
- rs
->data
, (void*)1);
5471 rs
->next
= REMEMBERED_SET
;
5472 REMEMBERED_SET
= rs
;
5473 #ifdef HAVE_KW_THREAD
5474 mono_sgen_thread_info_lookup (ARCH_GET_THREAD ())->remset
= rs
;
5476 *(rs
->store_next
++) = (mword
)dest_ptr
| REMSET_RANGE
;
5477 *(rs
->store_next
++) = count
;
/* Result slot for find_object_for_ptr_callback (single-shot, asserted below). */
5481 static char *found_obj
;
/*
 * Iteration callback: when ptr falls inside [obj, obj+size), record obj
 * in found_obj; asserts only one match is ever found.
 */
5484 find_object_for_ptr_callback (char *obj
, size_t size
, char *ptr
)
5486 if (ptr
>= obj
&& ptr
< obj
+ size
) {
5487 g_assert (!found_obj
);
5492 /* for use in the debugger */
5493 char* find_object_for_ptr (char *ptr
);
/*
 * Debugger helper: return the start of the object containing ptr, or
 * (presumably) NULL if none.  Checks the nursery, then the LOS list,
 * then exhaustively iterates major-heap objects.
 */
5495 find_object_for_ptr (char *ptr
)
5499 if (ptr
>= nursery_section
->data
&& ptr
< nursery_section
->end_data
) {
5501 mono_sgen_scan_area_with_callback (nursery_section
->data
, nursery_section
->end_data
,
5502 (IterateObjectCallbackFunc
)find_object_for_ptr_callback
, ptr
);
5507 for (bigobj
= los_object_list
; bigobj
; bigobj
= bigobj
->next
) {
5508 if (ptr
>= bigobj
->data
&& ptr
< bigobj
->data
+ bigobj
->size
)
5509 return bigobj
->data
;
5513 * Very inefficient, but this is debugging code, supposed to
5514 * be called from gdb, so we don't care.
5517 major
.iterate_objects (TRUE
, TRUE
, (IterateObjectCallbackFunc
)find_object_for_ptr_callback
, ptr
);
/*
 * Flush the current thread's store-remset buffer into a generic store
 * remset, then zero the buffer and reset its index.
 */
5522 evacuate_remset_buffer (void)
5527 buffer
= STORE_REMSET_BUFFER
;
5529 add_generic_store_remset_from_buffer (buffer
);
5530 memset (buffer
, 0, sizeof (gpointer
) * STORE_REMSET_BUFFER_SIZE
);
5532 STORE_REMSET_BUFFER_INDEX
= 0;
/*
 * Generic write barrier for a store that has already happened: record
 * ptr in the per-thread store-remset buffer.  Skipped when ptr is in the
 * nursery, on the stack, or when the stored value is not a nursery
 * pointer.  A duplicate of the most recent entry is elided; a full
 * buffer is evacuated to a generic store remset first.
 */
5536 mono_gc_wbarrier_generic_nostore (gpointer ptr
)
5542 HEAVY_STAT (++stat_wbarrier_generic_store
);
5544 #ifdef XDOMAIN_CHECKS_IN_WBARRIER
5545 /* FIXME: ptr_in_heap must be called with the GC lock held */
5546 if (xdomain_checks
&& *(MonoObject
**)ptr
&& ptr_in_heap (ptr
)) {
5547 char *start
= find_object_for_ptr (ptr
);
5548 MonoObject
*value
= *(MonoObject
**)ptr
;
5552 MonoObject
*obj
= (MonoObject
*)start
;
5553 if (obj
->vtable
->domain
!= value
->vtable
->domain
)
5554 g_assert (is_xdomain_ref_allowed (ptr
, start
, obj
->vtable
->domain
));
5562 if (*(gpointer
*)ptr
)
5563 binary_protocol_wbarrier (ptr
, *(gpointer
*)ptr
, (gpointer
)LOAD_VTABLE (*(gpointer
*)ptr
));
5565 if (ptr_in_nursery (ptr
) || ptr_on_stack (ptr
) || !ptr_in_nursery (*(gpointer
*)ptr
)) {
5566 DEBUG (8, fprintf (gc_debug_file
, "Skipping remset at %p\n", ptr
));
5571 buffer
= STORE_REMSET_BUFFER
;
5572 index
= STORE_REMSET_BUFFER_INDEX
;
5573 /* This simple optimization eliminates a sizable portion of
5574 entries. Comparing it to the last but one entry as well
5575 doesn't eliminate significantly more entries. */
5576 if (buffer
[index
] == ptr
) {
5581 DEBUG (8, fprintf (gc_debug_file
, "Adding remset at %p\n", ptr
));
5582 HEAVY_STAT (++stat_wbarrier_generic_store_remset
);
/* NOTE(review): the index increment is missing from this extraction; index is presumably advanced before this bound check. */
5585 if (index
>= STORE_REMSET_BUFFER_SIZE
) {
5586 evacuate_remset_buffer ();
5587 index
= STORE_REMSET_BUFFER_INDEX
;
5588 g_assert (index
== 0);
5591 buffer
[index
] = ptr
;
5592 STORE_REMSET_BUFFER_INDEX
= index
;
/*
 * Store value at ptr and run the generic write barrier, but only when
 * the stored value is a nursery pointer (old-to-old stores need no
 * remset entry).
 */
5598 mono_gc_wbarrier_generic_store (gpointer ptr
, MonoObject
* value
)
5600 DEBUG (8, fprintf (gc_debug_file
, "Wbarrier store at %p to %p (%s)\n", ptr
, value
, value
? safe_name (value
) : "null"));
5601 *(void**)ptr
= value
;
5602 if (ptr_in_nursery (value
))
5603 mono_gc_wbarrier_generic_nostore (ptr
);
/*
 * Copy a value-type blob word by word, issuing the generic store barrier
 * for each reference slot.  NOTE(review): the bitmap test selecting
 * reference slots and the loop advancing dest/src are missing from this
 * extraction.
 */
5606 void mono_gc_wbarrier_value_copy_bitmap (gpointer _dest
, gpointer _src
, int size
, unsigned bitmap
)
5608 mword
*dest
= _dest
;
5613 mono_gc_wbarrier_generic_store (dest
, (MonoObject
*)*src
);
5618 size
-= SIZEOF_VOID_P
;
/*
 * Write barrier for copying `count` value-type instances of class
 * `klass` from src to dest: memmove the data, then record a
 * REMSET_VTYPE entry (tagged dest, GC descriptor, count) unless dest is
 * in the nursery/on the stack or the class holds no references.  Needs
 * three free remset slots; chains a new set otherwise.
 */
5625 mono_gc_wbarrier_value_copy (gpointer dest
, gpointer src
, int count
, MonoClass
*klass
)
5629 HEAVY_STAT (++stat_wbarrier_value_copy
);
5630 g_assert (klass
->valuetype
);
5632 memmove (dest
, src
, count
* mono_class_value_size (klass
, NULL
));
5633 rs
= REMEMBERED_SET
;
5634 if (ptr_in_nursery (dest
) || ptr_on_stack (dest
) || !klass
->has_references
) {
5638 g_assert (klass
->gc_descr_inited
);
5639 DEBUG (8, fprintf (gc_debug_file
, "Adding value remset at %p, count %d, descr %p for class %s (%p)\n", dest
, count
, klass
->gc_descr
, klass
->name
, klass
));
5641 if (rs
->store_next
+ 3 < rs
->end_set
) {
5642 *(rs
->store_next
++) = (mword
)dest
| REMSET_VTYPE
;
5643 *(rs
->store_next
++) = (mword
)klass
->gc_descr
;
5644 *(rs
->store_next
++) = (mword
)count
;
5648 rs
= alloc_remset (rs
->end_set
- rs
->data
, (void*)1);
5649 rs
->next
= REMEMBERED_SET
;
5650 REMEMBERED_SET
= rs
;
5651 #ifdef HAVE_KW_THREAD
5652 mono_sgen_thread_info_lookup (ARCH_GET_THREAD ())->remset
= rs
;
5654 *(rs
->store_next
++) = (mword
)dest
| REMSET_VTYPE
;
5655 *(rs
->store_next
++) = (mword
)klass
->gc_descr
;
5656 *(rs
->store_next
++) = (mword
)count
;
5661 * mono_gc_wbarrier_object_copy:
5663 * Write barrier to call when obj is the result of a clone or copy of an object.
/*
 * Copies src's payload into obj (skipping the MonoObject header so the
 * sync state is preserved) and records a REMSET_OBJECT entry for obj
 * unless it lives in the nursery or on the stack.
 */
5666 mono_gc_wbarrier_object_copy (MonoObject
* obj
, MonoObject
*src
)
5672 HEAVY_STAT (++stat_wbarrier_object_copy
);
5673 rs
= REMEMBERED_SET
;
5674 DEBUG (6, fprintf (gc_debug_file
, "Adding object remset for %p\n", obj
));
5675 size
= mono_object_class (obj
)->instance_size
;
5677 /* do not copy the sync state */
5678 memcpy ((char*)obj
+ sizeof (MonoObject
), (char*)src
+ sizeof (MonoObject
),
5679 size
- sizeof (MonoObject
));
5680 if (ptr_in_nursery (obj
) || ptr_on_stack (obj
)) {
5684 if (rs
->store_next
< rs
->end_set
) {
5685 *(rs
->store_next
++) = (mword
)obj
| REMSET_OBJECT
;
5689 rs
= alloc_remset (rs
->end_set
- rs
->data
, (void*)1);
5690 rs
->next
= REMEMBERED_SET
;
5691 REMEMBERED_SET
= rs
;
5692 #ifdef HAVE_KW_THREAD
5693 mono_sgen_thread_info_lookup (ARCH_GET_THREAD ())->remset
= rs
;
5695 *(rs
->store_next
++) = (mword
)obj
| REMSET_OBJECT
;
5700 * ######################################################################
5701 * ######## Collector debugging
5702 * ######################################################################
/* Human-readable names for GC descriptor types, indexed by descriptor type (initializers elided in this extraction). */
5705 const char*descriptor_types
[] = {
/*
 * gdb helper: print everything known about ptr — which space it lives
 * in, pin/forward state, vtable, class name and GC descriptor.
 */
5717 describe_ptr (char *ptr
)
5723 if (ptr_in_nursery (ptr
)) {
5724 printf ("Pointer inside nursery.\n");
5726 if (major
.ptr_is_in_non_pinned_space (ptr
)) {
5727 printf ("Pointer inside oldspace.\n");
5728 } else if (major
.obj_is_from_pinned_alloc (ptr
)) {
5729 printf ("Pointer is inside a pinned chunk.\n");
5731 printf ("Pointer unknown.\n");
5736 if (object_is_pinned (ptr
))
5737 printf ("Object is pinned.\n");
5739 if (object_is_forwarded (ptr
))
5740 printf ("Object is forwared.\n");
5742 // FIXME: Handle pointers to the inside of objects
5743 vtable
= (MonoVTable
*)LOAD_VTABLE (ptr
);
5745 printf ("VTable: %p\n", vtable
);
5746 if (vtable
== NULL
) {
5747 printf ("VTable is invalid (empty).\n");
5750 if (ptr_in_nursery (vtable
)) {
5751 printf ("VTable is invalid (points inside nursery).\n");
5754 printf ("Class: %s\n", vtable
->klass
->name
);
5756 desc
= ((GCVTable
*)vtable
)->desc
;
5757 printf ("Descriptor: %lx\n", (long)desc
);
5760 printf ("Descriptor type: %d (%s)\n", type
, descriptor_types
[type
]);
/*
 * Consistency-check helper: examine the remset entry at p and set *found
 * if it covers addr.  Handles all entry kinds — single location, range
 * (pointer + count), whole object (size from the vtable), and value type
 * (size from the GC descriptor, minus the MonoObject header).
 * Presumably returns a pointer to the next entry — the return statements
 * are missing from this extraction.
 */
5764 find_in_remset_loc (mword
*p
, char *addr
, gboolean
*found
)
5770 switch ((*p
) & REMSET_TYPE_MASK
) {
5771 case REMSET_LOCATION
:
5772 if (*p
== (mword
)addr
)
5776 ptr
= (void**)(*p
& ~REMSET_TYPE_MASK
);
5778 if ((void**)addr
>= ptr
&& (void**)addr
< ptr
+ count
)
5782 ptr
= (void**)(*p
& ~REMSET_TYPE_MASK
);
5783 count
= safe_object_get_size ((MonoObject
*)ptr
);
5784 count
= ALIGN_UP (count
);
5785 count
/= sizeof (mword
);
5786 if ((void**)addr
>= ptr
&& (void**)addr
< ptr
+ count
)
5790 ptr
= (void**)(*p
& ~REMSET_TYPE_MASK
);
5794 switch (desc
& 0x7) {
5795 case DESC_TYPE_RUN_LENGTH
:
5796 OBJ_RUN_LEN_SIZE (skip_size
, desc
, ptr
);
5798 case DESC_TYPE_SMALL_BITMAP
:
5799 OBJ_BITMAP_SIZE (skip_size
, desc
, start
);
5803 g_assert_not_reached ();
5806 /* The descriptor includes the size of MonoObject */
5807 skip_size
-= sizeof (MonoObject
);
5809 if ((void**)addr
>= ptr
&& (void**)addr
< ptr
+ (skip_size
/ sizeof (gpointer
)))
5814 g_assert_not_reached ();
5820 * Return whenever ADDR occurs in the remembered sets
/*
 * Consistency check: search every remembered set (global, generic store,
 * per-thread sets and store buffers, freed-thread sets) for an entry
 * covering addr; result accumulated in `found`.
 */
5823 find_in_remsets (char *addr
)
5826 SgenThreadInfo
*info
;
5827 RememberedSet
*remset
;
5828 GenericStoreRememberedSet
*store_remset
;
5830 gboolean found
= FALSE
;
5832 /* the global one */
5833 for (remset
= global_remset
; remset
; remset
= remset
->next
) {
5834 DEBUG (4, fprintf (gc_debug_file
, "Scanning global remset range: %p-%p, size: %td\n", remset
->data
, remset
->store_next
, remset
->store_next
- remset
->data
));
5835 for (p
= remset
->data
; p
< remset
->store_next
;) {
5836 p
= find_in_remset_loc (p
, addr
, &found
);
5842 /* the generic store ones */
5843 for (store_remset
= generic_store_remsets
; store_remset
; store_remset
= store_remset
->next
) {
5844 for (i
= 0; i
< STORE_REMSET_BUFFER_SIZE
- 1; ++i
) {
5845 if (store_remset
->data
[i
] == addr
)
5850 /* the per-thread ones */
5851 for (i
= 0; i
< THREAD_HASH_SIZE
; ++i
) {
5852 for (info
= thread_table
[i
]; info
; info
= info
->next
) {
5854 for (remset
= info
->remset
; remset
; remset
= remset
->next
) {
5855 DEBUG (4, fprintf (gc_debug_file
, "Scanning remset for thread %p, range: %p-%p, size: %td\n", info
, remset
->data
, remset
->store_next
, remset
->store_next
- remset
->data
));
5856 for (p
= remset
->data
; p
< remset
->store_next
;) {
5857 p
= find_in_remset_loc (p
, addr
, &found
);
/* Store buffer entries are one-based (j + 1), matching scan_from_remsets. */
5862 for (j
= 0; j
< *info
->store_remset_buffer_index_addr
; ++j
) {
5863 if ((*info
->store_remset_buffer_addr
) [j
+ 1] == addr
)
5869 /* the freed thread ones */
5870 for (remset
= freed_thread_remsets
; remset
; remset
= remset
->next
) {
5871 DEBUG (4, fprintf (gc_debug_file
, "Scanning remset for freed thread, range: %p-%p, size: %td\n", remset
->data
, remset
->store_next
, remset
->store_next
- remset
->data
));
5872 for (p
= remset
->data
; p
< remset
->store_next
;) {
5873 p
= find_in_remset_loc (p
, addr
, &found
);
/* Set by the consistency-check HANDLE_PTR below when an old-to-new reference has no remset entry. */
5882 static gboolean missing_remsets
;
5885 * We let a missing remset slide if the target object is pinned,
5886 * because the store might have happened but the remset not yet added,
5887 * but in that case the target must be pinned. We might theoretically
5888 * miss some missing remsets this way, but it's very unlikely.
5891 #define HANDLE_PTR(ptr,obj) do { \
5892 if (*(ptr) && (char*)*(ptr) >= nursery_start && (char*)*(ptr) < nursery_next) { \
5893 if (!find_in_remsets ((char*)(ptr))) { \
5894 fprintf (gc_debug_file, "Oldspace->newspace reference %p at offset %td in object %p (%s.%s) not found in remsets.\n", *(ptr), (char*)(ptr) - (char*)(obj), (obj), ((MonoObject*)(obj))->vtable->klass->name_space, ((MonoObject*)(obj))->vtable->klass->name); \
5895 binary_protocol_missing_remset ((obj), (gpointer)LOAD_VTABLE ((obj)), (char*)(ptr) - (char*)(obj), *(ptr), (gpointer)LOAD_VTABLE(*(ptr)), object_is_pinned (*(ptr))); \
5896 if (!object_is_pinned (*(ptr))) \
5897 missing_remsets = TRUE; \
5903 * Check that each object reference which points into the nursery can
5904 * be found in the remembered sets.
5907 check_consistency_callback (char *start
, size_t size
, void *dummy
)
5909 GCVTable
*vt
= (GCVTable
*)LOAD_VTABLE (start
);
5910 DEBUG (8, fprintf (gc_debug_file
, "Scanning object %p, vtable: %p (%s)\n", start
, vt
, vt
->klass
->name
));
5912 #define SCAN_OBJECT_ACTION
5913 #include "sgen-scan-object.h"
5917 * Perform consistency check of the heap.
5919 * Assumes the world is stopped.
5922 check_consistency (void)
5926 // Need to add more checks
5928 missing_remsets
= FALSE
;
5930 DEBUG (1, fprintf (gc_debug_file
, "Begin heap consistency check...\n"));
5932 // Check that oldspace->newspace pointers are registered with the collector
5933 major
.iterate_objects (TRUE
, TRUE
, (IterateObjectCallbackFunc
)check_consistency_callback
, NULL
);
5935 for (bigobj
= los_object_list
; bigobj
; bigobj
= bigobj
->next
)
5936 check_consistency_callback (bigobj
->data
, bigobj
->size
, NULL
);
5938 DEBUG (1, fprintf (gc_debug_file
, "Heap consistency check done.\n"));
5940 #ifdef SGEN_BINARY_PROTOCOL
5941 if (!binary_protocol_file
)
5943 g_assert (!missing_remsets
);
/* HANDLE_PTR variant asserting that every reference has a loadable vtable. */
5948 #define HANDLE_PTR(ptr,obj) do { \
5950 g_assert (LOAD_VTABLE (*(ptr))); \
5954 check_major_refs_callback (char *start
, size_t size
, void *dummy
)
5956 #define SCAN_OBJECT_ACTION
5957 #include "sgen-scan-object.h"
/* Validate every reference held by major-heap and LOS objects. */
5961 check_major_refs (void)
5965 major
.iterate_objects (TRUE
, TRUE
, (IterateObjectCallbackFunc
)check_major_refs_callback
, NULL
);
5967 for (bigobj
= los_object_list
; bigobj
; bigobj
= bigobj
->next
)
5968 check_major_refs_callback (bigobj
->data
, bigobj
->size
, NULL
);
5971 /* Check that the reference is valid */
5973 #define HANDLE_PTR(ptr,obj) do { \
5975 g_assert (safe_name (*(ptr)) != NULL); \
5982 * Perform consistency check on an object. Currently we only check that the
5983 * reference fields are valid.
5986 check_object (char *start
)
5991 #include "sgen-scan-object.h"
5995 * ######################################################################
5996 * ######## Other mono public interface functions.
5997 * ######################################################################
6001 mono_gc_collect (int generation
)
6005 if (generation
== 0) {
6006 collect_nursery (0);
6008 major_collection ("user request");
6015 mono_gc_max_generation (void)
6021 mono_gc_collection_count (int generation
)
6023 if (generation
== 0)
6024 return num_minor_gcs
;
6025 return num_major_gcs
;
6029 mono_gc_get_used_size (void)
6033 tot
= los_memory_usage
;
6034 tot
+= nursery_section
->next_data
- nursery_section
->data
;
6035 tot
+= major
.get_used_size ();
6036 /* FIXME: account for pinned objects */
6042 mono_gc_get_heap_size (void)
6048 mono_gc_disable (void)
6056 mono_gc_enable (void)
6064 mono_gc_get_los_limit (void)
6066 return MAX_SMALL_OBJ_SIZE
;
6070 mono_object_is_alive (MonoObject
* o
)
6076 mono_gc_get_generation (MonoObject
*obj
)
6078 if (ptr_in_nursery (obj
))
6084 mono_gc_enable_events (void)
6089 mono_gc_weak_link_add (void **link_addr
, MonoObject
*obj
, gboolean track
)
6092 mono_gc_register_disappearing_link (obj
, link_addr
, track
);
6097 mono_gc_weak_link_remove (void **link_addr
)
6100 mono_gc_register_disappearing_link (NULL
, link_addr
, FALSE
);
6105 mono_gc_weak_link_get (void **link_addr
)
6109 return (MonoObject
*) REVEAL_POINTER (*link_addr
);
6113 mono_gc_ephemeron_array_add (MonoObject
*obj
)
6115 EphemeronLinkNode
*node
;
6119 node
= mono_sgen_alloc_internal (INTERNAL_MEM_EPHEMERON_LINK
);
6124 node
->array
= (char*)obj
;
6125 node
->next
= ephemeron_list
;
6126 ephemeron_list
= node
;
6128 DEBUG (5, fprintf (gc_debug_file
, "Registered ephemeron array %p\n", obj
));
6135 mono_gc_make_descr_from_bitmap (gsize
*bitmap
, int numbits
)
6137 if (numbits
< ((sizeof (*bitmap
) * 8) - ROOT_DESC_TYPE_SHIFT
)) {
6138 return (void*)MAKE_ROOT_DESC (ROOT_DESC_BITMAP
, bitmap
[0]);
6140 mword
complex = alloc_complex_descriptor (bitmap
, numbits
);
6141 return (void*)MAKE_ROOT_DESC (ROOT_DESC_COMPLEX
, complex);
6146 mono_gc_make_root_descr_user (MonoGCRootMarkFunc marker
)
6150 g_assert (user_descriptors_next
< MAX_USER_DESCRIPTORS
);
6151 descr
= (void*)MAKE_ROOT_DESC (ROOT_DESC_USER
, (mword
)user_descriptors_next
);
6152 user_descriptors
[user_descriptors_next
++] = marker
;
/* Allocate non-moving, zeroed memory outside the GC heap and register it as a
 * root described by `descr`. Returns NULL on failure. Free with
 * mono_gc_free_fixed (). */
void*
mono_gc_alloc_fixed (size_t size, void *descr)
{
	/* FIXME: do a single allocation */
	void *res = calloc (1, size);
	if (!res)
		return NULL;
	if (!mono_gc_register_root (res, size, descr)) {
		free (res);
		res = NULL;
	}
	return res;
}

/* Counterpart of mono_gc_alloc_fixed: unregister the root, then release it. */
void
mono_gc_free_fixed (void* addr)
{
	mono_gc_deregister_root (addr);
	free (addr);
}
6179 mono_gc_invoke_with_gc_lock (MonoGCLockedCallbackFunc func
, void *data
)
6183 result
= func (data
);
6184 UNLOCK_INTERRUPTION
;
6189 mono_gc_is_gc_thread (void)
6193 result
= mono_sgen_thread_info_lookup (ARCH_GET_THREAD ()) != NULL
;
6198 /* Tries to extract a number from the passed string, taking in to account m, k
6201 mono_sgen_parse_environment_string_extract_number (const char *str
, glong
*out
)
6204 int len
= strlen (str
), shift
= 0;
6206 gboolean is_suffix
= FALSE
;
6209 switch (str
[len
- 1]) {
6220 suffix
= str
[len
- 1];
6225 val
= strtol (str
, &endptr
, 10);
6227 if ((errno
== ERANGE
&& (val
== LONG_MAX
|| val
== LONG_MIN
))
6228 || (errno
!= 0 && val
== 0) || (endptr
== str
))
6232 if (*(endptr
+ 1)) /* Invalid string. */
6242 mono_gc_base_init (void)
6246 char *major_collector
= NULL
;
6247 struct sigaction sinfo
;
6249 LOCK_INIT (gc_mutex
);
6251 if (gc_initialized
) {
6255 pagesize
= mono_pagesize ();
6256 gc_debug_file
= stderr
;
6258 LOCK_INIT (interruption_mutex
);
6259 LOCK_INIT (global_remset_mutex
);
6261 if ((env
= getenv ("MONO_GC_PARAMS"))) {
6262 opts
= g_strsplit (env
, ",", -1);
6263 for (ptr
= opts
; *ptr
; ++ptr
) {
6265 if (g_str_has_prefix (opt
, "major=")) {
6266 opt
= strchr (opt
, '=') + 1;
6267 major_collector
= g_strdup (opt
);
6275 mono_sgen_init_internal_allocator ();
6277 mono_sgen_register_fixed_internal_mem_type (INTERNAL_MEM_FRAGMENT
, sizeof (Fragment
));
6278 mono_sgen_register_fixed_internal_mem_type (INTERNAL_MEM_SECTION
, SGEN_SIZEOF_GC_MEM_SECTION
);
6279 mono_sgen_register_fixed_internal_mem_type (INTERNAL_MEM_FINALIZE_ENTRY
, sizeof (FinalizeEntry
));
6280 mono_sgen_register_fixed_internal_mem_type (INTERNAL_MEM_DISLINK
, sizeof (DisappearingLink
));
6281 mono_sgen_register_fixed_internal_mem_type (INTERNAL_MEM_ROOT_RECORD
, sizeof (RootRecord
));
6282 mono_sgen_register_fixed_internal_mem_type (INTERNAL_MEM_GRAY_QUEUE
, sizeof (GrayQueueSection
));
6283 g_assert (sizeof (GenericStoreRememberedSet
) == sizeof (gpointer
) * STORE_REMSET_BUFFER_SIZE
);
6284 mono_sgen_register_fixed_internal_mem_type (INTERNAL_MEM_STORE_REMSET
, sizeof (GenericStoreRememberedSet
));
6285 mono_sgen_register_fixed_internal_mem_type (INTERNAL_MEM_EPHEMERON_LINK
, sizeof (EphemeronLinkNode
));
6287 if (!major_collector
|| !strcmp (major_collector
, "marksweep")) {
6288 mono_sgen_marksweep_init (&major
);
6289 } else if (!major_collector
|| !strcmp (major_collector
, "marksweep-fixed")) {
6290 mono_sgen_marksweep_fixed_init (&major
);
6291 } else if (!major_collector
|| !strcmp (major_collector
, "marksweep-par")) {
6292 mono_sgen_marksweep_par_init (&major
);
6293 workers_init (mono_cpu_count ());
6294 } else if (!major_collector
|| !strcmp (major_collector
, "marksweep-fixed-par")) {
6295 mono_sgen_marksweep_fixed_par_init (&major
);
6296 workers_init (mono_cpu_count ());
6297 } else if (!strcmp (major_collector
, "copying")) {
6298 mono_sgen_copying_init (&major
);
6300 fprintf (stderr
, "Unknown major collector `%s'.\n", major_collector
);
6305 for (ptr
= opts
; *ptr
; ++ptr
) {
6307 if (g_str_has_prefix (opt
, "major="))
6310 if (g_str_has_prefix (opt
, "nursery-size=")) {
6312 opt
= strchr (opt
, '=') + 1;
6313 if (*opt
&& mono_sgen_parse_environment_string_extract_number (opt
, &val
)) {
6314 default_nursery_size
= val
;
6315 #ifdef SGEN_ALIGN_NURSERY
6316 if ((val
& (val
- 1))) {
6317 fprintf (stderr
, "The nursery size must be a power of two.\n");
6321 default_nursery_bits
= 0;
6322 while (1 << (++ default_nursery_bits
) != default_nursery_size
)
6326 fprintf (stderr
, "nursery-size must be an integer.\n");
6332 if (!(major
.handle_gc_param
&& major
.handle_gc_param (opt
))) {
6333 fprintf (stderr
, "MONO_GC_PARAMS must be a comma-delimited list of one or more of the following:\n");
6334 fprintf (stderr
, " nursery-size=N (where N is an integer, possibly with a k, m or a g suffix)\n");
6335 fprintf (stderr
, " major=COLLECTOR (where collector is `marksweep', `marksweep-par' or `copying')\n");
6336 if (major
.print_gc_param_usage
)
6337 major
.print_gc_param_usage ();
6344 if (major_collector
)
6345 g_free (major_collector
);
6347 nursery_size
= DEFAULT_NURSERY_SIZE
;
6348 minor_collection_allowance
= MIN_MINOR_COLLECTION_ALLOWANCE
;
6352 if ((env
= getenv ("MONO_GC_DEBUG"))) {
6353 opts
= g_strsplit (env
, ",", -1);
6354 for (ptr
= opts
; ptr
&& *ptr
; ptr
++) {
6356 if (opt
[0] >= '0' && opt
[0] <= '9') {
6357 gc_debug_level
= atoi (opt
);
6362 char *rf
= g_strdup_printf ("%s.%d", opt
, getpid ());
6363 gc_debug_file
= fopen (rf
, "wb");
6365 gc_debug_file
= stderr
;
6368 } else if (!strcmp (opt
, "collect-before-allocs")) {
6369 collect_before_allocs
= TRUE
;
6370 } else if (!strcmp (opt
, "check-at-minor-collections")) {
6371 consistency_check_at_minor_collection
= TRUE
;
6372 nursery_clear_policy
= CLEAR_AT_GC
;
6373 } else if (!strcmp (opt
, "xdomain-checks")) {
6374 xdomain_checks
= TRUE
;
6375 } else if (!strcmp (opt
, "clear-at-gc")) {
6376 nursery_clear_policy
= CLEAR_AT_GC
;
6377 } else if (!strcmp (opt
, "conservative-stack-mark")) {
6378 conservative_stack_mark
= TRUE
;
6379 } else if (!strcmp (opt
, "check-scan-starts")) {
6380 do_scan_starts_check
= TRUE
;
6381 } else if (g_str_has_prefix (opt
, "heap-dump=")) {
6382 char *filename
= strchr (opt
, '=') + 1;
6383 nursery_clear_policy
= CLEAR_AT_GC
;
6384 heap_dump_file
= fopen (filename
, "w");
6386 fprintf (heap_dump_file
, "<sgen-dump>\n");
6387 #ifdef SGEN_BINARY_PROTOCOL
6388 } else if (g_str_has_prefix (opt
, "binary-protocol=")) {
6389 char *filename
= strchr (opt
, '=') + 1;
6390 binary_protocol_file
= fopen (filename
, "w");
6393 fprintf (stderr
, "Invalid format for the MONO_GC_DEBUG env variable: '%s'\n", env
);
6394 fprintf (stderr
, "The format is: MONO_GC_DEBUG=[l[:filename]|<option>]+ where l is a debug level 0-9.\n");
6395 fprintf (stderr
, "Valid options are: collect-before-allocs, check-at-minor-collections, xdomain-checks, clear-at-gc.\n");
6402 suspend_ack_semaphore_ptr
= &suspend_ack_semaphore
;
6403 MONO_SEM_INIT (&suspend_ack_semaphore
, 0);
6405 sigfillset (&sinfo
.sa_mask
);
6406 sinfo
.sa_flags
= SA_RESTART
| SA_SIGINFO
;
6407 sinfo
.sa_sigaction
= suspend_handler
;
6408 if (sigaction (suspend_signal_num
, &sinfo
, NULL
) != 0) {
6409 g_error ("failed sigaction");
6412 sinfo
.sa_handler
= restart_handler
;
6413 if (sigaction (restart_signal_num
, &sinfo
, NULL
) != 0) {
6414 g_error ("failed sigaction");
6417 sigfillset (&suspend_signal_mask
);
6418 sigdelset (&suspend_signal_mask
, restart_signal_num
);
6420 global_remset
= alloc_remset (1024, NULL
);
6421 global_remset
->next
= NULL
;
6423 pthread_key_create (&remembered_set_key
, unregister_thread
);
6425 #ifndef HAVE_KW_THREAD
6426 pthread_key_create (&thread_info_key
, NULL
);
6429 gc_initialized
= TRUE
;
6431 mono_gc_register_thread (&sinfo
);
6435 mono_gc_get_suspend_signal (void)
6437 return suspend_signal_num
;
/* Emit IL that loads a GC TLS variable.  With __thread support the JIT reads
 * the variable directly at a known offset; otherwise it loads the
 * SgenThreadInfo pointer from the pthread key and indirects through the
 * struct member. */
#ifdef HAVE_KW_THREAD
#define EMIT_TLS_ACCESS(mb,dummy,offset)	do {	\
	mono_mb_emit_byte ((mb), MONO_CUSTOM_PREFIX);	\
	mono_mb_emit_byte ((mb), CEE_MONO_TLS);		\
	mono_mb_emit_i4 ((mb), (offset));		\
	} while (0)
#else
#define EMIT_TLS_ACCESS(mb,member,dummy)	do {	\
	mono_mb_emit_byte ((mb), MONO_CUSTOM_PREFIX);	\
	mono_mb_emit_byte ((mb), CEE_MONO_TLS);		\
	mono_mb_emit_i4 ((mb), thread_info_key);	\
	mono_mb_emit_icon ((mb), G_STRUCT_OFFSET (SgenThreadInfo, member));	\
	mono_mb_emit_byte ((mb), CEE_ADD);		\
	mono_mb_emit_byte ((mb), CEE_LDIND_I);	\
	} while (0)
#endif
#ifdef MANAGED_ALLOCATION
/* FIXME: Do this in the JIT, where specialized allocation sequences can be created
 * for each class. This is currently not easy to do, as it is hard to generate basic
 * blocks + branches, but it is easy with the linear IL codebase.
 *
 * For this to work we'd need to solve the TLAB race, first.  Now we
 * require the allocator to be in a few known methods to make sure
 * that they are executed atomically via the restart mechanism.
 *
 * create_allocator:
 *
 * Build the managed IL fast-path allocator for the given allocator type
 * (ATYPE_NORMAL/ATYPE_SMALL take a vtable, ATYPE_VECTOR takes vtable + length).
 * The fast path bumps tlab_next; on overflow it falls back to the
 * mono_gc_alloc_obj / mono_gc_alloc_vector icalls.
 *
 * NOTE(review): reconstructed from a garbled dump; declaration lines and a few
 * interior statements follow upstream sgen-gc.c — confirm against the original.
 */
static MonoMethod*
create_allocator (int atype)
{
	int p_var, size_var;
	guint32 slowpath_branch, max_size_branch;
	MonoMethodBuilder *mb;
	MonoMethod *res;
	MonoMethodSignature *csig;
	static gboolean registered = FALSE;
	int tlab_next_addr_var, new_next_var;
	int num_params, i;
	const char *name = NULL;
	AllocatorWrapperInfo *info;

#ifdef HAVE_KW_THREAD
	int tlab_next_addr_offset = -1;
	int tlab_temp_end_offset = -1;

	MONO_THREAD_VAR_OFFSET (tlab_next_addr, tlab_next_addr_offset);
	MONO_THREAD_VAR_OFFSET (tlab_temp_end, tlab_temp_end_offset);

	g_assert (tlab_next_addr_offset != -1);
	g_assert (tlab_temp_end_offset != -1);
#endif

	if (!registered) {
		mono_register_jit_icall (mono_gc_alloc_obj, "mono_gc_alloc_obj", mono_create_icall_signature ("object ptr int"), FALSE);
		mono_register_jit_icall (mono_gc_alloc_vector, "mono_gc_alloc_vector", mono_create_icall_signature ("object ptr int int"), FALSE);
		registered = TRUE;
	}

	if (atype == ATYPE_SMALL) {
		num_params = 1;
		name = "AllocSmall";
	} else if (atype == ATYPE_NORMAL) {
		num_params = 1;
		name = "Alloc";
	} else if (atype == ATYPE_VECTOR) {
		num_params = 2;
		name = "AllocVector";
	} else {
		g_assert_not_reached ();
	}

	csig = mono_metadata_signature_alloc (mono_defaults.corlib, num_params);
	csig->ret = &mono_defaults.object_class->byval_arg;
	for (i = 0; i < num_params; ++i)
		csig->params [i] = &mono_defaults.int_class->byval_arg;

	mb = mono_mb_new (mono_defaults.object_class, name, MONO_WRAPPER_ALLOC);
	size_var = mono_mb_add_local (mb, &mono_defaults.int32_class->byval_arg);
	if (atype == ATYPE_NORMAL || atype == ATYPE_SMALL) {
		/* size = vtable->klass->instance_size; */
		mono_mb_emit_ldarg (mb, 0);
		mono_mb_emit_icon (mb, G_STRUCT_OFFSET (MonoVTable, klass));
		mono_mb_emit_byte (mb, CEE_ADD);
		mono_mb_emit_byte (mb, CEE_LDIND_I);
		mono_mb_emit_icon (mb, G_STRUCT_OFFSET (MonoClass, instance_size));
		mono_mb_emit_byte (mb, CEE_ADD);
		/* FIXME: assert instance_size stays a 4 byte integer */
		mono_mb_emit_byte (mb, CEE_LDIND_U4);
		mono_mb_emit_stloc (mb, size_var);
	} else if (atype == ATYPE_VECTOR) {
		MonoExceptionClause *clause;
		int pos, pos_leave;
		MonoClass *oom_exc_class;
		MonoMethod *ctor;

		/* n > MONO_ARRAY_MAX_INDEX -> OverflowException */
		mono_mb_emit_ldarg (mb, 1);
		mono_mb_emit_icon (mb, MONO_ARRAY_MAX_INDEX);
		pos = mono_mb_emit_short_branch (mb, CEE_BLE_UN_S);
		mono_mb_emit_exception (mb, "OverflowException", NULL);
		mono_mb_patch_short_branch (mb, pos);

		/* The size computation can overflow; wrap it in a try/catch that
		 * rethrows overflow as OutOfMemoryException. */
		clause = mono_image_alloc0 (mono_defaults.corlib, sizeof (MonoExceptionClause));
		clause->try_offset = mono_mb_get_label (mb);

		/* vtable->klass->sizes.element_size */
		mono_mb_emit_ldarg (mb, 0);
		mono_mb_emit_icon (mb, G_STRUCT_OFFSET (MonoVTable, klass));
		mono_mb_emit_byte (mb, CEE_ADD);
		mono_mb_emit_byte (mb, CEE_LDIND_I);
		mono_mb_emit_icon (mb, G_STRUCT_OFFSET (MonoClass, sizes.element_size));
		mono_mb_emit_byte (mb, CEE_ADD);
		mono_mb_emit_byte (mb, CEE_LDIND_U4);

		/* * n (with overflow check) */
		mono_mb_emit_ldarg (mb, 1);
		mono_mb_emit_byte (mb, CEE_MUL_OVF_UN);
		/* + sizeof (MonoArray) */
		mono_mb_emit_icon (mb, sizeof (MonoArray));
		mono_mb_emit_byte (mb, CEE_ADD_OVF_UN);
		mono_mb_emit_stloc (mb, size_var);

		pos_leave = mono_mb_emit_branch (mb, CEE_LEAVE);

		/* catch (OverflowException) -> throw new OutOfMemoryException () */
		clause->flags = MONO_EXCEPTION_CLAUSE_NONE;
		clause->try_len = mono_mb_get_pos (mb) - clause->try_offset;
		clause->data.catch_class = mono_class_from_name (mono_defaults.corlib,
				"System", "OverflowException");
		g_assert (clause->data.catch_class);
		clause->handler_offset = mono_mb_get_label (mb);

		oom_exc_class = mono_class_from_name (mono_defaults.corlib,
				"System", "OutOfMemoryException");
		g_assert (oom_exc_class);
		ctor = mono_class_get_method_from_name (oom_exc_class, ".ctor", 0);
		g_assert (ctor);

		mono_mb_emit_byte (mb, CEE_POP);
		mono_mb_emit_op (mb, CEE_NEWOBJ, ctor);
		mono_mb_emit_byte (mb, CEE_THROW);

		clause->handler_len = mono_mb_get_pos (mb) - clause->handler_offset;
		mono_mb_set_clauses (mb, 1, clause);
		mono_mb_patch_branch (mb, pos_leave);
	} else {
		g_assert_not_reached ();
	}

	/* size += ALLOC_ALIGN - 1; */
	mono_mb_emit_ldloc (mb, size_var);
	mono_mb_emit_icon (mb, ALLOC_ALIGN - 1);
	mono_mb_emit_byte (mb, CEE_ADD);
	/* size &= ~(ALLOC_ALIGN - 1); */
	mono_mb_emit_icon (mb, ~(ALLOC_ALIGN - 1));
	mono_mb_emit_byte (mb, CEE_AND);
	mono_mb_emit_stloc (mb, size_var);

	/* if (size > MAX_SMALL_OBJ_SIZE) goto slowpath */
	if (atype != ATYPE_SMALL) {
		mono_mb_emit_ldloc (mb, size_var);
		mono_mb_emit_icon (mb, MAX_SMALL_OBJ_SIZE);
		max_size_branch = mono_mb_emit_short_branch (mb, MONO_CEE_BGT_S);
	}

	/*
	 * We need to modify tlab_next, but the JIT only supports reading, so we read
	 * another tls var holding its address instead.
	 */

	/* tlab_next_addr (local) = tlab_next_addr (TLS var) */
	tlab_next_addr_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
	EMIT_TLS_ACCESS (mb, tlab_next_addr, tlab_next_addr_offset);
	mono_mb_emit_stloc (mb, tlab_next_addr_var);

	/* p = (void**)tlab_next; */
	p_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
	mono_mb_emit_ldloc (mb, tlab_next_addr_var);
	mono_mb_emit_byte (mb, CEE_LDIND_I);
	mono_mb_emit_stloc (mb, p_var);

	/* new_next = (char*)p + size; */
	new_next_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
	mono_mb_emit_ldloc (mb, p_var);
	mono_mb_emit_ldloc (mb, size_var);
	mono_mb_emit_byte (mb, CEE_CONV_I);
	mono_mb_emit_byte (mb, CEE_ADD);
	mono_mb_emit_stloc (mb, new_next_var);

	/* tlab_next = new_next */
	mono_mb_emit_ldloc (mb, tlab_next_addr_var);
	mono_mb_emit_ldloc (mb, new_next_var);
	mono_mb_emit_byte (mb, CEE_STIND_I);

	/* if (G_LIKELY (new_next < tlab_temp_end)) */
	mono_mb_emit_ldloc (mb, new_next_var);
	EMIT_TLS_ACCESS (mb, tlab_temp_end, tlab_temp_end_offset);
	slowpath_branch = mono_mb_emit_short_branch (mb, MONO_CEE_BLT_UN_S);

	/* Slowpath: call the allocation icall. */
	if (atype != ATYPE_SMALL)
		mono_mb_patch_short_branch (mb, max_size_branch);

	mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
	mono_mb_emit_byte (mb, CEE_MONO_NOT_TAKEN);

	/* FIXME: mono_gc_alloc_obj takes a 'size_t' as an argument, not an int32 */
	mono_mb_emit_ldarg (mb, 0);
	mono_mb_emit_ldloc (mb, size_var);
	if (atype == ATYPE_NORMAL || atype == ATYPE_SMALL) {
		mono_mb_emit_icall (mb, mono_gc_alloc_obj);
	} else if (atype == ATYPE_VECTOR) {
		mono_mb_emit_ldarg (mb, 1);
		mono_mb_emit_icall (mb, mono_gc_alloc_vector);
	} else {
		g_assert_not_reached ();
	}
	mono_mb_emit_byte (mb, CEE_RET);

	/* Fastpath: install the vtable and return the object. */
	mono_mb_patch_short_branch (mb, slowpath_branch);

	/* FIXME: Memory barrier */

	/* *p = vtable; */
	mono_mb_emit_ldloc (mb, p_var);
	mono_mb_emit_ldarg (mb, 0);
	mono_mb_emit_byte (mb, CEE_STIND_I);

	if (atype == ATYPE_VECTOR) {
		/* arr->max_length = max_length; */
		mono_mb_emit_ldloc (mb, p_var);
		mono_mb_emit_ldflda (mb, G_STRUCT_OFFSET (MonoArray, max_length));
		mono_mb_emit_ldarg (mb, 1);
		mono_mb_emit_byte (mb, CEE_STIND_I);
	}

	/* return p */
	mono_mb_emit_ldloc (mb, p_var);
	mono_mb_emit_byte (mb, CEE_RET);

	res = mono_mb_create_method (mb, csig, 8);
	mono_mb_free (mb);
	mono_method_get_header (res)->init_locals = FALSE;

	info = mono_image_alloc0 (mono_defaults.corlib, sizeof (AllocatorWrapperInfo));
	info->gc_name = "sgen";
	info->alloc_type = atype;
	mono_marshal_set_wrapper_info (res, info);

	return res;
}
#endif
6702 mono_gc_get_gc_name (void)
6707 static MonoMethod
* alloc_method_cache
[ATYPE_NUM
];
6708 static MonoMethod
*write_barrier_method
;
6711 is_ip_in_managed_allocator (MonoDomain
*domain
, gpointer ip
)
6719 ji
= mono_jit_info_table_find (domain
, ip
);
6722 method
= ji
->method
;
6724 if (method
== write_barrier_method
)
6726 for (i
= 0; i
< ATYPE_NUM
; ++i
)
6727 if (method
== alloc_method_cache
[i
])
6733 * Generate an allocator method implementing the fast path of mono_gc_alloc_obj ().
6734 * The signature of the called method is:
6735 * object allocate (MonoVTable *vtable)
6738 mono_gc_get_managed_allocator (MonoVTable
*vtable
, gboolean for_box
)
6740 #ifdef MANAGED_ALLOCATION
6741 MonoClass
*klass
= vtable
->klass
;
6743 #ifdef HAVE_KW_THREAD
6744 int tlab_next_offset
= -1;
6745 int tlab_temp_end_offset
= -1;
6746 MONO_THREAD_VAR_OFFSET (tlab_next
, tlab_next_offset
);
6747 MONO_THREAD_VAR_OFFSET (tlab_temp_end
, tlab_temp_end_offset
);
6749 if (tlab_next_offset
== -1 || tlab_temp_end_offset
== -1)
6753 if (!mono_runtime_has_tls_get ())
6755 if (klass
->instance_size
> tlab_size
)
6757 if (klass
->has_finalize
|| klass
->marshalbyref
|| (mono_profiler_get_events () & MONO_PROFILE_ALLOCATIONS
))
6761 if (klass
->byval_arg
.type
== MONO_TYPE_STRING
)
6763 if (collect_before_allocs
)
6766 if (ALIGN_TO (klass
->instance_size
, ALLOC_ALIGN
) < MAX_SMALL_OBJ_SIZE
)
6767 return mono_gc_get_managed_allocator_by_type (ATYPE_SMALL
);
6769 return mono_gc_get_managed_allocator_by_type (ATYPE_NORMAL
);
6776 mono_gc_get_managed_array_allocator (MonoVTable
*vtable
, int rank
)
6778 #ifdef MANAGED_ALLOCATION
6779 MonoClass
*klass
= vtable
->klass
;
6781 #ifdef HAVE_KW_THREAD
6782 int tlab_next_offset
= -1;
6783 int tlab_temp_end_offset
= -1;
6784 MONO_THREAD_VAR_OFFSET (tlab_next
, tlab_next_offset
);
6785 MONO_THREAD_VAR_OFFSET (tlab_temp_end
, tlab_temp_end_offset
);
6787 if (tlab_next_offset
== -1 || tlab_temp_end_offset
== -1)
6793 if (!mono_runtime_has_tls_get ())
6795 if (mono_profiler_get_events () & MONO_PROFILE_ALLOCATIONS
)
6797 if (collect_before_allocs
)
6799 g_assert (!klass
->has_finalize
&& !klass
->marshalbyref
);
6801 return mono_gc_get_managed_allocator_by_type (ATYPE_VECTOR
);
6808 mono_gc_get_managed_allocator_by_type (int atype
)
6810 #ifdef MANAGED_ALLOCATION
6813 if (!mono_runtime_has_tls_get ())
6816 mono_loader_lock ();
6817 res
= alloc_method_cache
[atype
];
6819 res
= alloc_method_cache
[atype
] = create_allocator (atype
);
6820 mono_loader_unlock ();
6828 mono_gc_get_managed_allocator_types (void)
6835 mono_gc_get_write_barrier (void)
6838 MonoMethodBuilder
*mb
;
6839 MonoMethodSignature
*sig
;
6840 #ifdef MANAGED_WBARRIER
6841 int label_no_wb_1
, label_no_wb_2
, label_no_wb_3
, label_no_wb_4
, label_need_wb
, label_slow_path
;
6842 #ifndef SGEN_ALIGN_NURSERY
6843 int label_continue_1
, label_continue_2
, label_no_wb_5
;
6844 int dereferenced_var
;
6846 int buffer_var
, buffer_index_var
, dummy_var
;
6848 #ifdef HAVE_KW_THREAD
6849 int stack_end_offset
= -1, store_remset_buffer_offset
= -1;
6850 int store_remset_buffer_index_offset
= -1, store_remset_buffer_index_addr_offset
= -1;
6852 MONO_THREAD_VAR_OFFSET (stack_end
, stack_end_offset
);
6853 g_assert (stack_end_offset
!= -1);
6854 MONO_THREAD_VAR_OFFSET (store_remset_buffer
, store_remset_buffer_offset
);
6855 g_assert (store_remset_buffer_offset
!= -1);
6856 MONO_THREAD_VAR_OFFSET (store_remset_buffer_index
, store_remset_buffer_index_offset
);
6857 g_assert (store_remset_buffer_index_offset
!= -1);
6858 MONO_THREAD_VAR_OFFSET (store_remset_buffer_index_addr
, store_remset_buffer_index_addr_offset
);
6859 g_assert (store_remset_buffer_index_addr_offset
!= -1);
6863 // FIXME: Maybe create a separate version for ctors (the branch would be
6864 // correctly predicted more times)
6865 if (write_barrier_method
)
6866 return write_barrier_method
;
6868 /* Create the IL version of mono_gc_barrier_generic_store () */
6869 sig
= mono_metadata_signature_alloc (mono_defaults
.corlib
, 1);
6870 sig
->ret
= &mono_defaults
.void_class
->byval_arg
;
6871 sig
->params
[0] = &mono_defaults
.int_class
->byval_arg
;
6873 mb
= mono_mb_new (mono_defaults
.object_class
, "wbarrier", MONO_WRAPPER_WRITE_BARRIER
);
6875 #ifdef MANAGED_WBARRIER
6876 if (mono_runtime_has_tls_get ()) {
6877 #ifdef SGEN_ALIGN_NURSERY
6878 // if (ptr_in_nursery (ptr)) return;
6880 * Masking out the bits might be faster, but we would have to use 64 bit
6881 * immediates, which might be slower.
6883 mono_mb_emit_ldarg (mb
, 0);
6884 mono_mb_emit_icon (mb
, DEFAULT_NURSERY_BITS
);
6885 mono_mb_emit_byte (mb
, CEE_SHR_UN
);
6886 mono_mb_emit_icon (mb
, (mword
)nursery_start
>> DEFAULT_NURSERY_BITS
);
6887 label_no_wb_1
= mono_mb_emit_branch (mb
, CEE_BEQ
);
6889 // if (!ptr_in_nursery (*ptr)) return;
6890 mono_mb_emit_ldarg (mb
, 0);
6891 mono_mb_emit_byte (mb
, CEE_LDIND_I
);
6892 mono_mb_emit_icon (mb
, DEFAULT_NURSERY_BITS
);
6893 mono_mb_emit_byte (mb
, CEE_SHR_UN
);
6894 mono_mb_emit_icon (mb
, (mword
)nursery_start
>> DEFAULT_NURSERY_BITS
);
6895 label_no_wb_2
= mono_mb_emit_branch (mb
, CEE_BNE_UN
);
6898 // if (ptr < (nursery_start)) goto continue;
6899 mono_mb_emit_ldarg (mb
, 0);
6900 mono_mb_emit_ptr (mb
, (gpointer
) nursery_start
);
6901 label_continue_1
= mono_mb_emit_branch (mb
, CEE_BLT
);
6903 // if (ptr >= nursery_real_end)) goto continue;
6904 mono_mb_emit_ldarg (mb
, 0);
6905 mono_mb_emit_ptr (mb
, (gpointer
) nursery_real_end
);
6906 label_continue_2
= mono_mb_emit_branch (mb
, CEE_BGE
);
6909 label_no_wb_1
= mono_mb_emit_branch (mb
, CEE_BR
);
6912 mono_mb_patch_branch (mb
, label_continue_1
);
6913 mono_mb_patch_branch (mb
, label_continue_2
);
6915 // Dereference and store in local var
6916 dereferenced_var
= mono_mb_add_local (mb
, &mono_defaults
.int_class
->byval_arg
);
6917 mono_mb_emit_ldarg (mb
, 0);
6918 mono_mb_emit_byte (mb
, CEE_LDIND_I
);
6919 mono_mb_emit_stloc (mb
, dereferenced_var
);
6921 // if (*ptr < nursery_start) return;
6922 mono_mb_emit_ldloc (mb
, dereferenced_var
);
6923 mono_mb_emit_ptr (mb
, (gpointer
) nursery_start
);
6924 label_no_wb_2
= mono_mb_emit_branch (mb
, CEE_BLT
);
6926 // if (*ptr >= nursery_end) return;
6927 mono_mb_emit_ldloc (mb
, dereferenced_var
);
6928 mono_mb_emit_ptr (mb
, (gpointer
) nursery_real_end
);
6929 label_no_wb_5
= mono_mb_emit_branch (mb
, CEE_BGE
);
6932 // if (ptr >= stack_end) goto need_wb;
6933 mono_mb_emit_ldarg (mb
, 0);
6934 EMIT_TLS_ACCESS (mb
, stack_end
, stack_end_offset
);
6935 label_need_wb
= mono_mb_emit_branch (mb
, CEE_BGE_UN
);
6937 // if (ptr >= stack_start) return;
6938 dummy_var
= mono_mb_add_local (mb
, &mono_defaults
.int_class
->byval_arg
);
6939 mono_mb_emit_ldarg (mb
, 0);
6940 mono_mb_emit_ldloc_addr (mb
, dummy_var
);
6941 label_no_wb_3
= mono_mb_emit_branch (mb
, CEE_BGE_UN
);
6944 mono_mb_patch_branch (mb
, label_need_wb
);
6946 // buffer = STORE_REMSET_BUFFER;
6947 buffer_var
= mono_mb_add_local (mb
, &mono_defaults
.int_class
->byval_arg
);
6948 EMIT_TLS_ACCESS (mb
, store_remset_buffer
, store_remset_buffer_offset
);
6949 mono_mb_emit_stloc (mb
, buffer_var
);
6951 // buffer_index = STORE_REMSET_BUFFER_INDEX;
6952 buffer_index_var
= mono_mb_add_local (mb
, &mono_defaults
.int_class
->byval_arg
);
6953 EMIT_TLS_ACCESS (mb
, store_remset_buffer_index
, store_remset_buffer_index_offset
);
6954 mono_mb_emit_stloc (mb
, buffer_index_var
);
6956 // if (buffer [buffer_index] == ptr) return;
6957 mono_mb_emit_ldloc (mb
, buffer_var
);
6958 mono_mb_emit_ldloc (mb
, buffer_index_var
);
6959 g_assert (sizeof (gpointer
) == 4 || sizeof (gpointer
) == 8);
6960 mono_mb_emit_icon (mb
, sizeof (gpointer
) == 4 ? 2 : 3);
6961 mono_mb_emit_byte (mb
, CEE_SHL
);
6962 mono_mb_emit_byte (mb
, CEE_ADD
);
6963 mono_mb_emit_byte (mb
, CEE_LDIND_I
);
6964 mono_mb_emit_ldarg (mb
, 0);
6965 label_no_wb_4
= mono_mb_emit_branch (mb
, CEE_BEQ
);
6968 mono_mb_emit_ldloc (mb
, buffer_index_var
);
6969 mono_mb_emit_icon (mb
, 1);
6970 mono_mb_emit_byte (mb
, CEE_ADD
);
6971 mono_mb_emit_stloc (mb
, buffer_index_var
);
6973 // if (buffer_index >= STORE_REMSET_BUFFER_SIZE) goto slow_path;
6974 mono_mb_emit_ldloc (mb
, buffer_index_var
);
6975 mono_mb_emit_icon (mb
, STORE_REMSET_BUFFER_SIZE
);
6976 label_slow_path
= mono_mb_emit_branch (mb
, CEE_BGE
);
6978 // buffer [buffer_index] = ptr;
6979 mono_mb_emit_ldloc (mb
, buffer_var
);
6980 mono_mb_emit_ldloc (mb
, buffer_index_var
);
6981 g_assert (sizeof (gpointer
) == 4 || sizeof (gpointer
) == 8);
6982 mono_mb_emit_icon (mb
, sizeof (gpointer
) == 4 ? 2 : 3);
6983 mono_mb_emit_byte (mb
, CEE_SHL
);
6984 mono_mb_emit_byte (mb
, CEE_ADD
);
6985 mono_mb_emit_ldarg (mb
, 0);
6986 mono_mb_emit_byte (mb
, CEE_STIND_I
);
6988 // STORE_REMSET_BUFFER_INDEX = buffer_index;
6989 EMIT_TLS_ACCESS (mb
, store_remset_buffer_index_addr
, store_remset_buffer_index_addr_offset
);
6990 mono_mb_emit_ldloc (mb
, buffer_index_var
);
6991 mono_mb_emit_byte (mb
, CEE_STIND_I
);
6994 mono_mb_patch_branch (mb
, label_no_wb_1
);
6995 mono_mb_patch_branch (mb
, label_no_wb_2
);
6996 mono_mb_patch_branch (mb
, label_no_wb_3
);
6997 mono_mb_patch_branch (mb
, label_no_wb_4
);
6998 #ifndef SGEN_ALIGN_NURSERY
6999 mono_mb_patch_branch (mb
, label_no_wb_5
);
7001 mono_mb_emit_byte (mb
, CEE_RET
);
7004 mono_mb_patch_branch (mb
, label_slow_path
);
7008 mono_mb_emit_ldarg (mb
, 0);
7009 mono_mb_emit_icall (mb
, mono_gc_wbarrier_generic_nostore
);
7010 mono_mb_emit_byte (mb
, CEE_RET
);
7012 res
= mono_mb_create_method (mb
, sig
, 16);
7015 mono_loader_lock ();
7016 if (write_barrier_method
) {
7017 /* Already created */
7018 mono_free_method (res
);
7020 /* double-checked locking */
7021 mono_memory_barrier ();
7022 write_barrier_method
= res
;
7024 mono_loader_unlock ();
7026 return write_barrier_method
;
7030 mono_gc_get_description (void)
7032 return g_strdup ("sgen");
7036 mono_gc_set_desktop_mode (void)
7041 mono_gc_is_moving (void)
7047 mono_gc_is_disabled (void)
7053 mono_sgen_debug_printf (int level
, const char *format
, ...)
7057 if (level
> gc_debug_level
)
7060 va_start (ap
, format
);
7061 vfprintf (gc_debug_file
, format
, ap
);
7065 #endif /* HAVE_SGEN_GC */