/*
 * sgen-gc.c: Simple generational GC.
 *
 * Paolo Molaro (lupus@ximian.com)
 *
 * Copyright 2005-2010 Novell, Inc (http://www.novell.com)
 *
 * Thread start/stop adapted from Boehm's GC:
 * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1998 by Fergus Henderson.  All rights reserved.
 * Copyright (c) 2000-2004 by Hewlett-Packard Company.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 * Copyright 2001-2003 Ximian, Inc
 * Copyright 2003-2010 Novell, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT.  IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Important: allocation always provides zeroed memory; having to do
 * a memset after allocation is deadly for performance.
 * Memory usage at startup is currently as follows:
 * 64 KB internal space
 *
 * We should provide a small memory config with half the sizes.
 *
 * We currently try to make as few Mono assumptions as possible:
 * 1) 2-word header with no GC pointers in it (first vtable, second to store the
 * 2) gc descriptor is the second word in the vtable (first word in the class)
 * 3) 8-byte alignment is the minimum and enough (not true for special structures (SIMD), FIXME)
 * 4) there is a function to get an object's size and the number of
 *    elements in an array.
 * 5) we know the special way bounds are allocated for complex arrays
 * 6) we know about proxies and how to treat them when domains are unloaded
 *
 * Always try to keep stack usage to a minimum: no recursive behaviour
 * and no large stack allocs.
 *
 * General description.
 * Objects are initially allocated in a nursery using a fast bump-pointer technique.
 * When the nursery is full we start a nursery collection: this is performed with a
 * copying GC.
 * When the old generation is full we start a copying GC of the old generation as well:
 * this will be changed to mark&sweep with copying when fragmentation becomes too severe
 * in the future.  Maybe we'll even do both during the same collection like IMMIX.
 *
 * The things that complicate this description are:
 * *) pinned objects: we can't move them, so we need to keep track of them
 * *) no precise info of the thread stacks and registers: we need to be able to
 *    quickly find the objects that may be referenced conservatively and pin them
 *    (this makes the first issue more important)
 * *) large objects are too expensive to be dealt with using a copying GC: we handle them
 *    with mark/sweep during major collections
 * *) some objects need to not move even if they are small (interned strings, Type handles):
 *    we use mark/sweep for them, too: they are not allocated in the nursery, but inside
 *    PinnedChunks regions
 */

/*
 *) we could have a function pointer in MonoClass to implement
   customized write barriers for value types

 *) investigate the stuff needed to advance a thread to a GC-safe
   point (single-stepping, read from unmapped memory etc) and implement it.
   This would enable us to inline allocations and write barriers, for example,
   or at least parts of them, like the write barrier checks.
   We may need this also for handling precise info on stacks, even simple things
   as having uninitialized data on the stack and having to wait for the prolog
   to zero it.  Not an issue for the last frame that we scan conservatively.
   We could always not trust the value in the slots anyway.

 *) modify the jit to save info about references in stack locations:
   this can be done just for locals as a start, so that at least
   part of the stack is handled precisely.

 *) test/fix endianness issues

 *) Implement a card table as the write barrier instead of remembered
   sets?  Card tables are not easy to implement with our current
   memory layout.  We have several different kinds of major heap
   objects: Small objects in regular blocks, small objects in pinned
   chunks and LOS objects.  If we just have a pointer we have no way
   to tell which kind of object it points into, therefore we cannot
   know where its card table is.  The least we have to do to make
   this happen is to get rid of write barriers for indirect stores.

 *) Get rid of write barriers for indirect stores.  We can do this by
   telling the GC to wbarrier-register an object once we do an ldloca
   or ldelema on it, and to unregister it once it's not used anymore
   (it can only travel downwards on the stack).  The problem with
   unregistering is that it needs to happen eventually no matter
   what, even if exceptions are thrown, the thread aborts, etc.
   Rodrigo suggested that we could do only the registering part and
   let the collector find out (pessimistically) when it's safe to
   unregister, namely when the stack pointer of the thread that
   registered the object is higher than it was when the registering
   happened.  This might make for a good first implementation to get
   some data on performance.

 *) Some sort of blacklist support?  Blacklists are a concept from the
   Boehm GC: if during a conservative scan we find pointers to an
   area which we might use as heap, we mark that area as unusable, so
   pointer retention by random pinning pointers is reduced.

 *) experiment with max small object size (very small right now - 2kb,
   because it's tied to the max freelist size)

 *) add an option to mmap the whole heap in one chunk: it makes for many
   simplifications in the checks (put the nursery at the top and just use a single
   check for inclusion/exclusion): the issue this has is that on 32 bit systems it's
   not flexible (too much of the address space may be used by default or we can't
   increase the heap as needed) and we'd need a race-free mechanism to return memory
   back to the system (mprotect(PROT_NONE) will still keep the memory allocated if it
   was written to, munmap is needed, but the following mmap may not find the same segment

 *) memzero the major fragments after restarting the world and optionally a smaller

 *) investigate having fragment zeroing threads

 *) separate locks for finalization and other minor stuff to reduce

 *) try a different copying order to improve memory locality

 *) a thread abort after a store but before the write barrier will
   prevent the write barrier from executing

 *) specialized dynamically generated markers/copiers

 *) Dynamically adjust TLAB size to the number of threads.  If we have
   too many threads that do allocation, we might need smaller TLABs,
   and we might get better performance with larger TLABs if we only
   have a handful of threads.  We could sum up the space left in all
   assigned TLABs and if that's more than some percentage of the
   nursery size, reduce the TLAB size.

 *) Explore placing unreachable objects on unused nursery memory.
   Instead of memsetting a region to zero, place an int[] covering it.
   A good place to start is add_nursery_frag.  The tricky thing here is
   placing those objects atomically outside of a collection.
 */

#include <semaphore.h>

#define _XOPEN_SOURCE

#include "metadata/metadata-internals.h"
#include "metadata/class-internals.h"
#include "metadata/gc-internal.h"
#include "metadata/object-internals.h"
#include "metadata/threads.h"
#include "metadata/sgen-gc.h"
#include "metadata/sgen-archdep.h"
#include "metadata/mono-gc.h"
#include "metadata/method-builder.h"
#include "metadata/profiler-private.h"
#include "metadata/monitor.h"
#include "metadata/threadpool-internals.h"
#include "metadata/mempool-internals.h"
#include "metadata/marshal.h"
#include "utils/mono-mmap.h"
#include "utils/mono-time.h"
#include "utils/mono-semaphore.h"
#include "utils/mono-counters.h"

#include <mono/utils/memcheck.h>

#if defined(__MACH__)
#include "utils/mach-support.h"
#endif

#define OPDEF(a,b,c,d,e,f,g,h,i,j) \
	a = i,

enum {
#include "mono/cil/opcode.def"
	CEE_LAST
};

#undef OPDEF

#undef pthread_create
#undef pthread_detach

/*
 * ######################################################################
 * ######## Types and constants used by the GC.
 * ######################################################################
 */

static int gc_initialized = 0;
/* If set, do a minor collection before every allocation */
static gboolean collect_before_allocs = FALSE;
/* If set, do a heap consistency check before each minor collection */
static gboolean consistency_check_at_minor_collection = FALSE;
/* If set, check that there are no references to the domain left at domain unload */
static gboolean xdomain_checks = FALSE;
/* If not null, dump the heap after each collection into this file */
static FILE *heap_dump_file = NULL;
/* If set, mark stacks conservatively, even if precise marking is possible */
static gboolean conservative_stack_mark = TRUE;
/* If set, do a plausibility check on the scan_starts before and after
   each collection */
static gboolean do_scan_starts_check = FALSE;

#ifdef HEAVY_STATISTICS
static long long stat_objects_alloced = 0;
static long long stat_bytes_alloced = 0;
long long stat_objects_alloced_degraded = 0;
long long stat_bytes_alloced_degraded = 0;
static long long stat_bytes_alloced_los = 0;

long long stat_copy_object_called_nursery = 0;
long long stat_objects_copied_nursery = 0;
long long stat_copy_object_called_major = 0;
long long stat_objects_copied_major = 0;

long long stat_scan_object_called_nursery = 0;
long long stat_scan_object_called_major = 0;

long long stat_nursery_copy_object_failed_from_space = 0;
long long stat_nursery_copy_object_failed_forwarded = 0;
long long stat_nursery_copy_object_failed_pinned = 0;

static long long stat_store_remsets = 0;
static long long stat_store_remsets_unique = 0;
static long long stat_saved_remsets_1 = 0;
static long long stat_saved_remsets_2 = 0;
static long long stat_global_remsets_added = 0;
static long long stat_global_remsets_readded = 0;
static long long stat_global_remsets_processed = 0;
static long long stat_global_remsets_discarded = 0;

static long long stat_wasted_fragments_used = 0;
static long long stat_wasted_fragments_bytes = 0;

static int stat_wbarrier_set_field = 0;
static int stat_wbarrier_set_arrayref = 0;
static int stat_wbarrier_arrayref_copy = 0;
static int stat_wbarrier_generic_store = 0;
static int stat_wbarrier_generic_store_remset = 0;
static int stat_wbarrier_set_root = 0;
static int stat_wbarrier_value_copy = 0;
static int stat_wbarrier_object_copy = 0;
#endif

static long long time_minor_pre_collection_fragment_clear = 0;
static long long time_minor_pinning = 0;
static long long time_minor_scan_remsets = 0;
static long long time_minor_scan_pinned = 0;
static long long time_minor_scan_registered_roots = 0;
static long long time_minor_scan_thread_data = 0;
static long long time_minor_finish_gray_stack = 0;
static long long time_minor_fragment_creation = 0;

static long long time_major_pre_collection_fragment_clear = 0;
static long long time_major_pinning = 0;
static long long time_major_scan_pinned = 0;
static long long time_major_scan_registered_roots = 0;
static long long time_major_scan_thread_data = 0;
static long long time_major_scan_alloc_pinned = 0;
static long long time_major_scan_finalized = 0;
static long long time_major_scan_big_objects = 0;
static long long time_major_finish_gray_stack = 0;
static long long time_major_free_bigobjs = 0;
static long long time_major_los_sweep = 0;
static long long time_major_sweep = 0;
static long long time_major_fragment_creation = 0;

#define DEBUG(level,a) do {if (G_UNLIKELY ((level) <= SGEN_MAX_DEBUG_LEVEL && (level) <= gc_debug_level)) a;} while (0)

static int gc_debug_level = 0;
static FILE* gc_debug_file;

static void
mono_gc_flush_info (void)
{
	fflush (gc_debug_file);
}

/*
 * Define this to allow the user to change the nursery size by
 * specifying its value in the MONO_GC_PARAMS environment
 * variable. See mono_gc_base_init for details.
 */
#define USER_CONFIG 1

#define TV_DECLARE(name) gint64 name
#define TV_GETTIME(tv) tv = mono_100ns_ticks ()
#define TV_ELAPSED(start,end) (int)((end-start) / 10)
#define TV_ELAPSED_MS(start,end) ((TV_ELAPSED((start),(end)) + 500) / 1000)

#define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))
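
/*
 * Illustrative note (not part of the original code): ALIGN_TO rounds
 * "val" up to the next multiple of "align", which must be a power of
 * two.  Two worked examples with an 8-byte alignment:
 *
 *	ALIGN_TO (13, 8) -> (13 + 7) & ~7 -> 20 & ~7 -> 16
 *	ALIGN_TO (16, 8) -> (16 + 7) & ~7 -> 23 & ~7 -> 16
 *
 * Already-aligned values are unchanged; everything else moves up.
 */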

/* The method used to clear the nursery */
/* Clearing at nursery collections is the safest, but has bad interactions with caches.
 * Clearing at TLAB creation is much faster, but more complex and it might expose hard
 * to find bugs.
 */
typedef enum {
	CLEAR_AT_GC,
	CLEAR_AT_TLAB_CREATION
} NurseryClearPolicy;

static NurseryClearPolicy nursery_clear_policy = CLEAR_AT_TLAB_CREATION;

/*
 * The young generation is divided into fragments. This is because
 * we can hand one fragment to a thread for lock-less fast alloc and
 * because the young generation ends up fragmented anyway by pinned objects.
 * Once a collection is done, a list of fragments is created. When doing
 * thread-local alloc we use smallish nurseries so we allow new threads to
 * allocate memory from gen0 without triggering a collection. Threads that
 * are found to allocate lots of memory are given bigger fragments. This
 * should make the finalizer thread use little nursery memory after a while.
 * We should start assigning threads very small fragments: if there are many
 * threads the nursery will be full of reserved space that the threads may not
 * use at all, slowing down allocation speed.
 * Thread-local allocation is done from areas of memory Hotspot calls Thread Local
 * Allocation Buffers (TLABs).
 */
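
/*
 * Illustrative sketch (not part of the original code): the TLAB fast
 * path is plain bump-pointer allocation.  The names "tlab_alloc_sketch"
 * and "alloc_slow_path" are hypothetical; the real fast path lives in
 * mono_gc_alloc_obj and the generated managed allocators.
 *
 *	static void*
 *	tlab_alloc_sketch (size_t size)
 *	{
 *		char *p = TLAB_NEXT;
 *		char *new_next = p + ALIGN_UP (size);
 *		if (new_next <= TLAB_REAL_END) {
 *			TLAB_NEXT = new_next;	// bump the per-thread cursor;
 *			return p;		// no lock needed, the TLAB
 *		}				// belongs to this thread only
 *		return alloc_slow_path (size);	// refill from a nursery fragment
 *	}
 */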

typedef struct _Fragment Fragment;

struct _Fragment {
	Fragment *next;
	char *fragment_start;
	char *fragment_limit; /* the current soft limit for allocation */
	char *fragment_end;
};

/* the runtime can register areas of memory as roots: we keep two lists of roots,
 * a pinned root set for conservatively scanned roots and a normal one for
 * precisely scanned roots (currently implemented as a single list).
 */
typedef struct _RootRecord RootRecord;

struct _RootRecord {
	RootRecord *next;
	char *start_root;
	char *end_root;
	mword root_desc;
};

/*
 * We're never actually using the first element. It's always set to
 * NULL to simplify the elimination of consecutive duplicate entries.
 */
#define STORE_REMSET_BUFFER_SIZE	1024

typedef struct _GenericStoreRememberedSet GenericStoreRememberedSet;
struct _GenericStoreRememberedSet {
	GenericStoreRememberedSet *next;
	/* We need one entry less because the first entry of store
	   remset buffers is always a dummy and we don't copy it. */
	gpointer data [STORE_REMSET_BUFFER_SIZE - 1];
};
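
/*
 * Illustrative sketch (not part of the original code): because slot 0
 * holds a NULL dummy and no real store location is ever NULL, the
 * "same as the previous store?" check needs no first-entry special
 * case.  "buffer" and "index" are hypothetical names:
 *
 *	if (buffer [index] != ptr)	// slot 0 is NULL, so the first
 *		buffer [++index] = ptr;	// store always differs and is kept;
 *					// consecutive duplicates are dropped
 */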

/* we have 4 possible values in the low 2 bits */
enum {
	REMSET_LOCATION, /* just a pointer to the exact location */
	REMSET_RANGE,    /* range of pointer fields */
	REMSET_OBJECT,   /* mark all the object for scanning */
	REMSET_VTYPE,    /* a valuetype array described by a gc descriptor and a count */
	REMSET_TYPE_MASK = 0x3
};
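
/*
 * Illustrative note (not part of the original code): remset entries
 * are word-aligned addresses, so their low 2 bits are free to carry
 * one of the type tags above:
 *
 *	mword entry = (mword)ptr | REMSET_RANGE;		 // tag a location
 *	int type = entry & REMSET_TYPE_MASK;			 // recover the tag
 *	void *loc = (void*)(entry & ~(mword)REMSET_TYPE_MASK);	 // recover the address
 */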

#ifdef HAVE_KW_THREAD
static __thread RememberedSet *remembered_set MONO_TLS_FAST;
#endif
static pthread_key_t remembered_set_key;
static RememberedSet *global_remset;
static RememberedSet *freed_thread_remsets;
static GenericStoreRememberedSet *generic_store_remsets = NULL;

/* A two-slot cache for recently inserted remsets */
static gpointer global_remset_cache [2];

/* FIXME: later choose a size that takes into account the RememberedSet struct
 * and doesn't waste any alloc padding space.
 */
#define DEFAULT_REMSET_SIZE 1024
static RememberedSet* alloc_remset (int size, gpointer id);

#define object_is_forwarded	SGEN_OBJECT_IS_FORWARDED
#define object_is_pinned	SGEN_OBJECT_IS_PINNED
#define pin_object		SGEN_PIN_OBJECT
#define unpin_object		SGEN_UNPIN_OBJECT

#define ptr_in_nursery(p)	(SGEN_PTR_IN_NURSERY ((p), DEFAULT_NURSERY_BITS, nursery_start, nursery_real_end))

#define LOAD_VTABLE	SGEN_LOAD_VTABLE

static const char*
safe_name (void* obj)
{
	MonoVTable *vt = (MonoVTable*)LOAD_VTABLE (obj);
	return vt->klass->name;
}

#define safe_object_get_size	mono_sgen_safe_object_get_size

/*
 * ######################################################################
 * ######## Global data.
 * ######################################################################
 */
static LOCK_DECLARE (gc_mutex);

static int gc_disabled = 0;
static int num_minor_gcs = 0;
static int num_major_gcs = 0;

#ifdef USER_CONFIG

/* good sizes are 512KB-1MB: larger ones increase a lot memzeroing time */
#define DEFAULT_NURSERY_SIZE (default_nursery_size)
static int default_nursery_size = (1 << 22);
#ifdef SGEN_ALIGN_NURSERY
/* The number of trailing 0 bits in DEFAULT_NURSERY_SIZE */
#define DEFAULT_NURSERY_BITS (default_nursery_bits)
static int default_nursery_bits = 22;
#endif

#else

#define DEFAULT_NURSERY_SIZE (4*1024*1024)
#ifdef SGEN_ALIGN_NURSERY
#define DEFAULT_NURSERY_BITS 22
#endif

#endif

#ifndef SGEN_ALIGN_NURSERY
#define DEFAULT_NURSERY_BITS -1
#endif

#define MIN_MINOR_COLLECTION_ALLOWANCE	(DEFAULT_NURSERY_SIZE * 4)

#define SCAN_START_SIZE	SGEN_SCAN_START_SIZE

/* the minimum size of a fragment that we consider useful for allocation */
#define FRAGMENT_MIN_SIZE (512)

static mword pagesize = 4096;
static mword nursery_size;
static int degraded_mode = 0;

static mword total_alloc = 0;
/* use this to tune when to do a major/minor collection */
static mword memory_pressure = 0;
static mword minor_collection_allowance;
static int minor_collection_sections_alloced = 0;

static GCMemSection *nursery_section = NULL;
static mword lowest_heap_address = ~(mword)0;
static mword highest_heap_address = 0;

static LOCK_DECLARE (interruption_mutex);

#ifdef SGEN_PARALLEL_MARK
static LOCK_DECLARE (global_remset_mutex);
#endif

typedef struct _FinalizeEntry FinalizeEntry;
struct _FinalizeEntry {
	/* ... */
};

typedef struct _FinalizeEntryHashTable FinalizeEntryHashTable;
struct _FinalizeEntryHashTable {
	FinalizeEntry **table;
	/* ... */
};

typedef struct _DisappearingLink DisappearingLink;
struct _DisappearingLink {
	DisappearingLink *next;
	/* ... */
};

typedef struct _DisappearingLinkHashTable DisappearingLinkHashTable;
struct _DisappearingLinkHashTable {
	DisappearingLink **table;
	/* ... */
};

typedef struct _EphemeronLinkNode EphemeronLinkNode;

struct _EphemeronLinkNode {
	EphemeronLinkNode *next;
	/* ... */
};

int current_collection_generation = -1;

/*
 * The link pointer is hidden by negating each bit.  We use the lowest
 * bit of the link (before negation) to store whether it needs
 * resurrection tracking.
 */
#define HIDE_POINTER(p,t)	((gpointer)(~((gulong)(p)|((t)?1:0))))
#define REVEAL_POINTER(p)	((gpointer)((~(gulong)(p))&~3L))

#define DISLINK_OBJECT(d)	(REVEAL_POINTER (*(d)->link))
#define DISLINK_TRACK(d)	((~(gulong)(*(d)->link)) & 1)
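
/*
 * Illustrative note (not part of the original code): hiding keeps the
 * conservative scanner from mistaking the link for a real pointer.
 * For a word-aligned object at 0x1000 with tracking enabled:
 *
 *	HIDE_POINTER (0x1000, 1)  ->  ~(0x1000 | 1)   ->  ~0x1001
 *	REVEAL_POINTER (~0x1001)  ->  (~~0x1001) & ~3 ->  0x1000
 *	DISLINK_TRACK             ->  (~~0x1001) & 1  ->  1
 *
 * The negated value doesn't look like a heap address, so it is never
 * conservatively pinned.
 */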

/*
 * The finalizable hash has the object as the key; the
 * disappearing_link hash has the link address as the key.
 */
static FinalizeEntryHashTable minor_finalizable_hash;
static FinalizeEntryHashTable major_finalizable_hash;
/* objects that are ready to be finalized */
static FinalizeEntry *fin_ready_list = NULL;
static FinalizeEntry *critical_fin_list = NULL;

static DisappearingLinkHashTable minor_disappearing_link_hash;
static DisappearingLinkHashTable major_disappearing_link_hash;

static EphemeronLinkNode *ephemeron_list;

static int num_ready_finalizers = 0;
static int no_finalize = 0;

enum {
	ROOT_TYPE_NORMAL = 0, /* "normal" roots */
	ROOT_TYPE_PINNED = 1, /* roots without a GC descriptor */
	ROOT_TYPE_WBARRIER = 2, /* roots with a write barrier */
	ROOT_TYPE_NUM
};

/* registered roots: the key to the hash is the root start address */
/*
 * Different kinds of roots are kept separate to speed up pin_from_roots () for example.
 */
static RootRecord **roots_hash [ROOT_TYPE_NUM] = { NULL, NULL, NULL };
static int roots_hash_size [ROOT_TYPE_NUM] = { 0, 0, 0 };
static mword roots_size = 0; /* amount of memory in the root set */
static int num_roots_entries [ROOT_TYPE_NUM] = { 0, 0, 0 };

/*
 * The current allocation cursors
 * We allocate objects in the nursery.
 * The nursery is the area between nursery_start and nursery_real_end.
 * Allocation is done from a Thread Local Allocation Buffer (TLAB). TLABs are allocated
 * from nursery fragments.
 * tlab_next is the pointer to the space inside the TLAB where the next object will
 * be allocated.
 * tlab_temp_end is the pointer to the end of the temporary space reserved for
 * the allocation: it allows us to set the scan starts at reasonable intervals.
 * tlab_real_end points to the end of the TLAB.
 * nursery_frag_real_end points to the end of the currently used nursery fragment.
 * nursery_first_pinned_start points to the start of the first pinned object in the nursery
 * nursery_last_pinned_end points to the end of the last pinned object in the nursery
 * At the next allocation, the area of the nursery where objects can be present is
 * between MIN(nursery_first_pinned_start, first_fragment_start) and
 * MAX(nursery_last_pinned_end, nursery_frag_real_end)
 */
static char *nursery_start = NULL;

#ifdef HAVE_KW_THREAD
#define TLAB_ACCESS_INIT
#define TLAB_START	tlab_start
#define TLAB_NEXT	tlab_next
#define TLAB_TEMP_END	tlab_temp_end
#define TLAB_REAL_END	tlab_real_end
#define REMEMBERED_SET	remembered_set
#define STORE_REMSET_BUFFER	store_remset_buffer
#define STORE_REMSET_BUFFER_INDEX	store_remset_buffer_index
#define IN_CRITICAL_REGION thread_info->in_critical_region
#else
static pthread_key_t thread_info_key;
#define TLAB_ACCESS_INIT	SgenThreadInfo *__thread_info__ = pthread_getspecific (thread_info_key)
#define TLAB_START	(__thread_info__->tlab_start)
#define TLAB_NEXT	(__thread_info__->tlab_next)
#define TLAB_TEMP_END	(__thread_info__->tlab_temp_end)
#define TLAB_REAL_END	(__thread_info__->tlab_real_end)
#define REMEMBERED_SET	(__thread_info__->remset)
#define STORE_REMSET_BUFFER	(__thread_info__->store_remset_buffer)
#define STORE_REMSET_BUFFER_INDEX	(__thread_info__->store_remset_buffer_index)
#define IN_CRITICAL_REGION (__thread_info__->in_critical_region)
#endif

/* we use the memory barrier only to prevent compiler reordering (a memory constraint may be enough) */
#define ENTER_CRITICAL_REGION do {IN_CRITICAL_REGION = 1;mono_memory_barrier ();} while (0)
#define EXIT_CRITICAL_REGION  do {IN_CRITICAL_REGION = 0;mono_memory_barrier ();} while (0)
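
/*
 * Illustrative note (an assumption about intent, not code from this
 * file): the flag marks the inlined allocation fast path so that a
 * thread suspended in the middle of it can be detected and restarted
 * rather than scanned in an inconsistent state, e.g.:
 *
 *	ENTER_CRITICAL_REGION;
 *	// bump TLAB_NEXT, install the vtable pointer ...
 *	EXIT_CRITICAL_REGION;
 */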

/*
 * FIXME: What is faster, a TLS variable pointing to a structure, or separate TLS
 * variables for next+temp_end ?
 */
#ifdef HAVE_KW_THREAD
static __thread SgenThreadInfo *thread_info;
static __thread char *tlab_start;
static __thread char *tlab_next;
static __thread char *tlab_temp_end;
static __thread char *tlab_real_end;
static __thread gpointer *store_remset_buffer;
static __thread long store_remset_buffer_index;
/* Used by the managed allocator/wbarrier */
static __thread char **tlab_next_addr;
static __thread char *stack_end;
static __thread long *store_remset_buffer_index_addr;
#endif

static char *nursery_next = NULL;
static char *nursery_frag_real_end = NULL;
static char *nursery_real_end = NULL;
static char *nursery_last_pinned_end = NULL;

/* The size of a TLAB */
/* The bigger the value, the less often we have to go to the slow path to allocate a new
 * one, but the more space is wasted by threads not allocating much memory.
 *
 * FIXME: Make this self-tuning for each thread.
 */
static guint32 tlab_size = (1024 * 4);

/* How much wasted space we tolerate in the current fragment when allocating a new TLAB */
#define MAX_NURSERY_TLAB_WASTE 512

/* fragments that are free and ready to be used for allocation */
static Fragment *nursery_fragments = NULL;
/* freelist of fragment structures */
static Fragment *fragment_freelist = NULL;

#define MAX_SMALL_OBJ_SIZE	SGEN_MAX_SMALL_OBJ_SIZE

/* Functions supplied by the runtime to be called by the GC */
static MonoGCCallbacks gc_callbacks;

#define ALLOC_ALIGN		SGEN_ALLOC_ALIGN
#define ALLOC_ALIGN_BITS	SGEN_ALLOC_ALIGN_BITS

#define ALIGN_UP		SGEN_ALIGN_UP

#define MOVED_OBJECTS_NUM 64
static void *moved_objects [MOVED_OBJECTS_NUM];
static int moved_objects_idx = 0;

/*
 * ######################################################################
 * ######## Macros and function declarations.
 * ######################################################################
 */

#define ADDR_IN_HEAP_BOUNDARIES(addr) ((addr) >= lowest_heap_address && (addr) < highest_heap_address)

static void*
align_pointer (void *ptr)
{
	mword p = (mword)ptr;
	p += sizeof (gpointer) - 1;
	p &= ~ (sizeof (gpointer) - 1);
	return (void*)p;
}

typedef SgenGrayQueue GrayQueue;

typedef void (*CopyOrMarkObjectFunc) (void**, GrayQueue*);
typedef char* (*ScanObjectFunc) (char*, GrayQueue*);

/* forward declarations */
static int stop_world (void);
static int restart_world (void);
static void scan_thread_data (void *start_nursery, void *end_nursery, gboolean precise);
static void scan_from_remsets (void *start_nursery, void *end_nursery, GrayQueue *queue);
static void scan_from_registered_roots (CopyOrMarkObjectFunc copy_func, char *addr_start, char *addr_end, int root_type, GrayQueue *queue);
static void scan_finalizer_entries (CopyOrMarkObjectFunc copy_func, FinalizeEntry *list, GrayQueue *queue);
static void find_pinning_ref_from_thread (char *obj, size_t size);
static void update_current_thread_stack (void *start);
static void finalize_in_range (CopyOrMarkObjectFunc copy_func, char *start, char *end, int generation, GrayQueue *queue);
static void add_or_remove_disappearing_link (MonoObject *obj, void **link, gboolean track, int generation);
static void null_link_in_range (CopyOrMarkObjectFunc copy_func, char *start, char *end, int generation, GrayQueue *queue);
static void null_links_for_domain (MonoDomain *domain, int generation);
static gboolean search_fragment_for_size (size_t size);
static int search_fragment_for_size_range (size_t desired_size, size_t minimum_size);
static void clear_nursery_fragments (char *next);
static void pin_from_roots (void *start_nursery, void *end_nursery);
static int pin_objects_from_addresses (GCMemSection *section, void **start, void **end, void *start_nursery, void *end_nursery, GrayQueue *queue);
static void optimize_pin_queue (int start_slot);
static void clear_remsets (void);
static void clear_tlabs (void);
static void sort_addresses (void **array, int size);
static void drain_gray_stack (GrayQueue *queue);
static void finish_gray_stack (char *start_addr, char *end_addr, int generation, GrayQueue *queue);
static gboolean need_major_collection (void);
static void major_collection (const char *reason);

static void mono_gc_register_disappearing_link (MonoObject *obj, void **link, gboolean track);

void describe_ptr (char *ptr);
void check_object (char *start);

static void check_consistency (void);
static void check_major_refs (void);
static void check_scan_starts (void);
static void check_for_xdomain_refs (void);
static void dump_heap (const char *type, int num, const char *reason);

void mono_gc_scan_for_specific_ref (MonoObject *key);

static void init_stats (void);

static int mark_ephemerons_in_range (CopyOrMarkObjectFunc copy_func, char *start, char *end, GrayQueue *queue);
static void clear_unreachable_ephemerons (CopyOrMarkObjectFunc copy_func, char *start, char *end, GrayQueue *queue);
static void null_ephemerons_for_domain (MonoDomain *domain);

SgenMajorCollector major;

#include "sgen-protocol.c"
#include "sgen-pinning.c"
#include "sgen-pinning-stats.c"
#include "sgen-gray.c"
#include "sgen-workers.c"
#include "sgen-los.c"

/* Root bitmap descriptors are simpler: the lower three bits describe the type
 * and we either have 30/62 bitmap bits or nibble-based run-length,
 * or a complex descriptor, or a user defined marker function.
 */
enum {
	ROOT_DESC_CONSERVATIVE, /* 0, so matches NULL value */
	ROOT_DESC_BITMAP,
	ROOT_DESC_RUN_LEN,
	ROOT_DESC_COMPLEX,
	ROOT_DESC_USER,
	ROOT_DESC_TYPE_MASK = 0x7,
	ROOT_DESC_TYPE_SHIFT = 3,
};

#define MAKE_ROOT_DESC(type,val) ((type) | ((val) << ROOT_DESC_TYPE_SHIFT))

#define MAX_USER_DESCRIPTORS 16

static gsize* complex_descriptors = NULL;
static int complex_descriptors_size = 0;
static int complex_descriptors_next = 0;
static MonoGCRootMarkFunc user_descriptors [MAX_USER_DESCRIPTORS];
static int user_descriptors_next = 0;

static int
alloc_complex_descriptor (gsize *bitmap, int numbits)
{
	int nwords, res, i, j;

	numbits = ALIGN_TO (numbits, GC_BITS_PER_WORD);
	nwords = numbits / GC_BITS_PER_WORD + 1;

	LOCK_GC;
	res = complex_descriptors_next;
	/* linear search, so we don't have duplicates with domain load/unload
	 * this should not be performance critical or we'd have bigger issues
	 * (the number and size of complex descriptors should be small).
	 */
	for (i = 0; i < complex_descriptors_next; ) {
		if (complex_descriptors [i] == nwords) {
			int found = TRUE;
			for (j = 0; j < nwords - 1; ++j) {
				if (complex_descriptors [i + 1 + j] != bitmap [j]) {
					found = FALSE;
					break;
				}
			}
			if (found) {
				UNLOCK_GC;
				return i;
			}
		}
		i += complex_descriptors [i];
	}
	if (complex_descriptors_next + nwords > complex_descriptors_size) {
		int new_size = complex_descriptors_size * 2 + nwords;
		complex_descriptors = g_realloc (complex_descriptors, new_size * sizeof (gsize));
		complex_descriptors_size = new_size;
	}
	DEBUG (6, fprintf (gc_debug_file, "Complex descriptor %d, size: %d (total desc memory: %d)\n", res, nwords, complex_descriptors_size));
	complex_descriptors_next += nwords;
	complex_descriptors [res] = nwords;
	for (i = 0; i < nwords - 1; ++i) {
		complex_descriptors [res + 1 + i] = bitmap [i];
		DEBUG (6, fprintf (gc_debug_file, "\tvalue: %p\n", (void*)complex_descriptors [res + 1 + i]));
	}
	UNLOCK_GC;
	return res;
}

gsize*
mono_sgen_get_complex_descriptor (GCVTable *vt)
{
	return complex_descriptors + (vt->desc >> LOW_TYPE_BITS);
}

/*
 * Descriptor builders.
 */
void*
mono_gc_make_descr_for_string (gsize *bitmap, int numbits)
{
	return (void*) DESC_TYPE_RUN_LENGTH;
}

void*
mono_gc_make_descr_for_object (gsize *bitmap, int numbits, size_t obj_size)
{
	int first_set = -1, num_set = 0, last_set = -1, i;
	mword desc = 0;
	size_t stored_size = obj_size;
	for (i = 0; i < numbits; ++i) {
		if (bitmap [i / GC_BITS_PER_WORD] & ((gsize)1 << (i % GC_BITS_PER_WORD))) {
			if (first_set < 0)
				first_set = i;
			last_set = i;
			num_set++;
		}
	}
	/*
	 * We don't encode the size of types that don't contain
	 * references because they might not be aligned, i.e. the
	 * bottom two bits might be set, which would clash with the
	 * bits we need to encode the descriptor type.  Since we don't
	 * use the encoded size to skip objects, other than for
	 * processing remsets, in which case only the positions of
	 * references are relevant, this is not a problem.
	 */
	if (first_set < 0)
		return (void*)DESC_TYPE_RUN_LENGTH;
	g_assert (!(stored_size & 0x3));
	if (stored_size <= MAX_SMALL_OBJ_SIZE) {
		/* check run-length encoding first: one byte offset, one byte number of pointers
		 * on 64 bit archs, we can have 3 runs, just one on 32.
		 * It may be better to use nibbles.
		 */
		if (first_set < 0) {
			desc = DESC_TYPE_RUN_LENGTH | (stored_size << 1);
			DEBUG (6, fprintf (gc_debug_file, "Ptrfree descriptor %p, size: %zd\n", (void*)desc, stored_size));
			return (void*) desc;
		} else if (first_set < 256 && num_set < 256 && (first_set + num_set == last_set + 1)) {
			desc = DESC_TYPE_RUN_LENGTH | (stored_size << 1) | (first_set << 16) | (num_set << 24);
			DEBUG (6, fprintf (gc_debug_file, "Runlen descriptor %p, size: %zd, first set: %d, num set: %d\n", (void*)desc, stored_size, first_set, num_set));
			return (void*) desc;
		}
		/* we know the 2-word header is ptr-free */
		if (last_set < SMALL_BITMAP_SIZE + OBJECT_HEADER_WORDS) {
			desc = DESC_TYPE_SMALL_BITMAP | (stored_size << 1) | ((*bitmap >> OBJECT_HEADER_WORDS) << SMALL_BITMAP_SHIFT);
			DEBUG (6, fprintf (gc_debug_file, "Smallbitmap descriptor %p, size: %zd, last set: %d\n", (void*)desc, stored_size, last_set));
			return (void*) desc;
		}
	}
	/* we know the 2-word header is ptr-free */
	if (last_set < LARGE_BITMAP_SIZE + OBJECT_HEADER_WORDS) {
		desc = DESC_TYPE_LARGE_BITMAP | ((*bitmap >> OBJECT_HEADER_WORDS) << LOW_TYPE_BITS);
		DEBUG (6, fprintf (gc_debug_file, "Largebitmap descriptor %p, size: %zd, last set: %d\n", (void*)desc, stored_size, last_set));
		return (void*) desc;
	}
	/* it's a complex object ... */
	desc = DESC_TYPE_COMPLEX | (alloc_complex_descriptor (bitmap, last_set + 1) << LOW_TYPE_BITS);
	return (void*) desc;
}
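
/*
 * Illustrative example (not part of the original code): a 24-byte
 * object with references at word offsets 2 and 3 forms a single run
 * (first_set = 2, num_set = 2, last_set = 3), so it fits the one-word
 * run-length encoding above:
 *
 *	desc = DESC_TYPE_RUN_LENGTH | (24 << 1) | (2 << 16) | (2 << 24);
 *
 * Low bits: type tag; bits 1-15: stored size; bits 16-23: offset of
 * the first reference; bits 24-31: length of the run.
 */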

/* If the array holds references, numbits == 1 and the first bit is set in elem_bitmap */
void*
mono_gc_make_descr_for_array (int vector, gsize *elem_bitmap, int numbits, size_t elem_size)
{
	int first_set = -1, num_set = 0, last_set = -1, i;
	mword desc = vector ? DESC_TYPE_VECTOR : DESC_TYPE_ARRAY;
	for (i = 0; i < numbits; ++i) {
		if (elem_bitmap [i / GC_BITS_PER_WORD] & ((gsize)1 << (i % GC_BITS_PER_WORD))) {
			if (first_set < 0)
				first_set = i;
			last_set = i;
			num_set++;
		}
	}
	/* See comment at the definition of DESC_TYPE_RUN_LENGTH. */
	if (first_set < 0)
		return (void*)DESC_TYPE_RUN_LENGTH;
	if (elem_size <= MAX_ELEMENT_SIZE) {
		desc |= elem_size << VECTOR_ELSIZE_SHIFT;
		if (first_set < 0) {
			return (void*)(desc | VECTOR_SUBTYPE_PTRFREE);
		}
		/* Note: we also handle structs with just ref fields */
		if (num_set * sizeof (gpointer) == elem_size) {
			return (void*)(desc | VECTOR_SUBTYPE_REFS | ((gssize)(-1) << 16));
		}
		/* FIXME: try run-len first */
		/* Note: we can't skip the object header here, because it's not present */
		if (last_set <= SMALL_BITMAP_SIZE) {
			return (void*)(desc | VECTOR_SUBTYPE_BITMAP | (*elem_bitmap << 16));
		}
	}
	/* it's an array of complex structs ... */
	desc = DESC_TYPE_COMPLEX_ARR;
	desc |= alloc_complex_descriptor (elem_bitmap, last_set + 1) << LOW_TYPE_BITS;
	return (void*) desc;
}

/* Return the bitmap encoded by a descriptor */
gsize*
mono_gc_get_bitmap_for_descr (void *descr, int *numbits)
{
	mword d = (mword)descr;
	gsize *bitmap;
	int i;

	switch (d & 0x7) {
	case DESC_TYPE_RUN_LENGTH: {
		int first_set = (d >> 16) & 0xff;
		int num_set = (d >> 24) & 0xff;

		bitmap = g_new0 (gsize, (first_set + num_set + 7) / 8);

		for (i = first_set; i < first_set + num_set; ++i)
			bitmap [i / GC_BITS_PER_WORD] |= ((gsize)1 << (i % GC_BITS_PER_WORD));

		*numbits = first_set + num_set;

		return bitmap;
	}
	case DESC_TYPE_SMALL_BITMAP:
		bitmap = g_new0 (gsize, 1);

		bitmap [0] = (d >> SMALL_BITMAP_SHIFT) << OBJECT_HEADER_WORDS;

		*numbits = GC_BITS_PER_WORD;

		return bitmap;
	default:
		g_assert_not_reached ();
	}
}

static gboolean
is_xdomain_ref_allowed (gpointer *ptr, char *obj, MonoDomain *domain)
{
	MonoObject *o = (MonoObject*)(obj);
	MonoObject *ref = (MonoObject*)*(ptr);
	int offset = (char*)(ptr) - (char*)o;

	if (o->vtable->klass == mono_defaults.thread_class && offset == G_STRUCT_OFFSET (MonoThread, internal_thread))
		return TRUE;
	if (o->vtable->klass == mono_defaults.internal_thread_class && offset == G_STRUCT_OFFSET (MonoInternalThread, current_appcontext))
		return TRUE;
	if (mono_class_has_parent (o->vtable->klass, mono_defaults.real_proxy_class) &&
			offset == G_STRUCT_OFFSET (MonoRealProxy, unwrapped_server))
		return TRUE;
	/* Thread.cached_culture_info */
	if (!strcmp (ref->vtable->klass->name_space, "System.Globalization") &&
			!strcmp (ref->vtable->klass->name, "CultureInfo") &&
			!strcmp (o->vtable->klass->name_space, "System") &&
			!strcmp (o->vtable->klass->name, "Object[]"))
		return TRUE;
	/*
	 *  at System.IO.MemoryStream.InternalConstructor (byte[],int,int,bool,bool) [0x0004d] in /home/schani/Work/novell/trunk/mcs/class/corlib/System.IO/MemoryStream.cs:121
	 *  at System.IO.MemoryStream..ctor (byte[]) [0x00017] in /home/schani/Work/novell/trunk/mcs/class/corlib/System.IO/MemoryStream.cs:81
	 *  at (wrapper remoting-invoke-with-check) System.IO.MemoryStream..ctor (byte[]) <IL 0x00020, 0xffffffff>
	 *  at System.Runtime.Remoting.Messaging.CADMethodCallMessage.GetArguments () [0x0000d] in /home/schani/Work/novell/trunk/mcs/class/corlib/System.Runtime.Remoting.Messaging/CADMessages.cs:327
	 *  at System.Runtime.Remoting.Messaging.MethodCall..ctor (System.Runtime.Remoting.Messaging.CADMethodCallMessage) [0x00017] in /home/schani/Work/novell/trunk/mcs/class/corlib/System.Runtime.Remoting.Messaging/MethodCall.cs:87
	 *  at System.AppDomain.ProcessMessageInDomain (byte[],System.Runtime.Remoting.Messaging.CADMethodCallMessage,byte[]&,System.Runtime.Remoting.Messaging.CADMethodReturnMessage&) [0x00018] in /home/schani/Work/novell/trunk/mcs/class/corlib/System/AppDomain.cs:1213
	 *  at (wrapper remoting-invoke-with-check) System.AppDomain.ProcessMessageInDomain (byte[],System.Runtime.Remoting.Messaging.CADMethodCallMessage,byte[]&,System.Runtime.Remoting.Messaging.CADMethodReturnMessage&) <IL 0x0003d, 0xffffffff>
	 *  at System.Runtime.Remoting.Channels.CrossAppDomainSink.ProcessMessageInDomain (byte[],System.Runtime.Remoting.Messaging.CADMethodCallMessage) [0x00008] in /home/schani/Work/novell/trunk/mcs/class/corlib/System.Runtime.Remoting.Channels/CrossAppDomainChannel.cs:198
	 *  at (wrapper runtime-invoke) object.runtime_invoke_CrossAppDomainSink/ProcessMessageRes_object_object (object,intptr,intptr,intptr) <IL 0x0004c, 0xffffffff>
	 */
	if (!strcmp (ref->vtable->klass->name_space, "System") &&
			!strcmp (ref->vtable->klass->name, "Byte[]") &&
			!strcmp (o->vtable->klass->name_space, "System.IO") &&
			!strcmp (o->vtable->klass->name, "MemoryStream"))
		return TRUE;
	/* append_job() in threadpool.c */
	if (!strcmp (ref->vtable->klass->name_space, "System.Runtime.Remoting.Messaging") &&
			!strcmp (ref->vtable->klass->name, "AsyncResult") &&
			!strcmp (o->vtable->klass->name_space, "System") &&
			!strcmp (o->vtable->klass->name, "Object[]") &&
			mono_thread_pool_is_queue_array ((MonoArray*) o))
		return TRUE;
	return FALSE;
}

static void
check_reference_for_xdomain (gpointer *ptr, char *obj, MonoDomain *domain)
{
	MonoObject *o = (MonoObject*)(obj);
	MonoObject *ref = (MonoObject*)*(ptr);
	int offset = (char*)(ptr) - (char*)o;
	MonoClass *class;
	MonoClassField *field;
	char *str;

	if (!ref || ref->vtable->domain == domain)
		return;
	if (is_xdomain_ref_allowed (ptr, obj, domain))
		return;

	field = NULL;
	for (class = o->vtable->klass; class; class = class->parent) {
		int i;

		for (i = 0; i < class->field.count; ++i) {
			if (class->fields [i].offset == offset) {
				field = &class->fields [i];
				break;
			}
		}
		if (field)
			break;
	}

	str = NULL;
	if (ref->vtable->klass == mono_defaults.string_class)
		str = mono_string_to_utf8 ((MonoString*)ref);

	g_print ("xdomain reference in %p (%s.%s) at offset %d (%s) to %p (%s.%s) (%s) - pointed to by:\n",
			o, o->vtable->klass->name_space, o->vtable->klass->name,
			offset, field ? field->name : "",
			ref, ref->vtable->klass->name_space, ref->vtable->klass->name, str ? str : "");
	mono_gc_scan_for_specific_ref (o);

	if (str)
		g_free (str);
}

#define HANDLE_PTR(ptr,obj)	check_reference_for_xdomain ((ptr), (obj), domain)

static void
scan_object_for_xdomain_refs (char *start, mword size, void *data)
{
	MonoDomain *domain = ((MonoObject*)start)->vtable->domain;

	#include "sgen-scan-object.h"
}

#define HANDLE_PTR(ptr,obj) do {					\
	if ((MonoObject*)*(ptr) == key) {				\
	g_print ("found ref to %p in object %p (%s) at offset %td\n",	\
			key, (obj), safe_name ((obj)), ((char*)(ptr) - (char*)(obj))); \
	}								\
	} while (0)

static void
scan_object_for_specific_ref (char *start, MonoObject *key)
{
	#include "sgen-scan-object.h"
}

void
mono_sgen_scan_area_with_callback (char *start, char *end, IterateObjectCallbackFunc callback, void *data)
{
	while (start < end) {
		size_t size;

		if (!*(void**)start) {
			start += sizeof (void*); /* should be ALLOC_ALIGN, really */
			continue;
		}

		size = ALIGN_UP (safe_object_get_size ((MonoObject*) start));

		callback (start, size, data);

		start += size;
	}
}

static void
scan_object_for_specific_ref_callback (char *obj, size_t size, MonoObject *key)
{
	scan_object_for_specific_ref (obj, key);
}

static void
check_root_obj_specific_ref (RootRecord *root, MonoObject *key, MonoObject *obj)
{
	if (obj == key)
		g_print ("found ref to %p in root record %p\n", key, root);
}

static MonoObject *check_key = NULL;
static RootRecord *check_root = NULL;

static void
check_root_obj_specific_ref_from_marker (void **obj)
{
	check_root_obj_specific_ref (check_root, check_key, *obj);
}

static void
scan_roots_for_specific_ref (MonoObject *key, int root_type)
{
	int i;
	RootRecord *root;
	check_key = key;
	for (i = 0; i < roots_hash_size [root_type]; ++i) {
		for (root = roots_hash [root_type][i]; root; root = root->next) {
			void **start_root = (void**)root->start_root;
			mword desc = root->root_desc;

			check_root = root;

			switch (desc & ROOT_DESC_TYPE_MASK) {
			case ROOT_DESC_BITMAP:
				desc >>= ROOT_DESC_TYPE_SHIFT;
				while (desc) {
					if (desc & 1)
						check_root_obj_specific_ref (root, key, *start_root);
					desc >>= 1;
					start_root++;
				}
				break;
			case ROOT_DESC_COMPLEX: {
				gsize *bitmap_data = complex_descriptors + (desc >> ROOT_DESC_TYPE_SHIFT);
				int bwords = (*bitmap_data) - 1;
				void **start_run = start_root;
				bitmap_data++;
				while (bwords-- > 0) {
					gsize bmap = *bitmap_data++;
					void **objptr = start_run;
					while (bmap) {
						if (bmap & 1)
							check_root_obj_specific_ref (root, key, *objptr);
						bmap >>= 1;
						++objptr;
					}
					start_run += GC_BITS_PER_WORD;
				}
				break;
			}
			case ROOT_DESC_USER: {
				MonoGCRootMarkFunc marker = user_descriptors [desc >> ROOT_DESC_TYPE_SHIFT];
				marker (start_root, check_root_obj_specific_ref_from_marker);
				break;
			}
			case ROOT_DESC_RUN_LEN:
				g_assert_not_reached ();
			default:
				g_assert_not_reached ();
			}
		}
	}
	check_key = NULL;
	check_root = NULL;
}

void
mono_gc_scan_for_specific_ref (MonoObject *key)
{
	LOSObject *bigobj;
	RootRecord *root;
	int i;

	mono_sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data,
			(IterateObjectCallbackFunc)scan_object_for_specific_ref_callback, key);

	major.iterate_objects (TRUE, TRUE, (IterateObjectCallbackFunc)scan_object_for_specific_ref_callback, key);

	for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
		scan_object_for_specific_ref (bigobj->data, key);

	scan_roots_for_specific_ref (key, ROOT_TYPE_NORMAL);
	scan_roots_for_specific_ref (key, ROOT_TYPE_WBARRIER);

	for (i = 0; i < roots_hash_size [ROOT_TYPE_PINNED]; ++i) {
		for (root = roots_hash [ROOT_TYPE_PINNED][i]; root; root = root->next) {
			void **ptr = (void**)root->start_root;

			while (ptr < (void**)root->end_root) {
				check_root_obj_specific_ref (root, *ptr, key);
				++ptr;
			}
		}
	}
}

/* Clear all remaining nursery fragments */
static void
clear_nursery_fragments (char *next)
{
	Fragment *frag;
	if (nursery_clear_policy == CLEAR_AT_TLAB_CREATION) {
		g_assert (next <= nursery_frag_real_end);
		memset (next, 0, nursery_frag_real_end - next);
		for (frag = nursery_fragments; frag; frag = frag->next) {
			memset (frag->fragment_start, 0, frag->fragment_end - frag->fragment_start);
		}
	}
}

static gboolean
need_remove_object_for_domain (char *start, MonoDomain *domain)
{
	if (mono_object_domain (start) == domain) {
		DEBUG (4, fprintf (gc_debug_file, "Need to cleanup object %p\n", start));
		binary_protocol_cleanup (start, (gpointer)LOAD_VTABLE (start), safe_object_get_size ((MonoObject*)start));
		return TRUE;
	}
	return FALSE;
}

static void
process_object_for_domain_clearing (char *start, MonoDomain *domain)
{
	GCVTable *vt = (GCVTable*)LOAD_VTABLE (start);
	if (vt->klass == mono_defaults.internal_thread_class)
		g_assert (mono_object_domain (start) == mono_get_root_domain ());
	/* The object could be a proxy for an object in the domain
	   we're deleting. */
	if (mono_class_has_parent (vt->klass, mono_defaults.real_proxy_class)) {
		MonoObject *server = ((MonoRealProxy*)start)->unwrapped_server;

		/* The server could already have been zeroed out, so
		   we need to check for that, too. */
		if (server && (!LOAD_VTABLE (server) || mono_object_domain (server) == domain)) {
			DEBUG (4, fprintf (gc_debug_file, "Cleaning up remote pointer in %p to object %p\n",
					start, server));
			((MonoRealProxy*)start)->unwrapped_server = NULL;
		}
	}
}

static MonoDomain *check_domain = NULL;

static void
check_obj_not_in_domain (void **o)
{
	g_assert (((MonoObject*)(*o))->vtable->domain != check_domain);
}

static void
scan_for_registered_roots_in_domain (MonoDomain *domain, int root_type)
{
	int i;
	RootRecord *root;
	check_domain = domain;
	for (i = 0; i < roots_hash_size [root_type]; ++i) {
		for (root = roots_hash [root_type][i]; root; root = root->next) {
			void **start_root = (void**)root->start_root;
			mword desc = root->root_desc;

			/* The MonoDomain struct is allowed to hold
			   references to objects in its own domain. */
			if (start_root == (void**)domain)
				continue;

			switch (desc & ROOT_DESC_TYPE_MASK) {
			case ROOT_DESC_BITMAP:
				desc >>= ROOT_DESC_TYPE_SHIFT;
				while (desc) {
					if ((desc & 1) && *start_root)
						check_obj_not_in_domain (*start_root);
					desc >>= 1;
					start_root++;
				}
				break;
			case ROOT_DESC_COMPLEX: {
				gsize *bitmap_data = complex_descriptors + (desc >> ROOT_DESC_TYPE_SHIFT);
				int bwords = (*bitmap_data) - 1;
				void **start_run = start_root;
				bitmap_data++;
				while (bwords-- > 0) {
					gsize bmap = *bitmap_data++;
					void **objptr = start_run;
					while (bmap) {
						if ((bmap & 1) && *objptr)
							check_obj_not_in_domain (*objptr);
						bmap >>= 1;
						++objptr;
					}
					start_run += GC_BITS_PER_WORD;
				}
				break;
			}
			case ROOT_DESC_USER: {
				MonoGCRootMarkFunc marker = user_descriptors [desc >> ROOT_DESC_TYPE_SHIFT];
				marker (start_root, check_obj_not_in_domain);
				break;
			}
			case ROOT_DESC_RUN_LEN:
				g_assert_not_reached ();
			default:
				g_assert_not_reached ();
			}
		}
	}
	check_domain = NULL;
}

static void
check_for_xdomain_refs (void)
{
	LOSObject *bigobj;

	mono_sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data,
			(IterateObjectCallbackFunc)scan_object_for_xdomain_refs, NULL);

	major.iterate_objects (TRUE, TRUE, (IterateObjectCallbackFunc)scan_object_for_xdomain_refs, NULL);

	for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
		scan_object_for_xdomain_refs (bigobj->data, bigobj->size, NULL);
}

static gboolean
clear_domain_process_object (char *obj, MonoDomain *domain)
{
	gboolean remove;

	process_object_for_domain_clearing (obj, domain);
	remove = need_remove_object_for_domain (obj, domain);

	if (remove && ((MonoObject*)obj)->synchronisation) {
		void **dislink = mono_monitor_get_object_monitor_weak_link ((MonoObject*)obj);
		if (dislink)
			mono_gc_register_disappearing_link (NULL, dislink, FALSE);
	}

	return remove;
}

static void
clear_domain_process_minor_object_callback (char *obj, size_t size, MonoDomain *domain)
{
	if (clear_domain_process_object (obj, domain))
		memset (obj, 0, size);
}

static void
clear_domain_process_major_object_callback (char *obj, size_t size, MonoDomain *domain)
{
	clear_domain_process_object (obj, domain);
}

static void
clear_domain_free_major_non_pinned_object_callback (char *obj, size_t size, MonoDomain *domain)
{
	if (need_remove_object_for_domain (obj, domain))
		major.free_non_pinned_object (obj, size);
}

static void
clear_domain_free_major_pinned_object_callback (char *obj, size_t size, MonoDomain *domain)
{
	if (need_remove_object_for_domain (obj, domain))
		major.free_pinned_object (obj, size);
}

/*
 * When appdomains are unloaded we can easily remove objects that have finalizers,
 * but all the others could still be present in random places on the heap.
 * We need a sweep to get rid of them even though it's going to be costly.
 * The reason we need to remove them is because we access the vtable and class
 * structures to know the object size and the reference bitmap: once the domain is
 * unloaded they point to random memory.
 */
void
mono_gc_clear_domain (MonoDomain * domain)
{
	LOSObject *bigobj, *prev;
	int i;

	LOCK_GC;

	clear_nursery_fragments (nursery_next);

	if (xdomain_checks && domain != mono_get_root_domain ()) {
		scan_for_registered_roots_in_domain (domain, ROOT_TYPE_NORMAL);
		scan_for_registered_roots_in_domain (domain, ROOT_TYPE_WBARRIER);
		check_for_xdomain_refs ();
	}

	mono_sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data,
			(IterateObjectCallbackFunc)clear_domain_process_minor_object_callback, domain);

	/* Ephemerons and dislinks must be processed before LOS since they might end up pointing
	   to memory returned to the OS. */
	null_ephemerons_for_domain (domain);

	for (i = GENERATION_NURSERY; i < GENERATION_MAX; ++i)
		null_links_for_domain (domain, i);

	/* We need two passes over major and large objects because
	   freeing such objects might give their memory back to the OS
	   (in the case of large objects) or obliterate its vtable
	   (pinned objects with major-copying or pinned and non-pinned
	   objects with major-mark&sweep), but we might need to
	   dereference a pointer from an object to another object if
	   the first object is a proxy. */
	major.iterate_objects (TRUE, TRUE, (IterateObjectCallbackFunc)clear_domain_process_major_object_callback, domain);
	for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
		clear_domain_process_object (bigobj->data, domain);

	prev = NULL;
	for (bigobj = los_object_list; bigobj;) {
		if (need_remove_object_for_domain (bigobj->data, domain)) {
			LOSObject *to_free = bigobj;
			if (prev)
				prev->next = bigobj->next;
			else
				los_object_list = bigobj->next;
			bigobj = bigobj->next;
			DEBUG (4, fprintf (gc_debug_file, "Freeing large object %p\n",
					to_free->data));
			free_large_object (to_free);
			continue;
		}
		prev = bigobj;
		bigobj = bigobj->next;
	}
	major.iterate_objects (TRUE, FALSE, (IterateObjectCallbackFunc)clear_domain_free_major_non_pinned_object_callback, domain);
	major.iterate_objects (FALSE, TRUE, (IterateObjectCallbackFunc)clear_domain_free_major_pinned_object_callback, domain);

	UNLOCK_GC;
}

static void
global_remset_cache_clear (void)
{
	memset (global_remset_cache, 0, sizeof (global_remset_cache));
}

/*
 * Tries to check if a given remset location was already added to the global remset.
 *
 * A two-entry LRU cache of recently seen remset locations.
 *
 * It's hand-coded instead of done using loops to reduce the number of memory references on cache hit.
 *
 * Returns TRUE if the location was not in the cache (and so was added to it).
 */
static gboolean
global_remset_location_was_not_added (gpointer ptr)
{
	gpointer first = global_remset_cache [0], second;
	if (first == ptr) {
		HEAVY_STAT (++stat_global_remsets_discarded);
		return FALSE;
	}

	second = global_remset_cache [1];

	if (second == ptr) {
		/* Move the second to the front */
		global_remset_cache [0] = second;
		global_remset_cache [1] = first;

		HEAVY_STAT (++stat_global_remsets_discarded);
		return FALSE;
	}

	global_remset_cache [0] = second;
	global_remset_cache [1] = ptr;
	return TRUE;
}
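
/*
 * Illustrative trace (not part of the original code), with the cache
 * starting out as { NULL, NULL }:
 *
 *	lookup (p) -> miss; cache becomes { NULL, p }; returns TRUE
 *	lookup (p) -> hit in slot 1; cache becomes { p, NULL }; returns FALSE
 *	lookup (p) -> hit in slot 0; returns FALSE
 */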

/*
 * mono_sgen_add_to_global_remset:
 *
 * The global remset contains locations which point into newspace after
 * a minor collection. This can happen if the objects they point to are pinned.
 */
void
mono_sgen_add_to_global_remset (gpointer ptr)
{
	RememberedSet *rs;

	g_assert (!ptr_in_nursery (ptr) && ptr_in_nursery (*(gpointer*)ptr));

	LOCK_GLOBAL_REMSET;

	if (!global_remset_location_was_not_added (ptr)) {
		UNLOCK_GLOBAL_REMSET;
		return;
	}

	DEBUG (8, fprintf (gc_debug_file, "Adding global remset for %p\n", ptr));
	binary_protocol_global_remset (ptr, *(gpointer*)ptr, (gpointer)LOAD_VTABLE (*(gpointer*)ptr));

	HEAVY_STAT (++stat_global_remsets_added);

	/*
	 * FIXME: If an object remains pinned, we need to add it at every minor collection.
	 * To avoid uncontrolled growth of the global remset, only add each pointer once.
	 */
	if (global_remset->store_next + 3 < global_remset->end_set) {
		*(global_remset->store_next++) = (mword)ptr;
		UNLOCK_GLOBAL_REMSET;
		return;
	}
	rs = alloc_remset (global_remset->end_set - global_remset->data, NULL);
	rs->next = global_remset;
	global_remset = rs;
	*(global_remset->store_next++) = (mword)ptr;

	{
		int global_rs_size = 0;

		for (rs = global_remset; rs; rs = rs->next) {
			global_rs_size += rs->store_next - rs->data;
		}
		DEBUG (4, fprintf (gc_debug_file, "Global remset now has size %d\n", global_rs_size));
	}

	UNLOCK_GLOBAL_REMSET;
}

/*
 * Scan objects in the gray stack until the stack is empty. This should be called
 * frequently after each object is copied, to achieve better locality and cache
 * usage.
 */
static void
drain_gray_stack (GrayQueue *queue)
{
	char *obj;

	if (current_collection_generation == GENERATION_NURSERY) {
		for (;;) {
			GRAY_OBJECT_DEQUEUE (queue, obj);
			if (!obj)
				break;
			DEBUG (9, fprintf (gc_debug_file, "Precise gray object scan %p (%s)\n", obj, safe_name (obj)));
			major.minor_scan_object (obj, queue);
		}
	} else {
#ifdef SGEN_PARALLEL_MARK
		if (queue == &workers_distribute_gray_queue)
			return;
#endif
		for (;;) {
			GRAY_OBJECT_DEQUEUE (queue, obj);
			if (!obj)
				break;
			DEBUG (9, fprintf (gc_debug_file, "Precise gray object scan %p (%s)\n", obj, safe_name (obj)));
			major.major_scan_object (obj, queue);
		}
	}
}
/*
 * Addresses from start to end are already sorted. This function finds
 * the object header for each address and pins the object. The
 * addresses must be inside the passed section. The (start of the)
 * address array is overwritten with the addresses of the actually
 * pinned objects. Return the number of pinned objects.
 */
static int
pin_objects_from_addresses (GCMemSection *section, void **start, void **end, void *start_nursery, void *end_nursery, GrayQueue *queue)
{
	void *last = NULL;
	int count = 0;
	void *search_start;
	void *last_obj = NULL;
	size_t last_obj_size = 0;
	void *addr;
	int idx;
	void **definitely_pinned = start;
	while (start < end) {
		addr = *start;
		/* the range check should be redundant */
		if (addr != last && addr >= start_nursery && addr < end_nursery) {
			DEBUG (5, fprintf (gc_debug_file, "Considering pinning addr %p\n", addr));
			/* multiple pointers to the same object */
			if (addr >= last_obj && (char*)addr < (char*)last_obj + last_obj_size) {
				/* already handled while processing the previous address */
			} else {
				idx = ((char*)addr - (char*)section->data) / SCAN_START_SIZE;
				g_assert (idx < section->num_scan_start);
				search_start = (void*)section->scan_starts [idx];
				if (!search_start || search_start > addr) {
					while (idx) {
						--idx;
						search_start = section->scan_starts [idx];
						if (search_start && search_start <= addr)
							break;
					}
					if (!search_start || search_start > addr)
						search_start = start_nursery;
				}
				if (search_start < last_obj)
					search_start = (char*)last_obj + last_obj_size;
				/* now addr should be in an object a short distance from search_start
				 * Note that search_start must point to zeroed mem or point to an object.
				 */
				do {
					if (!*(void**)search_start) {
						search_start = (void*)ALIGN_UP ((mword)search_start + sizeof (gpointer));
						continue;
					}
					last_obj = search_start;
					last_obj_size = ALIGN_UP (safe_object_get_size ((MonoObject*)search_start));
					DEBUG (8, fprintf (gc_debug_file, "Pinned try match %p (%s), size %zd\n", last_obj, safe_name (last_obj), last_obj_size));
					if (addr >= search_start && (char*)addr < (char*)last_obj + last_obj_size) {
						DEBUG (4, fprintf (gc_debug_file, "Pinned object %p, vtable %p (%s), count %d\n", search_start, *(void**)search_start, safe_name (search_start), count));
						binary_protocol_pin (search_start, (gpointer)LOAD_VTABLE (search_start), safe_object_get_size (search_start));
						pin_object (search_start);
						GRAY_OBJECT_ENQUEUE (queue, search_start);
						if (heap_dump_file)
							mono_sgen_pin_stats_register_object (search_start, last_obj_size);
						definitely_pinned [count] = search_start;
						count++;
						break;
					}
					/* skip to the next object */
					search_start = (void*)((char*)search_start + last_obj_size);
				} while (search_start <= addr);
				/* we either pinned the correct object or we ignored the addr because
				 * it points to unused zeroed memory.
				 */
			}
		}
		last = addr;
		start++;
	}
	//printf ("effective pinned: %d (at the end: %d)\n", count, (char*)end_nursery - (char*)last);
	return count;
}
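/*
 * Worked example for the scan_starts lookup above (hypothetical numbers,
 * not from the original code): with SCAN_START_SIZE == 4096 and an interior
 * pointer at offset 10000 from section->data,
 *
 *   idx = 10000 / 4096 == 2
 *
 * so the walk starts at the object recorded for the third chunk (or an
 * earlier non-empty chunk) and advances object by object, using each
 * object's size, until it covers addr. The walk is therefore bounded by a
 * chunk or two instead of the whole section.
 */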
void
mono_sgen_pin_objects_in_section (GCMemSection *section, GrayQueue *queue)
{
	int num_entries = section->pin_queue_num_entries;
	if (num_entries) {
		void **start = section->pin_queue_start;
		int reduced_to;
		reduced_to = pin_objects_from_addresses (section, start, start + num_entries,
				section->data, section->next_data, queue);
		section->pin_queue_num_entries = reduced_to;
		if (!reduced_to)
			section->pin_queue_start = NULL;
	}
}
/* Sort the addresses in array in increasing order.
 * Done using a by-the-book heap sort, which has decent and stable performance
 * and is fairly cache efficient.
 */
static void
sort_addresses (void **array, int size)
{
	int i;
	void *tmp;

	for (i = 1; i < size; ++i) {
		int child = i;
		while (child > 0) {
			int parent = (child - 1) / 2;

			if (array [parent] >= array [child])
				break;

			tmp = array [parent];
			array [parent] = array [child];
			array [child] = tmp;

			child = parent;
		}
	}

	for (i = size - 1; i > 0; --i) {
		int end, root;
		tmp = array [i];
		array [i] = array [0];
		array [0] = tmp;

		end = i - 1;
		root = 0;

		while (root * 2 + 1 <= end) {
			int child = root * 2 + 1;

			if (child < end && array [child] < array [child + 1])
				child++;
			if (array [root] >= array [child])
				break;

			tmp = array [root];
			array [root] = array [child];
			array [child] = tmp;

			root = child;
		}
	}
}
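/*
 * Worked example for the heap sort above (hypothetical input, not from the
 * original code). Phase 1 sifts each element up, turning {3, 1, 4, 2} into
 * the max-heap {4, 2, 3, 1}; phase 2 repeatedly swaps the root with the
 * last unsorted element and sifts the new root down:
 *
 *   {4, 2, 3, 1} -> {3, 2, 1 | 4} -> {2, 1 | 3, 4} -> {1 | 2, 3, 4}
 *
 * leaving the addresses in increasing order, which optimize_pin_queue
 * relies on for duplicate removal.
 */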
static G_GNUC_UNUSED void
print_nursery_gaps (void* start_nursery, void *end_nursery)
{
	int i;
	gpointer first = start_nursery;
	gpointer next;
	for (i = 0; i < next_pin_slot; ++i) {
		next = pin_queue [i];
		fprintf (gc_debug_file, "Nursery range: %p-%p, size: %td\n", first, next, (char*)next-(char*)first);
		first = next;
	}
	next = end_nursery;
	fprintf (gc_debug_file, "Nursery range: %p-%p, size: %td\n", first, next, (char*)next-(char*)first);
}
/* reduce the info in the pin queue, removing duplicate pointers and sorting them */
static void
optimize_pin_queue (int start_slot)
{
	void **start, **cur, **end;
	/* sort and uniq pin_queue: we just sort and we let the rest discard multiple values */
	/* it may be better to keep ranges of pinned memory instead of individually pinning objects */
	DEBUG (5, fprintf (gc_debug_file, "Sorting pin queue, size: %d\n", next_pin_slot));
	if ((next_pin_slot - start_slot) > 1)
		sort_addresses (pin_queue + start_slot, next_pin_slot - start_slot);
	start = cur = pin_queue + start_slot;
	end = pin_queue + next_pin_slot;
	while (cur < end) {
		*start = *cur++;
		/* skip duplicates */
		while (cur < end && *start == *cur)
			cur++;
		start++;
	}
	next_pin_slot = start - pin_queue;
	DEBUG (5, fprintf (gc_debug_file, "Pin queue reduced to size: %d\n", next_pin_slot));
	//DEBUG (6, print_nursery_gaps (start_nursery, end_nursery));
}
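/*
 * Illustrative run of the sort+uniq above (hypothetical addresses, not from
 * the original code): a staged pin queue {0x30, 0x10, 0x30, 0x20, 0x10}
 * sorts to {0x10, 0x10, 0x20, 0x30, 0x30} and is then compacted in place to
 * {0x10, 0x20, 0x30}, so each candidate address is examined only once by
 * pin_objects_from_addresses.
 */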
/*
 * Scan the memory between start and end and queue values which could be pointers
 * to the area between start_nursery and end_nursery for later consideration.
 * Typically used for thread stacks.
 */
static void
conservatively_pin_objects_from (void **start, void **end, void *start_nursery, void *end_nursery, int pin_type)
{
	int count = 0;
	while (start < end) {
		if (*start >= start_nursery && *start < end_nursery) {
			/*
			 * *start can point to the middle of an object
			 * note: should we handle pointing at the end of an object?
			 * pinning in C# code disallows pointing at the end of an object
			 * but there is some small chance that an optimizing C compiler
			 * may keep the only reference to an object by pointing
			 * at the end of it. We ignore this small chance for now.
			 * Pointers to the end of an object are indistinguishable
			 * from pointers to the start of the next object in memory,
			 * so if we allowed that we'd need to pin two objects...
			 * We queue the pointer in an array; the array will then be
			 * sorted and uniqued. This way we can coalesce several pinning
			 * pointers and it should be faster since we'd do a memory scan
			 * with increasing addresses. Note: we can align the address to
			 * the allocation alignment, so the uniquing process is more
			 * effective.
			 */
			mword addr = (mword)*start;
			addr &= ~(ALLOC_ALIGN - 1);
			if (addr >= (mword)start_nursery && addr < (mword)end_nursery)
				pin_stage_ptr ((void*)addr);
			if (heap_dump_file)
				pin_stats_register_address ((char*)addr, pin_type);
			DEBUG (6, if (count) fprintf (gc_debug_file, "Pinning address %p\n", (void*)addr));
			count++;
		}
		start++;
	}
	DEBUG (7, if (count) fprintf (gc_debug_file, "found %d potential pinned heap pointers\n", count));
}
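/*
 * Worked example of the masking above (hypothetical value, assuming
 * ALLOC_ALIGN == 8, not from the original code): an interior stack word
 * 0x7f001237 rounds down to
 *
 *   0x7f001237 & ~(8 - 1) == 0x7f001230
 *
 * so several unaligned variants of the same candidate collapse to one
 * staged address, making the later sort+uniq pass cheaper.
 */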
/*
 * Debugging function: find in the conservative roots where @obj is being pinned.
 */
static G_GNUC_UNUSED void
find_pinning_reference (char *obj, size_t size)
{
	RootRecord *root;
	int i;
	char *endobj = obj + size;
	for (i = 0; i < roots_hash_size [0]; ++i) {
		for (root = roots_hash [0][i]; root; root = root->next) {
			/* if desc is non-null it has precise info */
			if (!root->root_desc) {
				char ** start = (char**)root->start_root;
				while (start < (char**)root->end_root) {
					if (*start >= obj && *start < endobj) {
						DEBUG (0, fprintf (gc_debug_file, "Object %p referenced in pinned roots %p-%p (at %p in record %p)\n", obj, root->start_root, root->end_root, start, root));
					}
					start++;
				}
			}
		}
	}
	find_pinning_ref_from_thread (obj, size);
}
/*
 * The first thing we do in a collection is to identify pinned objects.
 * This function considers all the areas of memory that need to be
 * conservatively scanned.
 */
static void
pin_from_roots (void *start_nursery, void *end_nursery)
{
	RootRecord *root;
	int i;
	DEBUG (2, fprintf (gc_debug_file, "Scanning pinned roots (%d bytes, %d/%d entries)\n", (int)roots_size, num_roots_entries [ROOT_TYPE_NORMAL], num_roots_entries [ROOT_TYPE_PINNED]));
	/* objects pinned from the API are inside these roots */
	for (i = 0; i < roots_hash_size [ROOT_TYPE_PINNED]; ++i) {
		for (root = roots_hash [ROOT_TYPE_PINNED][i]; root; root = root->next) {
			DEBUG (6, fprintf (gc_debug_file, "Pinned roots %p-%p\n", root->start_root, root->end_root));
			conservatively_pin_objects_from ((void**)root->start_root, (void**)root->end_root, start_nursery, end_nursery, PIN_TYPE_OTHER);
		}
	}
	/* now deal with the thread stacks
	 * in the future we should be able to conservatively scan only:
	 * *) the cpu registers
	 * *) the unmanaged stack frames
	 * *) the _last_ managed stack frame
	 * *) pointer slots in managed frames
	 */
	scan_thread_data (start_nursery, end_nursery, FALSE);

	evacuate_pin_staging_area ();
}
static CopyOrMarkObjectFunc user_copy_or_mark_func;
static GrayQueue *user_copy_or_mark_queue;

static void
single_arg_user_copy_or_mark (void **obj)
{
	user_copy_or_mark_func (obj, user_copy_or_mark_queue);
}
/*
 * The memory area from start_root to end_root contains pointers to objects.
 * Their position is precisely described by @desc (this means that the pointer
 * can be either NULL or the pointer to the start of an object).
 * This function copies them to to_space and updates them.
 *
 * This function is not thread-safe!
 */
static void
precisely_scan_objects_from (CopyOrMarkObjectFunc copy_func, void** start_root, void** end_root, char* n_start, char *n_end, mword desc, GrayQueue *queue)
{
	switch (desc & ROOT_DESC_TYPE_MASK) {
	case ROOT_DESC_BITMAP:
		desc >>= ROOT_DESC_TYPE_SHIFT;
		while (desc) {
			if ((desc & 1) && *start_root) {
				copy_func (start_root, queue);
				DEBUG (9, fprintf (gc_debug_file, "Overwrote root at %p with %p\n", start_root, *start_root));
				drain_gray_stack (queue);
			}
			desc >>= 1;
			start_root++;
		}
		return;
	case ROOT_DESC_COMPLEX: {
		gsize *bitmap_data = complex_descriptors + (desc >> ROOT_DESC_TYPE_SHIFT);
		int bwords = (*bitmap_data) - 1;
		void **start_run = start_root;
		bitmap_data++;
		while (bwords-- > 0) {
			gsize bmap = *bitmap_data++;
			void **objptr = start_run;
			while (bmap) {
				if ((bmap & 1) && *objptr) {
					copy_func (objptr, queue);
					DEBUG (9, fprintf (gc_debug_file, "Overwrote root at %p with %p\n", objptr, *objptr));
					drain_gray_stack (queue);
				}
				bmap >>= 1;
				++objptr;
			}
			start_run += GC_BITS_PER_WORD;
		}
		break;
	}
	case ROOT_DESC_USER: {
		MonoGCRootMarkFunc marker = user_descriptors [desc >> ROOT_DESC_TYPE_SHIFT];
		user_copy_or_mark_func = copy_func;
		user_copy_or_mark_queue = queue;
		marker (start_root, single_arg_user_copy_or_mark);
		user_copy_or_mark_func = NULL;
		user_copy_or_mark_queue = NULL;
		break;
	}
	case ROOT_DESC_RUN_LEN:
		g_assert_not_reached ();
	default:
		g_assert_not_reached ();
	}
}
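/*
 * Worked example for the ROOT_DESC_BITMAP case above (hypothetical
 * descriptor, not from the original code): after the type shift, a payload
 * of binary 101 means slot 0 of the root area holds a pointer, slot 1 does
 * not, slot 2 does. The loop tests bit 0, then shifts desc right and
 * advances start_root, so copy_func runs only on slots 0 and 2.
 */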
void
mono_sgen_update_heap_boundaries (mword low, mword high)
{
	mword old;

	do {
		old = lowest_heap_address;
		if (low >= old)
			break;
	} while (SGEN_CAS_PTR ((gpointer*)&lowest_heap_address, (gpointer)low, (gpointer)old) != (gpointer)old);

	do {
		old = highest_heap_address;
		if (high <= old)
			break;
	} while (SGEN_CAS_PTR ((gpointer*)&highest_heap_address, (gpointer)high, (gpointer)old) != (gpointer)old);
}
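/*
 * The compare-and-swap loops above are a lock-free "monotonic min/max"
 * update. A sketch of the pattern, assuming SGEN_CAS_PTR returns the value
 * observed before the exchange (compare_exchange semantics):
 *
 *   do {
 *       old = shared_min;
 *       if (candidate >= old)
 *           break;                                       // nothing to improve
 *   } while (CAS (&shared_min, candidate, old) != old);  // retry on a race
 */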
static Fragment*
alloc_fragment (void)
{
	Fragment *frag = fragment_freelist;
	if (frag) {
		fragment_freelist = frag->next;
		frag->next = NULL;
		return frag;
	}
	frag = mono_sgen_alloc_internal (INTERNAL_MEM_FRAGMENT);
	frag->next = NULL;
	return frag;
}

/* size must be a power of 2 */
void*
mono_sgen_alloc_os_memory_aligned (mword size, mword alignment, gboolean activate)
{
	/* Allocate twice the memory to be able to put the block on an aligned address */
	char *mem = mono_sgen_alloc_os_memory (size + alignment, activate);
	char *aligned;

	g_assert (mem);

	aligned = (char*)((mword)(mem + (alignment - 1)) & ~(alignment - 1));
	g_assert (aligned >= mem && aligned + size <= mem + size + alignment && !((mword)aligned & (alignment - 1)));

	if (aligned > mem)
		mono_sgen_free_os_memory (mem, aligned - mem);
	if (aligned + size < mem + size + alignment)
		mono_sgen_free_os_memory (aligned + size, (mem + size + alignment) - (aligned + size));

	return aligned;
}
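/*
 * Worked example of the trimming above (hypothetical addresses, not from
 * the original code): requesting size = 4MB aligned to 4MB may get
 * mem = 0x100300000 from the OS. Then
 *
 *   aligned = (0x100300000 + 0x3fffff) & ~0x3fffff = 0x100400000
 *
 * The 1MB head [mem, aligned) and the 3MB tail past aligned + size are
 * returned to the OS, leaving exactly `size` bytes mapped at the aligned
 * address.
 */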
/*
 * Allocate and setup the data structures needed to be able to allocate objects
 * in the nursery. The nursery is stored in nursery_section.
 */
static void
alloc_nursery (void)
{
	GCMemSection *section;
	char *data;
	int scan_starts;
	Fragment *frag;
	int alloc_size;

	if (nursery_section)
		return;
	DEBUG (2, fprintf (gc_debug_file, "Allocating nursery size: %lu\n", (unsigned long)nursery_size));
	/* later we will alloc a larger area for the nursery but only activate
	 * what we need. The rest will be used as expansion if we have too many pinned
	 * objects in the existing nursery.
	 */
	/* FIXME: handle OOM */
	section = mono_sgen_alloc_internal (INTERNAL_MEM_SECTION);

	g_assert (nursery_size == DEFAULT_NURSERY_SIZE);
	alloc_size = nursery_size;
#ifdef SGEN_ALIGN_NURSERY
	data = mono_sgen_alloc_os_memory_aligned (alloc_size, alloc_size, TRUE);
#else
	data = mono_sgen_alloc_os_memory (alloc_size, TRUE);
#endif
	nursery_start = data;
	nursery_real_end = nursery_start + nursery_size;
	mono_sgen_update_heap_boundaries ((mword)nursery_start, (mword)nursery_real_end);
	nursery_next = nursery_start;
	DEBUG (4, fprintf (gc_debug_file, "Expanding nursery size (%p-%p): %lu, total: %lu\n", data, data + alloc_size, (unsigned long)nursery_size, (unsigned long)total_alloc));
	section->data = section->next_data = data;
	section->size = alloc_size;
	section->end_data = nursery_real_end;
	scan_starts = (alloc_size + SCAN_START_SIZE - 1) / SCAN_START_SIZE;
	section->scan_starts = mono_sgen_alloc_internal_dynamic (sizeof (char*) * scan_starts, INTERNAL_MEM_SCAN_STARTS);
	section->num_scan_start = scan_starts;
	section->block.role = MEMORY_ROLE_GEN0;
	section->block.next = NULL;

	nursery_section = section;

	/* Setup the single first large fragment */
	frag = alloc_fragment ();
	frag->fragment_start = nursery_start;
	frag->fragment_limit = nursery_start;
	frag->fragment_end = nursery_real_end;
	nursery_frag_real_end = nursery_real_end;
	/* FIXME: frag here is lost */
}
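/*
 * Worked sizing example for the scan_starts table above (hypothetical
 * numbers, not from the original code): with a 4MB nursery and
 * SCAN_START_SIZE == 4096,
 *
 *   scan_starts = (4MB + 4095) / 4096 = 1024
 *
 * i.e. one recorded object start per 4KB chunk, which is what lets
 * pin_objects_from_addresses begin its object walk near any interior
 * pointer instead of at the start of the section.
 */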
static void
scan_finalizer_entries (CopyOrMarkObjectFunc copy_func, FinalizeEntry *list, GrayQueue *queue)
{
	FinalizeEntry *fin;

	for (fin = list; fin; fin = fin->next) {
		if (!fin->object)
			continue;
		DEBUG (5, fprintf (gc_debug_file, "Scan of fin ready object: %p (%s)\n", fin->object, safe_name (fin->object)));
		copy_func (&fin->object, queue);
	}
}

static mword fragment_total = 0;
/*
 * We found a fragment of free memory in the nursery: memzero it and if
 * it is big enough, add it to the list of fragments that can be used for
 * allocation.
 */
static void
add_nursery_frag (size_t frag_size, char* frag_start, char* frag_end)
{
	Fragment *fragment;
	DEBUG (4, fprintf (gc_debug_file, "Found empty fragment: %p-%p, size: %zd\n", frag_start, frag_end, frag_size));
	binary_protocol_empty (frag_start, frag_size);
	/* memsetting just the first chunk start is bound to provide better cache locality */
	if (nursery_clear_policy == CLEAR_AT_GC)
		memset (frag_start, 0, frag_size);
	/* Not worth dealing with smaller fragments: need to tune */
	if (frag_size >= FRAGMENT_MIN_SIZE) {
		fragment = alloc_fragment ();
		fragment->fragment_start = frag_start;
		fragment->fragment_limit = frag_start;
		fragment->fragment_end = frag_end;
		fragment->next = nursery_fragments;
		nursery_fragments = fragment;
		fragment_total += frag_size;
	} else {
		/* Clear unused fragments, pinning depends on this */
		/* TODO: place an int[] here instead of the memset if the size justifies it */
		memset (frag_start, 0, frag_size);
	}
}
static const char*
generation_name (int generation)
{
	switch (generation) {
	case GENERATION_NURSERY: return "nursery";
	case GENERATION_OLD: return "old";
	default: g_assert_not_reached ();
	}
}

static DisappearingLinkHashTable*
get_dislink_hash_table (int generation)
{
	switch (generation) {
	case GENERATION_NURSERY: return &minor_disappearing_link_hash;
	case GENERATION_OLD: return &major_disappearing_link_hash;
	default: g_assert_not_reached ();
	}
}

static FinalizeEntryHashTable*
get_finalize_entry_hash_table (int generation)
{
	switch (generation) {
	case GENERATION_NURSERY: return &minor_finalizable_hash;
	case GENERATION_OLD: return &major_finalizable_hash;
	default: g_assert_not_reached ();
	}
}
static void
finish_gray_stack (char *start_addr, char *end_addr, int generation, GrayQueue *queue)
{
	TV_DECLARE (atv);
	TV_DECLARE (btv);
	int fin_ready;
	int ephemeron_rounds = 0;
	CopyOrMarkObjectFunc copy_func = current_collection_generation == GENERATION_NURSERY ? major.copy_object : major.copy_or_mark_object;

	/*
	 * We copied all the reachable objects. Now it is time to copy
	 * the objects that were not referenced by the roots, but by the copied objects.
	 * we built a stack of objects pointed to by gray_start: they are
	 * additional roots and we may add more items as we go.
	 * We loop until gray_start == gray_objects which means no more objects have
	 * been added. Note this is iterative: no recursion is involved.
	 * We need to walk the LO list as well in search of marked big objects
	 * (use a flag since this is needed only on major collections). We need to loop
	 * here as well, so keep a counter of marked LO (increasing it in copy_object).
	 * To achieve better cache locality and cache usage, we drain the gray stack
	 * frequently, after each object is copied, and just finish the work here.
	 */
	drain_gray_stack (queue);
	TV_GETTIME (atv);
	DEBUG (2, fprintf (gc_debug_file, "%s generation done\n", generation_name (generation)));
	/* walk the finalization queue and move also the objects that need to be
	 * finalized: use the finalized objects as new roots so the objects they depend
	 * on are also not reclaimed. As with the roots above, only objects in the nursery
	 * are marked/copied.
	 * We need a loop here, since objects ready for finalizers may reference other objects
	 * that are fin-ready. Speedup with a flag?
	 */
	do {
		/*
		 * Walk the ephemeron tables marking all values with reachable keys. This must be completely done
		 * before processing finalizable objects to avoid finalizing reachable values.
		 *
		 * It must be done inside the finalizers loop since objects must not be removed from CWT tables
		 * while they are being finalized.
		 */
		int done_with_ephemerons = 0;
		do {
			done_with_ephemerons = mark_ephemerons_in_range (copy_func, start_addr, end_addr, queue);
			drain_gray_stack (queue);
			++ephemeron_rounds;
		} while (!done_with_ephemerons);

		fin_ready = num_ready_finalizers;
		finalize_in_range (copy_func, start_addr, end_addr, generation, queue);
		if (generation == GENERATION_OLD)
			finalize_in_range (copy_func, nursery_start, nursery_real_end, GENERATION_NURSERY, queue);

		/* drain the new stack that might have been created */
		DEBUG (6, fprintf (gc_debug_file, "Precise scan of gray area post fin\n"));
		drain_gray_stack (queue);
	} while (fin_ready != num_ready_finalizers);

	/*
	 * Clear ephemeron pairs with unreachable keys.
	 * We pass the copy func so we can figure out if an array was promoted or not.
	 */
	clear_unreachable_ephemerons (copy_func, start_addr, end_addr, queue);

	TV_GETTIME (btv);
	DEBUG (2, fprintf (gc_debug_file, "Finalize queue handling scan for %s generation: %d usecs %d ephemeron rounds\n", generation_name (generation), TV_ELAPSED (atv, btv), ephemeron_rounds));

	/*
	 * handle disappearing links
	 * Note we do this after checking the finalization queue because if an object
	 * survives (at least long enough to be finalized) we don't clear the link.
	 * This also deals with a possible issue with the monitor reclamation: with the Boehm
	 * GC a finalized object may lose the monitor because it is cleared before the finalizer
	 * is called.
	 */
	g_assert (gray_object_queue_is_empty (queue));
	for (;;) {
		null_link_in_range (copy_func, start_addr, end_addr, generation, queue);
		if (generation == GENERATION_OLD)
			null_link_in_range (copy_func, start_addr, end_addr, GENERATION_NURSERY, queue);
		if (gray_object_queue_is_empty (queue))
			break;
		drain_gray_stack (queue);
	}

	g_assert (gray_object_queue_is_empty (queue));
}
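/*
 * The function above is built from nested fixpoint loops; a sketch of the
 * shape, with the concrete calls abstracted away (illustration only):
 *
 *   do {
 *       do { mark ephemeron values; drain; } while (!done_with_ephemerons);
 *       before = num_ready_finalizers;
 *       promote fin-ready objects; drain;
 *   } while (before != num_ready_finalizers);
 *
 * Each pass can make more objects reachable, which can in turn make more
 * objects fin-ready, so iteration continues until nothing changes.
 */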
void
mono_sgen_check_section_scan_starts (GCMemSection *section)
{
	int i;
	for (i = 0; i < section->num_scan_start; ++i) {
		if (section->scan_starts [i]) {
			guint size = safe_object_get_size ((MonoObject*) section->scan_starts [i]);
			g_assert (size >= sizeof (MonoObject) && size <= MAX_SMALL_OBJ_SIZE);
		}
	}
}

static void
check_scan_starts (void)
{
	if (!do_scan_starts_check)
		return;
	mono_sgen_check_section_scan_starts (nursery_section);
	major.check_scan_starts ();
}

static int last_num_pinned = 0;
static void
build_nursery_fragments (void **start, int num_entries)
{
	char *frag_start, *frag_end;
	size_t frag_size;
	int i;

	while (nursery_fragments) {
		Fragment *next = nursery_fragments->next;
		nursery_fragments->next = fragment_freelist;
		fragment_freelist = nursery_fragments;
		nursery_fragments = next;
	}
	frag_start = nursery_start;
	fragment_total = 0;
	/* clear scan starts */
	memset (nursery_section->scan_starts, 0, nursery_section->num_scan_start * sizeof (gpointer));
	for (i = 0; i < num_entries; ++i) {
		frag_end = start [i];
		/* remove the pin bit from pinned objects */
		unpin_object (frag_end);
		nursery_section->scan_starts [((char*)frag_end - (char*)nursery_section->data)/SCAN_START_SIZE] = frag_end;
		frag_size = frag_end - frag_start;
		if (frag_size)
			add_nursery_frag (frag_size, frag_start, frag_end);
		frag_size = ALIGN_UP (safe_object_get_size ((MonoObject*)start [i]));
		frag_start = (char*)start [i] + frag_size;
	}
	nursery_last_pinned_end = frag_start;
	frag_end = nursery_real_end;
	frag_size = frag_end - frag_start;
	if (frag_size)
		add_nursery_frag (frag_size, frag_start, frag_end);
	if (!nursery_fragments) {
		DEBUG (1, fprintf (gc_debug_file, "Nursery fully pinned (%d)\n", num_entries));
		for (i = 0; i < num_entries; ++i) {
			DEBUG (3, fprintf (gc_debug_file, "Bastard pinning obj %p (%s), size: %d\n", start [i], safe_name (start [i]), safe_object_get_size (start [i])));
		}
		degraded_mode = 1;
	}

	nursery_next = nursery_frag_real_end = NULL;

	/* Clear TLABs for all threads */
	clear_tlabs ();
}
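/*
 * Illustrative nursery layout for the loop above (hypothetical positions,
 * not from the original code). With pinned objects P1 and P2:
 *
 *   nursery_start ... [gap A] P1 [gap B] P2 [gap C] nursery_real_end
 *
 * Gaps A and B become candidate fragments inside the loop; gap C, the tail
 * after the last pinned object, is handled by the final add_nursery_frag
 * call.
 */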
static void
scan_from_registered_roots (CopyOrMarkObjectFunc copy_func, char *addr_start, char *addr_end, int root_type, GrayQueue *queue)
{
	int i;
	RootRecord *root;
	for (i = 0; i < roots_hash_size [root_type]; ++i) {
		for (root = roots_hash [root_type][i]; root; root = root->next) {
			DEBUG (6, fprintf (gc_debug_file, "Precise root scan %p-%p (desc: %p)\n", root->start_root, root->end_root, (void*)root->root_desc));
			precisely_scan_objects_from (copy_func, (void**)root->start_root, (void**)root->end_root, addr_start, addr_end, root->root_desc, queue);
		}
	}
}
void
mono_sgen_dump_occupied (char *start, char *end, char *section_start)
{
	fprintf (heap_dump_file, "<occupied offset=\"%td\" size=\"%td\"/>\n", start - section_start, end - start);
}

void
mono_sgen_dump_section (GCMemSection *section, const char *type)
{
	char *start = section->data;
	char *end = section->data + section->size;
	char *occ_start = NULL;
	GCVTable *vt;
	char *old_start = NULL;	/* just for debugging */

	fprintf (heap_dump_file, "<section type=\"%s\" size=\"%lu\">\n", type, (unsigned long)section->size);

	while (start < end) {
		guint size;

		if (!*(void**)start) {
			if (occ_start) {
				mono_sgen_dump_occupied (occ_start, start, section->data);
				occ_start = NULL;
			}
			start += sizeof (void*); /* should be ALLOC_ALIGN, really */
			continue;
		}
		g_assert (start < section->next_data);

		if (!occ_start)
			occ_start = start;

		vt = (GCVTable*)LOAD_VTABLE (start);

		size = ALIGN_UP (safe_object_get_size ((MonoObject*) start));

		fprintf (heap_dump_file, "<object offset=\"%d\" class=\"%s.%s\" size=\"%d\"/>\n",
				start - section->data,
				vt->klass->name_space, vt->klass->name,
				size);

		old_start = start;
		start += size;
	}
	if (occ_start)
		mono_sgen_dump_occupied (occ_start, start, section->data);

	fprintf (heap_dump_file, "</section>\n");
}
static void
dump_object (MonoObject *obj, gboolean dump_location)
{
	static char class_name [1024];

	MonoClass *class = mono_object_class (obj);
	int i, j;

	/*
	 * Python's XML parser is too stupid to parse angle brackets
	 * in strings, so we just ignore them;
	 */
	i = j = 0;
	while (class->name [i] && j < sizeof (class_name) - 1) {
		if (!strchr ("<>\"", class->name [i]))
			class_name [j++] = class->name [i];
		++i;
	}
	g_assert (j < sizeof (class_name));
	class_name [j] = 0;

	fprintf (heap_dump_file, "<object class=\"%s.%s\" size=\"%d\"",
			class->name_space, class_name,
			safe_object_get_size (obj));
	if (dump_location) {
		const char *location;
		if (ptr_in_nursery (obj))
			location = "nursery";
		else if (safe_object_get_size (obj) <= MAX_SMALL_OBJ_SIZE)
			location = "major";
		else
			location = "LOS";
		fprintf (heap_dump_file, " location=\"%s\"", location);
	}
	fprintf (heap_dump_file, "/>\n");
}
static void
dump_heap (const char *type, int num, const char *reason)
{
	ObjectList *list;
	LOSObject *bigobj;

	fprintf (heap_dump_file, "<collection type=\"%s\" num=\"%d\"", type, num);
	if (reason)
		fprintf (heap_dump_file, " reason=\"%s\"", reason);
	fprintf (heap_dump_file, ">\n");
	fprintf (heap_dump_file, "<other-mem-usage type=\"mempools\" size=\"%ld\"/>\n", mono_mempool_get_bytes_allocated ());
	mono_sgen_dump_internal_mem_usage (heap_dump_file);
	fprintf (heap_dump_file, "<pinned type=\"stack\" bytes=\"%zu\"/>\n", pinned_byte_counts [PIN_TYPE_STACK]);
	/* fprintf (heap_dump_file, "<pinned type=\"static-data\" bytes=\"%d\"/>\n", pinned_byte_counts [PIN_TYPE_STATIC_DATA]); */
	fprintf (heap_dump_file, "<pinned type=\"other\" bytes=\"%zu\"/>\n", pinned_byte_counts [PIN_TYPE_OTHER]);

	fprintf (heap_dump_file, "<pinned-objects>\n");
	for (list = pinned_objects; list; list = list->next)
		dump_object (list->obj, TRUE);
	fprintf (heap_dump_file, "</pinned-objects>\n");

	mono_sgen_dump_section (nursery_section, "nursery");

	major.dump_heap (heap_dump_file);

	fprintf (heap_dump_file, "<los>\n");
	for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
		dump_object ((MonoObject*)bigobj->data, FALSE);
	fprintf (heap_dump_file, "</los>\n");

	fprintf (heap_dump_file, "</collection>\n");
}
void
mono_sgen_register_moved_object (void *obj, void *destination)
{
	g_assert (mono_profiler_events & MONO_PROFILE_GC_MOVES);

	/* FIXME: handle this for parallel collector */
#ifdef SGEN_PARALLEL_MARK
	g_assert_not_reached ();
#endif
	if (moved_objects_idx == MOVED_OBJECTS_NUM) {
		mono_profiler_gc_moves (moved_objects, moved_objects_idx);
		moved_objects_idx = 0;
	}
	moved_objects [moved_objects_idx++] = obj;
	moved_objects [moved_objects_idx++] = destination;
}

static void
init_stats (void)
{
	static gboolean inited = FALSE;

	if (inited)
		return;
	mono_counters_register ("Minor fragment clear", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_minor_pre_collection_fragment_clear);
	mono_counters_register ("Minor pinning", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_minor_pinning);
	mono_counters_register ("Minor scan remsets", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_minor_scan_remsets);
	mono_counters_register ("Minor scan pinned", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_minor_scan_pinned);
	mono_counters_register ("Minor scan registered roots", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_minor_scan_registered_roots);
	mono_counters_register ("Minor scan thread data", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_minor_scan_thread_data);
	mono_counters_register ("Minor finish gray stack", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_minor_finish_gray_stack);
	mono_counters_register ("Minor fragment creation", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_minor_fragment_creation);

	mono_counters_register ("Major fragment clear", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_pre_collection_fragment_clear);
	mono_counters_register ("Major pinning", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_pinning);
	mono_counters_register ("Major scan pinned", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_scan_pinned);
	mono_counters_register ("Major scan registered roots", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_scan_registered_roots);
	mono_counters_register ("Major scan thread data", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_scan_thread_data);
	mono_counters_register ("Major scan alloc_pinned", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_scan_alloc_pinned);
	mono_counters_register ("Major scan finalized", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_scan_finalized);
	mono_counters_register ("Major scan big objects", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_scan_big_objects);
	mono_counters_register ("Major finish gray stack", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_finish_gray_stack);
	mono_counters_register ("Major free big objects", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_free_bigobjs);
	mono_counters_register ("Major LOS sweep", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_los_sweep);
	mono_counters_register ("Major sweep", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_sweep);
	mono_counters_register ("Major fragment creation", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_fragment_creation);

#ifdef HEAVY_STATISTICS
	mono_counters_register ("WBarrier set field", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_set_field);
	mono_counters_register ("WBarrier set arrayref", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_set_arrayref);
	mono_counters_register ("WBarrier arrayref copy", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_arrayref_copy);
	mono_counters_register ("WBarrier generic store called", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_generic_store);
	mono_counters_register ("WBarrier generic store stored", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_generic_store_remset);
	mono_counters_register ("WBarrier set root", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_set_root);
	mono_counters_register ("WBarrier value copy", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_value_copy);
	mono_counters_register ("WBarrier object copy", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_object_copy);

	mono_counters_register ("# objects allocated", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_objects_alloced);
	mono_counters_register ("bytes allocated", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_bytes_alloced);
	mono_counters_register ("# objects allocated degraded", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_objects_alloced_degraded);
	mono_counters_register ("bytes allocated degraded", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_bytes_alloced_degraded);
	mono_counters_register ("bytes allocated in LOS", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_bytes_alloced_los);

	mono_counters_register ("# copy_object() called (nursery)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_copy_object_called_nursery);
	mono_counters_register ("# objects copied (nursery)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_objects_copied_nursery);
	mono_counters_register ("# copy_object() called (major)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_copy_object_called_major);
	mono_counters_register ("# objects copied (major)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_objects_copied_major);

	mono_counters_register ("# scan_object() called (nursery)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_scan_object_called_nursery);
	mono_counters_register ("# scan_object() called (major)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_scan_object_called_major);

	mono_counters_register ("# nursery copy_object() failed from space", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_nursery_copy_object_failed_from_space);
	mono_counters_register ("# nursery copy_object() failed forwarded", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_nursery_copy_object_failed_forwarded);
	mono_counters_register ("# nursery copy_object() failed pinned", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_nursery_copy_object_failed_pinned);

	mono_counters_register ("# wasted fragments used", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_wasted_fragments_used);
	mono_counters_register ("bytes in wasted fragments", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_wasted_fragments_bytes);

	mono_counters_register ("Store remsets", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_store_remsets);
	mono_counters_register ("Unique store remsets", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_store_remsets_unique);
	mono_counters_register ("Saved remsets 1", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_saved_remsets_1);
	mono_counters_register ("Saved remsets 2", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_saved_remsets_2);
	mono_counters_register ("Global remsets added", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_global_remsets_added);
	mono_counters_register ("Global remsets re-added", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_global_remsets_readded);
	mono_counters_register ("Global remsets processed", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_global_remsets_processed);
	mono_counters_register ("Global remsets discarded", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_global_remsets_discarded);
#endif

	inited = TRUE;
}
static gboolean
need_major_collection (void)
{
	mword los_alloced = los_memory_usage - MIN (last_los_memory_usage, los_memory_usage);
	return minor_collection_sections_alloced * major.section_size + los_alloced > minor_collection_allowance;
}
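/*
 * Worked example of the trigger above (hypothetical numbers, not from the
 * original code): with major.section_size == 128KB, 100 sections allocated
 * since the last major collection, 4MB of fresh LOS allocation and a
 * minor_collection_allowance of 16MB:
 *
 *   100 * 128KB + 4MB = 16.5MB > 16MB  =>  trigger a major collection
 */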
/*
 * Collect objects in the nursery. Returns whether to trigger a major
 * collection.
 */
static gboolean
collect_nursery (size_t requested_size)
{
	size_t max_garbage_amount;
	char *orig_nursery_next;
	TV_DECLARE (all_atv);
	TV_DECLARE (all_btv);
	TV_DECLARE (atv);
	TV_DECLARE (btv);

	current_collection_generation = GENERATION_NURSERY;

	binary_protocol_collection (GENERATION_NURSERY);
	check_scan_starts ();

	degraded_mode = 0;
	orig_nursery_next = nursery_next;
	nursery_next = MAX (nursery_next, nursery_last_pinned_end);
	/* FIXME: optimize later to use the higher address where an object can be present */
	nursery_next = MAX (nursery_next, nursery_real_end);

	DEBUG (1, fprintf (gc_debug_file, "Start nursery collection %d %p-%p, size: %d\n", num_minor_gcs, nursery_start, nursery_next, (int)(nursery_next - nursery_start)));
	max_garbage_amount = nursery_next - nursery_start;
	g_assert (nursery_section->size >= max_garbage_amount);

	/* world must be stopped already */
	TV_GETTIME (all_atv);
	atv = all_atv;

	/* Pinning depends on this */
	clear_nursery_fragments (orig_nursery_next);

	TV_GETTIME (btv);
	time_minor_pre_collection_fragment_clear += TV_ELAPSED_MS (atv, btv);

	if (xdomain_checks)
		check_for_xdomain_refs ();

	nursery_section->next_data = nursery_next;

	major.start_nursery_collection ();

	gray_object_queue_init (&gray_queue, mono_sgen_get_unmanaged_allocator ());

	num_minor_gcs++;
	mono_stats.minor_gc_count ++;

	global_remset_cache_clear ();

	/* pin from pinned handles */
	pin_from_roots (nursery_start, nursery_next);
	/* identify pinned objects */
	optimize_pin_queue (0);
	next_pin_slot = pin_objects_from_addresses (nursery_section, pin_queue, pin_queue + next_pin_slot, nursery_start, nursery_next, &gray_queue);
	nursery_section->pin_queue_start = pin_queue;
	nursery_section->pin_queue_num_entries = next_pin_slot;
	TV_GETTIME (atv);
	time_minor_pinning += TV_ELAPSED_MS (btv, atv);
	DEBUG (2, fprintf (gc_debug_file, "Finding pinned pointers: %d in %d usecs\n", next_pin_slot, TV_ELAPSED (btv, atv)));
	DEBUG (4, fprintf (gc_debug_file, "Start scan with %d pinned objects\n", next_pin_slot));

	if (consistency_check_at_minor_collection)
		check_consistency ();

	/*
	 * walk all the roots and copy the young objects to the old generation,
	 * starting from to_space
	 */

	scan_from_remsets (nursery_start, nursery_next, &gray_queue);
	/* we don't have complete write barrier yet, so we scan all the old generation sections */
	TV_GETTIME (btv);
	time_minor_scan_remsets += TV_ELAPSED_MS (atv, btv);
	DEBUG (2, fprintf (gc_debug_file, "Old generation scan: %d usecs\n", TV_ELAPSED (atv, btv)));

	drain_gray_stack (&gray_queue);

	TV_GETTIME (atv);
	time_minor_scan_pinned += TV_ELAPSED_MS (btv, atv);
	/* registered roots, this includes static fields */
	scan_from_registered_roots (major.copy_object, nursery_start, nursery_next, ROOT_TYPE_NORMAL, &gray_queue);
	scan_from_registered_roots (major.copy_object, nursery_start, nursery_next, ROOT_TYPE_WBARRIER, &gray_queue);
	TV_GETTIME (btv);
	time_minor_scan_registered_roots += TV_ELAPSED_MS (atv, btv);
	/* thread data */
	scan_thread_data (nursery_start, nursery_next, TRUE);
	TV_GETTIME (atv);
	time_minor_scan_thread_data += TV_ELAPSED_MS (btv, atv);
	btv = atv;

	finish_gray_stack (nursery_start, nursery_next, GENERATION_NURSERY, &gray_queue);
	TV_GETTIME (atv);
	time_minor_finish_gray_stack += TV_ELAPSED_MS (btv, atv);

	/* walk the pin_queue, build up the fragment list of free memory, unmark
	 * pinned objects as we go, memzero() the empty fragments so they are ready for the
	 * next allocations.
	 */
	build_nursery_fragments (pin_queue, next_pin_slot);
	TV_GETTIME (btv);
	time_minor_fragment_creation += TV_ELAPSED_MS (atv, btv);
	DEBUG (2, fprintf (gc_debug_file, "Fragment creation: %d usecs, %lu bytes available\n", TV_ELAPSED (atv, btv), (unsigned long)fragment_total));

	if (consistency_check_at_minor_collection)
		check_major_refs ();

	major.finish_nursery_collection ();

	TV_GETTIME (all_btv);
	mono_stats.minor_gc_time_usecs += TV_ELAPSED (all_atv, all_btv);

	if (heap_dump_file)
		dump_heap ("minor", num_minor_gcs - 1, NULL);

	/* prepare the pin queue for the next collection */
	last_num_pinned = next_pin_slot;
	next_pin_slot = 0;
	if (fin_ready_list || critical_fin_list) {
		DEBUG (4, fprintf (gc_debug_file, "Finalizer-thread wakeup: ready %d\n", num_ready_finalizers));
		mono_gc_finalize_notify ();
	}

	g_assert (gray_object_queue_is_empty (&gray_queue));

	check_scan_starts ();

	binary_protocol_flush_buffers ();

	current_collection_generation = -1;

	return need_major_collection ();
}
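/*
 * Summary of the phase order above (illustration only); each phase is timed
 * with the alternating atv/btv stamps:
 *
 *   clear fragments -> pin (roots + stacks) -> scan remsets -> scan
 *   registered roots -> scan thread data -> finish gray stack (ephemerons,
 *   finalizers, dislinks) -> rebuild nursery fragments
 */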
static void
major_do_collection (const char *reason)
{
	LOSObject *bigobj, *prevbo;
	TV_DECLARE (all_atv);
	TV_DECLARE (all_btv);
	TV_DECLARE (atv);
	TV_DECLARE (btv);
	/* FIXME: only use these values for the precise scan
	 * note that to_space pointers should be excluded anyway...
	 */
	char *heap_start = NULL;
	char *heap_end = (char*)-1;
	int old_num_major_sections = major.get_num_major_sections ();
	int num_major_sections, num_major_sections_saved, save_target, allowance_target;
	mword los_memory_saved, los_memory_alloced, old_los_memory_usage;

	/*
	 * A domain could have been freed, resulting in
	 * los_memory_usage being less than last_los_memory_usage.
	 */
	los_memory_alloced = los_memory_usage - MIN (last_los_memory_usage, los_memory_usage);
	old_los_memory_usage = los_memory_usage;

	//count_ref_nonref_objs ();
	//consistency_check ();

	binary_protocol_collection (GENERATION_OLD);
	check_scan_starts ();
	gray_object_queue_init (&gray_queue, mono_sgen_get_unmanaged_allocator ());
	gray_object_queue_init (&workers_distribute_gray_queue, mono_sgen_get_unmanaged_allocator ());

	degraded_mode = 0;
	DEBUG (1, fprintf (gc_debug_file, "Start major collection %d\n", num_major_gcs));
	num_major_gcs++;
	mono_stats.major_gc_count ++;

	/* world must be stopped already */
	TV_GETTIME (all_atv);
	atv = all_atv;

	/* Pinning depends on this */
	clear_nursery_fragments (nursery_next);
	TV_GETTIME (btv);
	time_major_pre_collection_fragment_clear += TV_ELAPSED_MS (atv, btv);

	if (xdomain_checks)
		check_for_xdomain_refs ();

	nursery_section->next_data = nursery_real_end;
	/* we should also coalesce scanning from sections close to each other
	 * and deal with pointers outside of the sections later.
	 */
	/* The remsets are not useful for a major collection */
	clear_remsets ();
	global_remset_cache_clear ();

	TV_GETTIME (atv);
	DEBUG (6, fprintf (gc_debug_file, "Collecting pinned addresses\n"));
	pin_from_roots ((void*)lowest_heap_address, (void*)highest_heap_address);
	optimize_pin_queue (0);

	/*
	 * pin_queue now contains all candidate pointers, sorted and
	 * uniqued. We must do two passes now to figure out which
	 * objects are pinned.
	 *
	 * The first is to find within the pin_queue the area for each
	 * section. This requires that the pin_queue be sorted. We
	 * also process the LOS objects and pinned chunks here.
	 *
	 * The second, destructive, pass is to reduce the section
	 * areas to pointers to the actually pinned objects.
	 */
	DEBUG (6, fprintf (gc_debug_file, "Pinning from sections\n"));
	/* first pass for the sections */
	mono_sgen_find_section_pin_queue_start_end (nursery_section);
	major.find_pin_queue_start_ends (&workers_distribute_gray_queue);
	/* identify possible pointers to the inside of large objects */
	DEBUG (6, fprintf (gc_debug_file, "Pinning from large objects\n"));
	for (bigobj = los_object_list; bigobj; bigobj = bigobj->next) {
		int dummy;
		if (mono_sgen_find_optimized_pin_queue_area (bigobj->data, (char*)bigobj->data + bigobj->size, &dummy)) {
			pin_object (bigobj->data);
			/* FIXME: only enqueue if object has references */
			GRAY_OBJECT_ENQUEUE (&workers_distribute_gray_queue, bigobj->data);
			if (heap_dump_file)
				mono_sgen_pin_stats_register_object ((char*) bigobj->data, safe_object_get_size ((MonoObject*) bigobj->data));
			DEBUG (6, fprintf (gc_debug_file, "Marked large object %p (%s) size: %lu from roots\n", bigobj->data, safe_name (bigobj->data), (unsigned long)bigobj->size));
		}
	}
	/* second pass for the sections */
	mono_sgen_pin_objects_in_section (nursery_section, &workers_distribute_gray_queue);
	major.pin_objects (&workers_distribute_gray_queue);

	TV_GETTIME (btv);
	time_major_pinning += TV_ELAPSED_MS (atv, btv);
	DEBUG (2, fprintf (gc_debug_file, "Finding pinned pointers: %d in %d usecs\n", next_pin_slot, TV_ELAPSED (atv, btv)));
	DEBUG (4, fprintf (gc_debug_file, "Start scan with %d pinned objects\n", next_pin_slot));

	major.init_to_space ();

#ifdef SGEN_PARALLEL_MARK
	workers_start_all_workers (1);
#endif

	TV_GETTIME (atv);
	time_major_scan_pinned += TV_ELAPSED_MS (btv, atv);

	/* registered roots, this includes static fields */
	scan_from_registered_roots (major.copy_or_mark_object, heap_start, heap_end, ROOT_TYPE_NORMAL, &workers_distribute_gray_queue);
	scan_from_registered_roots (major.copy_or_mark_object, heap_start, heap_end, ROOT_TYPE_WBARRIER, &workers_distribute_gray_queue);
	TV_GETTIME (btv);
	time_major_scan_registered_roots += TV_ELAPSED_MS (atv, btv);

	/* Threads */
	/* FIXME: This is the wrong place for this, because it does
	   pinning */
	scan_thread_data (heap_start, heap_end, TRUE);
	TV_GETTIME (atv);
	time_major_scan_thread_data += TV_ELAPSED_MS (btv, atv);

	TV_GETTIME (btv);
	time_major_scan_alloc_pinned += TV_ELAPSED_MS (atv, btv);

	/* scan the list of objects ready for finalization */
	scan_finalizer_entries (major.copy_or_mark_object, fin_ready_list, &workers_distribute_gray_queue);
	scan_finalizer_entries (major.copy_or_mark_object, critical_fin_list, &workers_distribute_gray_queue);
	TV_GETTIME (atv);
	time_major_scan_finalized += TV_ELAPSED_MS (btv, atv);
	DEBUG (2, fprintf (gc_debug_file, "Root scan: %d usecs\n", TV_ELAPSED (btv, atv)));

	TV_GETTIME (btv);
	time_major_scan_big_objects += TV_ELAPSED_MS (atv, btv);

	while (!gray_object_queue_is_empty (&workers_distribute_gray_queue))
		workers_distribute_gray_queue_sections ();
	workers_change_num_working (-1);
	/* ... */

#ifdef SGEN_PARALLEL_MARK
	g_assert (gray_object_queue_is_empty (&gray_queue));
#endif

	/* all the objects in the heap */
	finish_gray_stack (heap_start, heap_end, GENERATION_OLD, &gray_queue);
	TV_GETTIME (atv);
	time_major_finish_gray_stack += TV_ELAPSED_MS (btv, atv);

	/* sweep the big objects list */
	prevbo = NULL;
	for (bigobj = los_object_list; bigobj;) {
		if (object_is_pinned (bigobj->data)) {
			unpin_object (bigobj->data);
		} else {
			LOSObject *to_free;
			/* not referenced anywhere, so we can free it */
			if (prevbo)
				prevbo->next = bigobj->next;
			else
				los_object_list = bigobj->next;
			to_free = bigobj;
			bigobj = bigobj->next;
			free_large_object (to_free);
			continue;
		}
		prevbo = bigobj;
		bigobj = bigobj->next;
	}
	TV_GETTIME (btv);
	time_major_free_bigobjs += TV_ELAPSED_MS (atv, btv);

	los_sweep ();
	TV_GETTIME (atv);
	time_major_los_sweep += TV_ELAPSED_MS (btv, atv);

	major.sweep ();
	TV_GETTIME (btv);
	time_major_sweep += TV_ELAPSED_MS (atv, btv);

	/* walk the pin_queue, build up the fragment list of free memory, unmark
	 * pinned objects as we go, memzero() the empty fragments so they are ready for the
	 * next allocations.
	 */
	build_nursery_fragments (nursery_section->pin_queue_start, nursery_section->pin_queue_num_entries);

	TV_GETTIME (atv);
	time_major_fragment_creation += TV_ELAPSED_MS (btv, atv);

	TV_GETTIME (all_btv);
	mono_stats.major_gc_time_usecs += TV_ELAPSED (all_atv, all_btv);

	if (heap_dump_file)
		dump_heap ("major", num_major_gcs - 1, reason);

	/* prepare the pin queue for the next collection */
	next_pin_slot = 0;
	if (fin_ready_list || critical_fin_list) {
		DEBUG (4, fprintf (gc_debug_file, "Finalizer-thread wakeup: ready %d\n", num_ready_finalizers));
		mono_gc_finalize_notify ();
	}

	g_assert (gray_object_queue_is_empty (&gray_queue));

	num_major_sections = major.get_num_major_sections ();

	num_major_sections_saved = MAX (old_num_major_sections - num_major_sections, 0);
	los_memory_saved = MAX (old_los_memory_usage - los_memory_usage, 1);

	save_target = ((num_major_sections * major.section_size) + los_memory_saved) / 2;

	/*
	 * We aim to allow the allocation of as many sections as is
	 * necessary to reclaim save_target sections in the next
	 * collection. We assume the collection pattern won't change.
	 * In the last cycle, we had num_major_sections_saved for
	 * minor_collection_sections_alloced. Assuming things won't
	 * change, this must be the same ratio as save_target for
	 * allowance_target, i.e.
	 *
	 *    num_major_sections_saved            save_target
	 * --------------------------------- == ----------------
	 * minor_collection_sections_alloced    allowance_target
	 *
	 * hence:
	 */
	allowance_target = (mword)((double)save_target * (double)(minor_collection_sections_alloced * major.section_size + los_memory_alloced) / (double)(num_major_sections_saved * major.section_size + los_memory_saved));

	minor_collection_allowance = MAX (MIN (allowance_target, num_major_sections * major.section_size + los_memory_usage), MIN_MINOR_COLLECTION_ALLOWANCE);
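/*
 * Worked instance of the allowance formula above (hypothetical numbers,
 * not from the original code). Suppose the last cycle allocated 8MB of
 * major sections plus 2MB of LOS, this collection saved 4MB of sections
 * plus 2MB of LOS, and save_target is 5MB:
 *
 *   allowance_target = 5MB * (8MB + 2MB) / (4MB + 2MB) ~= 8.3MB
 *
 * so the next major collection is triggered after roughly 8.3MB of
 * promotion, clamped between MIN_MINOR_COLLECTION_ALLOWANCE and the
 * current heap size.
 */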
	minor_collection_sections_alloced = 0;
	last_los_memory_usage = los_memory_usage;

	major.finish_major_collection ();

	check_scan_starts ();

	binary_protocol_flush_buffers ();

	//consistency_check ();
}
static void
major_collection (const char *reason)
{
	if (g_getenv ("MONO_GC_NO_MAJOR")) {
		collect_nursery (0);
		return;
	}

	current_collection_generation = GENERATION_OLD;
	major_do_collection (reason);
	current_collection_generation = -1;
}
/*
 * When deciding if it's better to collect or to expand, keep track
 * of how much garbage was reclaimed with the last collection: if it's too
 * little, expand.
 * This is called when we could not allocate a small object.
 */
static void __attribute__((noinline))
minor_collect_or_expand_inner (size_t size)
{
	int do_minor_collection = 1;

	g_assert (nursery_section);
	if (do_minor_collection) {
		stop_world ();
		if (collect_nursery (size))
			major_collection ("minor overflow");
		DEBUG (2, fprintf (gc_debug_file, "Heap size: %lu, LOS size: %lu\n", (unsigned long)total_alloc, (unsigned long)los_memory_usage));
		restart_world ();
		/* this also sets the proper pointers for the next allocation */
		if (!search_fragment_for_size (size)) {
			int i;
			/* TypeBuilder and MonoMethod are killing mcs with fragmentation */
			DEBUG (1, fprintf (gc_debug_file, "nursery collection didn't find enough room for %zd alloc (%d pinned)\n", size, last_num_pinned));
			for (i = 0; i < last_num_pinned; ++i) {
				DEBUG (3, fprintf (gc_debug_file, "Bastard pinning obj %p (%s), size: %d\n", pin_queue [i], safe_name (pin_queue [i]), safe_object_get_size (pin_queue [i])));
			}
			degraded_mode = 1;
		}
	}
	//report_internal_mem_usage ();
}
/*
 * ######################################################################
 * ########  Memory allocation from the OS
 * ######################################################################
 * This section of code deals with getting memory from the OS and
 * allocating memory for GC-internal data structures.
 * Internal memory can be handled with a freelist for small objects.
 */

G_GNUC_UNUSED static void
report_internal_mem_usage (void)
{
	printf ("Internal memory usage:\n");
	mono_sgen_report_internal_mem_usage ();
	printf ("Pinned memory usage:\n");
	major.report_pinned_memory_usage ();
}
/*
 * Allocate a big chunk of memory from the OS (usually 64KB to several megabytes).
 * This must not require any lock.
 */
void*
mono_sgen_alloc_os_memory (size_t size, int activate)
{
	void *ptr;
	unsigned long prot_flags = activate? MONO_MMAP_READ|MONO_MMAP_WRITE: MONO_MMAP_NONE;

	prot_flags |= MONO_MMAP_PRIVATE | MONO_MMAP_ANON;
	size += pagesize - 1;
	size &= ~(pagesize - 1);
	ptr = mono_valloc (0, size, prot_flags);
	total_alloc += size;
	return ptr;
}

/*
 * Free the memory returned by mono_sgen_alloc_os_memory (), returning it to the OS.
 */
void
mono_sgen_free_os_memory (void *addr, size_t size)
{
	mono_vfree (addr, size);

	size += pagesize - 1;
	size &= ~(pagesize - 1);
	total_alloc -= size;
}
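/*
 * Worked example of the page rounding above (hypothetical request, assuming
 * a 4096-byte page): asking for 10000 bytes becomes
 *
 *   size = (10000 + 4095) & ~4095 = 12288   // three full pages
 *
 * so total_alloc tracks whole pages, matching what is actually mapped.
 */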
/*
 * ######################################################################
 * ########  Object allocation
 * ######################################################################
 * This section of code deals with allocating memory for objects.
 * There are several ways:
 * *) allocate large objects
 * *) allocate normal objects
 * *) fast lock-free allocation
 * *) allocation of pinned objects
 */

static void
setup_fragment (Fragment *frag, Fragment *prev, size_t size)
{
	/* remove from the list */
	if (prev)
		prev->next = frag->next;
	else
		nursery_fragments = frag->next;
	nursery_next = frag->fragment_start;
	nursery_frag_real_end = frag->fragment_end;

	DEBUG (4, fprintf (gc_debug_file, "Using nursery fragment %p-%p, size: %td (req: %zd)\n", nursery_next, nursery_frag_real_end, nursery_frag_real_end - nursery_next, size));
	frag->next = fragment_freelist;
	fragment_freelist = frag;
}
/*
 * Check if we have a suitable fragment in nursery_fragments to be able to allocate
 * an object of size @size.
 * Return FALSE if not found (which means we need a collection).
 */
static gboolean
search_fragment_for_size (size_t size)
{
	Fragment *frag, *prev;
	DEBUG (4, fprintf (gc_debug_file, "Searching nursery fragment %p, size: %zd\n", nursery_frag_real_end, size));

	if (nursery_frag_real_end > nursery_next && nursery_clear_policy == CLEAR_AT_TLAB_CREATION)
		/* Clear the remaining space, pinning depends on this */
		memset (nursery_next, 0, nursery_frag_real_end - nursery_next);

	prev = NULL;
	for (frag = nursery_fragments; frag; frag = frag->next) {
		if (size <= (frag->fragment_end - frag->fragment_start)) {
			setup_fragment (frag, prev, size);
			return TRUE;
		}
		prev = frag;
	}
	return FALSE;
}
/*
 * Same as search_fragment_for_size but if search for @desired_size fails, try to satisfy @minimum_size.
 * This improves nursery usage.
 */
static int
search_fragment_for_size_range (size_t desired_size, size_t minimum_size)
{
	Fragment *frag, *prev, *min_prev;
	DEBUG (4, fprintf (gc_debug_file, "Searching nursery fragment %p, desired size: %zd minimum size %zd\n", nursery_frag_real_end, desired_size, minimum_size));

	if (nursery_frag_real_end > nursery_next && nursery_clear_policy == CLEAR_AT_TLAB_CREATION)
		/* Clear the remaining space, pinning depends on this */
		memset (nursery_next, 0, nursery_frag_real_end - nursery_next);

	min_prev = GINT_TO_POINTER (-1);
	prev = NULL;

	for (frag = nursery_fragments; frag; frag = frag->next) {
		int frag_size = frag->fragment_end - frag->fragment_start;
		if (desired_size <= frag_size) {
			setup_fragment (frag, prev, desired_size);
			return desired_size;
		}
		if (minimum_size <= frag_size)
			min_prev = prev;

		prev = frag;
	}

	if (min_prev != GINT_TO_POINTER (-1)) {
		int frag_size;
		if (min_prev)
			frag = min_prev->next;
		else
			frag = nursery_fragments;

		frag_size = frag->fragment_end - frag->fragment_start;
		HEAVY_STAT (++stat_wasted_fragments_used);
		HEAVY_STAT (stat_wasted_fragments_bytes += frag_size);

		setup_fragment (frag, min_prev, minimum_size);
		return frag_size;
	}

	return 0;
}
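/*
 * Illustrative call of the fallback above (hypothetical sizes, not from the
 * original code): with nursery fragments of 12KB and 3KB and
 * tlab_size == 16KB,
 *
 *   search_fragment_for_size_range (16 * 1024, 256);
 *
 * finds no fragment of the desired 16KB, but the 256-byte minimum is
 * satisfiable, so the last fragment meeting it is used (and accounted as a
 * "wasted fragment" under HEAVY_STATISTICS) instead of forcing a nursery
 * collection.
 */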
static void*
alloc_degraded (MonoVTable *vtable, size_t size)
{
	if (need_major_collection ()) {
		stop_world ();
		major_collection ("degraded overflow");
		restart_world ();
	}

	degraded_mode += size;
	return major.alloc_degraded (vtable, size);
}

/*
 * Provide a variant that takes just the vtable for small fixed-size objects.
 * The aligned size is already computed and stored in vt->gc_descr.
 * Note: every SCAN_START_SIZE or so we are given the chance to do some special
 * processing. We can keep track of where objects start, for example,
 * so when we scan the thread stacks for pinned objects, we can start
 * a search for the pinned object in SCAN_START_SIZE chunks.
 */
3141 mono_gc_alloc_obj_nolock (MonoVTable
*vtable
, size_t size
)
3143 /* FIXME: handle OOM */
3148 HEAVY_STAT (++stat_objects_alloced
);
3149 if (size
<= MAX_SMALL_OBJ_SIZE
)
3150 HEAVY_STAT (stat_bytes_alloced
+= size
);
3152 HEAVY_STAT (stat_bytes_alloced_los
+= size
);
3154 size
= ALIGN_UP (size
);
3156 g_assert (vtable
->gc_descr
);
3158 if (G_UNLIKELY (collect_before_allocs
)) {
3159 if (nursery_section
) {
3161 collect_nursery (0);
3163 if (!degraded_mode
&& !search_fragment_for_size (size
)) {
3165 g_assert_not_reached ();
3171 * We must already have the lock here instead of after the
3172 * fast path because we might be interrupted in the fast path
3173 * (after confirming that new_next < TLAB_TEMP_END) by the GC,
3174 * and we'll end up allocating an object in a fragment which
3175 * no longer belongs to us.
3177 * The managed allocator does not do this, but it's treated
3178 * specially by the world-stopping code.
3181 if (size
> MAX_SMALL_OBJ_SIZE
) {
3182 p
= alloc_large_inner (vtable
, size
);
3184 /* tlab_next and tlab_temp_end are TLS vars so accessing them might be expensive */
3186 p
= (void**)TLAB_NEXT
;
3187 /* FIXME: handle overflow */
3188 new_next
= (char*)p
+ size
;
3189 TLAB_NEXT
= new_next
;
3191 if (G_LIKELY (new_next
< TLAB_TEMP_END
)) {
3195 * FIXME: We might need a memory barrier here so the change to tlab_next is
3196 * visible before the vtable store.
3199 DEBUG (6, fprintf (gc_debug_file
, "Allocated object %p, vtable: %p (%s), size: %zd\n", p
, vtable
, vtable
->klass
->name
, size
));
3200 binary_protocol_alloc (p
, vtable
, size
);
3201 g_assert (*p
== NULL
);
3204 g_assert (TLAB_NEXT
== new_next
);
3211 /* there are two cases: the object is too big or we run out of space in the TLAB */
3212 /* we also reach here when the thread does its first allocation after a minor
3213 * collection, since the tlab_ variables are initialized to NULL.
	/*
	 * There can be another case (from ORP), if we cooperate with the runtime a bit:
	 * objects that need finalizers can have the high bit set in their size
	 * so the above check fails and we can readily add the object to the queue.
	 * This avoids taking the GC lock again when registering, but this is moot when
	 * doing thread-local allocation, so it may not be a good idea.
	 */

	g_assert (TLAB_NEXT == new_next);

	if (TLAB_NEXT >= TLAB_REAL_END) {
		/*
		 * Run out of space in the TLAB. When this happens, some amount of space
		 * remains in the TLAB, but not enough to satisfy the current allocation
		 * request. Currently, we retire the TLAB in all cases, later we could
		 * keep it if the remaining space is above a threshold, and satisfy the
		 * allocation directly from the nursery.
		 */

		/* when running in degraded mode, we continue allocating that way
		 * for a while, to decrease the number of useless nursery collections.
		 */
		if (degraded_mode && degraded_mode < DEFAULT_NURSERY_SIZE) {
			p = alloc_degraded (vtable, size);
			binary_protocol_alloc_degraded (p, vtable, size);
			return p;
		}

		/* FIXME: this codepath is currently dead code, since tlab_size > MAX_SMALL_OBJ_SIZE */
		if (size > tlab_size) {
			/* Allocate directly from the nursery */
			if (nursery_next + size >= nursery_frag_real_end) {
				if (!search_fragment_for_size (size)) {
					minor_collect_or_expand_inner (size);
					if (degraded_mode) {
						p = alloc_degraded (vtable, size);
						binary_protocol_alloc_degraded (p, vtable, size);
						return p;
					}
				}
			}

			p = (void*)nursery_next;
			nursery_next += size;
			if (nursery_next > nursery_frag_real_end) {
				// no space left
				g_assert (0);
			}

			if (nursery_clear_policy == CLEAR_AT_TLAB_CREATION)
				memset (p, 0, size);
		} else {
			int alloc_size = tlab_size;
			int available_in_nursery = nursery_frag_real_end - nursery_next;

			DEBUG (3, fprintf (gc_debug_file, "Retire TLAB: %p-%p [%ld]\n", TLAB_START, TLAB_REAL_END, (long)(TLAB_REAL_END - TLAB_NEXT - size)));

			if (alloc_size >= available_in_nursery) {
				if (available_in_nursery > MAX_NURSERY_TLAB_WASTE && available_in_nursery > size) {
					alloc_size = available_in_nursery;
				} else {
					alloc_size = search_fragment_for_size_range (tlab_size, size);
					if (!alloc_size) {
						alloc_size = tlab_size;
						minor_collect_or_expand_inner (tlab_size);
						if (degraded_mode) {
							p = alloc_degraded (vtable, size);
							binary_protocol_alloc_degraded (p, vtable, size);
							return p;
						}
					}
				}
			}

			/* Allocate a new TLAB from the current nursery fragment */
			TLAB_START = nursery_next;
			nursery_next += alloc_size;
			TLAB_NEXT = TLAB_START;
			TLAB_REAL_END = TLAB_START + alloc_size;
			TLAB_TEMP_END = TLAB_START + MIN (SCAN_START_SIZE, alloc_size);

			if (nursery_clear_policy == CLEAR_AT_TLAB_CREATION)
				memset (TLAB_START, 0, alloc_size);

			/* Allocate from the TLAB */
			p = (void*)TLAB_NEXT;
			TLAB_NEXT += size;
			g_assert (TLAB_NEXT <= TLAB_REAL_END);

			nursery_section->scan_starts [((char*)p - (char*)nursery_section->data)/SCAN_START_SIZE] = (char*)p;
		}
	} else {
		/* Reached tlab_temp_end */

		/* record the scan start so we can find pinned objects more easily */
		nursery_section->scan_starts [((char*)p - (char*)nursery_section->data)/SCAN_START_SIZE] = (char*)p;
		/* we just bump tlab_temp_end as well */
		TLAB_TEMP_END = MIN (TLAB_REAL_END, TLAB_NEXT + SCAN_START_SIZE);
		DEBUG (5, fprintf (gc_debug_file, "Expanding local alloc: %p-%p\n", TLAB_NEXT, TLAB_TEMP_END));
	}

	DEBUG (6, fprintf (gc_debug_file, "Allocated object %p, vtable: %p (%s), size: %zd\n", p, vtable, vtable->klass->name, size));
	binary_protocol_alloc (p, vtable, size);
	*p = vtable;

	return p;
}
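/*
 * Illustrative sketch (not part of the build): the TLAB path above is plain
 * bump-pointer allocation.  The standalone toy below shows the same
 * arithmetic on a local buffer; the names (toy_tlab_*, TOY_TLAB_SIZE) are
 * invented for this example and do not exist elsewhere in this file.
 */
#if 0
#define TOY_TLAB_SIZE 4096
static char toy_tlab [TOY_TLAB_SIZE];
static char *toy_tlab_next = toy_tlab;
static char *toy_tlab_end = toy_tlab + TOY_TLAB_SIZE;

static void*
toy_tlab_alloc (size_t size)
{
	void *p;
	/* round up to the 8-byte allocation grain, like ALIGN_UP does */
	size = (size + 7) & ~(size_t)7;
	if (toy_tlab_next + size > toy_tlab_end)
		return NULL;	/* the real allocator would retire the TLAB here */
	p = toy_tlab_next;
	toy_tlab_next += size;	/* allocation is just a pointer bump */
	return p;
}
#endif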
static void*
mono_gc_try_alloc_obj_nolock (MonoVTable *vtable, size_t size)
{
	void **p;
	char *new_next;
	TLAB_ACCESS_INIT;

	size = ALIGN_UP (size);

	g_assert (vtable->gc_descr);
	if (size <= MAX_SMALL_OBJ_SIZE) {
		/* tlab_next and tlab_temp_end are TLS vars so accessing them might be expensive */

		p = (void**)TLAB_NEXT;
		/* FIXME: handle overflow */
		new_next = (char*)p + size;
		TLAB_NEXT = new_next;

		if (G_LIKELY (new_next < TLAB_TEMP_END)) {
			/* Fast path */

			/*
			 * FIXME: We might need a memory barrier here so the change to tlab_next is
			 * visible before the vtable store.
			 */

			HEAVY_STAT (++stat_objects_alloced);
			HEAVY_STAT (stat_bytes_alloced += size);

			DEBUG (6, fprintf (gc_debug_file, "Allocated object %p, vtable: %p (%s), size: %zd\n", p, vtable, vtable->klass->name, size));
			binary_protocol_alloc (p, vtable, size);
			g_assert (*p == NULL);
			*p = vtable;

			g_assert (TLAB_NEXT == new_next);

			return p;
		}
	}
	return NULL;
}
void*
mono_gc_alloc_obj (MonoVTable *vtable, size_t size)
{
	void *res;
#ifndef DISABLE_CRITICAL_REGION
	TLAB_ACCESS_INIT;
	ENTER_CRITICAL_REGION;
	res = mono_gc_try_alloc_obj_nolock (vtable, size);
	if (res) {
		EXIT_CRITICAL_REGION;
		return res;
	}
	EXIT_CRITICAL_REGION;
#endif
	LOCK_GC;
	res = mono_gc_alloc_obj_nolock (vtable, size);
	UNLOCK_GC;
	return res;
}
void*
mono_gc_alloc_vector (MonoVTable *vtable, size_t size, uintptr_t max_length)
{
	MonoArray *arr;
#ifndef DISABLE_CRITICAL_REGION
	TLAB_ACCESS_INIT;
	ENTER_CRITICAL_REGION;
	arr = mono_gc_try_alloc_obj_nolock (vtable, size);
	if (arr) {
		arr->max_length = max_length;
		EXIT_CRITICAL_REGION;
		return arr;
	}
	EXIT_CRITICAL_REGION;
#endif
	LOCK_GC;
	arr = mono_gc_alloc_obj_nolock (vtable, size);
	arr->max_length = max_length;
	UNLOCK_GC;
	return arr;
}
void*
mono_gc_alloc_array (MonoVTable *vtable, size_t size, uintptr_t max_length, uintptr_t bounds_size)
{
	MonoArray *arr;
	MonoArrayBounds *bounds;

	LOCK_GC;

	arr = mono_gc_alloc_obj_nolock (vtable, size);
	arr->max_length = max_length;

	bounds = (MonoArrayBounds*)((char*)arr + size - bounds_size);
	arr->bounds = bounds;

	UNLOCK_GC;

	return arr;
}
void*
mono_gc_alloc_string (MonoVTable *vtable, size_t size, gint32 len)
{
	MonoString *str;
#ifndef DISABLE_CRITICAL_REGION
	TLAB_ACCESS_INIT;
	ENTER_CRITICAL_REGION;
	str = mono_gc_try_alloc_obj_nolock (vtable, size);
	if (str) {
		str->length = len;
		EXIT_CRITICAL_REGION;
		return str;
	}
	EXIT_CRITICAL_REGION;
#endif
	LOCK_GC;
	str = mono_gc_alloc_obj_nolock (vtable, size);
	str->length = len;
	UNLOCK_GC;
	return str;
}
/*
 * To be used for interned strings and possibly MonoThread, reflection handles.
 * We may want to explicitly free these objects.
 */
void*
mono_gc_alloc_pinned_obj (MonoVTable *vtable, size_t size)
{
	/* FIXME: handle OOM */
	void **p;
	size = ALIGN_UP (size);
	LOCK_GC;
	if (size > MAX_SMALL_OBJ_SIZE) {
		/* large objects are always pinned anyway */
		p = alloc_large_inner (vtable, size);
	} else {
		DEBUG (9, g_assert (vtable->klass->inited));
		p = major.alloc_small_pinned_obj (size, vtable->klass->has_references);
	}
	DEBUG (6, fprintf (gc_debug_file, "Allocated pinned object %p, vtable: %p (%s), size: %zd\n", p, vtable, vtable->klass->name, size));
	binary_protocol_alloc_pinned (p, vtable, size);
	*p = vtable;
	UNLOCK_GC;
	return p;
}
/*
 * ######################################################################
 * ########  Finalization support
 * ######################################################################
 */

/*
 * this is valid for the nursery: if the object has been forwarded it means it's
 * still referenced from a root. If it is pinned it's still alive as well.
 * Return TRUE if @obj is ready to be finalized.
 */
#define object_is_fin_ready(obj) (!object_is_pinned (obj) && !object_is_forwarded (obj))
static gboolean
is_critical_finalizer (FinalizeEntry *entry)
{
	MonoObject *obj;
	MonoClass *class;

	if (!mono_defaults.critical_finalizer_object)
		return FALSE;

	obj = entry->object;
	class = ((MonoVTable*)LOAD_VTABLE (obj))->klass;

	return mono_class_has_parent (class, mono_defaults.critical_finalizer_object);
}
static void
queue_finalization_entry (FinalizeEntry *entry) {
	if (is_critical_finalizer (entry)) {
		entry->next = critical_fin_list;
		critical_fin_list = entry;
	} else {
		entry->next = fin_ready_list;
		fin_ready_list = entry;
	}
}
/* LOCKING: requires that the GC lock is held */
static void
rehash_fin_table (FinalizeEntryHashTable *hash_table)
{
	FinalizeEntry **finalizable_hash = hash_table->table;
	mword finalizable_hash_size = hash_table->size;
	int i;
	unsigned int hash;
	FinalizeEntry **new_hash;
	FinalizeEntry *entry, *next;
	int new_size = g_spaced_primes_closest (hash_table->num_registered);

	new_hash = mono_sgen_alloc_internal_dynamic (new_size * sizeof (FinalizeEntry*), INTERNAL_MEM_FIN_TABLE);
	for (i = 0; i < finalizable_hash_size; ++i) {
		for (entry = finalizable_hash [i]; entry; entry = next) {
			hash = mono_object_hash (entry->object) % new_size;
			next = entry->next;
			entry->next = new_hash [hash];
			new_hash [hash] = entry;
		}
	}
	mono_sgen_free_internal_dynamic (finalizable_hash, finalizable_hash_size * sizeof (FinalizeEntry*), INTERNAL_MEM_FIN_TABLE);
	hash_table->table = new_hash;
	hash_table->size = new_size;
}
/* LOCKING: requires that the GC lock is held */
static void
rehash_fin_table_if_necessary (FinalizeEntryHashTable *hash_table)
{
	if (hash_table->num_registered >= hash_table->size * 2)
		rehash_fin_table (hash_table);
}
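/*
 * Illustrative sketch (not part of the build): the table above grows once
 * the number of entries reaches twice the bucket count, keeping average
 * chain length bounded.  The toy below restates that trigger with a
 * made-up table type; toy_table_t and toy_rehash are hypothetical names.
 */
#if 0
typedef struct {
	int num_entries;
	int num_buckets;
} toy_table_t;

static void
toy_rehash_if_necessary (toy_table_t *table)
{
	/* same policy as rehash_fin_table_if_necessary: load factor >= 2 */
	if (table->num_entries >= table->num_buckets * 2) {
		/* g_spaced_primes_closest returns a prime near the requested
		   size, which spreads modulo-based bucket selection evenly */
		int new_buckets = g_spaced_primes_closest (table->num_entries);
		toy_rehash (table, new_buckets);	/* hypothetical helper */
	}
}
#endif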
/* LOCKING: requires that the GC lock is held */
static void
finalize_in_range (CopyOrMarkObjectFunc copy_func, char *start, char *end, int generation, GrayQueue *queue)
{
	FinalizeEntryHashTable *hash_table = get_finalize_entry_hash_table (generation);
	FinalizeEntry *entry, *prev;
	int i;
	FinalizeEntry **finalizable_hash = hash_table->table;
	mword finalizable_hash_size = hash_table->size;

	if (no_finalize)
		return;
	for (i = 0; i < finalizable_hash_size; ++i) {
		prev = NULL;
		for (entry = finalizable_hash [i]; entry;) {
			if ((char*)entry->object >= start && (char*)entry->object < end && !major.is_object_live (entry->object)) {
				gboolean is_fin_ready = object_is_fin_ready (entry->object);
				char *copy = entry->object;
				copy_func ((void**)&copy, queue);
				if (is_fin_ready) {
					char *from;
					FinalizeEntry *next;
					/* remove and put in fin_ready_list */
					if (prev)
						prev->next = entry->next;
					else
						finalizable_hash [i] = entry->next;
					next = entry->next;
					num_ready_finalizers++;
					hash_table->num_registered--;
					queue_finalization_entry (entry);
					/* Make it survive */
					from = entry->object;
					entry->object = copy;
					DEBUG (5, fprintf (gc_debug_file, "Queueing object for finalization: %p (%s) (was at %p) (%d/%d)\n", entry->object, safe_name (entry->object), from, num_ready_finalizers, hash_table->num_registered));
					entry = next;
					continue;
				} else {
					char *from = entry->object;
					if (hash_table == &minor_finalizable_hash && !ptr_in_nursery (copy)) {
						FinalizeEntry *next = entry->next;
						unsigned int major_hash;
						/* remove from the list */
						if (prev)
							prev->next = entry->next;
						else
							finalizable_hash [i] = entry->next;
						hash_table->num_registered--;

						entry->object = copy;

						/* insert it into the major hash */
						rehash_fin_table_if_necessary (&major_finalizable_hash);
						major_hash = mono_object_hash ((MonoObject*) copy) %
							major_finalizable_hash.size;
						entry->next = major_finalizable_hash.table [major_hash];
						major_finalizable_hash.table [major_hash] = entry;
						major_finalizable_hash.num_registered++;

						DEBUG (5, fprintf (gc_debug_file, "Promoting finalization of object %p (%s) (was at %p) to major table\n", copy, safe_name (copy), from));

						entry = next;
						continue;
					} else {
						/* update pointer */
						DEBUG (5, fprintf (gc_debug_file, "Updating object for finalization: %p (%s) (was at %p)\n", entry->object, safe_name (entry->object), from));
						entry->object = copy;
					}
				}
			}
			prev = entry;
			entry = entry->next;
		}
	}
}
static gboolean
object_is_reachable (char *object, char *start, char *end)
{
	/* This happens for non-nursery objects during minor collections. We just treat all objects as alive. */
	if (object < start || object >= end)
		return TRUE;
	return !object_is_fin_ready (object) || major.is_object_live (object);
}
/* LOCKING: requires that the GC lock is held */
static void
null_ephemerons_for_domain (MonoDomain *domain)
{
	EphemeronLinkNode *current = ephemeron_list, *prev = NULL;

	while (current) {
		MonoObject *object = (MonoObject*)current->array;

		if (object && !object->vtable) {
			EphemeronLinkNode *tmp = current;

			if (prev)
				prev->next = current->next;
			else
				ephemeron_list = current->next;

			current = current->next;
			mono_sgen_free_internal (tmp, INTERNAL_MEM_EPHEMERON_LINK);
		} else {
			prev = current;
			current = current->next;
		}
	}
}
/* LOCKING: requires that the GC lock is held */
static void
clear_unreachable_ephemerons (CopyOrMarkObjectFunc copy_func, char *start, char *end, GrayQueue *queue)
{
	int was_in_nursery, was_promoted;
	EphemeronLinkNode *current = ephemeron_list, *prev = NULL;
	MonoArray *array;
	Ephemeron *cur, *array_end;
	char *tombstone;

	while (current) {
		char *object = current->array;

		if (!object_is_reachable (object, start, end)) {
			EphemeronLinkNode *tmp = current;

			DEBUG (5, fprintf (gc_debug_file, "Dead Ephemeron array at %p\n", object));

			if (prev)
				prev->next = current->next;
			else
				ephemeron_list = current->next;

			current = current->next;
			mono_sgen_free_internal (tmp, INTERNAL_MEM_EPHEMERON_LINK);

			continue;
		}

		was_in_nursery = ptr_in_nursery (object);
		copy_func ((void**)&object, queue);
		current->array = object;

		/*The array was promoted, add global remsets for key/values left behind in nursery.*/
		was_promoted = was_in_nursery && !ptr_in_nursery (object);

		DEBUG (5, fprintf (gc_debug_file, "Clearing unreachable entries for ephemeron array at %p\n", object));

		array = (MonoArray*)object;
		cur = mono_array_addr (array, Ephemeron, 0);
		array_end = cur + mono_array_length_fast (array);
		tombstone = (char*)((MonoVTable*)LOAD_VTABLE (object))->domain->ephemeron_tombstone;

		for (; cur < array_end; ++cur) {
			char *key = (char*)cur->key;

			if (!key || key == tombstone)
				continue;

			DEBUG (5, fprintf (gc_debug_file, "[%td] key %p (%s) value %p (%s)\n", cur - mono_array_addr (array, Ephemeron, 0),
				key, object_is_reachable (key, start, end) ? "reachable" : "unreachable",
				cur->value, cur->value && object_is_reachable (cur->value, start, end) ? "reachable" : "unreachable"));

			if (!object_is_reachable (key, start, end)) {
				cur->key = tombstone;
				cur->value = NULL;
				continue;
			}

			if (was_promoted) {
				if (ptr_in_nursery (key)) {/*key was not promoted*/
					DEBUG (5, fprintf (gc_debug_file, "\tAdded remset to key %p\n", key));
					mono_sgen_add_to_global_remset (&cur->key);
				}
				if (ptr_in_nursery (cur->value)) {/*value was not promoted*/
					DEBUG (5, fprintf (gc_debug_file, "\tAdded remset to value %p\n", cur->value));
					mono_sgen_add_to_global_remset (&cur->value);
				}
			}
		}
		prev = current;
		current = current->next;
	}
}
/* LOCKING: requires that the GC lock is held */
static int
mark_ephemerons_in_range (CopyOrMarkObjectFunc copy_func, char *start, char *end, GrayQueue *queue)
{
	int nothing_marked = 1;
	EphemeronLinkNode *current = ephemeron_list;
	MonoArray *array;
	Ephemeron *cur, *array_end;
	char *tombstone;

	for (current = ephemeron_list; current; current = current->next) {
		char *object = current->array;
		DEBUG (5, fprintf (gc_debug_file, "Ephemeron array at %p\n", object));

		/*We ignore arrays in old gen during minor collections since all objects are promoted by the remset machinery.*/
		if (object < start || object >= end)
			continue;

		/*It has to be alive*/
		if (!object_is_reachable (object, start, end)) {
			DEBUG (5, fprintf (gc_debug_file, "\tnot reachable\n"));
			continue;
		}

		copy_func ((void**)&object, queue);

		array = (MonoArray*)object;
		cur = mono_array_addr (array, Ephemeron, 0);
		array_end = cur + mono_array_length_fast (array);
		tombstone = (char*)((MonoVTable*)LOAD_VTABLE (object))->domain->ephemeron_tombstone;

		for (; cur < array_end; ++cur) {
			char *key = cur->key;

			if (!key || key == tombstone)
				continue;

			DEBUG (5, fprintf (gc_debug_file, "[%td] key %p (%s) value %p (%s)\n", cur - mono_array_addr (array, Ephemeron, 0),
				key, object_is_reachable (key, start, end) ? "reachable" : "unreachable",
				cur->value, cur->value && object_is_reachable (cur->value, start, end) ? "reachable" : "unreachable"));

			if (object_is_reachable (key, start, end)) {
				char *value = cur->value;

				copy_func ((void**)&cur->key, queue);
				if (value) {
					if (!object_is_reachable (value, start, end))
						nothing_marked = 0;
					copy_func ((void**)&cur->value, queue);
				}
			}
		}
	}

	DEBUG (5, fprintf (gc_debug_file, "Ephemeron run finished. Is it done %d\n", nothing_marked));
	return nothing_marked;
}
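/*
 * Illustrative sketch (not part of the build): ephemeron marking is a
 * fixed-point iteration.  Marking a key can make other keys reachable, so
 * a caller has to alternate marking with emptying the gray queue until a
 * whole pass marks nothing new (return value 1).  The drain call below is
 * a hypothetical stand-in for whatever the collector uses to empty the
 * queue, and toy_process_ephemerons is an invented name.
 */
#if 0
static void
toy_process_ephemerons (CopyOrMarkObjectFunc copy_func, char *start, char *end, GrayQueue *queue)
{
	int done;
	do {
		done = mark_ephemerons_in_range (copy_func, start, end, queue);
		toy_drain_gray_queue (queue);	/* hypothetical drain helper */
	} while (!done);
	/* once no key can become reachable anymore, clear the dead entries */
	clear_unreachable_ephemerons (copy_func, start, end, queue);
}
#endif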
/* LOCKING: requires that the GC lock is held */
static void
null_link_in_range (CopyOrMarkObjectFunc copy_func, char *start, char *end, int generation, GrayQueue *queue)
{
	DisappearingLinkHashTable *hash = get_dislink_hash_table (generation);
	DisappearingLink **disappearing_link_hash = hash->table;
	int disappearing_link_hash_size = hash->size;
	DisappearingLink *entry, *prev;
	int i;
	if (!hash->num_links)
		return;
	for (i = 0; i < disappearing_link_hash_size; ++i) {
		prev = NULL;
		for (entry = disappearing_link_hash [i]; entry;) {
			char *object = DISLINK_OBJECT (entry);
			if (object >= start && object < end && !major.is_object_live (object)) {
				gboolean track = DISLINK_TRACK (entry);
				if (!track && object_is_fin_ready (object)) {
					void **p = entry->link;
					DisappearingLink *old;
					*p = NULL;
					/* remove from list */
					if (prev)
						prev->next = entry->next;
					else
						disappearing_link_hash [i] = entry->next;
					DEBUG (5, fprintf (gc_debug_file, "Dislink nullified at %p to GCed object %p\n", p, object));
					old = entry->next;
					mono_sgen_free_internal (entry, INTERNAL_MEM_DISLINK);
					entry = old;
					hash->num_links--;
					continue;
				} else {
					char *copy = object;
					copy_func ((void**)&copy, queue);

					/* Update pointer if it's moved.  If the object
					 * has been moved out of the nursery, we need to
					 * remove the link from the minor hash table to
					 * the major one.
					 *
					 * FIXME: what if an object is moved earlier?
					 */

					if (hash == &minor_disappearing_link_hash && !ptr_in_nursery (copy)) {
						void **link = entry->link;
						DisappearingLink *old;
						/* remove from list */
						if (prev)
							prev->next = entry->next;
						else
							disappearing_link_hash [i] = entry->next;
						old = entry->next;
						mono_sgen_free_internal (entry, INTERNAL_MEM_DISLINK);
						entry = old;
						hash->num_links--;

						add_or_remove_disappearing_link ((MonoObject*)copy, link,
							track, GENERATION_OLD);

						DEBUG (5, fprintf (gc_debug_file, "Upgraded dislink at %p to major because object %p moved to %p\n", link, object, copy));

						continue;
					} else {
						/* We set the track resurrection bit to
						 * FALSE if the object is to be finalized
						 * so that the object can be collected in
						 * the next cycle (i.e. after it was
						 * finalized).
						 */
						*entry->link = HIDE_POINTER (copy,
							object_is_fin_ready (object) ? FALSE : track);
						DEBUG (5, fprintf (gc_debug_file, "Updated dislink at %p to %p\n", entry->link, DISLINK_OBJECT (entry)));
					}
				}
			}
			prev = entry;
			entry = entry->next;
		}
	}
}
/* LOCKING: requires that the GC lock is held */
static void
null_links_for_domain (MonoDomain *domain, int generation)
{
	DisappearingLinkHashTable *hash = get_dislink_hash_table (generation);
	DisappearingLink **disappearing_link_hash = hash->table;
	int disappearing_link_hash_size = hash->size;
	DisappearingLink *entry, *prev;
	int i;
	for (i = 0; i < disappearing_link_hash_size; ++i) {
		prev = NULL;
		for (entry = disappearing_link_hash [i]; entry; ) {
			char *object = DISLINK_OBJECT (entry);
			if (object && !((MonoObject*)object)->vtable) {
				DisappearingLink *next = entry->next;

				if (prev)
					prev->next = next;
				else
					disappearing_link_hash [i] = next;

				if (*(entry->link)) {
					*(entry->link) = NULL;
					g_warning ("Disappearing link %p not freed", entry->link);
				} else {
					mono_sgen_free_internal (entry, INTERNAL_MEM_DISLINK);
				}

				entry = next;
				continue;
			}
			prev = entry;
			entry = entry->next;
		}
	}
}
/* LOCKING: requires that the GC lock is held */
static int
finalizers_for_domain (MonoDomain *domain, MonoObject **out_array, int out_size,
	FinalizeEntryHashTable *hash_table)
{
	FinalizeEntry **finalizable_hash = hash_table->table;
	mword finalizable_hash_size = hash_table->size;
	FinalizeEntry *entry, *prev;
	int i, count;

	if (no_finalize || !out_size || !out_array)
		return 0;
	count = 0;
	for (i = 0; i < finalizable_hash_size; ++i) {
		prev = NULL;
		for (entry = finalizable_hash [i]; entry;) {
			if (mono_object_domain (entry->object) == domain) {
				FinalizeEntry *next;
				/* remove and put in out_array */
				if (prev)
					prev->next = entry->next;
				else
					finalizable_hash [i] = entry->next;
				next = entry->next;
				hash_table->num_registered--;
				out_array [count ++] = entry->object;
				DEBUG (5, fprintf (gc_debug_file, "Collecting object for finalization: %p (%s) (%d/%d)\n", entry->object, safe_name (entry->object), num_ready_finalizers, hash_table->num_registered));
				entry = next;
				if (count == out_size)
					return count;
				else
					continue;
			}
			prev = entry;
			entry = entry->next;
		}
	}
	return count;
}
/**
 * mono_gc_finalizers_for_domain:
 * @domain: the unloading appdomain
 * @out_array: output array
 * @out_size: size of output array
 *
 * Store inside @out_array up to @out_size objects that belong to the unloading
 * appdomain @domain. Returns the number of stored items. Can be called repeatedly
 * until it returns 0.
 * The items are removed from the finalizer data structure, so the caller is supposed
 * to finalize them.
 * @out_array should be on the stack to allow the GC to know the objects are still alive.
 */
int
mono_gc_finalizers_for_domain (MonoDomain *domain, MonoObject **out_array, int out_size)
{
	int result;

	LOCK_GC;
	result = finalizers_for_domain (domain, out_array, out_size, &minor_finalizable_hash);
	if (result < out_size) {
		result += finalizers_for_domain (domain, out_array + result, out_size - result,
			&major_finalizable_hash);
	}
	UNLOCK_GC;

	return result;
}
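/*
 * Illustrative sketch (not part of the build): the doc comment above says
 * the function can be called repeatedly until it returns 0, so a caller
 * unloading a domain drains it in fixed-size, stack-allocated batches
 * roughly like this.  run_finalizer_on is a hypothetical stand-in for the
 * runtime code that actually invokes a finalizer.
 */
#if 0
static void
toy_drain_domain_finalizers (MonoDomain *domain)
{
	MonoObject *batch [64];	/* on the stack, so the GC keeps the objects alive */
	int n;
	while ((n = mono_gc_finalizers_for_domain (domain, batch, 64)) > 0) {
		int i;
		for (i = 0; i < n; ++i)
			run_finalizer_on (batch [i]);	/* hypothetical */
	}
}
#endif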
static void
register_for_finalization (MonoObject *obj, void *user_data, int generation)
{
	FinalizeEntryHashTable *hash_table = get_finalize_entry_hash_table (generation);
	FinalizeEntry **finalizable_hash;
	mword finalizable_hash_size;
	FinalizeEntry *entry, *prev;
	unsigned int hash;
	if (no_finalize)
		return;
	g_assert (user_data == NULL || user_data == mono_gc_run_finalize);
	hash = mono_object_hash (obj);
	LOCK_GC;
	rehash_fin_table_if_necessary (hash_table);
	finalizable_hash = hash_table->table;
	finalizable_hash_size = hash_table->size;
	hash %= finalizable_hash_size;
	prev = NULL;
	for (entry = finalizable_hash [hash]; entry; entry = entry->next) {
		if (entry->object == obj) {
			if (!user_data) {
				/* remove from the list */
				if (prev)
					prev->next = entry->next;
				else
					finalizable_hash [hash] = entry->next;
				hash_table->num_registered--;
				DEBUG (5, fprintf (gc_debug_file, "Removed finalizer %p for object: %p (%s) (%d)\n", entry, obj, obj->vtable->klass->name, hash_table->num_registered));
				mono_sgen_free_internal (entry, INTERNAL_MEM_FINALIZE_ENTRY);
			}
			UNLOCK_GC;
			return;
		}
		prev = entry;
	}
	if (!user_data) {
		/* request to deregister, but already out of the list */
		UNLOCK_GC;
		return;
	}
	entry = mono_sgen_alloc_internal (INTERNAL_MEM_FINALIZE_ENTRY);
	entry->object = obj;
	entry->next = finalizable_hash [hash];
	finalizable_hash [hash] = entry;
	hash_table->num_registered++;
	DEBUG (5, fprintf (gc_debug_file, "Added finalizer %p for object: %p (%s) (%d) to %s table\n", entry, obj, obj->vtable->klass->name, hash_table->num_registered, generation_name (generation)));
	UNLOCK_GC;
}
void
mono_gc_register_for_finalization (MonoObject *obj, void *user_data)
{
	if (ptr_in_nursery (obj))
		register_for_finalization (obj, user_data, GENERATION_NURSERY);
	else
		register_for_finalization (obj, user_data, GENERATION_OLD);
}
static void
rehash_dislink (DisappearingLinkHashTable *hash_table)
{
	DisappearingLink **disappearing_link_hash = hash_table->table;
	int disappearing_link_hash_size = hash_table->size;
	int i;
	unsigned int hash;
	DisappearingLink **new_hash;
	DisappearingLink *entry, *next;
	int new_size = g_spaced_primes_closest (hash_table->num_links);

	new_hash = mono_sgen_alloc_internal_dynamic (new_size * sizeof (DisappearingLink*), INTERNAL_MEM_DISLINK_TABLE);
	for (i = 0; i < disappearing_link_hash_size; ++i) {
		for (entry = disappearing_link_hash [i]; entry; entry = next) {
			hash = mono_aligned_addr_hash (entry->link) % new_size;
			next = entry->next;
			entry->next = new_hash [hash];
			new_hash [hash] = entry;
		}
	}
	mono_sgen_free_internal_dynamic (disappearing_link_hash,
		disappearing_link_hash_size * sizeof (DisappearingLink*), INTERNAL_MEM_DISLINK_TABLE);
	hash_table->table = new_hash;
	hash_table->size = new_size;
}
/* LOCKING: assumes the GC lock is held */
static void
add_or_remove_disappearing_link (MonoObject *obj, void **link, gboolean track, int generation)
{
	DisappearingLinkHashTable *hash_table = get_dislink_hash_table (generation);
	DisappearingLink *entry, *prev;
	unsigned int hash;
	DisappearingLink **disappearing_link_hash = hash_table->table;
	int disappearing_link_hash_size = hash_table->size;

	if (hash_table->num_links >= disappearing_link_hash_size * 2) {
		rehash_dislink (hash_table);
		disappearing_link_hash = hash_table->table;
		disappearing_link_hash_size = hash_table->size;
	}
	/* FIXME: add check that link is not in the heap */
	hash = mono_aligned_addr_hash (link) % disappearing_link_hash_size;
	entry = disappearing_link_hash [hash];
	prev = NULL;
	for (; entry; entry = entry->next) {
		/* link already added */
		if (link == entry->link) {
			/* NULL obj means remove */
			if (obj == NULL) {
				if (prev)
					prev->next = entry->next;
				else
					disappearing_link_hash [hash] = entry->next;
				hash_table->num_links--;
				DEBUG (5, fprintf (gc_debug_file, "Removed dislink %p (%d) from %s table\n", entry, hash_table->num_links, generation_name (generation)));
				mono_sgen_free_internal (entry, INTERNAL_MEM_DISLINK);
				*link = NULL;
			} else {
				*link = HIDE_POINTER (obj, track); /* we allow the change of object */
			}
			return;
		}
		prev = entry;
	}
	if (obj == NULL)
		return;
	entry = mono_sgen_alloc_internal (INTERNAL_MEM_DISLINK);
	*link = HIDE_POINTER (obj, track);
	entry->link = link;
	entry->next = disappearing_link_hash [hash];
	disappearing_link_hash [hash] = entry;
	hash_table->num_links++;
	DEBUG (5, fprintf (gc_debug_file, "Added dislink %p for object: %p (%s) at %p to %s table\n", entry, obj, obj->vtable->klass->name, link, generation_name (generation)));
}
/* LOCKING: assumes the GC lock is held */
static void
mono_gc_register_disappearing_link (MonoObject *obj, void **link, gboolean track)
{
	add_or_remove_disappearing_link (NULL, link, FALSE, GENERATION_NURSERY);
	add_or_remove_disappearing_link (NULL, link, FALSE, GENERATION_OLD);
	if (obj) {
		if (ptr_in_nursery (obj))
			add_or_remove_disappearing_link (obj, link, track, GENERATION_NURSERY);
		else
			add_or_remove_disappearing_link (obj, link, track, GENERATION_OLD);
	}
}
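/*
 * Illustrative sketch (not part of the build): a disappearing link stores
 * the object pointer "hidden" (HIDE_POINTER above) so a conservative stack
 * or heap scan never mistakes the slot for a real reference, with the
 * track-resurrection flag riding along in a spare bit.  The toy
 * encode/decode below shows one common bit-stealing scheme; it is an
 * illustration, not the actual HIDE_POINTER/DISLINK_OBJECT definitions
 * used by this file.
 */
#if 0
static void*
toy_hide (void *obj, gboolean track)
{
	/* objects are at least 8-byte aligned, so the low bit is free for the
	   flag; complementing the whole word hides the address from scanning */
	return (void*)~((gulong)obj | (track ? 1 : 0));
}

static void*
toy_revealed_object (void *hidden)
{
	return (void*)(~(gulong)hidden & ~(gulong)1);
}

static gboolean
toy_track_bit (void *hidden)
{
	return (~(gulong)hidden) & 1;
}
#endif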
int
mono_gc_invoke_finalizers (void)
{
	FinalizeEntry *entry = NULL;
	gboolean entry_is_critical = FALSE;
	int count = 0;
	void *obj;
	/* FIXME: batch to reduce lock contention */
	while (fin_ready_list || critical_fin_list) {
		LOCK_GC;

		if (entry) {
			FinalizeEntry **list = entry_is_critical ? &critical_fin_list : &fin_ready_list;

			/* We have finalized entry in the last
			   iteration, now we need to remove it from
			   the list. */
			if (*list == entry)
				*list = entry->next;
			else {
				FinalizeEntry *e = *list;
				while (e->next != entry)
					e = e->next;
				e->next = entry->next;
			}
			mono_sgen_free_internal (entry, INTERNAL_MEM_FINALIZE_ENTRY);
			entry = NULL;
		}

		/* Now look for the first non-null entry. */
		for (entry = fin_ready_list; entry && !entry->object; entry = entry->next)
			;
		if (entry) {
			entry_is_critical = FALSE;
		} else {
			entry_is_critical = TRUE;
			for (entry = critical_fin_list; entry && !entry->object; entry = entry->next)
				;
		}

		if (entry) {
			g_assert (entry->object);
			num_ready_finalizers--;
			obj = entry->object;
			entry->object = NULL;
			DEBUG (7, fprintf (gc_debug_file, "Finalizing object %p (%s)\n", obj, safe_name (obj)));
		}

		UNLOCK_GC;

		if (!entry)
			break;

		g_assert (entry->object == NULL);
		count++;
		/* the object is on the stack so it is pinned */
		/*g_print ("Calling finalizer for object: %p (%s)\n", entry->object, safe_name (entry->object));*/
		mono_gc_run_finalize (obj, NULL);
	}
	return count;
}
gboolean
mono_gc_pending_finalizers (void)
{
	return fin_ready_list || critical_fin_list;
}
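/*
 * Illustrative sketch (not part of the build): a finalizer thread drains
 * the ready lists with the two public entry points above; the invoke call
 * handles removal and bookkeeping internally, so a pass is just:
 */
#if 0
static void
toy_finalizer_thread_pass (void)
{
	while (mono_gc_pending_finalizers ())
		mono_gc_invoke_finalizers ();
}
#endif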
/* Negative value to remove */
void
mono_gc_add_memory_pressure (gint64 value)
{
	/* FIXME: Use interlocked functions */
	LOCK_GC;
	memory_pressure += value;
	UNLOCK_GC;
}

void
mono_sgen_register_major_sections_alloced (int num_sections)
{
	minor_collection_sections_alloced += num_sections;
}

mword
mono_sgen_get_minor_collection_allowance (void)
{
	return minor_collection_allowance;
}
/*
 * ######################################################################
 * ########  registered roots support
 * ######################################################################
 */
static void
rehash_roots (gboolean pinned)
{
	int i;
	unsigned int hash;
	RootRecord **new_hash;
	RootRecord *entry, *next;
	int new_size;

	new_size = g_spaced_primes_closest (num_roots_entries [pinned]);
	new_hash = mono_sgen_alloc_internal_dynamic (new_size * sizeof (RootRecord*), INTERNAL_MEM_ROOTS_TABLE);
	for (i = 0; i < roots_hash_size [pinned]; ++i) {
		for (entry = roots_hash [pinned][i]; entry; entry = next) {
			hash = mono_aligned_addr_hash (entry->start_root) % new_size;
			next = entry->next;
			entry->next = new_hash [hash];
			new_hash [hash] = entry;
		}
	}
	mono_sgen_free_internal_dynamic (roots_hash [pinned], roots_hash_size [pinned] * sizeof (RootRecord*), INTERNAL_MEM_ROOTS_TABLE);
	roots_hash [pinned] = new_hash;
	roots_hash_size [pinned] = new_size;
}
static RootRecord*
find_root (int root_type, char *start, guint32 addr_hash)
{
	RootRecord *new_root;

	guint32 hash = addr_hash % roots_hash_size [root_type];
	for (new_root = roots_hash [root_type][hash]; new_root; new_root = new_root->next) {
		/* we allow changing the size and the descriptor (for thread statics etc) */
		if (new_root->start_root == start) {
			return new_root;
		}
	}
	return NULL;
}
/*
 * We do not coalesce roots.
 */
static int
mono_gc_register_root_inner (char *start, size_t size, void *descr, int root_type)
{
	RootRecord *new_root;
	unsigned int hash, addr_hash = mono_aligned_addr_hash (start);
	int i;
	LOCK_GC;
	for (i = 0; i < ROOT_TYPE_NUM; ++i) {
		if (num_roots_entries [i] >= roots_hash_size [i] * 2)
			rehash_roots (i);
	}
	for (i = 0; i < ROOT_TYPE_NUM; ++i) {
		new_root = find_root (i, start, addr_hash);
		/* we allow changing the size and the descriptor (for thread statics etc) */
		if (new_root) {
			size_t old_size = new_root->end_root - new_root->start_root;
			new_root->end_root = new_root->start_root + size;
			g_assert (((new_root->root_desc != 0) && (descr != NULL)) ||
					  ((new_root->root_desc == 0) && (descr == NULL)));
			new_root->root_desc = (mword)descr;
			roots_size += size;
			roots_size -= old_size;
			UNLOCK_GC;
			return TRUE;
		}
	}
	new_root = mono_sgen_alloc_internal (INTERNAL_MEM_ROOT_RECORD);
	if (new_root) {
		new_root->start_root = start;
		new_root->end_root = new_root->start_root + size;
		new_root->root_desc = (mword)descr;
		roots_size += size;
		hash = addr_hash % roots_hash_size [root_type];
		num_roots_entries [root_type]++;
		new_root->next = roots_hash [root_type] [hash];
		roots_hash [root_type][hash] = new_root;
		DEBUG (3, fprintf (gc_debug_file, "Added root %p for range: %p-%p, descr: %p  (%d/%d bytes)\n", new_root, new_root->start_root, new_root->end_root, descr, (int)size, (int)roots_size));
	} else {
		UNLOCK_GC;
		return FALSE;
	}
	UNLOCK_GC;
	return TRUE;
}
int
mono_gc_register_root (char *start, size_t size, void *descr)
{
	return mono_gc_register_root_inner (start, size, descr, descr ? ROOT_TYPE_NORMAL : ROOT_TYPE_PINNED);
}

int
mono_gc_register_root_wbarrier (char *start, size_t size, void *descr)
{
	return mono_gc_register_root_inner (start, size, descr, ROOT_TYPE_WBARRIER);
}
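/*
 * Illustrative sketch (not part of the build): how runtime code would
 * register a global location holding a GC reference.  With a NULL
 * descriptor the range is scanned conservatively and pinned
 * (mono_gc_register_root picks ROOT_TYPE_PINNED for descr == NULL);
 * precise roots pass a descriptor instead.  toy_global_ref and the helper
 * names are invented for the example.
 */
#if 0
static MonoObject *toy_global_ref;	/* hypothetical global holding a GC ref */

static void
toy_register_roots (void)
{
	mono_gc_register_root ((char*)&toy_global_ref, sizeof (toy_global_ref), NULL);
}

static void
toy_unregister_roots (void)
{
	mono_gc_deregister_root ((char*)&toy_global_ref);
}
#endif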
void
mono_gc_deregister_root (char* addr)
{
	RootRecord *tmp, *prev;
	unsigned int hash, addr_hash = mono_aligned_addr_hash (addr);
	int root_type;

	LOCK_GC;
	for (root_type = 0; root_type < ROOT_TYPE_NUM; ++root_type) {
		hash = addr_hash % roots_hash_size [root_type];
		tmp = roots_hash [root_type][hash];
		prev = NULL;
		while (tmp) {
			if (tmp->start_root == (char*)addr) {
				if (prev)
					prev->next = tmp->next;
				else
					roots_hash [root_type][hash] = tmp->next;
				roots_size -= (tmp->end_root - tmp->start_root);
				num_roots_entries [root_type]--;
				DEBUG (3, fprintf (gc_debug_file, "Removed root %p for range: %p-%p\n", tmp, tmp->start_root, tmp->end_root));
				mono_sgen_free_internal (tmp, INTERNAL_MEM_ROOT_RECORD);
				break;
			}
			prev = tmp;
			tmp = tmp->next;
		}
	}
	UNLOCK_GC;
}
/*
 * ######################################################################
 * ########  Thread handling (stop/start code)
 * ######################################################################
 */

/* FIXME: handle large/small config */
#define HASH_PTHREAD_T(id) (((unsigned int)(id) >> 4) * 2654435761u)

static SgenThreadInfo* thread_table [THREAD_HASH_SIZE];

#if USE_SIGNAL_BASED_START_STOP_WORLD

static MonoSemType suspend_ack_semaphore;
static MonoSemType *suspend_ack_semaphore_ptr;
static unsigned int global_stop_count = 0;

static sigset_t suspend_signal_mask;
static mword cur_thread_regs [ARCH_NUM_REGS] = {0};
/* LOCKING: assumes the GC lock is held */
SgenThreadInfo**
mono_sgen_get_thread_table (void)
{
	return thread_table;
}

SgenThreadInfo*
mono_sgen_thread_info_lookup (ARCH_THREAD_TYPE id)
{
	unsigned int hash = HASH_PTHREAD_T (id) % THREAD_HASH_SIZE;
	SgenThreadInfo *info;

	info = thread_table [hash];
	while (info && !ARCH_THREAD_EQUALS (info->id, id)) {
		info = info->next;
	}
	return info;
}
static void
update_current_thread_stack (void *start)
{
	void *ptr = cur_thread_regs;
	SgenThreadInfo *info = mono_sgen_thread_info_lookup (ARCH_GET_THREAD ());

	info->stack_start = align_pointer (&ptr);
	g_assert (info->stack_start >= info->stack_start_limit && info->stack_start < info->stack_end);
	ARCH_STORE_REGS (ptr);
	info->stopped_regs = ptr;
	if (gc_callbacks.thread_suspend_func)
		gc_callbacks.thread_suspend_func (info->runtime_data, NULL);
}
/*
 * Define this and use the "xdomain-checks" MONO_GC_DEBUG option to
 * have cross-domain checks in the write barrier.
 */
//#define XDOMAIN_CHECKS_IN_WBARRIER

#ifndef SGEN_BINARY_PROTOCOL
#ifndef HEAVY_STATISTICS
#define MANAGED_ALLOCATION
#ifndef XDOMAIN_CHECKS_IN_WBARRIER
#define MANAGED_WBARRIER
#endif
#endif
#endif

static gboolean
is_ip_in_managed_allocator (MonoDomain *domain, gpointer ip);
void
mono_sgen_wait_for_suspend_ack (int count)
{
	int i, result;

	for (i = 0; i < count; ++i) {
		while ((result = MONO_SEM_WAIT (suspend_ack_semaphore_ptr)) != 0) {
			if (errno != EINTR) {
				g_error ("sem_wait ()");
			}
		}
	}
}
static int
restart_threads_until_none_in_managed_allocator (void)
{
	SgenThreadInfo *info;
	int i, result, num_threads_died = 0;
	int sleep_duration = -1;

	for (;;) {
		int restart_count = 0, restarted_count = 0;
		/* restart all threads that stopped in the
		   allocator */
		for (i = 0; i < THREAD_HASH_SIZE; ++i) {
			for (info = thread_table [i]; info; info = info->next) {
				if (info->skip)
					continue;
				if (!info->stack_start || info->in_critical_region ||
					is_ip_in_managed_allocator (info->stopped_domain, info->stopped_ip)) {
					binary_protocol_thread_restart ((gpointer)info->id);
#if defined(__MACH__) && MONO_MACH_ARCH_SUPPORTED
					result = thread_resume (pthread_mach_thread_np (info->id));
#else
					result = pthread_kill (info->id, restart_signal_num);
#endif
					if (result == 0) {
						++restart_count;
					} else {
						info->skip = 1;
					}
				} else {
					/* we set the stopped_ip to
					   NULL for threads which
					   we're not restarting so
					   that we can easily identify
					   the others */
					info->stopped_ip = NULL;
					info->stopped_domain = NULL;
				}
			}
		}
		/* if no threads were restarted, we're done */
		if (restart_count == 0)
			break;

#if defined(__MACH__) && MONO_MACH_ARCH_SUPPORTED
		/* mach thread_resume is synchronous so we don't need to wait for them */
#else
		/* wait for the threads to signal their restart */
		mono_sgen_wait_for_suspend_ack (restart_count);
#endif

		if (sleep_duration < 0) {
			sched_yield ();
			sleep_duration = 0;
		} else {
			g_usleep (sleep_duration);
			sleep_duration += 10;
		}

		/* stop them again */
		for (i = 0; i < THREAD_HASH_SIZE; ++i) {
			for (info = thread_table [i]; info; info = info->next) {
				if (info->skip || info->stopped_ip == NULL)
					continue;
#if defined(__MACH__) && MONO_MACH_ARCH_SUPPORTED
				result = thread_suspend (pthread_mach_thread_np (info->id));
#else
				result = pthread_kill (info->id, suspend_signal_num);
#endif
				if (result == 0) {
					++restarted_count;
				} else {
					info->skip = 1;
				}
			}
		}
		/* some threads might have died */
		num_threads_died += restart_count - restarted_count;
#if defined(__MACH__) && MONO_MACH_ARCH_SUPPORTED
		/* mach thread_resume is synchronous so we don't need to wait for them */
#else
		/* wait for the threads to signal their suspension
		   again */
		mono_sgen_wait_for_suspend_ack (restart_count);
#endif
	}

	return num_threads_died;
}
/* LOCKING: assumes the GC lock is held (by the stopping thread) */
static void
suspend_handler (int sig, siginfo_t *siginfo, void *context)
{
	SgenThreadInfo *info;
	pthread_t id;
	int stop_count;
	int old_errno = errno;
	gpointer regs [ARCH_NUM_REGS];
	gpointer stack_start;

	id = pthread_self ();
	info = mono_sgen_thread_info_lookup (id);
	info->stopped_domain = mono_domain_get ();
	info->stopped_ip = (gpointer) ARCH_SIGCTX_IP (context);
	stop_count = global_stop_count;
	/* duplicate signal */
	if (0 && info->stop_count == stop_count) {
		errno = old_errno;
		return;
	}
#ifdef HAVE_KW_THREAD
	/* update the remset info in the thread data structure */
	info->remset = remembered_set;
#endif
	stack_start = (char*) ARCH_SIGCTX_SP (context) - REDZONE_SIZE;
	/* If stack_start is not within the limits, then don't set it
	   in info and we will be restarted. */
	if (stack_start >= info->stack_start_limit && info->stack_start <= info->stack_end) {
		info->stack_start = stack_start;

		ARCH_COPY_SIGCTX_REGS (regs, context);
		info->stopped_regs = regs;
	} else {
		g_assert (!info->stack_start);
	}

	/* Notify the JIT */
	if (gc_callbacks.thread_suspend_func)
		gc_callbacks.thread_suspend_func (info->runtime_data, context);

	DEBUG (4, fprintf (gc_debug_file, "Posting suspend_ack_semaphore for suspend from %p %p\n", info, (gpointer)ARCH_GET_THREAD ()));
	/* notify the waiting thread */
	MONO_SEM_POST (suspend_ack_semaphore_ptr);
	info->stop_count = stop_count;

	/* wait until we receive the restart signal */
	do {
		info->signal = 0;
		sigsuspend (&suspend_signal_mask);
	} while (info->signal != restart_signal_num);

	DEBUG (4, fprintf (gc_debug_file, "Posting suspend_ack_semaphore for resume from %p %p\n", info, (gpointer)ARCH_GET_THREAD ()));
	/* notify the waiting thread */
	MONO_SEM_POST (suspend_ack_semaphore_ptr);

	errno = old_errno;
}
static void
restart_handler (int sig)
{
	SgenThreadInfo *info;
	int old_errno = errno;

	info = mono_sgen_thread_info_lookup (pthread_self ());
	info->signal = restart_signal_num;
	DEBUG (4, fprintf (gc_debug_file, "Restart handler in %p %p\n", info, (gpointer)ARCH_GET_THREAD ()));

	errno = old_errno;
}
static void
acquire_gc_locks (void)
{
	LOCK_INTERRUPTION;
}

static void
release_gc_locks (void)
{
	UNLOCK_INTERRUPTION;
}

static TV_DECLARE (stop_world_time);
static unsigned long max_pause_usec = 0;
/* LOCKING: assumes the GC lock is held */
static int
stop_world (void)
{
	int count;

	acquire_gc_locks ();

	update_current_thread_stack (&count);

	global_stop_count++;
	DEBUG (3, fprintf (gc_debug_file, "stopping world n %d from %p %p\n", global_stop_count, mono_sgen_thread_info_lookup (ARCH_GET_THREAD ()), (gpointer)ARCH_GET_THREAD ()));
	TV_GETTIME (stop_world_time);
	count = mono_sgen_thread_handshake (suspend_signal_num);
	count -= restart_threads_until_none_in_managed_allocator ();
	g_assert (count >= 0);
	DEBUG (3, fprintf (gc_debug_file, "world stopped %d thread(s)\n", count));
	return count;
}
/* LOCKING: assumes the GC lock is held */
static int
restart_world (void)
{
	int count, i;
	SgenThreadInfo *info;
	TV_DECLARE (end_sw);
	unsigned long usec;

	/* notify the profiler of the leftovers */
	if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_GC_MOVES)) {
		if (moved_objects_idx) {
			mono_profiler_gc_moves (moved_objects, moved_objects_idx);
			moved_objects_idx = 0;
		}
	}
	for (i = 0; i < THREAD_HASH_SIZE; ++i) {
		for (info = thread_table [i]; info; info = info->next) {
			info->stack_start = NULL;
			info->stopped_regs = NULL;
		}
	}

	release_gc_locks ();

	count = mono_sgen_thread_handshake (restart_signal_num);
	TV_GETTIME (end_sw);
	usec = TV_ELAPSED (stop_world_time, end_sw);
	max_pause_usec = MAX (usec, max_pause_usec);
	DEBUG (2, fprintf (gc_debug_file, "restarted %d thread(s) (pause time: %d usec, max: %d)\n", count, (int)usec, (int)max_pause_usec));
	return count;
}
4676 mono_gc_set_gc_callbacks (MonoGCCallbacks
*callbacks
)
4678 gc_callbacks
= *callbacks
;
4682 mono_gc_get_gc_callbacks ()
4684 return &gc_callbacks
;
4687 /* Variables holding start/end nursery so it won't have to be passed at every call */
4688 static void *scan_area_arg_start
, *scan_area_arg_end
;
void
mono_gc_conservatively_scan_area (void *start, void *end)
{
	conservatively_pin_objects_from (start, end, scan_area_arg_start, scan_area_arg_end, PIN_TYPE_STACK);
}

void*
mono_gc_scan_object (void *obj)
{
	g_assert_not_reached ();
	if (current_collection_generation == GENERATION_NURSERY)
		major.copy_object (&obj, &gray_queue);
	else
		major.copy_or_mark_object (&obj, &gray_queue);
	return obj;
}
/*
 * Mark from thread stacks and registers.
 */
static void
scan_thread_data (void *start_nursery, void *end_nursery, gboolean precise)
{
	int i;
	SgenThreadInfo *info;

	scan_area_arg_start = start_nursery;
	scan_area_arg_end = end_nursery;

	for (i = 0; i < THREAD_HASH_SIZE; ++i) {
		for (info = thread_table [i]; info; info = info->next) {
			if (info->skip) {
				DEBUG (3, fprintf (gc_debug_file, "Skipping dead thread %p, range: %p-%p, size: %td\n", info, info->stack_start, info->stack_end, (char*)info->stack_end - (char*)info->stack_start));
				continue;
			}
			DEBUG (3, fprintf (gc_debug_file, "Scanning thread %p, range: %p-%p, size: %td, pinned=%d\n", info, info->stack_start, info->stack_end, (char*)info->stack_end - (char*)info->stack_start, next_pin_slot));
			if (gc_callbacks.thread_mark_func && !conservative_stack_mark)
				gc_callbacks.thread_mark_func (info->runtime_data, info->stack_start, info->stack_end, precise);
			else if (!precise)
				conservatively_pin_objects_from (info->stack_start, info->stack_end, start_nursery, end_nursery, PIN_TYPE_STACK);

			if (!precise)
				conservatively_pin_objects_from (info->stopped_regs, info->stopped_regs + ARCH_NUM_REGS,
						start_nursery, end_nursery, PIN_TYPE_STACK);
		}
	}
}
static void
find_pinning_ref_from_thread (char *obj, size_t size)
{
	int i;
	SgenThreadInfo *info;
	char *endobj = obj + size;

	for (i = 0; i < THREAD_HASH_SIZE; ++i) {
		for (info = thread_table [i]; info; info = info->next) {
			char **start = (char**)info->stack_start;
			if (info->skip)
				continue;
			while (start < (char**)info->stack_end) {
				if (*start >= obj && *start < endobj) {
					DEBUG (0, fprintf (gc_debug_file, "Object %p referenced in thread %p (id %p) at %p, stack: %p-%p\n", obj, info, (gpointer)info->id, start, info->stack_start, info->stack_end));
				}
				start++;
			}

			/* FIXME: check info->stopped_regs */
		}
	}
}
static gboolean
ptr_on_stack (void *ptr)
{
	gpointer stack_start = &stack_start;
	SgenThreadInfo *info = mono_sgen_thread_info_lookup (ARCH_GET_THREAD ());

	if (ptr >= stack_start && ptr < (gpointer)info->stack_end)
		return TRUE;
	return FALSE;
}
static mword*
handle_remset (mword *p, void *start_nursery, void *end_nursery, gboolean global, GrayQueue *queue)
{
	void **ptr;
	mword count;
	mword desc;

	if (global)
		HEAVY_STAT (++stat_global_remsets_processed);
	else
		HEAVY_STAT (++stat_remsets_processed);

	/* FIXME: exclude stack locations */
	switch ((*p) & REMSET_TYPE_MASK) {
	case REMSET_LOCATION:
		ptr = (void**)(*p);
		//__builtin_prefetch (ptr);
		if (((void*)ptr < start_nursery || (void*)ptr >= end_nursery)) {
			gpointer old = *ptr;
			major.copy_object (ptr, queue);
			DEBUG (9, fprintf (gc_debug_file, "Overwrote remset at %p with %p\n", ptr, *ptr));
			if (old)
				binary_protocol_ptr_update (ptr, old, *ptr, (gpointer)LOAD_VTABLE (*ptr), safe_object_get_size (*ptr));
			if (!global && *ptr >= start_nursery && *ptr < end_nursery) {
				/*
				 * If the object is pinned, each reference to it from nonpinned objects
				 * becomes part of the global remset, which can grow very large.
				 */
				DEBUG (9, fprintf (gc_debug_file, "Add to global remset because of pinning %p (%p %s)\n", ptr, *ptr, safe_name (*ptr)));
				mono_sgen_add_to_global_remset (ptr);
			}
		} else {
			DEBUG (9, fprintf (gc_debug_file, "Skipping remset at %p holding %p\n", ptr, *ptr));
		}
		return p + 1;
	case REMSET_RANGE:
		ptr = (void**)(*p & ~REMSET_TYPE_MASK);
		if (((void*)ptr >= start_nursery && (void*)ptr < end_nursery))
			return p + 2;
		count = p [1];
		while (count-- > 0) {
			major.copy_object (ptr, queue);
			DEBUG (9, fprintf (gc_debug_file, "Overwrote remset at %p with %p (count: %d)\n", ptr, *ptr, (int)count));
			if (!global && *ptr >= start_nursery && *ptr < end_nursery)
				mono_sgen_add_to_global_remset (ptr);
			++ptr;
		}
		return p + 2;
	case REMSET_OBJECT:
		ptr = (void**)(*p & ~REMSET_TYPE_MASK);
		if (((void*)ptr >= start_nursery && (void*)ptr < end_nursery))
			return p + 1;
		major.minor_scan_object ((char*)ptr, queue);
		return p + 1;
	case REMSET_VTYPE: {
		ptr = (void**)(*p & ~REMSET_TYPE_MASK);
		if (((void*)ptr >= start_nursery && (void*)ptr < end_nursery))
			return p + 3;
		desc = p [1];
		count = p [2];
		while (count-- > 0)
			ptr = (void**) major.minor_scan_vtype ((char*)ptr, desc, start_nursery, end_nursery, queue);
		return p + 3;
	}
	default:
		g_assert_not_reached ();
	}
	return NULL;
}
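/*
 * Illustrative sketch (not part of the build): remset entries are machine
 * words with the entry type packed into the low bits of a pointer-aligned
 * address, which is why handle_remset masks with REMSET_TYPE_MASK before
 * dereferencing, and why multi-word entries advance p by more than one
 * slot.  The toy below shows the tagging arithmetic for a range entry;
 * the buffer and names are invented for the example.
 */
#if 0
static void
toy_record_range_remset (mword *buf, void **dest, int count)
{
	/* dest is pointer-aligned, so its low bits are free for the tag */
	buf [0] = (mword)dest | REMSET_RANGE;
	buf [1] = count;
}

static void
toy_decode_range_remset (mword *buf, void ***out_ptr, int *out_count)
{
	g_assert ((buf [0] & REMSET_TYPE_MASK) == REMSET_RANGE);
	*out_ptr = (void**)(buf [0] & ~REMSET_TYPE_MASK);	/* strip the tag bits */
	*out_count = (int)buf [1];
}
#endif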
#ifdef HEAVY_STATISTICS
static mword*
collect_store_remsets (RememberedSet *remset, mword *bumper)
{
	mword *p = remset->data;
	mword last = 0;
	mword last1 = 0;
	mword last2 = 0;

	while (p < remset->store_next) {
		switch ((*p) & REMSET_TYPE_MASK) {
		case REMSET_LOCATION:
			*bumper++ = *p;
			if (*p == last)
				++stat_saved_remsets_1;
			last = *p;
			if (*p == last1 || *p == last2) {
				++stat_saved_remsets_2;
			} else {
				last2 = last1;
				last1 = *p;
			}
			p += 1;
			break;
		case REMSET_RANGE:
			p += 2;
			break;
		case REMSET_OBJECT:
			p += 1;
			break;
		case REMSET_VTYPE:
			p += 3;
			break;
		default:
			g_assert_not_reached ();
		}
	}

	return bumper;
}
static void
remset_stats (void)
{
	RememberedSet *remset;
	int size = 0;
	SgenThreadInfo *info;
	int i;
	mword *addresses, *bumper, *p, *r;

	for (i = 0; i < THREAD_HASH_SIZE; ++i) {
		for (info = thread_table [i]; info; info = info->next) {
			for (remset = info->remset; remset; remset = remset->next)
				size += remset->store_next - remset->data;
		}
	}
	for (remset = freed_thread_remsets; remset; remset = remset->next)
		size += remset->store_next - remset->data;
	for (remset = global_remset; remset; remset = remset->next)
		size += remset->store_next - remset->data;

	bumper = addresses = mono_sgen_alloc_internal_dynamic (sizeof (mword) * size, INTERNAL_MEM_STATISTICS);

	for (i = 0; i < THREAD_HASH_SIZE; ++i) {
		for (info = thread_table [i]; info; info = info->next) {
			for (remset = info->remset; remset; remset = remset->next)
				bumper = collect_store_remsets (remset, bumper);
		}
	}
	for (remset = global_remset; remset; remset = remset->next)
		bumper = collect_store_remsets (remset, bumper);
	for (remset = freed_thread_remsets; remset; remset = remset->next)
		bumper = collect_store_remsets (remset, bumper);

	g_assert (bumper <= addresses + size);

	stat_store_remsets += bumper - addresses;

	sort_addresses ((void**)addresses, bumper - addresses);
	p = addresses;
	r = addresses + 1;
	while (r < bumper) {
		if (*r != *p)
			*++p = *r;
		++r;
	}

	stat_store_remsets_unique += p - addresses;

	mono_sgen_free_internal_dynamic (addresses, sizeof (mword) * size, INTERNAL_MEM_STATISTICS);
}
#endif
static void
clear_thread_store_remset_buffer (SgenThreadInfo *info)
{
	*info->store_remset_buffer_index_addr = 0;
	memset (*info->store_remset_buffer_addr, 0, sizeof (gpointer) * STORE_REMSET_BUFFER_SIZE);
}

static size_t
remset_byte_size (RememberedSet *remset)
{
	return sizeof (RememberedSet) + (remset->end_set - remset->data) * sizeof (gpointer);
}
static void
scan_from_remsets (void *start_nursery, void *end_nursery, GrayQueue *queue)
{
	int i;
	SgenThreadInfo *info;
	RememberedSet *remset;
	GenericStoreRememberedSet *store_remset;
	mword *p, *next_p, *store_pos;

#ifdef HEAVY_STATISTICS
	remset_stats ();
#endif

	/* the global one */
	for (remset = global_remset; remset; remset = remset->next) {
		DEBUG (4, fprintf (gc_debug_file, "Scanning global remset range: %p-%p, size: %td\n", remset->data, remset->store_next, remset->store_next - remset->data));
		store_pos = remset->data;
		for (p = remset->data; p < remset->store_next; p = next_p) {
			void **ptr = (void**)p [0];

			/*Ignore previously processed remset.*/
			if (!global_remset_location_was_not_added (ptr)) {
				next_p = p + 1;
				continue;
			}

			next_p = handle_remset (p, start_nursery, end_nursery, TRUE, queue);

			/*
			 * Clear global remsets of locations which no longer point to the
			 * nursery. Otherwise, they could grow indefinitely between major
			 * collections.
			 *
			 * Since all global remsets are location remsets, we don't need to unmask the pointer.
			 */
			if (ptr_in_nursery (*ptr)) {
				*store_pos ++ = p [0];
				HEAVY_STAT (++stat_global_remsets_readded);
			}
		}

		/* Truncate the remset */
		remset->store_next = store_pos;
	}

	/* the generic store ones */
	store_remset = generic_store_remsets;
	while (store_remset) {
		GenericStoreRememberedSet *next = store_remset->next;

		for (i = 0; i < STORE_REMSET_BUFFER_SIZE - 1; ++i) {
			gpointer addr = store_remset->data [i];
			if (addr)
				handle_remset ((mword*)&addr, start_nursery, end_nursery, FALSE, queue);
		}

		mono_sgen_free_internal (store_remset, INTERNAL_MEM_STORE_REMSET);

		store_remset = next;
	}
	generic_store_remsets = NULL;

	/* the per-thread ones */
	for (i = 0; i < THREAD_HASH_SIZE; ++i) {
		for (info = thread_table [i]; info; info = info->next) {
			RememberedSet *next;
			int j;
			for (remset = info->remset; remset; remset = next) {
				DEBUG (4, fprintf (gc_debug_file, "Scanning remset for thread %p, range: %p-%p, size: %td\n", info, remset->data, remset->store_next, remset->store_next - remset->data));
				for (p = remset->data; p < remset->store_next;) {
					p = handle_remset (p, start_nursery, end_nursery, FALSE, queue);
				}
				remset->store_next = remset->data;
				next = remset->next;
				remset->next = NULL;
				if (remset != info->remset) {
					DEBUG (4, fprintf (gc_debug_file, "Freed remset at %p\n", remset->data));
					mono_sgen_free_internal_dynamic (remset, remset_byte_size (remset), INTERNAL_MEM_REMSET);
				}
			}
			for (j = 0; j < *info->store_remset_buffer_index_addr; ++j)
				handle_remset ((mword*)*info->store_remset_buffer_addr + j + 1, start_nursery, end_nursery, FALSE, queue);
			clear_thread_store_remset_buffer (info);
		}
	}

	/* the freed thread ones */
	while (freed_thread_remsets) {
		RememberedSet *next;
		remset = freed_thread_remsets;
		DEBUG (4, fprintf (gc_debug_file, "Scanning remset for freed thread, range: %p-%p, size: %td\n", remset->data, remset->store_next, remset->store_next - remset->data));
		for (p = remset->data; p < remset->store_next;) {
			p = handle_remset (p, start_nursery, end_nursery, FALSE, queue);
		}
		next = remset->next;
		DEBUG (4, fprintf (gc_debug_file, "Freed remset at %p\n", remset->data));
		mono_sgen_free_internal_dynamic (remset, remset_byte_size (remset), INTERNAL_MEM_REMSET);
		freed_thread_remsets = next;
	}
}
/*
 * Clear the info in the remembered sets: we're doing a major collection, so
 * the per-thread ones are not needed and the global ones will be reconstructed
 * during the copy.
 */
static void
clear_remsets (void)
{
	int i;
	SgenThreadInfo *info;
	RememberedSet *remset, *next;

	/* the global list */
	for (remset = global_remset; remset; remset = next) {
		remset->store_next = remset->data;
		next = remset->next;
		remset->next = NULL;
		if (remset != global_remset) {
			DEBUG (4, fprintf (gc_debug_file, "Freed remset at %p\n", remset->data));
			mono_sgen_free_internal_dynamic (remset, remset_byte_size (remset), INTERNAL_MEM_REMSET);
		}
	}
	/* the generic store ones */
	while (generic_store_remsets) {
		GenericStoreRememberedSet *gs_next = generic_store_remsets->next;
		mono_sgen_free_internal (generic_store_remsets, INTERNAL_MEM_STORE_REMSET);
		generic_store_remsets = gs_next;
	}
	/* the per-thread ones */
	for (i = 0; i < THREAD_HASH_SIZE; ++i) {
		for (info = thread_table [i]; info; info = info->next) {
			for (remset = info->remset; remset; remset = next) {
				remset->store_next = remset->data;
				next = remset->next;
				remset->next = NULL;
				if (remset != info->remset) {
					DEBUG (3, fprintf (gc_debug_file, "Freed remset at %p\n", remset->data));
					mono_sgen_free_internal_dynamic (remset, remset_byte_size (remset), INTERNAL_MEM_REMSET);
				}
			}
			clear_thread_store_remset_buffer (info);
		}
	}

	/* the freed thread ones */
	while (freed_thread_remsets) {
		next = freed_thread_remsets->next;
		DEBUG (4, fprintf (gc_debug_file, "Freed remset at %p\n", freed_thread_remsets->data));
		mono_sgen_free_internal_dynamic (freed_thread_remsets, remset_byte_size (freed_thread_remsets), INTERNAL_MEM_REMSET);
		freed_thread_remsets = next;
	}
}
/*
 * Clear the thread local TLAB variables for all threads.
 */
static void
clear_tlabs (void)
{
	SgenThreadInfo *info;
	int i;

	for (i = 0; i < THREAD_HASH_SIZE; ++i) {
		for (info = thread_table [i]; info; info = info->next) {
			/* A new TLAB will be allocated when the thread does its first allocation */
			*info->tlab_start_addr = NULL;
			*info->tlab_next_addr = NULL;
			*info->tlab_temp_end_addr = NULL;
			*info->tlab_real_end_addr = NULL;
		}
	}
}
5122 static SgenThreadInfo
*
5123 gc_register_current_thread (void *addr
)
5126 SgenThreadInfo
* info
= malloc (sizeof (SgenThreadInfo
));
5127 #ifndef HAVE_KW_THREAD
5128 SgenThreadInfo
*__thread_info__
= info
;
5134 memset (info
, 0, sizeof (SgenThreadInfo
));
5135 #ifndef HAVE_KW_THREAD
5136 info
->tlab_start
= info
->tlab_next
= info
->tlab_temp_end
= info
->tlab_real_end
= NULL
;
5138 g_assert (!pthread_getspecific (thread_info_key
));
5139 pthread_setspecific (thread_info_key
, info
);
5144 info
->id
= ARCH_GET_THREAD ();
5145 info
->stop_count
= -1;
5148 info
->stack_start
= NULL
;
5149 info
->tlab_start_addr
= &TLAB_START
;
5150 info
->tlab_next_addr
= &TLAB_NEXT
;
5151 info
->tlab_temp_end_addr
= &TLAB_TEMP_END
;
5152 info
->tlab_real_end_addr
= &TLAB_REAL_END
;
5153 info
->store_remset_buffer_addr
= &STORE_REMSET_BUFFER
;
5154 info
->store_remset_buffer_index_addr
= &STORE_REMSET_BUFFER_INDEX
;
5155 info
->stopped_ip
= NULL
;
5156 info
->stopped_domain
= NULL
;
5157 info
->stopped_regs
= NULL
;
5159 binary_protocol_thread_register ((gpointer
)info
->id
);
5161 #ifdef HAVE_KW_THREAD
5162 tlab_next_addr
= &tlab_next
;
5163 store_remset_buffer_index_addr
= &store_remset_buffer_index
;
5166 /* try to get it with attributes first */
5167 #if defined(HAVE_PTHREAD_GETATTR_NP) && defined(HAVE_PTHREAD_ATTR_GETSTACK)
5171 pthread_attr_t attr
;
5172 pthread_getattr_np (pthread_self (), &attr
);
5173 pthread_attr_getstack (&attr
, &sstart
, &size
);
5174 info
->stack_start_limit
= sstart
;
5175 info
->stack_end
= (char*)sstart
+ size
;
5176 pthread_attr_destroy (&attr
);
5178 #elif defined(HAVE_PTHREAD_GET_STACKSIZE_NP) && defined(HAVE_PTHREAD_GET_STACKADDR_NP)
5179 info
->stack_end
= (char*)pthread_get_stackaddr_np (pthread_self ());
5180 info
->stack_start_limit
= (char*)info
->stack_end
- pthread_get_stacksize_np (pthread_self ());
5183 /* FIXME: we assume the stack grows down */
5184 gsize stack_bottom
= (gsize
)addr
;
5185 stack_bottom
+= 4095;
5186 stack_bottom
&= ~4095;
5187 info
->stack_end
= (char*)stack_bottom
;
5191 #ifdef HAVE_KW_THREAD
5192 stack_end
= info
->stack_end
;
5195 /* hash into the table */
5196 hash
= HASH_PTHREAD_T (info
->id
) % THREAD_HASH_SIZE
;
5197 info
->next
= thread_table
[hash
];
5198 thread_table
[hash
] = info
;
5200 info
->remset
= alloc_remset (DEFAULT_REMSET_SIZE
, info
);
5201 pthread_setspecific (remembered_set_key
, info
->remset
);
5202 #ifdef HAVE_KW_THREAD
5203 remembered_set
= info
->remset
;
5206 STORE_REMSET_BUFFER
= mono_sgen_alloc_internal (INTERNAL_MEM_STORE_REMSET
);
5207 STORE_REMSET_BUFFER_INDEX
= 0;
5209 DEBUG (3, fprintf (gc_debug_file
, "registered thread %p (%p) (hash: %d)\n", info
, (gpointer
)info
->id
, hash
));
5211 if (gc_callbacks
.thread_attach_func
)
5212 info
->runtime_data
= gc_callbacks
.thread_attach_func ();
static void
add_generic_store_remset_from_buffer (gpointer *buffer)
{
	GenericStoreRememberedSet *remset = mono_sgen_alloc_internal (INTERNAL_MEM_STORE_REMSET);
	memcpy (remset->data, buffer + 1, sizeof (gpointer) * (STORE_REMSET_BUFFER_SIZE - 1));
	remset->next = generic_store_remsets;
	generic_store_remsets = remset;
}
static void
unregister_current_thread (void)
{
	int hash;
	SgenThreadInfo *prev = NULL;
	SgenThreadInfo *p;
	RememberedSet *rset;
	ARCH_THREAD_TYPE id = ARCH_GET_THREAD ();

	binary_protocol_thread_unregister ((gpointer)id);

	hash = HASH_PTHREAD_T (id) % THREAD_HASH_SIZE;
	p = thread_table [hash];
	g_assert (p);
	DEBUG (3, fprintf (gc_debug_file, "unregister thread %p (%p)\n", p, (gpointer)p->id));
	while (!ARCH_THREAD_EQUALS (p->id, id)) {
		prev = p;
		p = p->next;
	}
	if (prev == NULL) {
		thread_table [hash] = p->next;
	} else {
		prev->next = p->next;
	}
	if (freed_thread_remsets) {
		for (rset = p->remset; rset->next; rset = rset->next)
			;
		rset->next = freed_thread_remsets;
		freed_thread_remsets = p->remset;
	} else {
		freed_thread_remsets = p->remset;
	}
	if (*p->store_remset_buffer_index_addr)
		add_generic_store_remset_from_buffer (*p->store_remset_buffer_addr);
	mono_sgen_free_internal (*p->store_remset_buffer_addr, INTERNAL_MEM_STORE_REMSET);
	free (p);
}
static void
unregister_thread (void *k)
{
	g_assert (!mono_domain_get ());
	LOCK_GC;
	unregister_current_thread ();
	UNLOCK_GC;
}

gboolean
mono_gc_register_thread (void *baseptr)
{
	SgenThreadInfo *info;

	LOCK_GC;
	info = mono_sgen_thread_info_lookup (ARCH_GET_THREAD ());
	if (info == NULL)
		info = gc_register_current_thread (baseptr);
	UNLOCK_GC;
	return info != NULL;
}
#if USE_PTHREAD_INTERCEPT

typedef struct {
	void *(*start_routine) (void *);
	void *arg;
	MonoSemType registered;
} SgenThreadStartInfo;
static void*
gc_start_thread (void *arg)
{
	SgenThreadStartInfo *start_info = arg;
	SgenThreadInfo* info;
	void *t_arg = start_info->arg;
	void *(*start_func) (void*) = start_info->start_routine;
	void *result;
	int post_result;

	LOCK_GC;
	info = gc_register_current_thread (&result);
	UNLOCK_GC;
	post_result = MONO_SEM_POST (&(start_info->registered));
	g_assert (!post_result);
	result = start_func (t_arg);
	g_assert (!mono_domain_get ());
	/*
	 * this is done by the pthread key dtor
	LOCK_GC;
	unregister_current_thread ();
	UNLOCK_GC;
	*/

	return result;
}
int
mono_gc_pthread_create (pthread_t *new_thread, const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg)
{
	SgenThreadStartInfo *start_info;
	int result;

	start_info = malloc (sizeof (SgenThreadStartInfo));
	if (!start_info)
		return ENOMEM;
	MONO_SEM_INIT (&(start_info->registered), 0);
	start_info->arg = arg;
	start_info->start_routine = start_routine;

	result = pthread_create (new_thread, attr, gc_start_thread, start_info);
	if (result == 0) {
		while (MONO_SEM_WAIT (&(start_info->registered)) != 0) {
			/*if (EINTR != errno) ABORT("sem_wait failed"); */
		}
	}
	MONO_SEM_DESTROY (&(start_info->registered));
	free (start_info);
	return result;
}

int
mono_gc_pthread_join (pthread_t thread, void **retval)
{
	return pthread_join (thread, retval);
}

int
mono_gc_pthread_detach (pthread_t thread)
{
	return pthread_detach (thread);
}

#endif /* USE_PTHREAD_INTERCEPT */
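/*
 * Illustrative sketch (not part of the build): the intercept above makes
 * thread registration synchronous.  The creator blocks on the semaphore
 * and the child posts it only after gc_register_current_thread has run, so
 * by the time mono_gc_pthread_create returns, the new thread is already
 * visible to stop-the-world.  A caller uses it exactly like pthread_create;
 * the toy names below are invented for the example.
 */
#if 0
static void*
toy_worker (void *arg)
{
	/* already registered with the GC when we get here */
	return NULL;
}

static void
toy_spawn_and_join (void)
{
	pthread_t tid;
	if (mono_gc_pthread_create (&tid, NULL, toy_worker, NULL) == 0)
		mono_gc_pthread_join (tid, NULL);
}
#endif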
/*
 * ######################################################################
 * ########  Write barriers
 * ######################################################################
 */

static RememberedSet*
alloc_remset (int size, gpointer id) {
	RememberedSet* res = mono_sgen_alloc_internal_dynamic (sizeof (RememberedSet) + (size * sizeof (gpointer)), INTERNAL_MEM_REMSET);
	res->store_next = res->data;
	res->end_set = res->data + size;
	res->next = NULL;
	DEBUG (4, fprintf (gc_debug_file, "Allocated remset size %d at %p for %p\n", size, res->data, id));
	return res;
}
/*
 * Note: the write barriers first do the needed GC work and then do the actual store:
 * this way the value is visible to the conservative GC scan after the write barrier
 * itself. If a GC interrupts the barrier in the middle, value will be kept alive by
 * the conservative scan, otherwise by the remembered set scan.
 */
void
mono_gc_wbarrier_set_field (MonoObject *obj, gpointer field_ptr, MonoObject* value)
{
	RememberedSet *rs;
	TLAB_ACCESS_INIT;
	HEAVY_STAT (++stat_wbarrier_set_field);
	if (ptr_in_nursery (field_ptr)) {
		*(void**)field_ptr = value;
		return;
	}
	DEBUG (8, fprintf (gc_debug_file, "Adding remset at %p\n", field_ptr));
	LOCK_GC;
	rs = REMEMBERED_SET;
	if (rs->store_next < rs->end_set) {
		*(rs->store_next++) = (mword)field_ptr;
		*(void**)field_ptr = value;
		UNLOCK_GC;
		return;
	}
	rs = alloc_remset (rs->end_set - rs->data, (void*)1);
	rs->next = REMEMBERED_SET;
	REMEMBERED_SET = rs;
#ifdef HAVE_KW_THREAD
	mono_sgen_thread_info_lookup (ARCH_GET_THREAD ())->remset = rs;
#endif
	*(rs->store_next++) = (mword)field_ptr;
	*(void**)field_ptr = value;
	UNLOCK_GC;
}
void
mono_gc_wbarrier_set_arrayref (MonoArray *arr, gpointer slot_ptr, MonoObject* value)
{
	RememberedSet *rs;
	TLAB_ACCESS_INIT;
	HEAVY_STAT (++stat_wbarrier_set_arrayref);
	if (ptr_in_nursery (slot_ptr)) {
		*(void**)slot_ptr = value;
		return;
	}
	DEBUG (8, fprintf (gc_debug_file, "Adding remset at %p\n", slot_ptr));
	LOCK_GC;
	rs = REMEMBERED_SET;
	if (rs->store_next < rs->end_set) {
		*(rs->store_next++) = (mword)slot_ptr;
		*(void**)slot_ptr = value;
		UNLOCK_GC;
		return;
	}
	rs = alloc_remset (rs->end_set - rs->data, (void*)1);
	rs->next = REMEMBERED_SET;
	REMEMBERED_SET = rs;
#ifdef HAVE_KW_THREAD
	mono_sgen_thread_info_lookup (ARCH_GET_THREAD ())->remset = rs;
#endif
	*(rs->store_next++) = (mword)slot_ptr;
	*(void**)slot_ptr = value;
	UNLOCK_GC;
}
void
mono_gc_wbarrier_arrayref_copy (gpointer dest_ptr, gpointer src_ptr, int count)
{
	RememberedSet *rs;
	TLAB_ACCESS_INIT;
	HEAVY_STAT (++stat_wbarrier_arrayref_copy);
	LOCK_GC;
	memmove (dest_ptr, src_ptr, count * sizeof (gpointer));
	if (ptr_in_nursery (dest_ptr)) {
		UNLOCK_GC;
		return;
	}
	rs = REMEMBERED_SET;
	DEBUG (8, fprintf (gc_debug_file, "Adding remset at %p, %d\n", dest_ptr, count));
	if (rs->store_next + 1 < rs->end_set) {
		*(rs->store_next++) = (mword)dest_ptr | REMSET_RANGE;
		*(rs->store_next++) = count;
		UNLOCK_GC;
		return;
	}
	rs = alloc_remset (rs->end_set - rs->data, (void*)1);
	rs->next = REMEMBERED_SET;
	REMEMBERED_SET = rs;
#ifdef HAVE_KW_THREAD
	mono_sgen_thread_info_lookup (ARCH_GET_THREAD ())->remset = rs;
#endif
	*(rs->store_next++) = (mword)dest_ptr | REMSET_RANGE;
	*(rs->store_next++) = count;
	UNLOCK_GC;
}
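
/*
 * Remset entry encodings used above and decoded in find_in_remset_loc ()
 * below: a plain address is a REMSET_LOCATION entry; REMSET_RANGE is a
 * two-word entry (tagged address + pointer count); REMSET_OBJECT tags a
 * whole object; REMSET_VTYPE is a three-word entry (tagged address, GC
 * descriptor, count).  The tag lives in the low bits of the first word
 * (REMSET_TYPE_MASK), which are free because stored addresses are
 * pointer-aligned.
 */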
static char *found_obj;

static void
find_object_for_ptr_callback (char *obj, size_t size, char *ptr)
{
	if (ptr >= obj && ptr < obj + size) {
		g_assert (!found_obj);
		found_obj = obj;
	}
}
/* for use in the debugger */
char* find_object_for_ptr (char *ptr);
char*
find_object_for_ptr (char *ptr)
{
	LOSObject *bigobj;

	if (ptr >= nursery_section->data && ptr < nursery_section->end_data) {
		found_obj = NULL;
		mono_sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data,
				(IterateObjectCallbackFunc)find_object_for_ptr_callback, ptr);
		return found_obj;
	}

	for (bigobj = los_object_list; bigobj; bigobj = bigobj->next) {
		if (ptr >= bigobj->data && ptr < bigobj->data + bigobj->size)
			return bigobj->data;
	}

	/*
	 * Very inefficient, but this is debugging code, supposed to
	 * be called from gdb, so we don't care.
	 */
	found_obj = NULL;
	major.iterate_objects (TRUE, TRUE, (IterateObjectCallbackFunc)find_object_for_ptr_callback, ptr);
	return found_obj;
}
static void
evacuate_remset_buffer (void)
{
	gpointer *buffer;
	TLAB_ACCESS_INIT;

	buffer = STORE_REMSET_BUFFER;

	add_generic_store_remset_from_buffer (buffer);
	memset (buffer, 0, sizeof (gpointer) * STORE_REMSET_BUFFER_SIZE);

	STORE_REMSET_BUFFER_INDEX = 0;
}
void
mono_gc_wbarrier_generic_nostore (gpointer ptr)
{
	gpointer *buffer;
	int index;
	TLAB_ACCESS_INIT;

	HEAVY_STAT (++stat_wbarrier_generic_store);

#ifdef XDOMAIN_CHECKS_IN_WBARRIER
	/* FIXME: ptr_in_heap must be called with the GC lock held */
	if (xdomain_checks && *(MonoObject**)ptr && ptr_in_heap (ptr)) {
		char *start = find_object_for_ptr (ptr);
		MonoObject *value = *(MonoObject**)ptr;
		LOCK_GC;
		g_assert (start);
		if (start) {
			MonoObject *obj = (MonoObject*)start;
			if (obj->vtable->domain != value->vtable->domain)
				g_assert (is_xdomain_ref_allowed (ptr, start, obj->vtable->domain));
		}
		UNLOCK_GC;
	}
#endif

	LOCK_GC;

	if (*(gpointer*)ptr)
		binary_protocol_wbarrier (ptr, *(gpointer*)ptr, (gpointer)LOAD_VTABLE (*(gpointer*)ptr));

	if (ptr_in_nursery (ptr) || ptr_on_stack (ptr) || !ptr_in_nursery (*(gpointer*)ptr)) {
		DEBUG (8, fprintf (gc_debug_file, "Skipping remset at %p\n", ptr));
		UNLOCK_GC;
		return;
	}

	buffer = STORE_REMSET_BUFFER;
	index = STORE_REMSET_BUFFER_INDEX;
	/* This simple optimization eliminates a sizable portion of
	   entries.  Comparing it to the last but one entry as well
	   doesn't eliminate significantly more entries. */
	if (buffer [index] == ptr) {
		UNLOCK_GC;
		return;
	}

	DEBUG (8, fprintf (gc_debug_file, "Adding remset at %p\n", ptr));
	HEAVY_STAT (++stat_wbarrier_generic_store_remset);

	++index;
	if (index >= STORE_REMSET_BUFFER_SIZE) {
		evacuate_remset_buffer ();
		index = STORE_REMSET_BUFFER_INDEX;
		g_assert (index == 0);
		++index;
	}
	buffer [index] = ptr;
	STORE_REMSET_BUFFER_INDEX = index;

	UNLOCK_GC;
}
void
mono_gc_wbarrier_generic_store (gpointer ptr, MonoObject* value)
{
	DEBUG (8, fprintf (gc_debug_file, "Wbarrier store at %p to %p (%s)\n", ptr, value, value ? safe_name (value) : "null"));
	*(void**)ptr = value;
	if (ptr_in_nursery (value))
		mono_gc_wbarrier_generic_nostore (ptr);
}
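
/*
 * Typical use, e.g. when the runtime stores a reference into a heap
 * object field (illustrative):
 *
 *	mono_gc_wbarrier_generic_store (&some_obj->field, (MonoObject*)value);
 *
 * The store itself is performed by the barrier, per the ordering note
 * above; the nostore variant is for callers that do the store themselves.
 */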
void mono_gc_wbarrier_value_copy_bitmap (gpointer _dest, gpointer _src, int size, unsigned bitmap)
{
	mword *dest = _dest;
	mword *src = _src;

	while (size) {
		if (bitmap & 0x1)
			mono_gc_wbarrier_generic_store (dest, (MonoObject*)*src);
		else
			*dest = *src;
		++src;
		++dest;
		size -= SIZEOF_VOID_P;
		bitmap >>= 1;
	}
}
void
mono_gc_wbarrier_value_copy (gpointer dest, gpointer src, int count, MonoClass *klass)
{
	RememberedSet *rs;
	TLAB_ACCESS_INIT;
	HEAVY_STAT (++stat_wbarrier_value_copy);
	g_assert (klass->valuetype);
	LOCK_GC;
	memmove (dest, src, count * mono_class_value_size (klass, NULL));
	rs = REMEMBERED_SET;
	if (ptr_in_nursery (dest) || ptr_on_stack (dest) || !klass->has_references) {
		UNLOCK_GC;
		return;
	}
	g_assert (klass->gc_descr_inited);
	DEBUG (8, fprintf (gc_debug_file, "Adding value remset at %p, count %d, descr %p for class %s (%p)\n", dest, count, klass->gc_descr, klass->name, klass));

	if (rs->store_next + 3 < rs->end_set) {
		*(rs->store_next++) = (mword)dest | REMSET_VTYPE;
		*(rs->store_next++) = (mword)klass->gc_descr;
		*(rs->store_next++) = (mword)count;
		UNLOCK_GC;
		return;
	}
	rs = alloc_remset (rs->end_set - rs->data, (void*)1);
	rs->next = REMEMBERED_SET;
	REMEMBERED_SET = rs;
#ifdef HAVE_KW_THREAD
	mono_sgen_thread_info_lookup (ARCH_GET_THREAD ())->remset = rs;
#endif
	*(rs->store_next++) = (mword)dest | REMSET_VTYPE;
	*(rs->store_next++) = (mword)klass->gc_descr;
	*(rs->store_next++) = (mword)count;
	UNLOCK_GC;
}
/*
 * mono_gc_wbarrier_object_copy:
 *
 * Write barrier to call when obj is the result of a clone or copy of an object.
 */
void
mono_gc_wbarrier_object_copy (MonoObject* obj, MonoObject *src)
{
	RememberedSet *rs;
	int size;

	TLAB_ACCESS_INIT;
	HEAVY_STAT (++stat_wbarrier_object_copy);
	rs = REMEMBERED_SET;
	DEBUG (6, fprintf (gc_debug_file, "Adding object remset for %p\n", obj));
	size = mono_object_class (obj)->instance_size;
	LOCK_GC;
	/* do not copy the sync state */
	memcpy ((char*)obj + sizeof (MonoObject), (char*)src + sizeof (MonoObject),
			size - sizeof (MonoObject));
	if (ptr_in_nursery (obj) || ptr_on_stack (obj)) {
		UNLOCK_GC;
		return;
	}
	if (rs->store_next < rs->end_set) {
		*(rs->store_next++) = (mword)obj | REMSET_OBJECT;
		UNLOCK_GC;
		return;
	}
	rs = alloc_remset (rs->end_set - rs->data, (void*)1);
	rs->next = REMEMBERED_SET;
	REMEMBERED_SET = rs;
#ifdef HAVE_KW_THREAD
	mono_sgen_thread_info_lookup (ARCH_GET_THREAD ())->remset = rs;
#endif
	*(rs->store_next++) = (mword)obj | REMSET_OBJECT;
	UNLOCK_GC;
}
/*
 * ######################################################################
 * ########  Collector debugging
 * ######################################################################
 */
const char*descriptor_types [] = {
	"run_length",
	"small_bitmap",
	"string",
	"complex",
	"vector",
	"array",
	"large_bitmap",
	"complex_arr"
};

void
describe_ptr (char *ptr)
{
	MonoVTable *vtable;
	mword desc;
	int type;

	if (ptr_in_nursery (ptr)) {
		printf ("Pointer inside nursery.\n");
	} else {
		if (major.ptr_is_in_non_pinned_space (ptr)) {
			printf ("Pointer inside oldspace.\n");
		} else if (major.obj_is_from_pinned_alloc (ptr)) {
			printf ("Pointer is inside a pinned chunk.\n");
		} else {
			printf ("Pointer unknown.\n");
			return;
		}
	}

	if (object_is_pinned (ptr))
		printf ("Object is pinned.\n");

	if (object_is_forwarded (ptr))
		printf ("Object is forwarded.\n");

	// FIXME: Handle pointers to the inside of objects
	vtable = (MonoVTable*)LOAD_VTABLE (ptr);

	printf ("VTable: %p\n", vtable);
	if (vtable == NULL) {
		printf ("VTable is invalid (empty).\n");
		return;
	}
	if (ptr_in_nursery (vtable)) {
		printf ("VTable is invalid (points inside nursery).\n");
		return;
	}
	printf ("Class: %s\n", vtable->klass->name);

	desc = ((GCVTable*)vtable)->desc;
	printf ("Descriptor: %lx\n", (long)desc);

	type = desc & 0x7;
	printf ("Descriptor type: %d (%s)\n", type, descriptor_types [type]);
}
static mword*
find_in_remset_loc (mword *p, char *addr, gboolean *found)
{
	void **ptr;
	mword count, desc;
	size_t skip_size;

	switch ((*p) & REMSET_TYPE_MASK) {
	case REMSET_LOCATION:
		if (*p == (mword)addr)
			*found = TRUE;
		return p + 1;
	case REMSET_RANGE:
		ptr = (void**)(*p & ~REMSET_TYPE_MASK);
		count = p [1];
		if ((void**)addr >= ptr && (void**)addr < ptr + count)
			*found = TRUE;
		return p + 2;
	case REMSET_OBJECT:
		ptr = (void**)(*p & ~REMSET_TYPE_MASK);
		count = safe_object_get_size ((MonoObject*)ptr);
		count = ALIGN_UP (count);
		count /= sizeof (mword);
		if ((void**)addr >= ptr && (void**)addr < ptr + count)
			*found = TRUE;
		return p + 1;
	case REMSET_VTYPE:
		ptr = (void**)(*p & ~REMSET_TYPE_MASK);
		desc = p [1];
		count = p [2];

		switch (desc & 0x7) {
		case DESC_TYPE_RUN_LENGTH:
			OBJ_RUN_LEN_SIZE (skip_size, desc, ptr);
			break;
		case DESC_TYPE_SMALL_BITMAP:
			OBJ_BITMAP_SIZE (skip_size, desc, start);
			break;
		default:
			g_assert_not_reached ();
		}

		/* The descriptor includes the size of MonoObject */
		skip_size -= sizeof (MonoObject);
		skip_size *= count;
		if ((void**)addr >= ptr && (void**)addr < ptr + (skip_size / sizeof (gpointer)))
			*found = TRUE;

		return p + 3;
	default:
		g_assert_not_reached ();
	}
	return NULL;
}
/*
 * Return whether ADDR occurs in the remembered sets
 */
static gboolean
find_in_remsets (char *addr)
{
	int i;
	SgenThreadInfo *info;
	RememberedSet *remset;
	GenericStoreRememberedSet *store_remset;
	mword *p;
	gboolean found = FALSE;

	/* the global one */
	for (remset = global_remset; remset; remset = remset->next) {
		DEBUG (4, fprintf (gc_debug_file, "Scanning global remset range: %p-%p, size: %td\n", remset->data, remset->store_next, remset->store_next - remset->data));
		for (p = remset->data; p < remset->store_next;) {
			p = find_in_remset_loc (p, addr, &found);
			if (found)
				return TRUE;
		}
	}

	/* the generic store ones */
	for (store_remset = generic_store_remsets; store_remset; store_remset = store_remset->next) {
		for (i = 0; i < STORE_REMSET_BUFFER_SIZE - 1; ++i) {
			if (store_remset->data [i] == addr)
				return TRUE;
		}
	}

	/* the per-thread ones */
	for (i = 0; i < THREAD_HASH_SIZE; ++i) {
		for (info = thread_table [i]; info; info = info->next) {
			int j;
			for (remset = info->remset; remset; remset = remset->next) {
				DEBUG (4, fprintf (gc_debug_file, "Scanning remset for thread %p, range: %p-%p, size: %td\n", info, remset->data, remset->store_next, remset->store_next - remset->data));
				for (p = remset->data; p < remset->store_next;) {
					p = find_in_remset_loc (p, addr, &found);
					if (found)
						return TRUE;
				}
			}
			for (j = 0; j < *info->store_remset_buffer_index_addr; ++j) {
				if ((*info->store_remset_buffer_addr) [j + 1] == addr)
					return TRUE;
			}
		}
	}

	/* the freed thread ones */
	for (remset = freed_thread_remsets; remset; remset = remset->next) {
		DEBUG (4, fprintf (gc_debug_file, "Scanning remset for freed thread, range: %p-%p, size: %td\n", remset->data, remset->store_next, remset->store_next - remset->data));
		for (p = remset->data; p < remset->store_next;) {
			p = find_in_remset_loc (p, addr, &found);
			if (found)
				return TRUE;
		}
	}

	return FALSE;
}
static gboolean missing_remsets;

/*
 * We let a missing remset slide if the target object is pinned,
 * because the store might have happened but the remset not yet added,
 * but in that case the target must be pinned.  We might theoretically
 * miss some missing remsets this way, but it's very unlikely.
 */
#undef HANDLE_PTR
#define HANDLE_PTR(ptr,obj)	do {	\
	if (*(ptr) && (char*)*(ptr) >= nursery_start && (char*)*(ptr) < nursery_next) { \
		if (!find_in_remsets ((char*)(ptr))) { \
			fprintf (gc_debug_file, "Oldspace->newspace reference %p at offset %td in object %p (%s.%s) not found in remsets.\n", *(ptr), (char*)(ptr) - (char*)(obj), (obj), ((MonoObject*)(obj))->vtable->klass->name_space, ((MonoObject*)(obj))->vtable->klass->name); \
			binary_protocol_missing_remset ((obj), (gpointer)LOAD_VTABLE ((obj)), (char*)(ptr) - (char*)(obj), *(ptr), (gpointer)LOAD_VTABLE(*(ptr)), object_is_pinned (*(ptr))); \
			if (!object_is_pinned (*(ptr))) \
				missing_remsets = TRUE; \
		} \
	} \
	} while (0)
/*
 * Check that each object reference which points into the nursery can
 * be found in the remembered sets.
 */
static void
check_consistency_callback (char *start, size_t size, void *dummy)
{
	GCVTable *vt = (GCVTable*)LOAD_VTABLE (start);
	DEBUG (8, fprintf (gc_debug_file, "Scanning object %p, vtable: %p (%s)\n", start, vt, vt->klass->name));

#define SCAN_OBJECT_ACTION
#include "sgen-scan-object.h"
}
/*
 * Perform consistency check of the heap.
 *
 * Assumes the world is stopped.
 */
static void
check_consistency (void)
{
	LOSObject *bigobj;

	// Need to add more checks

	missing_remsets = FALSE;

	DEBUG (1, fprintf (gc_debug_file, "Begin heap consistency check...\n"));

	// Check that oldspace->newspace pointers are registered with the collector
	major.iterate_objects (TRUE, TRUE, (IterateObjectCallbackFunc)check_consistency_callback, NULL);

	for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
		check_consistency_callback (bigobj->data, bigobj->size, NULL);

	DEBUG (1, fprintf (gc_debug_file, "Heap consistency check done.\n"));

#ifdef SGEN_BINARY_PROTOCOL
	if (!binary_protocol_file)
#endif
		g_assert (!missing_remsets);
}
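
/*
 * check_consistency () is driven by the MONO_GC_DEBUG option
 * "check-at-minor-collections" (see mono_gc_base_init () below), which
 * also forces CLEAR_AT_GC so stale nursery contents can't masquerade
 * as valid references during the scan.
 */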
#undef HANDLE_PTR
#define HANDLE_PTR(ptr,obj)	do {	\
		if (*(ptr))	\
			g_assert (LOAD_VTABLE (*(ptr)));	\
	} while (0)

static void
check_major_refs_callback (char *start, size_t size, void *dummy)
{
#define SCAN_OBJECT_ACTION
#include "sgen-scan-object.h"
}

static void
check_major_refs (void)
{
	LOSObject *bigobj;

	major.iterate_objects (TRUE, TRUE, (IterateObjectCallbackFunc)check_major_refs_callback, NULL);

	for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
		check_major_refs_callback (bigobj->data, bigobj->size, NULL);
}
/* Check that the reference is valid */
#undef HANDLE_PTR
#define HANDLE_PTR(ptr,obj)	do {	\
		if (*(ptr))	\
			g_assert (safe_name (*(ptr)) != NULL);	\
	} while (0)

/*
 * Perform consistency check on an object. Currently we only check that the
 * reference fields are valid.
 */
void
check_object (char *start)
{
	if (!start)
		return;

#include "sgen-scan-object.h"
}
/*
 * ######################################################################
 * ########  Other mono public interface functions.
 * ######################################################################
 */
void
mono_gc_collect (int generation)
{
	LOCK_GC;
	stop_world ();
	if (generation == 0) {
		collect_nursery (0);
	} else {
		major_collection ("user request");
	}
	restart_world ();
	UNLOCK_GC;
}
int
mono_gc_max_generation (void)
{
	return 1;
}

int
mono_gc_collection_count (int generation)
{
	if (generation == 0)
		return num_minor_gcs;
	return num_major_gcs;
}
gint64
mono_gc_get_used_size (void)
{
	gint64 tot = 0;
	LOCK_GC;
	tot = los_memory_usage;
	tot += nursery_section->next_data - nursery_section->data;
	tot += major.get_used_size ();
	/* FIXME: account for pinned objects */
	UNLOCK_GC;
	return tot;
}

gint64
mono_gc_get_heap_size (void)
{
	return total_alloc;
}

void
mono_gc_disable (void)
{
	LOCK_GC;
	gc_disabled++;
	UNLOCK_GC;
}

void
mono_gc_enable (void)
{
	LOCK_GC;
	gc_disabled--;
	UNLOCK_GC;
}

int
mono_gc_get_los_limit (void)
{
	return MAX_SMALL_OBJ_SIZE;
}
gboolean
mono_object_is_alive (MonoObject* o)
{
	return TRUE;
}

int
mono_gc_get_generation (MonoObject *obj)
{
	if (ptr_in_nursery (obj))
		return 0;
	return 1;
}

void
mono_gc_enable_events (void)
{
}

void
mono_gc_weak_link_add (void **link_addr, MonoObject *obj, gboolean track)
{
	LOCK_GC;
	mono_gc_register_disappearing_link (obj, link_addr, track);
	UNLOCK_GC;
}

void
mono_gc_weak_link_remove (void **link_addr)
{
	LOCK_GC;
	mono_gc_register_disappearing_link (NULL, link_addr, FALSE);
	UNLOCK_GC;
}

MonoObject*
mono_gc_weak_link_get (void **link_addr)
{
	if (!*link_addr)
		return NULL;
	return (MonoObject*) REVEAL_POINTER (*link_addr);
}
gboolean
mono_gc_ephemeron_array_add (MonoObject *obj)
{
	EphemeronLinkNode *node;

	LOCK_GC;

	node = mono_sgen_alloc_internal (INTERNAL_MEM_EPHEMERON_LINK);
	if (!node) {
		UNLOCK_GC;
		return FALSE;
	}
	node->array = (char*)obj;
	node->next = ephemeron_list;
	ephemeron_list = node;

	DEBUG (5, fprintf (gc_debug_file, "Registered ephemeron array %p\n", obj));

	UNLOCK_GC;
	return TRUE;
}
void*
mono_gc_make_descr_from_bitmap (gsize *bitmap, int numbits)
{
	if (numbits < ((sizeof (*bitmap) * 8) - ROOT_DESC_TYPE_SHIFT)) {
		return (void*)MAKE_ROOT_DESC (ROOT_DESC_BITMAP, bitmap [0]);
	} else {
		mword complex = alloc_complex_descriptor (bitmap, numbits);
		return (void*)MAKE_ROOT_DESC (ROOT_DESC_COMPLEX, complex);
	}
}
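
/*
 * Example (illustrative): for a root with layout { MonoObject *a; int b;
 * MonoObject *c; } the caller would pass a bitmap with bits 0 and 2 set
 * (0x5, one bit per pointer-sized word) and numbits = 3, which fits the
 * inline ROOT_DESC_BITMAP case; bitmaps too wide for one word go through
 * alloc_complex_descriptor () instead.
 */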
void*
mono_gc_make_root_descr_user (MonoGCRootMarkFunc marker)
{
	void *descr;

	g_assert (user_descriptors_next < MAX_USER_DESCRIPTORS);
	descr = (void*)MAKE_ROOT_DESC (ROOT_DESC_USER, (mword)user_descriptors_next);
	user_descriptors [user_descriptors_next ++] = marker;

	return descr;
}
void*
mono_gc_alloc_fixed (size_t size, void *descr)
{
	/* FIXME: do a single allocation */
	void *res = calloc (1, size);
	if (!res)
		return NULL;
	if (!mono_gc_register_root (res, size, descr)) {
		free (res);
		res = NULL;
	}
	return res;
}

void
mono_gc_free_fixed (void* addr)
{
	mono_gc_deregister_root (addr);
	free (addr);
}
void*
mono_gc_invoke_with_gc_lock (MonoGCLockedCallbackFunc func, void *data)
{
	void *result;
	LOCK_INTERRUPTION;
	result = func (data);
	UNLOCK_INTERRUPTION;
	return result;
}

gboolean
mono_gc_is_gc_thread (void)
{
	gboolean result;
	LOCK_GC;
	result = mono_sgen_thread_info_lookup (ARCH_GET_THREAD ()) != NULL;
	UNLOCK_GC;
	return result;
}
/* Tries to extract a number from the passed string, taking into account m, k
 * and g suffixes */
static gboolean
parse_environment_string_extract_number (gchar *str, glong *out)
{
	char *endptr;
	int len = strlen (str), shift = 0;
	glong val;
	gboolean is_suffix = FALSE;
	char suffix;

	switch (str [len - 1]) {
		case 'g':
			shift += 10;
		case 'm':
			shift += 10;
		case 'k':
			shift += 10;
			is_suffix = TRUE;
			suffix = str [len - 1];
			break;
	}

	errno = 0;
	val = strtol (str, &endptr, 10);

	if ((errno == ERANGE && (val == LONG_MAX || val == LONG_MIN))
			|| (errno != 0 && val == 0) || (endptr == str))
		return FALSE;

	if (is_suffix) {
		if (*(endptr + 1)) /* Invalid string. */
			return FALSE;
		val <<= shift;
	}

	*out = val;
	return TRUE;
}
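
/*
 * Example (illustrative): "4m" parses as val = 4 shifted left by 20,
 * i.e. 4194304 bytes; "512k" gives 524288; a plain "65536" is returned
 * unshifted.  Note the suffix cases above deliberately fall through,
 * accumulating 10 bits of shift per step from 'g' down to 'k'.
 */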
void
mono_gc_base_init (void)
{
	char *env;
	char **opts, **ptr;
	char *major_collector = NULL;
	struct sigaction sinfo;

	LOCK_INIT (gc_mutex);
	LOCK_GC;
	if (gc_initialized) {
		UNLOCK_GC;
		return;
	}
	pagesize = mono_pagesize ();
	gc_debug_file = stderr;

	LOCK_INIT (interruption_mutex);
#ifdef SGEN_PARALLEL_MARK
	LOCK_INIT (global_remset_mutex);
#endif

	if ((env = getenv ("MONO_GC_PARAMS"))) {
		opts = g_strsplit (env, ",", -1);
		for (ptr = opts; *ptr; ++ptr) {
			char *opt = *ptr;
			if (g_str_has_prefix (opt, "nursery-size=")) {
				glong val;
				opt = strchr (opt, '=') + 1;
				if (*opt && parse_environment_string_extract_number (opt, &val)) {
					default_nursery_size = val;
#ifdef SGEN_ALIGN_NURSERY
					if ((val & (val - 1))) {
						fprintf (stderr, "The nursery size must be a power of two.\n");
						exit (1);
					}

					default_nursery_bits = 0;
					while (1 << (++ default_nursery_bits) != default_nursery_size)
						;
#endif
				} else {
					fprintf (stderr, "nursery-size must be an integer.\n");
					exit (1);
				}
			} else if (g_str_has_prefix (opt, "major=")) {
				opt = strchr (opt, '=') + 1;
				major_collector = g_strdup (opt);
			} else {
				fprintf (stderr, "MONO_GC_PARAMS must be a comma-delimited list of one or more of the following:\n");
				fprintf (stderr, "  nursery-size=N (where N is an integer, possibly with a k, m or a g suffix)\n");
				fprintf (stderr, "  major=COLLECTOR (where collector is `marksweep' or `copying')\n");
				exit (1);
			}
		}
		g_strfreev (opts);
	}

	nursery_size = DEFAULT_NURSERY_SIZE;
	minor_collection_allowance = MIN_MINOR_COLLECTION_ALLOWANCE;

	alloc_nursery ();

	mono_sgen_init_internal_allocator ();

	mono_sgen_register_fixed_internal_mem_type (INTERNAL_MEM_FRAGMENT, sizeof (Fragment));
	mono_sgen_register_fixed_internal_mem_type (INTERNAL_MEM_SECTION, SGEN_SIZEOF_GC_MEM_SECTION);
	mono_sgen_register_fixed_internal_mem_type (INTERNAL_MEM_FINALIZE_ENTRY, sizeof (FinalizeEntry));
	mono_sgen_register_fixed_internal_mem_type (INTERNAL_MEM_DISLINK, sizeof (DisappearingLink));
	mono_sgen_register_fixed_internal_mem_type (INTERNAL_MEM_ROOT_RECORD, sizeof (RootRecord));
	mono_sgen_register_fixed_internal_mem_type (INTERNAL_MEM_GRAY_QUEUE, sizeof (GrayQueueSection));
	g_assert (sizeof (GenericStoreRememberedSet) == sizeof (gpointer) * STORE_REMSET_BUFFER_SIZE);
	mono_sgen_register_fixed_internal_mem_type (INTERNAL_MEM_STORE_REMSET, sizeof (GenericStoreRememberedSet));
	mono_sgen_register_fixed_internal_mem_type (INTERNAL_MEM_EPHEMERON_LINK, sizeof (EphemeronLinkNode));

	if (!major_collector || !strcmp (major_collector, "marksweep")) {
		mono_sgen_marksweep_init (&major, DEFAULT_NURSERY_BITS, nursery_start, nursery_real_end);
		workers_init (mono_cpu_count ());
	} else if (!strcmp (major_collector, "copying")) {
		mono_sgen_copying_init (&major, DEFAULT_NURSERY_BITS, nursery_start, nursery_real_end);
	} else {
		fprintf (stderr, "Unknown major collector `%s'.\n", major_collector);
		exit (1);
	}

	if (major_collector)
		g_free (major_collector);

	if ((env = getenv ("MONO_GC_DEBUG"))) {
		opts = g_strsplit (env, ",", -1);
		for (ptr = opts; ptr && *ptr; ptr ++) {
			char *opt = *ptr;
			if (opt [0] >= '0' && opt [0] <= '9') {
				gc_debug_level = atoi (opt);
				opt++;
				if (opt [0] == ':')
					opt++;
				if (opt [0]) {
					char *rf = g_strdup_printf ("%s.%d", opt, getpid ());
					gc_debug_file = fopen (rf, "wb");
					if (!gc_debug_file)
						gc_debug_file = stderr;
					g_free (rf);
				}
			} else if (!strcmp (opt, "collect-before-allocs")) {
				collect_before_allocs = TRUE;
			} else if (!strcmp (opt, "check-at-minor-collections")) {
				consistency_check_at_minor_collection = TRUE;
				nursery_clear_policy = CLEAR_AT_GC;
			} else if (!strcmp (opt, "xdomain-checks")) {
				xdomain_checks = TRUE;
			} else if (!strcmp (opt, "clear-at-gc")) {
				nursery_clear_policy = CLEAR_AT_GC;
			} else if (!strcmp (opt, "conservative-stack-mark")) {
				conservative_stack_mark = TRUE;
			} else if (!strcmp (opt, "check-scan-starts")) {
				do_scan_starts_check = TRUE;
			} else if (g_str_has_prefix (opt, "heap-dump=")) {
				char *filename = strchr (opt, '=') + 1;
				nursery_clear_policy = CLEAR_AT_GC;
				heap_dump_file = fopen (filename, "w");
				if (heap_dump_file)
					fprintf (heap_dump_file, "<sgen-dump>\n");
#ifdef SGEN_BINARY_PROTOCOL
			} else if (g_str_has_prefix (opt, "binary-protocol=")) {
				char *filename = strchr (opt, '=') + 1;
				binary_protocol_file = fopen (filename, "w");
#endif
			} else {
				fprintf (stderr, "Invalid format for the MONO_GC_DEBUG env variable: '%s'\n", env);
				fprintf (stderr, "The format is: MONO_GC_DEBUG=[l[:filename]|<option>]+ where l is a debug level 0-9.\n");
				fprintf (stderr, "Valid options are: collect-before-allocs, check-at-minor-collections, xdomain-checks, clear-at-gc.\n");
				exit (1);
			}
		}
		g_strfreev (opts);
	}

	suspend_ack_semaphore_ptr = &suspend_ack_semaphore;
	MONO_SEM_INIT (&suspend_ack_semaphore, 0);

	sigfillset (&sinfo.sa_mask);
	sinfo.sa_flags = SA_RESTART | SA_SIGINFO;
	sinfo.sa_sigaction = suspend_handler;
	if (sigaction (suspend_signal_num, &sinfo, NULL) != 0) {
		g_error ("failed sigaction");
	}

	sinfo.sa_handler = restart_handler;
	if (sigaction (restart_signal_num, &sinfo, NULL) != 0) {
		g_error ("failed sigaction");
	}

	sigfillset (&suspend_signal_mask);
	sigdelset (&suspend_signal_mask, restart_signal_num);

	global_remset = alloc_remset (1024, NULL);
	global_remset->next = NULL;

	pthread_key_create (&remembered_set_key, unregister_thread);

#ifndef HAVE_KW_THREAD
	pthread_key_create (&thread_info_key, NULL);
#endif

	gc_initialized = TRUE;
	UNLOCK_GC;
	mono_gc_register_thread (&sinfo);
}
int
mono_gc_get_suspend_signal (void)
{
	return suspend_signal_num;
}
#ifdef HAVE_KW_THREAD
#define EMIT_TLS_ACCESS(mb,dummy,offset)	do {	\
	mono_mb_emit_byte ((mb), MONO_CUSTOM_PREFIX);	\
	mono_mb_emit_byte ((mb), CEE_MONO_TLS);	\
	mono_mb_emit_i4 ((mb), (offset));	\
	} while (0)
#else
#define EMIT_TLS_ACCESS(mb,member,dummy)	do {	\
	mono_mb_emit_byte ((mb), MONO_CUSTOM_PREFIX);	\
	mono_mb_emit_byte ((mb), CEE_MONO_TLS);	\
	mono_mb_emit_i4 ((mb), thread_info_key);	\
	mono_mb_emit_icon ((mb), G_STRUCT_OFFSET (SgenThreadInfo, member));	\
	mono_mb_emit_byte ((mb), CEE_ADD);	\
	mono_mb_emit_byte ((mb), CEE_LDIND_I);	\
	} while (0)
#endif
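
/*
 * The two variants above load the same logical TLS variable: with
 * __thread support (HAVE_KW_THREAD) the CEE_MONO_TLS opcode takes the
 * variable's TLS offset directly; otherwise we fetch the SgenThreadInfo
 * pointer from thread_info_key and indirect through the struct member
 * at the given offset.
 */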
#ifdef MANAGED_ALLOCATION
/* FIXME: Do this in the JIT, where specialized allocation sequences can be created
 * for each class. This is currently not easy to do, as it is hard to generate basic
 * blocks + branches, but it is easy with the linear IL codebase.
 *
 * For this to work we'd need to solve the TLAB race, first.  Now we
 * require the allocator to be in a few known methods to make sure
 * that they are executed atomically via the restart mechanism.
 */
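
/*
 * The IL emitted by create_allocator () below corresponds roughly to
 * this C sketch of the TLAB fast path (illustrative, not compiled):
 *
 *	size = ALIGN_UP (vtable->klass->instance_size);
 *	p = tlab_next;
 *	new_next = p + size;
 *	tlab_next = new_next;
 *	if (new_next < tlab_temp_end) {
 *		*p = vtable;	// install the vtable; memory is already zeroed
 *		return p;
 *	}
 *	return mono_gc_alloc_obj (vtable, size);	// slow path icall
 */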
static MonoMethod*
create_allocator (int atype)
{
	int p_var, size_var;
	guint32 slowpath_branch, max_size_branch;
	MonoMethodBuilder *mb;
	MonoMethod *res;
	MonoMethodSignature *csig;
	static gboolean registered = FALSE;
	int tlab_next_addr_var, new_next_var;
	int num_params, i;
	const char *name = NULL;
	AllocatorWrapperInfo *info;

#ifdef HAVE_KW_THREAD
	int tlab_next_addr_offset = -1;
	int tlab_temp_end_offset = -1;

	MONO_THREAD_VAR_OFFSET (tlab_next_addr, tlab_next_addr_offset);
	MONO_THREAD_VAR_OFFSET (tlab_temp_end, tlab_temp_end_offset);

	g_assert (tlab_next_addr_offset != -1);
	g_assert (tlab_temp_end_offset != -1);
#endif

	if (!registered) {
		mono_register_jit_icall (mono_gc_alloc_obj, "mono_gc_alloc_obj", mono_create_icall_signature ("object ptr int"), FALSE);
		mono_register_jit_icall (mono_gc_alloc_vector, "mono_gc_alloc_vector", mono_create_icall_signature ("object ptr int int"), FALSE);
		registered = TRUE;
	}

	if (atype == ATYPE_SMALL) {
		num_params = 1;
		name = "AllocSmall";
	} else if (atype == ATYPE_NORMAL) {
		num_params = 1;
		name = "Alloc";
	} else if (atype == ATYPE_VECTOR) {
		num_params = 2;
		name = "AllocVector";
	} else {
		g_assert_not_reached ();
	}

	csig = mono_metadata_signature_alloc (mono_defaults.corlib, num_params);
	csig->ret = &mono_defaults.object_class->byval_arg;
	for (i = 0; i < num_params; ++i)
		csig->params [i] = &mono_defaults.int_class->byval_arg;

	mb = mono_mb_new (mono_defaults.object_class, name, MONO_WRAPPER_ALLOC);
	size_var = mono_mb_add_local (mb, &mono_defaults.int32_class->byval_arg);
	if (atype == ATYPE_NORMAL || atype == ATYPE_SMALL) {
		/* size = vtable->klass->instance_size; */
		mono_mb_emit_ldarg (mb, 0);
		mono_mb_emit_icon (mb, G_STRUCT_OFFSET (MonoVTable, klass));
		mono_mb_emit_byte (mb, CEE_ADD);
		mono_mb_emit_byte (mb, CEE_LDIND_I);
		mono_mb_emit_icon (mb, G_STRUCT_OFFSET (MonoClass, instance_size));
		mono_mb_emit_byte (mb, CEE_ADD);
		/* FIXME: assert instance_size stays a 4 byte integer */
		mono_mb_emit_byte (mb, CEE_LDIND_U4);
		mono_mb_emit_stloc (mb, size_var);
	} else if (atype == ATYPE_VECTOR) {
		MonoExceptionClause *clause;
		int pos, pos_leave;
		MonoClass *oom_exc_class;
		MonoMethod *ctor;

		/* n > MONO_ARRAY_MAX_INDEX -> OverflowException */
		mono_mb_emit_ldarg (mb, 1);
		mono_mb_emit_icon (mb, MONO_ARRAY_MAX_INDEX);
		pos = mono_mb_emit_short_branch (mb, CEE_BLE_UN_S);
		mono_mb_emit_exception (mb, "OverflowException", NULL);
		mono_mb_patch_short_branch (mb, pos);

		clause = mono_image_alloc0 (mono_defaults.corlib, sizeof (MonoExceptionClause));
		clause->try_offset = mono_mb_get_label (mb);

		/* vtable->klass->sizes.element_size */
		mono_mb_emit_ldarg (mb, 0);
		mono_mb_emit_icon (mb, G_STRUCT_OFFSET (MonoVTable, klass));
		mono_mb_emit_byte (mb, CEE_ADD);
		mono_mb_emit_byte (mb, CEE_LDIND_I);
		mono_mb_emit_icon (mb, G_STRUCT_OFFSET (MonoClass, sizes.element_size));
		mono_mb_emit_byte (mb, CEE_ADD);
		mono_mb_emit_byte (mb, CEE_LDIND_U4);

		/* * n */
		mono_mb_emit_ldarg (mb, 1);
		mono_mb_emit_byte (mb, CEE_MUL_OVF_UN);
		/* + sizeof (MonoArray) */
		mono_mb_emit_icon (mb, sizeof (MonoArray));
		mono_mb_emit_byte (mb, CEE_ADD_OVF_UN);
		mono_mb_emit_stloc (mb, size_var);

		pos_leave = mono_mb_emit_branch (mb, CEE_LEAVE);

		/* catch */
		clause->flags = MONO_EXCEPTION_CLAUSE_NONE;
		clause->try_len = mono_mb_get_pos (mb) - clause->try_offset;
		clause->data.catch_class = mono_class_from_name (mono_defaults.corlib,
				"System", "OverflowException");
		g_assert (clause->data.catch_class);
		clause->handler_offset = mono_mb_get_label (mb);

		oom_exc_class = mono_class_from_name (mono_defaults.corlib,
				"System", "OutOfMemoryException");
		g_assert (oom_exc_class);
		ctor = mono_class_get_method_from_name (oom_exc_class, ".ctor", 0);
		g_assert (ctor);

		mono_mb_emit_byte (mb, CEE_POP);
		mono_mb_emit_op (mb, CEE_NEWOBJ, ctor);
		mono_mb_emit_byte (mb, CEE_THROW);

		clause->handler_len = mono_mb_get_pos (mb) - clause->handler_offset;
		mono_mb_set_clauses (mb, 1, clause);
		mono_mb_patch_branch (mb, pos_leave);
		/* end catch */
	} else {
		g_assert_not_reached ();
	}

	/* size += ALLOC_ALIGN - 1; */
	mono_mb_emit_ldloc (mb, size_var);
	mono_mb_emit_icon (mb, ALLOC_ALIGN - 1);
	mono_mb_emit_byte (mb, CEE_ADD);
	/* size &= ~(ALLOC_ALIGN - 1); */
	mono_mb_emit_icon (mb, ~(ALLOC_ALIGN - 1));
	mono_mb_emit_byte (mb, CEE_AND);
	mono_mb_emit_stloc (mb, size_var);

	/* if (size > MAX_SMALL_OBJ_SIZE) goto slowpath */
	if (atype != ATYPE_SMALL) {
		mono_mb_emit_ldloc (mb, size_var);
		mono_mb_emit_icon (mb, MAX_SMALL_OBJ_SIZE);
		max_size_branch = mono_mb_emit_short_branch (mb, MONO_CEE_BGT_S);
	}

	/*
	 * We need to modify tlab_next, but the JIT only supports reading, so we read
	 * another tls var holding its address instead.
	 */

	/* tlab_next_addr (local) = tlab_next_addr (TLS var) */
	tlab_next_addr_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
	EMIT_TLS_ACCESS (mb, tlab_next_addr, tlab_next_addr_offset);
	mono_mb_emit_stloc (mb, tlab_next_addr_var);

	/* p = (void**)tlab_next; */
	p_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
	mono_mb_emit_ldloc (mb, tlab_next_addr_var);
	mono_mb_emit_byte (mb, CEE_LDIND_I);
	mono_mb_emit_stloc (mb, p_var);

	/* new_next = (char*)p + size; */
	new_next_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
	mono_mb_emit_ldloc (mb, p_var);
	mono_mb_emit_ldloc (mb, size_var);
	mono_mb_emit_byte (mb, CEE_CONV_I);
	mono_mb_emit_byte (mb, CEE_ADD);
	mono_mb_emit_stloc (mb, new_next_var);

	/* tlab_next = new_next */
	mono_mb_emit_ldloc (mb, tlab_next_addr_var);
	mono_mb_emit_ldloc (mb, new_next_var);
	mono_mb_emit_byte (mb, CEE_STIND_I);

	/* if (G_LIKELY (new_next < tlab_temp_end)) */
	mono_mb_emit_ldloc (mb, new_next_var);
	EMIT_TLS_ACCESS (mb, tlab_temp_end, tlab_temp_end_offset);
	slowpath_branch = mono_mb_emit_short_branch (mb, MONO_CEE_BLT_UN_S);

	/* Slowpath */
	if (atype != ATYPE_SMALL)
		mono_mb_patch_short_branch (mb, max_size_branch);

	mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
	mono_mb_emit_byte (mb, CEE_MONO_NOT_TAKEN);

	/* FIXME: mono_gc_alloc_obj takes a 'size_t' as an argument, not an int32 */
	mono_mb_emit_ldarg (mb, 0);
	mono_mb_emit_ldloc (mb, size_var);
	if (atype == ATYPE_NORMAL || atype == ATYPE_SMALL) {
		mono_mb_emit_icall (mb, mono_gc_alloc_obj);
	} else if (atype == ATYPE_VECTOR) {
		mono_mb_emit_ldarg (mb, 1);
		mono_mb_emit_icall (mb, mono_gc_alloc_vector);
	} else {
		g_assert_not_reached ();
	}
	mono_mb_emit_byte (mb, CEE_RET);

	/* Fastpath */
	mono_mb_patch_short_branch (mb, slowpath_branch);

	/* FIXME: Memory barrier */

	/* *p = vtable; */
	mono_mb_emit_ldloc (mb, p_var);
	mono_mb_emit_ldarg (mb, 0);
	mono_mb_emit_byte (mb, CEE_STIND_I);

	if (atype == ATYPE_VECTOR) {
		/* arr->max_length = max_length; */
		mono_mb_emit_ldloc (mb, p_var);
		mono_mb_emit_ldflda (mb, G_STRUCT_OFFSET (MonoArray, max_length));
		mono_mb_emit_ldarg (mb, 1);
		mono_mb_emit_byte (mb, CEE_STIND_I);
	}

	/* return p */
	mono_mb_emit_ldloc (mb, p_var);
	mono_mb_emit_byte (mb, CEE_RET);

	res = mono_mb_create_method (mb, csig, 8);
	mono_mb_free (mb);
	mono_method_get_header (res)->init_locals = FALSE;

	info = mono_image_alloc0 (mono_defaults.corlib, sizeof (AllocatorWrapperInfo));
	info->gc_name = "sgen";
	info->alloc_type = atype;
	mono_marshal_set_wrapper_info (res, info);

	return res;
}
const char *
mono_gc_get_gc_name (void)
{
	return "sgen";
}

static MonoMethod* alloc_method_cache [ATYPE_NUM];
static MonoMethod *write_barrier_method;
static gboolean
is_ip_in_managed_allocator (MonoDomain *domain, gpointer ip)
{
	MonoJitInfo *ji;
	MonoMethod *method;
	int i;

	if (!ip || !domain)
		return FALSE;
	ji = mono_jit_info_table_find (domain, ip);
	if (!ji)
		return FALSE;
	method = ji->method;

	if (method == write_barrier_method)
		return TRUE;
	for (i = 0; i < ATYPE_NUM; ++i)
		if (method == alloc_method_cache [i])
			return TRUE;
	return FALSE;
}
/*
 * Generate an allocator method implementing the fast path of mono_gc_alloc_obj ().
 * The signature of the called method is:
 * 	object allocate (MonoVTable *vtable)
 */
MonoMethod*
mono_gc_get_managed_allocator (MonoVTable *vtable, gboolean for_box)
{
#ifdef MANAGED_ALLOCATION
	MonoClass *klass = vtable->klass;

#ifdef HAVE_KW_THREAD
	int tlab_next_offset = -1;
	int tlab_temp_end_offset = -1;
	MONO_THREAD_VAR_OFFSET (tlab_next, tlab_next_offset);
	MONO_THREAD_VAR_OFFSET (tlab_temp_end, tlab_temp_end_offset);

	if (tlab_next_offset == -1 || tlab_temp_end_offset == -1)
		return NULL;
#endif

	if (!mono_runtime_has_tls_get ())
		return NULL;
	if (klass->instance_size > tlab_size)
		return NULL;
	if (klass->has_finalize || klass->marshalbyref || (mono_profiler_get_events () & MONO_PROFILE_ALLOCATIONS))
		return NULL;
	if (klass->rank)
		return NULL;
	if (klass->byval_arg.type == MONO_TYPE_STRING)
		return NULL;
	if (collect_before_allocs)
		return NULL;

	if (ALIGN_TO (klass->instance_size, ALLOC_ALIGN) < MAX_SMALL_OBJ_SIZE)
		return mono_gc_get_managed_allocator_by_type (ATYPE_SMALL);
	else
		return mono_gc_get_managed_allocator_by_type (ATYPE_NORMAL);
#else
	return NULL;
#endif
}
MonoMethod*
mono_gc_get_managed_array_allocator (MonoVTable *vtable, int rank)
{
#ifdef MANAGED_ALLOCATION
	MonoClass *klass = vtable->klass;

#ifdef HAVE_KW_THREAD
	int tlab_next_offset = -1;
	int tlab_temp_end_offset = -1;
	MONO_THREAD_VAR_OFFSET (tlab_next, tlab_next_offset);
	MONO_THREAD_VAR_OFFSET (tlab_temp_end, tlab_temp_end_offset);

	if (tlab_next_offset == -1 || tlab_temp_end_offset == -1)
		return NULL;
#endif

	if (rank != 1)
		return NULL;
	if (!mono_runtime_has_tls_get ())
		return NULL;
	if (mono_profiler_get_events () & MONO_PROFILE_ALLOCATIONS)
		return NULL;
	if (collect_before_allocs)
		return NULL;
	g_assert (!klass->has_finalize && !klass->marshalbyref);

	return mono_gc_get_managed_allocator_by_type (ATYPE_VECTOR);
#else
	return NULL;
#endif
}
MonoMethod*
mono_gc_get_managed_allocator_by_type (int atype)
{
#ifdef MANAGED_ALLOCATION
	MonoMethod *res;

	if (!mono_runtime_has_tls_get ())
		return NULL;

	mono_loader_lock ();
	res = alloc_method_cache [atype];
	if (!res)
		res = alloc_method_cache [atype] = create_allocator (atype);
	mono_loader_unlock ();
	return res;
#else
	return NULL;
#endif
}
guint32
mono_gc_get_managed_allocator_types (void)
{
	return ATYPE_NUM;
}
MonoMethod*
mono_gc_get_write_barrier (void)
{
	MonoMethod *res;
	MonoMethodBuilder *mb;
	MonoMethodSignature *sig;
#ifdef MANAGED_WBARRIER
	int label_no_wb_1, label_no_wb_2, label_no_wb_3, label_no_wb_4, label_need_wb, label_slow_path;
#ifndef SGEN_ALIGN_NURSERY
	int label_continue_1, label_continue_2, label_no_wb_5;
	int dereferenced_var;
#endif
	int buffer_var, buffer_index_var, dummy_var;

#ifdef HAVE_KW_THREAD
	int stack_end_offset = -1, store_remset_buffer_offset = -1;
	int store_remset_buffer_index_offset = -1, store_remset_buffer_index_addr_offset = -1;

	MONO_THREAD_VAR_OFFSET (stack_end, stack_end_offset);
	g_assert (stack_end_offset != -1);
	MONO_THREAD_VAR_OFFSET (store_remset_buffer, store_remset_buffer_offset);
	g_assert (store_remset_buffer_offset != -1);
	MONO_THREAD_VAR_OFFSET (store_remset_buffer_index, store_remset_buffer_index_offset);
	g_assert (store_remset_buffer_index_offset != -1);
	MONO_THREAD_VAR_OFFSET (store_remset_buffer_index_addr, store_remset_buffer_index_addr_offset);
	g_assert (store_remset_buffer_index_addr_offset != -1);
#endif
#endif

	// FIXME: Maybe create a separate version for ctors (the branch would be
	// correctly predicted more times)
	if (write_barrier_method)
		return write_barrier_method;

	/* Create the IL version of mono_gc_barrier_generic_store () */
	sig = mono_metadata_signature_alloc (mono_defaults.corlib, 1);
	sig->ret = &mono_defaults.void_class->byval_arg;
	sig->params [0] = &mono_defaults.int_class->byval_arg;

	mb = mono_mb_new (mono_defaults.object_class, "wbarrier", MONO_WRAPPER_WRITE_BARRIER);

#ifdef MANAGED_WBARRIER
	if (mono_runtime_has_tls_get ()) {
#ifdef SGEN_ALIGN_NURSERY
		// if (ptr_in_nursery (ptr)) return;
		/*
		 * Masking out the bits might be faster, but we would have to use 64 bit
		 * immediates, which might be slower.
		 */
		mono_mb_emit_ldarg (mb, 0);
		mono_mb_emit_icon (mb, DEFAULT_NURSERY_BITS);
		mono_mb_emit_byte (mb, CEE_SHR_UN);
		mono_mb_emit_icon (mb, (mword)nursery_start >> DEFAULT_NURSERY_BITS);
		label_no_wb_1 = mono_mb_emit_branch (mb, CEE_BEQ);

		// if (!ptr_in_nursery (*ptr)) return;
		mono_mb_emit_ldarg (mb, 0);
		mono_mb_emit_byte (mb, CEE_LDIND_I);
		mono_mb_emit_icon (mb, DEFAULT_NURSERY_BITS);
		mono_mb_emit_byte (mb, CEE_SHR_UN);
		mono_mb_emit_icon (mb, (mword)nursery_start >> DEFAULT_NURSERY_BITS);
		label_no_wb_2 = mono_mb_emit_branch (mb, CEE_BNE_UN);
#else
		// if (ptr < (nursery_start)) goto continue;
		mono_mb_emit_ldarg (mb, 0);
		mono_mb_emit_ptr (mb, (gpointer) nursery_start);
		label_continue_1 = mono_mb_emit_branch (mb, CEE_BLT);

		// if (ptr >= nursery_real_end)) goto continue;
		mono_mb_emit_ldarg (mb, 0);
		mono_mb_emit_ptr (mb, (gpointer) nursery_real_end);
		label_continue_2 = mono_mb_emit_branch (mb, CEE_BGE);

		// Otherwise return
		label_no_wb_1 = mono_mb_emit_branch (mb, CEE_BR);

		// continue:
		mono_mb_patch_branch (mb, label_continue_1);
		mono_mb_patch_branch (mb, label_continue_2);

		// Dereference and store in local var
		dereferenced_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
		mono_mb_emit_ldarg (mb, 0);
		mono_mb_emit_byte (mb, CEE_LDIND_I);
		mono_mb_emit_stloc (mb, dereferenced_var);

		// if (*ptr < nursery_start) return;
		mono_mb_emit_ldloc (mb, dereferenced_var);
		mono_mb_emit_ptr (mb, (gpointer) nursery_start);
		label_no_wb_2 = mono_mb_emit_branch (mb, CEE_BLT);

		// if (*ptr >= nursery_end) return;
		mono_mb_emit_ldloc (mb, dereferenced_var);
		mono_mb_emit_ptr (mb, (gpointer) nursery_real_end);
		label_no_wb_5 = mono_mb_emit_branch (mb, CEE_BGE);
#endif
		// if (ptr >= stack_end) goto need_wb;
		mono_mb_emit_ldarg (mb, 0);
		EMIT_TLS_ACCESS (mb, stack_end, stack_end_offset);
		label_need_wb = mono_mb_emit_branch (mb, CEE_BGE_UN);

		// if (ptr >= stack_start) return;
		dummy_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
		mono_mb_emit_ldarg (mb, 0);
		mono_mb_emit_ldloc_addr (mb, dummy_var);
		label_no_wb_3 = mono_mb_emit_branch (mb, CEE_BGE_UN);

		// need_wb:
		mono_mb_patch_branch (mb, label_need_wb);

		// buffer = STORE_REMSET_BUFFER;
		buffer_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
		EMIT_TLS_ACCESS (mb, store_remset_buffer, store_remset_buffer_offset);
		mono_mb_emit_stloc (mb, buffer_var);

		// buffer_index = STORE_REMSET_BUFFER_INDEX;
		buffer_index_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
		EMIT_TLS_ACCESS (mb, store_remset_buffer_index, store_remset_buffer_index_offset);
		mono_mb_emit_stloc (mb, buffer_index_var);

		// if (buffer [buffer_index] == ptr) return;
		mono_mb_emit_ldloc (mb, buffer_var);
		mono_mb_emit_ldloc (mb, buffer_index_var);
		g_assert (sizeof (gpointer) == 4 || sizeof (gpointer) == 8);
		mono_mb_emit_icon (mb, sizeof (gpointer) == 4 ? 2 : 3);
		mono_mb_emit_byte (mb, CEE_SHL);
		mono_mb_emit_byte (mb, CEE_ADD);
		mono_mb_emit_byte (mb, CEE_LDIND_I);
		mono_mb_emit_ldarg (mb, 0);
		label_no_wb_4 = mono_mb_emit_branch (mb, CEE_BEQ);

		// ++buffer_index;
		mono_mb_emit_ldloc (mb, buffer_index_var);
		mono_mb_emit_icon (mb, 1);
		mono_mb_emit_byte (mb, CEE_ADD);
		mono_mb_emit_stloc (mb, buffer_index_var);

		// if (buffer_index >= STORE_REMSET_BUFFER_SIZE) goto slow_path;
		mono_mb_emit_ldloc (mb, buffer_index_var);
		mono_mb_emit_icon (mb, STORE_REMSET_BUFFER_SIZE);
		label_slow_path = mono_mb_emit_branch (mb, CEE_BGE);

		// buffer [buffer_index] = ptr;
		mono_mb_emit_ldloc (mb, buffer_var);
		mono_mb_emit_ldloc (mb, buffer_index_var);
		g_assert (sizeof (gpointer) == 4 || sizeof (gpointer) == 8);
		mono_mb_emit_icon (mb, sizeof (gpointer) == 4 ? 2 : 3);
		mono_mb_emit_byte (mb, CEE_SHL);
		mono_mb_emit_byte (mb, CEE_ADD);
		mono_mb_emit_ldarg (mb, 0);
		mono_mb_emit_byte (mb, CEE_STIND_I);

		// STORE_REMSET_BUFFER_INDEX = buffer_index;
		EMIT_TLS_ACCESS (mb, store_remset_buffer_index_addr, store_remset_buffer_index_addr_offset);
		mono_mb_emit_ldloc (mb, buffer_index_var);
		mono_mb_emit_byte (mb, CEE_STIND_I);

		// return;
		mono_mb_patch_branch (mb, label_no_wb_1);
		mono_mb_patch_branch (mb, label_no_wb_2);
		mono_mb_patch_branch (mb, label_no_wb_3);
		mono_mb_patch_branch (mb, label_no_wb_4);
#ifndef SGEN_ALIGN_NURSERY
		mono_mb_patch_branch (mb, label_no_wb_5);
#endif
		mono_mb_emit_byte (mb, CEE_RET);

		// slow path
		mono_mb_patch_branch (mb, label_slow_path);
	}
#endif

	mono_mb_emit_ldarg (mb, 0);
	mono_mb_emit_icall (mb, mono_gc_wbarrier_generic_nostore);
	mono_mb_emit_byte (mb, CEE_RET);

	res = mono_mb_create_method (mb, sig, 16);
	mono_mb_free (mb);

	mono_loader_lock ();
	if (write_barrier_method) {
		/* Already created */
		mono_free_method (res);
	} else {
		/* double-checked locking */
		mono_memory_barrier ();
		write_barrier_method = res;
	}
	mono_loader_unlock ();

	return write_barrier_method;
}
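
/*
 * Design note: the wrapper is cached in write_barrier_method with
 * double-checked locking under the loader lock (see above); a racing
 * creator discards its duplicate via mono_free_method (), so callers
 * always end up with one canonical barrier method.
 */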
char*
mono_gc_get_description (void)
{
	return g_strdup ("sgen");
}

void
mono_gc_set_desktop_mode (void)
{
}

gboolean
mono_gc_is_moving (void)
{
	return TRUE;
}

gboolean
mono_gc_is_disabled (void)
{
	return FALSE;
}

void
mono_sgen_debug_printf (int level, const char *format, ...)
{
	va_list ap;

	if (level > gc_debug_level)
		return;

	va_start (ap, format);
	vfprintf (gc_debug_file, format, ap);
	va_end (ap);
}
#endif /* HAVE_SGEN_GC */