/*
 * Nursery allocation code.
 *
 * Copyright 2009-2010 Novell, Inc.
 * Copyright 2011 Xamarin Inc (http://www.xamarin.com)
 * Copyright (C) 2012 Xamarin Inc
 *
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
 */

/*
 * The young generation is divided into fragments. This is because
 * we can hand one fragment to a thread for lock-less fast alloc and
 * because the young generation ends up fragmented anyway by pinned objects.
 * Once a collection is done, a list of fragments is created. When doing
 * thread local alloc we use smallish nurseries so that new threads can
 * allocate memory from gen0 without triggering a collection. Threads that
 * are found to allocate lots of memory are given bigger fragments. This
 * should make the finalizer thread use little nursery memory after a while.
 * We should start by assigning threads very small fragments: if there are many
 * threads, the nursery will otherwise be full of reserved space that the threads
 * may never use, slowing down allocation.
 * Thread local allocation is done from areas of memory HotSpot calls Thread Local
 * Allocation Buffers (TLABs).
 */
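/*
 * For orientation only: allocation from a fragment/TLAB boils down to a
 * bump-pointer sequence along these lines (hypothetical 'tlab_next' and
 * 'tlab_end' thread-local variables, not actual names from this file):
 *
 *	char *p = tlab_next;
 *	if (p + size <= tlab_end) {
 *		tlab_next = p + size;	// no locking: the TLAB is owned by one thread
 *		return p;
 *	}
 *	return NULL;			// TLAB exhausted, request a new fragment
 *
 * The code below manages the fragment lists that back these buffers.
 */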
#ifdef HAVE_SEMAPHORE_H
#include <semaphore.h>
#endif

#include "mono/sgen/sgen-gc.h"
#include "mono/sgen/sgen-cardtable.h"
#include "mono/sgen/sgen-protocol.h"
#include "mono/sgen/sgen-memory-governor.h"
#include "mono/sgen/sgen-pinning.h"
#include "mono/sgen/sgen-client.h"
#include "mono/utils/mono-membar.h"
/* Enable this to collect nursery allocation diagnostic data. */
//#define NALLOC_DEBUG 1
/* The mutator allocates from here. */
static SgenFragmentAllocator mutator_allocator;

/* free-list of fragment structures */
static SgenFragment *fragment_freelist = NULL;

char *sgen_nursery_start;
char *sgen_nursery_end;

/* Good sizes are 512KB-1MB: larger sizes significantly increase memzeroing time. */
size_t sgen_nursery_size;

/*
 * Maximum size that we can resize the nursery to.
 * If sgen_nursery_default_size == sgen_nursery_max_size then we are not
 * dynamically resizing the nursery.
 */
size_t sgen_nursery_max_size;
size_t sgen_nursery_min_size;

/* The number of trailing 0 bits in sgen_nursery_max_size */
int sgen_nursery_bits;
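/*
 * Since sgen_nursery_max_size is a power of two, the trailing-zero count is
 * just log2 of the size. For example (assuming a hypothetical 4MB maximum
 * nursery): 4MB = 2^22, so sgen_nursery_bits == 22 and
 * (ONE_P << sgen_nursery_bits) == sgen_nursery_max_size.
 */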
char *sgen_space_bitmap;
size_t sgen_space_bitmap_size;
#ifdef HEAVY_STATISTICS

static mword stat_wasted_bytes_trailer = 0;
static mword stat_wasted_bytes_small_areas = 0;
static mword stat_wasted_bytes_discarded_fragments = 0;
static guint64 stat_nursery_alloc_requests = 0;
static guint64 stat_alloc_iterations = 0;
static guint64 stat_alloc_retries = 0;

static guint64 stat_nursery_alloc_range_requests = 0;
static guint64 stat_alloc_range_iterations = 0;
static guint64 stat_alloc_range_retries = 0;

#endif
/************************************ Nursery allocation debugging ************************************/

#ifdef NALLOC_DEBUG

enum {
	FIXED_ALLOC,
	RANGE_ALLOC,
	PINNING,
	BLOCK_ZEROING,
	CLEAR_NURSERY_FRAGS
};

typedef struct {
	char *address;
	size_t size;
	int reason;
	int seq;
	MonoNativeThreadId tid;
} AllocRecord;

#define ALLOC_RECORD_COUNT 128000

static AllocRecord *alloc_records;
static volatile int next_record;
static volatile int alloc_count;

void dump_alloc_records (void);
void verify_alloc_records (void);
static const char*
get_reason_name (AllocRecord *rec)
{
	switch (rec->reason) {
	case FIXED_ALLOC: return "fixed-alloc";
	case RANGE_ALLOC: return "range-alloc";
	case PINNING: return "pinning";
	case BLOCK_ZEROING: return "block-zeroing";
	case CLEAR_NURSERY_FRAGS: return "clear-nursery-frag";
	default: return "invalid";
	}
}
static void
reset_alloc_records (void)
{
	/* Reset the record and allocation counters */
	next_record = 0;
	alloc_count = 0;
}
static void
add_alloc_record (char *addr, size_t size, int reason)
{
	int idx = mono_atomic_inc_i32 (&next_record) - 1;
	alloc_records [idx].address = addr;
	alloc_records [idx].size = size;
	alloc_records [idx].reason = reason;
	alloc_records [idx].seq = idx;
	alloc_records [idx].tid = mono_native_thread_id_get ();
}
static int
comp_alloc_record (const void *_a, const void *_b)
{
	const AllocRecord *a = _a;
	const AllocRecord *b = _b;
	if (a->address == b->address)
		return a->seq - b->seq;
	/* Compare instead of subtracting: a pointer difference can overflow int. */
	return a->address < b->address ? -1 : 1;
}
#define rec_end(REC) ((REC)->address + (REC)->size)
void
dump_alloc_records (void)
{
	int i;
	sgen_qsort (alloc_records, next_record, sizeof (AllocRecord), comp_alloc_record);

	printf ("------------------------------------DUMP RECORDS----------------------------\n");
	for (i = 0; i < next_record; ++i) {
		AllocRecord *rec = alloc_records + i;
		printf ("obj [%p, %p] size %d reason %s seq %d tid %zx\n", rec->address, rec_end (rec), (int)rec->size, get_reason_name (rec), rec->seq, (size_t)rec->tid);
	}
}
void
verify_alloc_records (void)
{
	int i;
	int total = 0;
	int holes = 0;
	int max_hole = 0;
	AllocRecord *prev = NULL;

	sgen_qsort (alloc_records, next_record, sizeof (AllocRecord), comp_alloc_record);
	printf ("------------------------------------DUMP RECORDS- %d %d---------------------------\n", next_record, alloc_count);
	for (i = 0; i < next_record; ++i) {
		AllocRecord *rec = alloc_records + i;
		int hole_size = 0;
		total += rec->size;
		if (prev) {
			if (rec_end (prev) > rec->address)
				printf ("WE GOT OVERLAPPING objects %p and %p\n", prev->address, rec->address);
			if ((rec->address - rec_end (prev)) >= 8)
				++holes;
			hole_size = rec->address - rec_end (prev);
			max_hole = MAX (max_hole, hole_size);
		}
		printf ("obj [%p, %p] size %d hole to prev %d reason %s seq %d tid %zx\n", rec->address, rec_end (rec), (int)rec->size, hole_size, get_reason_name (rec), rec->seq, (size_t)rec->tid);
		prev = rec;
	}
	printf ("SUMMARY total alloc'd %d holes %d max_hole %d\n", total, holes, max_hole);
}

#endif /* NALLOC_DEBUG */
/*********************************************************************************/
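/*
 * The fragment allocator keeps its alloc list lock-free using Michael's
 * linked-list scheme: a node is logically deleted by setting the low bit of
 * its 'next' pointer with a CAS, and physically unlinked by a second CAS on
 * the predecessor's link. The helpers below implement the pointer tagging;
 * they rely on SgenFragment pointers being at least 4-byte aligned, which
 * leaves the two low bits free for marks.
 */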
static inline gpointer
mask (gpointer n, uintptr_t bit)
{
	return (gpointer)(((uintptr_t)n) | bit);
}
static inline gpointer
unmask (gpointer p)
{
	return (gpointer)((uintptr_t)p & ~(uintptr_t)0x3);
}
static inline uintptr_t
get_mark (gpointer n)
{
	return (uintptr_t)n & 0x1;
}
/* MUST be called with the world stopped */
static SgenFragment*
sgen_fragment_allocator_alloc (void)
{
	SgenFragment *frag = fragment_freelist;
	if (frag) {
		fragment_freelist = frag->next_in_order;
		frag->next = frag->next_in_order = NULL;
		return frag;
	}
	frag = (SgenFragment *)sgen_alloc_internal (INTERNAL_MEM_FRAGMENT);
	frag->next = frag->next_in_order = NULL;
	return frag;
}
void
sgen_fragment_allocator_add (SgenFragmentAllocator *allocator, char *start, char *end)
{
	SgenFragment *fragment;

	fragment = sgen_fragment_allocator_alloc ();
	fragment->fragment_start = start;
	fragment->fragment_next = start;
	fragment->fragment_end = end;
	fragment->next_in_order = fragment->next = (SgenFragment *)unmask (allocator->region_head);

	allocator->region_head = allocator->alloc_head = fragment;
	g_assert (fragment->fragment_end > fragment->fragment_start);
}
void
sgen_fragment_allocator_release (SgenFragmentAllocator *allocator)
{
	SgenFragment *last = allocator->region_head;
	if (!last)
		return;

	/* Find the last fragment in insertion order */
	for (; last->next_in_order; last = last->next_in_order)
		;

	last->next_in_order = fragment_freelist;
	fragment_freelist = allocator->region_head;
	allocator->alloc_head = allocator->region_head = NULL;
}
static SgenFragment**
find_previous_pointer_fragment (SgenFragmentAllocator *allocator, SgenFragment *frag)
{
	SgenFragment **prev;
	SgenFragment *cur, *next;
#ifdef NALLOC_DEBUG
	int count = 0;
#endif

try_again:
	prev = &allocator->alloc_head;
#ifdef NALLOC_DEBUG
	if (count++ > 5)
		printf ("retry count for fppf is %d\n", count);
#endif

	cur = (SgenFragment *)unmask (*prev);

	while (1) {
		if (cur == NULL)
			return NULL;
		next = cur->next;

		/*
		 * We need to make sure that we dereference prev below
		 * after reading cur->next above, so we need a read
		 * barrier.
		 */
		mono_memory_read_barrier ();

		if (*prev != cur)
			goto try_again;

		if (!get_mark (next)) {
			if (cur == frag)
				return prev;
			prev = &cur->next;
		} else {
			next = (SgenFragment *)unmask (next);
			if (mono_atomic_cas_ptr ((volatile gpointer *)prev, next, cur) != cur)
				goto try_again;
			/* we must make sure that the next read from cur->next happens after the CAS above */
			mono_memory_write_barrier ();
		}

		cur = (SgenFragment *)unmask (next);
	}
}
static gboolean
claim_remaining_size (SgenFragment *frag, char *alloc_end)
{
	/* All space used, nothing to claim. */
	if (frag->fragment_end <= alloc_end)
		return FALSE;

	/* Try to alloc all the remaining space. */
	return mono_atomic_cas_ptr ((volatile gpointer *)&frag->fragment_next, frag->fragment_end, alloc_end) == alloc_end;
}
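/*
 * Note on the CAS above: it only succeeds if fragment_next still equals
 * alloc_end, i.e. no other thread has bump-allocated from this fragment in
 * the meantime. On success the whole tail [alloc_end, fragment_end) belongs
 * to the caller; on failure somebody else got there first and we claim
 * nothing.
 */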
static void*
par_alloc_from_fragment (SgenFragmentAllocator *allocator, SgenFragment *frag, size_t size)
{
	char *p = frag->fragment_next;
	char *end = p + size;

	if (end > frag->fragment_end || end > (sgen_nursery_start + sgen_nursery_size))
		return NULL;

	/* p = frag->fragment_next must happen before the CAS below */
	mono_memory_barrier ();

	if (mono_atomic_cas_ptr ((volatile gpointer *)&frag->fragment_next, end, p) != p)
		return NULL;

	if (frag->fragment_end - end < SGEN_MAX_NURSERY_WASTE) {
		SgenFragment *next, **prev_ptr;

		/*
		 * Before we clean the remaining nursery, we must claim the remaining space
		 * as it could end up being used by the range allocator, since it can end up
		 * allocating from this dying fragment as it doesn't respect SGEN_MAX_NURSERY_WASTE
		 * when doing second chance allocation.
		 */
		if ((sgen_get_nursery_clear_policy () == CLEAR_AT_TLAB_CREATION || sgen_get_nursery_clear_policy () == CLEAR_AT_TLAB_CREATION_DEBUG) && claim_remaining_size (frag, end)) {
			sgen_clear_range (end, frag->fragment_end);
			HEAVY_STAT (stat_wasted_bytes_trailer += frag->fragment_end - end);
#ifdef NALLOC_DEBUG
			add_alloc_record (end, frag->fragment_end - end, BLOCK_ZEROING);
#endif
		}

		prev_ptr = find_previous_pointer_fragment (allocator, frag);

		/* Use Michael's lock-free linked-list removal. */

		/* prev_ptr will be null if the fragment was removed concurrently */
		while (prev_ptr) {
			next = frag->next;

			if (!get_mark (next)) {
				/* frag->next read must happen before the first CAS */
				mono_memory_write_barrier ();

				/* Fail if the next node is removed concurrently and its CAS wins */
				if (mono_atomic_cas_ptr ((volatile gpointer *)&frag->next, mask (next, 1), next) != next)
					continue;
			}

			/* The second CAS must happen after the first CAS or frag->next. */
			mono_memory_write_barrier ();

			/* Fail if the previous node was deleted and its CAS wins */
			if (mono_atomic_cas_ptr ((volatile gpointer *)prev_ptr, unmask (next), frag) != frag) {
				prev_ptr = find_previous_pointer_fragment (allocator, frag);
				continue;
			}
			break;
		}
	}

	return p;
}
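/*
 * To recap the removal protocol used above (a sketch, following the comments
 * in this file): a fragment that has shrunk below SGEN_MAX_NURSERY_WASTE is
 * taken off the alloc list in two steps. First, CAS the low bit into
 * frag->next, which logically deletes the node and stops concurrent updates
 * behind it. Second, CAS the predecessor's link from frag to unmask (next),
 * which physically unlinks it. Either CAS can lose a race, in which case the
 * predecessor is looked up again and the step is retried.
 */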
static void*
serial_alloc_from_fragment (SgenFragment **previous, SgenFragment *frag, size_t size)
{
	char *p = frag->fragment_next;
	char *end = p + size;

	if (end > frag->fragment_end)
		return NULL;

	frag->fragment_next = end;

	if (frag->fragment_end - end < SGEN_MAX_NURSERY_WASTE) {
		*previous = frag->next;

		/* Clear the remaining space, pinning depends on this. FIXME move this to use phony arrays */
		memset (end, 0, frag->fragment_end - end);

		*previous = frag->next;
	}

	return p;
}
void*
sgen_fragment_allocator_par_alloc (SgenFragmentAllocator *allocator, size_t size)
{
	SgenFragment *frag;

#ifdef NALLOC_DEBUG
	mono_atomic_inc_i32 (&alloc_count);
#endif

restart:
	for (frag = (SgenFragment *)unmask (allocator->alloc_head); unmask (frag); frag = (SgenFragment *)unmask (frag->next)) {
		size_t frag_size = frag->fragment_end - frag->fragment_next;

		if (frag->fragment_next >= (sgen_nursery_start + sgen_nursery_size))
			continue;

		HEAVY_STAT (++stat_alloc_iterations);

		if (size <= frag_size) {
			void *p = par_alloc_from_fragment (allocator, frag, size);
			if (!p) {
				HEAVY_STAT (++stat_alloc_retries);
				goto restart;
			}
#ifdef NALLOC_DEBUG
			add_alloc_record (p, size, FIXED_ALLOC);
#endif
			return p;
		}
	}
	return NULL;
}
void*
sgen_fragment_allocator_serial_range_alloc (SgenFragmentAllocator *allocator, size_t desired_size, size_t minimum_size, size_t *out_alloc_size)
{
	SgenFragment *frag, **previous, *min_frag = NULL, **prev_min_frag = NULL;
	size_t current_minimum = minimum_size;

#ifdef NALLOC_DEBUG
	mono_atomic_inc_i32 (&alloc_count);
#endif

	previous = &allocator->alloc_head;

	for (frag = *previous; frag; frag = *previous) {
		size_t frag_size = frag->fragment_end - frag->fragment_next;

		HEAVY_STAT (++stat_alloc_range_iterations);

		if (desired_size <= frag_size) {
			void *p;
			*out_alloc_size = desired_size;

			p = serial_alloc_from_fragment (previous, frag, desired_size);
#ifdef NALLOC_DEBUG
			add_alloc_record (p, desired_size, RANGE_ALLOC);
#endif
			return p;
		}
		if (current_minimum <= frag_size) {
			min_frag = frag;
			prev_min_frag = previous;
			current_minimum = frag_size;
		}
		previous = &frag->next;
	}

	if (min_frag) {
		void *p;
		size_t frag_size = min_frag->fragment_end - min_frag->fragment_next;
		*out_alloc_size = frag_size;

		p = serial_alloc_from_fragment (prev_min_frag, min_frag, frag_size);
#ifdef NALLOC_DEBUG
		add_alloc_record (p, frag_size, RANGE_ALLOC);
#endif
		return p;
	}

	return NULL;
}
void*
sgen_fragment_allocator_par_range_alloc (SgenFragmentAllocator *allocator, size_t desired_size, size_t minimum_size, size_t *out_alloc_size)
{
	SgenFragment *frag, *min_frag;
	size_t current_minimum;

restart:
	min_frag = NULL;
	current_minimum = minimum_size;

#ifdef NALLOC_DEBUG
	mono_atomic_inc_i32 (&alloc_count);
#endif

	for (frag = (SgenFragment *)unmask (allocator->alloc_head); frag; frag = (SgenFragment *)unmask (frag->next)) {
		size_t frag_size = frag->fragment_end - frag->fragment_next;

		if (frag->fragment_next >= (sgen_nursery_start + sgen_nursery_size))
			continue;

		HEAVY_STAT (++stat_alloc_range_iterations);

		if (desired_size <= frag_size) {
			void *p;
			*out_alloc_size = desired_size;

			p = par_alloc_from_fragment (allocator, frag, desired_size);
			if (!p) {
				HEAVY_STAT (++stat_alloc_range_retries);
				goto restart;
			}
#ifdef NALLOC_DEBUG
			add_alloc_record (p, desired_size, RANGE_ALLOC);
#endif
			return p;
		}
		if (current_minimum <= frag_size) {
			min_frag = frag;
			current_minimum = frag_size;
		}
	}

	/* The second fragment_next read should be ordered with respect to the first code block */
	mono_memory_barrier ();

	if (min_frag) {
		void *p;
		size_t frag_size = min_frag->fragment_end - min_frag->fragment_next;

		if (frag_size < minimum_size)
			goto restart;

		*out_alloc_size = frag_size;

		mono_memory_barrier ();
		p = par_alloc_from_fragment (allocator, min_frag, frag_size);

		/* XXX restarting here is quite dubious given this is already second chance allocation. */
		if (!p) {
			HEAVY_STAT (++stat_alloc_retries);
			goto restart;
		}
#ifdef NALLOC_DEBUG
		add_alloc_record (p, frag_size, RANGE_ALLOC);
#endif
		return p;
	}

	return NULL;
}
static void
sgen_clear_allocator_fragments (SgenFragmentAllocator *allocator)
{
	SgenFragment *frag;

	for (frag = (SgenFragment *)unmask (allocator->alloc_head); frag; frag = (SgenFragment *)unmask (frag->next)) {
		SGEN_LOG (4, "Clear nursery frag %p-%p", frag->fragment_next, frag->fragment_end);
		sgen_clear_range (frag->fragment_next, frag->fragment_end);
#ifdef NALLOC_DEBUG
		add_alloc_record (frag->fragment_next, frag->fragment_end - frag->fragment_next, CLEAR_NURSERY_FRAGS);
#endif
	}
}
/* Clear all remaining nursery fragments */
void
sgen_clear_nursery_fragments (void)
{
	if (sgen_get_nursery_clear_policy () == CLEAR_AT_TLAB_CREATION || sgen_get_nursery_clear_policy () == CLEAR_AT_TLAB_CREATION_DEBUG) {
		sgen_clear_allocator_fragments (&mutator_allocator);
		sgen_minor_collector.clear_fragments ();
	}
}
/*
 * Mark a given range of memory as invalid.
 *
 * This can be done either by zeroing memory or by placing
 * a phony byte[] array. This keeps the heap forward walkable.
 *
 * This function ignores calls with a zero range, even if
 * both start and end are NULL.
 */
void
sgen_clear_range (char *start, char *end)
{
	size_t size = end - start;

	if ((start && !end) || (start > end))
		g_error ("Invalid range [%p %p]", start, end);

	if (sgen_client_array_fill_range (start, size)) {
		sgen_set_nursery_scan_start (start);
		SGEN_ASSERT (0, start + sgen_safe_object_get_size ((GCObject *)start) == end, "Array fill produced wrong size");
	}
}
void
sgen_nursery_allocator_prepare_for_pinning (void)
{
	sgen_clear_allocator_fragments (&mutator_allocator);
	sgen_minor_collector.clear_fragments ();
}
static mword fragment_total = 0;
/*
 * We found a fragment of free memory in the nursery: memzero it and if
 * it is big enough, add it to the list of fragments that can be used for
 * allocation.
 */
static void
add_nursery_frag (SgenFragmentAllocator *allocator, size_t frag_size, char *frag_start, char *frag_end)
{
	SGEN_LOG (4, "Found empty fragment: %p-%p, size: %zd", frag_start, frag_end, frag_size);
	sgen_binary_protocol_empty (frag_start, frag_size);
	/* Not worth dealing with smaller fragments: need to tune */
	if (frag_size >= SGEN_MAX_NURSERY_WASTE) {
		/* memsetting just the first chunk start is bound to provide better cache locality */
		if (sgen_get_nursery_clear_policy () == CLEAR_AT_GC)
			memset (frag_start, 0, frag_size);
		else if (sgen_get_nursery_clear_policy () == CLEAR_AT_TLAB_CREATION_DEBUG)
			memset (frag_start, 0xff, frag_size);

		/* XXX convert this into a flight record entry
		printf ("\tfragment [%p %p] size %zd\n", frag_start, frag_end, frag_size);
		*/
		sgen_fragment_allocator_add (allocator, frag_start, frag_end);
		fragment_total += frag_size;
	} else {
		/* Clear unused fragments, pinning depends on this */
		sgen_clear_range (frag_start, frag_end);
		HEAVY_STAT (stat_wasted_bytes_small_areas += frag_size);
	}
}
static void
fragment_list_reverse (SgenFragmentAllocator *allocator)
{
	SgenFragment *prev = NULL, *list = allocator->region_head;
	while (list) {
		SgenFragment *next = list->next;
		list->next = prev;
		list->next_in_order = prev;
		prev = list;
		list = next;
	}

	allocator->region_head = allocator->alloc_head = prev;
}
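/*
 * Note on ordering: sgen_fragment_allocator_add () pushes at the head, and
 * sgen_build_nursery_fragments () discovers fragments in ascending address
 * order, so the resulting list is descending. One pass of the reversal
 * above therefore leaves both the 'next' and 'next_in_order' chains in
 * ascending address order.
 */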
/*
 * We split fragments at the border of the current nursery limit. When we
 * allocate from the nursery we only consider fragments that start in the
 * current nursery section. We build fragments for the entire nursery in
 * order to facilitate scanning it for objects (adding a nursery frag also
 * marks a region in the nursery as being free).
 */
static void
add_nursery_frag_checks (SgenFragmentAllocator *allocator, char *frag_start, char *frag_end)
{
	char *nursery_limit = sgen_nursery_start + sgen_nursery_size;

	if (frag_start < nursery_limit && frag_end > nursery_limit) {
		add_nursery_frag (allocator, nursery_limit - frag_start, frag_start, nursery_limit);
		add_nursery_frag (allocator, frag_end - nursery_limit, nursery_limit, frag_end);
	} else {
		add_nursery_frag (allocator, frag_end - frag_start, frag_start, frag_end);
	}
}
mword
sgen_build_nursery_fragments (GCMemSection *nursery_section, SgenGrayQueue *unpin_queue)
{
	char *frag_start, *frag_end;
	size_t frag_size;
	SgenFragment *frags_ranges;
	void **pin_start, **pin_entry, **pin_end;

#ifdef NALLOC_DEBUG
	reset_alloc_records ();
#endif
	/* The mutator fragments are done. We no longer need them. */
	sgen_fragment_allocator_release (&mutator_allocator);

	frag_start = sgen_nursery_start;
	fragment_total = 0;

	/* The current nursery might give us a fragment list to exclude [start, next[ */
	frags_ranges = sgen_minor_collector.build_fragments_get_exclude_head ();

	/* clear scan starts */
	memset (nursery_section->scan_starts, 0, nursery_section->num_scan_start * sizeof (gpointer));

	pin_start = pin_entry = sgen_pinning_get_entry (nursery_section->pin_queue_first_entry);
	pin_end = sgen_pinning_get_entry (nursery_section->pin_queue_last_entry);

	while (pin_entry < pin_end || frags_ranges) {
		char *addr0, *addr1;
		size_t size;

		addr0 = addr1 = sgen_nursery_end;
		if (pin_entry < pin_end)
			addr0 = (char *)*pin_entry;
		if (frags_ranges)
			addr1 = frags_ranges->fragment_start;

		if (addr0 < addr1) {
			if (unpin_queue)
				GRAY_OBJECT_ENQUEUE_SERIAL (unpin_queue, (GCObject *)addr0, sgen_obj_get_descriptor_safe ((GCObject *)addr0));
			else
				SGEN_UNPIN_OBJECT (addr0);
			size = SGEN_ALIGN_UP (sgen_safe_object_get_size ((GCObject *)addr0));
			CANARIFY_SIZE (size);
			sgen_set_nursery_scan_start (addr0);
			frag_end = addr0;
			++pin_entry;
		} else {
			frag_end = addr1;
			size = frags_ranges->fragment_next - addr1;
			frags_ranges = frags_ranges->next_in_order;
		}

		frag_size = frag_end - frag_start;

		g_assert (frag_size >= 0);
		if (frag_size && size)
			add_nursery_frag_checks (&mutator_allocator, frag_start, frag_end);

		frag_size = size;
#ifdef NALLOC_DEBUG
		add_alloc_record (*pin_entry, frag_size, PINNING);
#endif
		frag_start = frag_end + frag_size;
	}

	frag_end = sgen_nursery_end;
	frag_size = frag_end - frag_start;
	if (frag_size)
		add_nursery_frag_checks (&mutator_allocator, frag_start, frag_end);

	/* Now it's safe to release the fragments exclude list. */
	sgen_minor_collector.build_fragments_release_exclude_head ();

	/* First we reorder the fragment list to be in ascending address order. This makes H/W prefetchers happier. */
	fragment_list_reverse (&mutator_allocator);

	/* The collector might want to do something with the final nursery fragment list. */
	sgen_minor_collector.build_fragments_finish (&mutator_allocator);

	if (!unmask (mutator_allocator.alloc_head)) {
		SGEN_LOG (1, "Nursery fully pinned");
		for (pin_entry = pin_start; pin_entry < pin_end; ++pin_entry) {
			GCObject *p = (GCObject *)*pin_entry;
			SGEN_LOG (3, "Bastard pinning obj %p (%s), size: %zd", p, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (p)), sgen_safe_object_get_size (p));
		}
	}
	return fragment_total;
}
/*** Nursery memory allocation ***/
void
sgen_nursery_retire_region (void *address, ptrdiff_t size)
{
	HEAVY_STAT (stat_wasted_bytes_discarded_fragments += size);
}
gboolean
sgen_can_alloc_size (size_t size)
{
	SgenFragment *frag;

	if (!SGEN_CAN_ALIGN_UP (size))
		return FALSE;

	size = SGEN_ALIGN_UP (size);

	for (frag = (SgenFragment *)unmask (mutator_allocator.alloc_head); frag; frag = (SgenFragment *)unmask (frag->next)) {
		if ((size_t)(frag->fragment_end - frag->fragment_next) >= size)
			return TRUE;
	}
	return FALSE;
}
void*
sgen_nursery_alloc (size_t size)
{
	SGEN_ASSERT (1, size >= (SGEN_CLIENT_MINIMUM_OBJECT_SIZE + CANARY_SIZE) && size <= (SGEN_MAX_SMALL_OBJ_SIZE + CANARY_SIZE), "Invalid nursery object size");

	SGEN_LOG (4, "Searching nursery for size: %zd", size);
	size = SGEN_ALIGN_UP (size);

	HEAVY_STAT (++stat_nursery_alloc_requests);

	return sgen_fragment_allocator_par_alloc (&mutator_allocator, size);
}
void*
sgen_nursery_alloc_range (size_t desired_size, size_t minimum_size, size_t *out_alloc_size)
{
	SGEN_LOG (4, "Searching for byte range desired size: %zd minimum size %zd", desired_size, minimum_size);

	HEAVY_STAT (++stat_nursery_alloc_range_requests);

	return sgen_fragment_allocator_par_range_alloc (&mutator_allocator, desired_size, minimum_size, out_alloc_size);
}
/*** Initialization ***/

#ifdef HEAVY_STATISTICS

void
sgen_nursery_allocator_init_heavy_stats (void)
{
	mono_counters_register ("bytes wasted trailer fragments", MONO_COUNTER_GC | MONO_COUNTER_WORD | MONO_COUNTER_BYTES, &stat_wasted_bytes_trailer);
	mono_counters_register ("bytes wasted small areas", MONO_COUNTER_GC | MONO_COUNTER_WORD | MONO_COUNTER_BYTES, &stat_wasted_bytes_small_areas);
	mono_counters_register ("bytes wasted discarded fragments", MONO_COUNTER_GC | MONO_COUNTER_WORD | MONO_COUNTER_BYTES, &stat_wasted_bytes_discarded_fragments);

	mono_counters_register ("# nursery alloc requests", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_nursery_alloc_requests);
	mono_counters_register ("# nursery alloc iterations", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_alloc_iterations);
	mono_counters_register ("# nursery alloc retries", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_alloc_retries);

	mono_counters_register ("# nursery alloc range requests", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_nursery_alloc_range_requests);
	mono_counters_register ("# nursery alloc range iterations", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_alloc_range_iterations);
	mono_counters_register ("# nursery alloc range retries", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_alloc_range_retries);
}
#endif
void
sgen_init_nursery_allocator (void)
{
	sgen_register_fixed_internal_mem_type (INTERNAL_MEM_FRAGMENT, sizeof (SgenFragment));
#ifdef NALLOC_DEBUG
	alloc_records = (AllocRecord *)sgen_alloc_os_memory (sizeof (AllocRecord) * ALLOC_RECORD_COUNT, SGEN_ALLOC_INTERNAL | SGEN_ALLOC_ACTIVATE, "debugging memory");
#endif
}
void
sgen_nursery_alloc_prepare_for_minor (void)
{
	sgen_minor_collector.prepare_to_space (sgen_space_bitmap, sgen_space_bitmap_size);
}

void
sgen_nursery_alloc_prepare_for_major (void)
{
	sgen_minor_collector.prepare_to_space (sgen_space_bitmap, sgen_space_bitmap_size);
}
void
sgen_nursery_allocator_set_nursery_bounds (char *start, size_t min_size, size_t max_size)
{
	sgen_nursery_start = start;
	sgen_nursery_end = start + max_size;

	sgen_nursery_size = min_size;
	sgen_nursery_min_size = min_size;
	sgen_nursery_max_size = max_size;

	sgen_nursery_bits = 0;
	while (ONE_P << (++ sgen_nursery_bits) != sgen_nursery_max_size)
		;

	/*
	 * This will not divide evenly for tiny nurseries (<4kb), so we make sure to be on
	 * the right side of things and round up. We could just do a MAX(1,x) instead,
	 * since the nursery size must be a power of 2.
	 */
	sgen_space_bitmap_size = (sgen_nursery_end - sgen_nursery_start + SGEN_TO_SPACE_GRANULE_IN_BYTES * 8 - 1) / (SGEN_TO_SPACE_GRANULE_IN_BYTES * 8);
	sgen_space_bitmap = (char *)g_malloc0 (sgen_space_bitmap_size);
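	/*
	 * The expression above is the usual round-up division: ceil (n / d)
	 * computed as (n + d - 1) / d. For instance, with a hypothetical granule
	 * of 512 bytes, one bitmap byte covers 512 * 8 = 4096 bytes of nursery,
	 * so a 4MB nursery needs 4MB / 4096 = 1024 bitmap bytes; a nursery one
	 * byte larger rounds up to 1025 instead of truncating down to 1024.
	 */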
	/* Setup the single first large fragment */
	sgen_minor_collector.init_nursery (&mutator_allocator, sgen_nursery_start, sgen_nursery_end);
}
void
sgen_resize_nursery (gboolean need_shrink)
{
	size_t major_size;

	if (sgen_nursery_min_size == sgen_nursery_max_size)
		return;

	major_size = sgen_major_collector.get_num_major_sections () * sgen_major_collector.section_size + sgen_los_memory_usage;
	/*
	 * We attempt to use a larger nursery size, as long as it doesn't
	 * exceed a certain percentage of the major heap.
	 *
	 * Commit memory when expanding and release it when shrinking (which
	 * would only be possible if there aren't any pinned objects in the
	 * nursery).
	 */
	if ((sgen_nursery_size * 2) < (major_size / SGEN_DEFAULT_ALLOWANCE_NURSERY_SIZE_RATIO) &&
			(sgen_nursery_size * 2) <= sgen_nursery_max_size && !need_shrink) {
		if ((sgen_nursery_section->end_data - sgen_nursery_section->data) == sgen_nursery_size)
			sgen_nursery_section->end_data += sgen_nursery_size;
		sgen_nursery_size *= 2;
	} else if ((sgen_nursery_size > (major_size / SGEN_DEFAULT_ALLOWANCE_NURSERY_SIZE_RATIO) || need_shrink) &&
			(sgen_nursery_size / 2) >= sgen_nursery_min_size) {
		sgen_nursery_size /= 2;
	}
}
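/*
 * Worked example of the policy above (assuming a hypothetical value of
 * SGEN_DEFAULT_ALLOWANCE_NURSERY_SIZE_RATIO == 8): with a 4MB nursery and a
 * 128MB major heap, 4MB * 2 == 8MB is below 128MB / 8 == 16MB, so the
 * nursery doubles to 8MB (if that still fits sgen_nursery_max_size). If the
 * major heap later drops to 48MB, 8MB exceeds 48MB / 8 == 6MB, so the
 * nursery halves back to 4MB, provided that stays >= sgen_nursery_min_size.
 */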