/*
 * sgen-nursery-allocator.c: Nursery allocation code.
 *
 * Copyright 2009-2010 Novell, Inc.
 *           2011 Rodrigo Kumpera
 *
 * Copyright 2011 Xamarin Inc (http://www.xamarin.com)
 * Copyright (C) 2012 Xamarin Inc
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License 2.0 as published by the Free Software Foundation;
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Library General Public License for more details.
 *
 * You should have received a copy of the GNU Library General Public
 * License 2.0 along with this library; if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * The young generation is divided into fragments. This is because
 * we can hand one fragment to a thread for lock-less fast alloc and
 * because the young generation ends up fragmented anyway by pinned objects.
 * Once a collection is done, a list of fragments is created. When doing
 * thread local alloc we use smallish nurseries so we allow new threads to
 * allocate memory from gen0 without triggering a collection. Threads that
 * are found to allocate lots of memory are given bigger fragments. This
 * should make the finalizer thread use little nursery memory after a while.
 * We should start by assigning threads very small fragments: if there are many
 * threads, the nursery will otherwise be full of reserved space that the threads
 * may never use, slowing down allocation speed.
 * Thread local allocation is done from areas of memory HotSpot calls Thread Local
 * Allocation Buffers (TLABs).
 */
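
/*
 * A minimal sketch of the lock-less fast path the fragments enable. This is
 * illustrative only and not part of this file: the per-thread cursors
 * `tlab_next' and `tlab_end' are hypothetical names for the bounds a thread
 * receives when it is handed (part of) a fragment.
 */
#if 0
static __thread char *tlab_next, *tlab_end;

static void*
tlab_fast_alloc_sketch (size_t size)
{
	/* Each thread bump-allocates inside its own [tlab_next, tlab_end). */
	char *p = tlab_next;
	if ((size_t)(tlab_end - p) >= size) {
		tlab_next = p + size;	/* no locks, no atomics on the fast path */
		return p;
	}
	/* Slow path: request a new range, e.g. via sgen_nursery_alloc_range (). */
	return NULL;
}
#endif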

#include "config.h"

#ifdef HAVE_SGEN_GC

#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#ifdef HAVE_PTHREAD_H
#include <pthread.h>
#endif
#ifdef HAVE_SEMAPHORE_H
#include <semaphore.h>
#endif
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <assert.h>
#ifdef __MACH__
#undef _XOPEN_SOURCE
#define _XOPEN_SOURCE
#endif

#include "metadata/sgen-gc.h"
#include "metadata/metadata-internals.h"
#include "metadata/class-internals.h"
#include "metadata/gc-internal.h"
#include "metadata/object-internals.h"
#include "metadata/threads.h"
#include "metadata/sgen-cardtable.h"
#include "metadata/sgen-protocol.h"
#include "metadata/sgen-archdep.h"
#include "metadata/sgen-bridge.h"
#include "metadata/sgen-memory-governor.h"
#include "metadata/sgen-pinning.h"
#include "metadata/mono-gc.h"
#include "metadata/method-builder.h"
#include "metadata/profiler-private.h"
#include "metadata/monitor.h"
#include "metadata/threadpool-internals.h"
#include "metadata/mempool-internals.h"
#include "metadata/marshal.h"
#include "utils/mono-mmap.h"
#include "utils/mono-time.h"
#include "utils/mono-semaphore.h"
#include "utils/mono-counters.h"
#include "utils/mono-proclib.h"
#include "utils/mono-threads.h"

/* Enable it so nursery allocation diagnostic data is collected */
//#define NALLOC_DEBUG 1

/* The mutator allocs from here. */
SgenFragmentAllocator mutator_allocator;

/* freelist of fragment structures */
static SgenFragment *fragment_freelist = NULL;

/* Allocator cursors */
static char *nursery_last_pinned_end = NULL;

char *sgen_nursery_start;
char *sgen_nursery_end;

#ifdef USER_CONFIG
size_t sgen_nursery_size = (1 << 22);
int sgen_nursery_bits = 22;
#endif

char *sgen_space_bitmap;
size_t sgen_space_bitmap_size;

#ifdef HEAVY_STATISTICS

static gint32 stat_wasted_bytes_trailer = 0;
static gint32 stat_wasted_bytes_small_areas = 0;
static gint32 stat_wasted_bytes_discarded_fragments = 0;
static gint32 stat_nursery_alloc_requests = 0;
static gint32 stat_alloc_iterations = 0;
static gint32 stat_alloc_retries = 0;

static gint32 stat_nursery_alloc_range_requests = 0;
static gint32 stat_alloc_range_iterations = 0;
static gint32 stat_alloc_range_retries = 0;

#endif

/******************************** Nursery allocation debugging ********************************/

#ifdef NALLOC_DEBUG

enum {
	FIXED_ALLOC = 1,
	RANGE_ALLOC,
	PINNING,
	BLOCK_ZEROING,
	CLEAR_NURSERY_FRAGS
};

typedef struct {
	char *address;
	size_t size;
	int reason;
	int seq;
	MonoNativeThreadId tid;
} AllocRecord;

#define ALLOC_RECORD_COUNT 128000

static AllocRecord *alloc_records;
static volatile int next_record;
static volatile int alloc_count;

void dump_alloc_records (void);
void verify_alloc_records (void);

static const char*
get_reason_name (AllocRecord *rec)
{
	switch (rec->reason) {
	case FIXED_ALLOC: return "fixed-alloc";
	case RANGE_ALLOC: return "range-alloc";
	case PINNING: return "pinning";
	case BLOCK_ZEROING: return "block-zeroing";
	case CLEAR_NURSERY_FRAGS: return "clear-nursery-frag";
	default: return "invalid";
	}
}

static void
reset_alloc_records (void)
{
	next_record = 0;
	alloc_count = 0;
}

static void
add_alloc_record (char *addr, size_t size, int reason)
{
	int idx = InterlockedIncrement (&next_record) - 1;
	alloc_records [idx].address = addr;
	alloc_records [idx].size = size;
	alloc_records [idx].reason = reason;
	alloc_records [idx].seq = idx;
	alloc_records [idx].tid = mono_native_thread_id_get ();
}

static int
comp_alloc_record (const void *_a, const void *_b)
{
	const AllocRecord *a = _a;
	const AllocRecord *b = _b;
	if (a->address == b->address)
		return a->seq - b->seq;
	return a->address - b->address;
}

#define rec_end(REC) ((REC)->address + (REC)->size)

void
dump_alloc_records (void)
{
	int i;
	sgen_qsort (alloc_records, next_record, sizeof (AllocRecord), comp_alloc_record);

	printf ("------------------------------------DUMP RECORDS----------------------------\n");
	for (i = 0; i < next_record; ++i) {
		AllocRecord *rec = alloc_records + i;
		printf ("obj [%p, %p] size %d reason %s seq %d tid %zx\n", rec->address, rec_end (rec), (int)rec->size, get_reason_name (rec), rec->seq, (size_t)rec->tid);
	}
}

void
verify_alloc_records (void)
{
	int i;
	int total = 0;
	int holes = 0;
	int max_hole = 0;
	AllocRecord *prev = NULL;

	sgen_qsort (alloc_records, next_record, sizeof (AllocRecord), comp_alloc_record);
	printf ("------------------------------------DUMP RECORDS- %d %d---------------------------\n", next_record, alloc_count);
	for (i = 0; i < next_record; ++i) {
		AllocRecord *rec = alloc_records + i;
		int hole_size = 0;
		total += rec->size;
		if (prev) {
			if (rec_end (prev) > rec->address)
				printf ("WE GOT OVERLAPPING objects %p and %p\n", prev->address, rec->address);
			if ((rec->address - rec_end (prev)) >= 8)
				++holes;
			hole_size = rec->address - rec_end (prev);
			max_hole = MAX (max_hole, hole_size);
		}
		printf ("obj [%p, %p] size %d hole to prev %d reason %s seq %d tid %zx\n", rec->address, rec_end (rec), (int)rec->size, hole_size, get_reason_name (rec), rec->seq, (size_t)rec->tid);
		prev = rec;
	}
	printf ("SUMMARY total alloc'd %d holes %d max_hole %d\n", total, holes, max_hole);
}

#endif
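
/*
 * Note: dump_alloc_records () and verify_alloc_records () above are
 * deliberately non-static, presumably so they can be invoked by hand from a
 * debugger; verify_alloc_records () sorts the flight records by address and
 * reports overlapping objects and the holes between them.
 */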

/*********************************************************************************/

static inline gpointer
mask (gpointer n, uintptr_t bit)
{
	return (gpointer)(((uintptr_t)n) | bit);
}

static inline gpointer
unmask (gpointer p)
{
	return (gpointer)((uintptr_t)p & ~(uintptr_t)0x3);
}

static inline uintptr_t
get_mark (gpointer n)
{
	return (uintptr_t)n & 0x1;
}
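
/*
 * The three helpers above implement the pointer tagging used by the
 * lock-free fragment list: the lowest bit of a fragment's `next' pointer
 * marks the node as logically deleted (see the Michael-style removal in
 * par_alloc_from_fragment below), and unmask () strips the tag bits before
 * a pointer is dereferenced.
 */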

/* MUST be called with the world stopped */
SgenFragment*
sgen_fragment_allocator_alloc (void)
{
	SgenFragment *frag = fragment_freelist;
	if (frag) {
		fragment_freelist = frag->next_in_order;
		frag->next = frag->next_in_order = NULL;
		return frag;
	}
	frag = sgen_alloc_internal (INTERNAL_MEM_FRAGMENT);
	frag->next = frag->next_in_order = NULL;
	return frag;
}

void
sgen_fragment_allocator_add (SgenFragmentAllocator *allocator, char *start, char *end)
{
	SgenFragment *fragment;

	fragment = sgen_fragment_allocator_alloc ();
	fragment->fragment_start = start;
	fragment->fragment_next = start;
	fragment->fragment_end = end;
	fragment->next_in_order = fragment->next = unmask (allocator->region_head);

	allocator->region_head = allocator->alloc_head = fragment;
	g_assert (fragment->fragment_end > fragment->fragment_start);
}

void
sgen_fragment_allocator_release (SgenFragmentAllocator *allocator)
{
	SgenFragment *last = allocator->region_head;
	if (!last)
		return;

	/* Find the last fragment in insert order */
	for (; last->next_in_order; last = last->next_in_order)
		;

	last->next_in_order = fragment_freelist;
	fragment_freelist = allocator->region_head;
	allocator->alloc_head = allocator->region_head = NULL;
}
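
/*
 * Lock-free search for the link that points to `frag', physically unlinking
 * any marked (logically deleted) nodes found along the way. This is the
 * search half of Michael's lock-free linked-list algorithm: whenever a
 * concurrent update is observed (*prev changed, or a CAS failed), the scan
 * restarts from the list head.
 */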
static SgenFragment**
find_previous_pointer_fragment (SgenFragmentAllocator *allocator, SgenFragment *frag)
{
	SgenFragment **prev;
	SgenFragment *cur, *next;
#ifdef NALLOC_DEBUG
	int count = 0;
#endif

try_again:
	prev = &allocator->alloc_head;
#ifdef NALLOC_DEBUG
	if (count++ > 5)
		printf ("retry count for fppf is %d\n", count);
#endif

	cur = unmask (*prev);

	while (1) {
		if (cur == NULL)
			return NULL;
		next = cur->next;

		/*
		 * We need to make sure that we dereference prev below
		 * after reading cur->next above, so we need a read
		 * barrier.
		 */
		mono_memory_read_barrier ();

		if (*prev != cur)
			goto try_again;

		if (!get_mark (next)) {
			if (cur == frag)
				return prev;
			prev = &cur->next;
		} else {
			next = unmask (next);
			if (InterlockedCompareExchangePointer ((volatile gpointer*)prev, next, cur) != cur)
				goto try_again;
			/* The unlinking store must be ordered after the read of cur->next above. */
			mono_memory_write_barrier ();
		}
		cur = mono_lls_pointer_unmask (next);
	}
	return NULL;
}

static gboolean
claim_remaining_size (SgenFragment *frag, char *alloc_end)
{
	/* All space used, nothing to claim. */
	if (frag->fragment_end <= alloc_end)
		return FALSE;

	/* Try to alloc all the remaining space. */
	return InterlockedCompareExchangePointer ((volatile gpointer*)&frag->fragment_next, frag->fragment_end, alloc_end) == alloc_end;
}
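
/*
 * Parallel allocation: advance frag->fragment_next from p to p + size with a
 * single CAS; a failed CAS means another thread claimed those bytes first and
 * the caller must pick a fragment again. When the surviving tail of the
 * fragment drops below SGEN_MAX_NURSERY_WASTE, the fragment is retired:
 * marked deleted and unlinked with the two-CAS removal below.
 */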
static void*
par_alloc_from_fragment (SgenFragmentAllocator *allocator, SgenFragment *frag, size_t size)
{
	char *p = frag->fragment_next;
	char *end = p + size;

	if (end > frag->fragment_end)
		return NULL;

	/* p = frag->fragment_next must happen before */
	mono_memory_barrier ();

	if (InterlockedCompareExchangePointer ((volatile gpointer*)&frag->fragment_next, end, p) != p)
		return NULL;

	if (frag->fragment_end - end < SGEN_MAX_NURSERY_WASTE) {
		SgenFragment *next, **prev_ptr;

		/*
		 * Before we clean the remaining nursery, we must claim the remaining space
		 * as it could end up being used by the range allocator, since the latter can
		 * allocate from this dying fragment: it doesn't respect SGEN_MAX_NURSERY_WASTE
		 * when doing second chance allocation.
		 */
		if ((sgen_get_nursery_clear_policy () == CLEAR_AT_TLAB_CREATION || sgen_get_nursery_clear_policy () == CLEAR_AT_TLAB_CREATION_DEBUG) && claim_remaining_size (frag, end)) {
			sgen_clear_range (end, frag->fragment_end);
			HEAVY_STAT (InterlockedExchangeAdd (&stat_wasted_bytes_trailer, frag->fragment_end - end));
#ifdef NALLOC_DEBUG
			add_alloc_record (end, frag->fragment_end - end, BLOCK_ZEROING);
#endif
		}

		prev_ptr = find_previous_pointer_fragment (allocator, frag);

		/* Use Michael's linked list remove. */

		/* prev_ptr will be null if the fragment was removed concurrently. */
		while (prev_ptr) {
			next = frag->next;

			/* Unless the node is already marked deleted, mark it now. */
			if (!get_mark (next)) {
				/* frag->next read must happen before the first CAS */
				mono_memory_write_barrier ();

				/* Fail if the next node was removed concurrently and its CAS wins */
				if (InterlockedCompareExchangePointer ((volatile gpointer*)&frag->next, mask (next, 1), next) != next) {
					continue;
				}
			}

			/* The second CAS must happen after the first CAS or frag->next. */
			mono_memory_write_barrier ();

			/* Fail if the previous node was deleted and its CAS wins */
			if (InterlockedCompareExchangePointer ((volatile gpointer*)prev_ptr, unmask (next), frag) != frag) {
				prev_ptr = find_previous_pointer_fragment (allocator, frag);
				continue;
			}
			break;
		}
	}

	return p;
}

static void*
serial_alloc_from_fragment (SgenFragment **previous, SgenFragment *frag, size_t size)
{
	char *p = frag->fragment_next;
	char *end = p + size;

	if (end > frag->fragment_end)
		return NULL;

	frag->fragment_next = end;

	if (frag->fragment_end - end < SGEN_MAX_NURSERY_WASTE) {
		*previous = frag->next;

		/* Clear the remaining space, pinning depends on this. FIXME move this to use phony arrays */
		memset (end, 0, frag->fragment_end - end);
	}

	return p;
}
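
/*
 * Illustrative use of the two entry points below (sketch only; the real
 * callers are sgen_nursery_alloc () and sgen_nursery_alloc_range () further
 * down, and the sizes here are made up):
 */
#if 0
static void
fragment_allocator_usage_sketch (void)
{
	size_t got;

	/* Fixed-size allocation: all-or-nothing. */
	void *obj = sgen_fragment_allocator_par_alloc (&mutator_allocator, 64);

	/* Range allocation: prefer 4096 bytes, accept any chunk >= 512. */
	void *tlab = sgen_fragment_allocator_par_range_alloc (&mutator_allocator, 4096, 512, &got);
}
#endif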

void*
sgen_fragment_allocator_par_alloc (SgenFragmentAllocator *allocator, size_t size)
{
	SgenFragment *frag;

#ifdef NALLOC_DEBUG
	InterlockedIncrement (&alloc_count);
#endif

restart:
	for (frag = unmask (allocator->alloc_head); unmask (frag); frag = unmask (frag->next)) {
		HEAVY_STAT (InterlockedIncrement (&stat_alloc_iterations));

		if (size <= (size_t)(frag->fragment_end - frag->fragment_next)) {
			void *p = par_alloc_from_fragment (allocator, frag, size);
			if (!p) {
				HEAVY_STAT (InterlockedIncrement (&stat_alloc_retries));
				goto restart;
			}
#ifdef NALLOC_DEBUG
			add_alloc_record (p, size, FIXED_ALLOC);
#endif
			return p;
		}
	}
	return NULL;
}

void*
sgen_fragment_allocator_serial_alloc (SgenFragmentAllocator *allocator, size_t size)
{
	SgenFragment *frag;
	SgenFragment **previous;
#ifdef NALLOC_DEBUG
	InterlockedIncrement (&alloc_count);
#endif

	previous = &allocator->alloc_head;

	for (frag = *previous; frag; frag = *previous) {
		char *p = serial_alloc_from_fragment (previous, frag, size);

		HEAVY_STAT (InterlockedIncrement (&stat_alloc_iterations));

		if (p) {
#ifdef NALLOC_DEBUG
			add_alloc_record (p, size, FIXED_ALLOC);
#endif
			return p;
		}
		previous = &frag->next;
	}
	return NULL;
}

void*
sgen_fragment_allocator_serial_range_alloc (SgenFragmentAllocator *allocator, size_t desired_size, size_t minimum_size, size_t *out_alloc_size)
{
	SgenFragment *frag, **previous, *min_frag = NULL, **prev_min_frag = NULL;
	size_t current_minimum = minimum_size;

#ifdef NALLOC_DEBUG
	InterlockedIncrement (&alloc_count);
#endif

	previous = &allocator->alloc_head;

	for (frag = *previous; frag; frag = *previous) {
		size_t frag_size = frag->fragment_end - frag->fragment_next;

		HEAVY_STAT (InterlockedIncrement (&stat_alloc_range_iterations));

		if (desired_size <= frag_size) {
			void *p;
			*out_alloc_size = desired_size;

			p = serial_alloc_from_fragment (previous, frag, desired_size);
#ifdef NALLOC_DEBUG
			add_alloc_record (p, desired_size, RANGE_ALLOC);
#endif
			return p;
		}
		if (current_minimum <= frag_size) {
			min_frag = frag;
			prev_min_frag = previous;
			current_minimum = frag_size;
		}
		previous = &frag->next;
	}

	if (min_frag) {
		void *p;
		size_t frag_size = min_frag->fragment_end - min_frag->fragment_next;
		*out_alloc_size = frag_size;

		p = serial_alloc_from_fragment (prev_min_frag, min_frag, frag_size);

#ifdef NALLOC_DEBUG
		add_alloc_record (p, frag_size, RANGE_ALLOC);
#endif
		return p;
	}

	return NULL;
}
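
/*
 * Parallel version of the range allocator above: first fit for desired_size;
 * failing that, remember the largest fragment still satisfying minimum_size
 * and claim it wholesale as second chance allocation. Any lost CAS race
 * restarts the whole scan.
 */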
void*
sgen_fragment_allocator_par_range_alloc (SgenFragmentAllocator *allocator, size_t desired_size, size_t minimum_size, size_t *out_alloc_size)
{
	SgenFragment *frag, *min_frag;
	size_t current_minimum;

restart:
	min_frag = NULL;
	current_minimum = minimum_size;

#ifdef NALLOC_DEBUG
	InterlockedIncrement (&alloc_count);
#endif

	for (frag = unmask (allocator->alloc_head); frag; frag = unmask (frag->next)) {
		size_t frag_size = frag->fragment_end - frag->fragment_next;

		HEAVY_STAT (InterlockedIncrement (&stat_alloc_range_iterations));

		if (desired_size <= frag_size) {
			void *p;
			*out_alloc_size = desired_size;

			p = par_alloc_from_fragment (allocator, frag, desired_size);
			if (!p) {
				HEAVY_STAT (InterlockedIncrement (&stat_alloc_range_retries));
				goto restart;
			}
#ifdef NALLOC_DEBUG
			add_alloc_record (p, desired_size, RANGE_ALLOC);
#endif
			return p;
		}
		if (current_minimum <= frag_size) {
			min_frag = frag;
			current_minimum = frag_size;
		}
	}

	/* The second fragment_next read should be ordered with respect to the first code block. */
	mono_memory_barrier ();

	if (min_frag) {
		void *p;
		size_t frag_size;

		frag_size = min_frag->fragment_end - min_frag->fragment_next;
		if (frag_size < minimum_size)
			goto restart;

		*out_alloc_size = frag_size;

		mono_memory_barrier ();
		p = par_alloc_from_fragment (allocator, min_frag, frag_size);

		/* XXX restarting here is quite dubious given this is already second chance allocation. */
		if (!p) {
			HEAVY_STAT (InterlockedIncrement (&stat_alloc_retries));
			goto restart;
		}
#ifdef NALLOC_DEBUG
		add_alloc_record (p, frag_size, RANGE_ALLOC);
#endif
		return p;
	}

	return NULL;
}

void
sgen_clear_allocator_fragments (SgenFragmentAllocator *allocator)
{
	SgenFragment *frag;

	for (frag = unmask (allocator->alloc_head); frag; frag = unmask (frag->next)) {
		SGEN_LOG (4, "Clear nursery frag %p-%p", frag->fragment_next, frag->fragment_end);
		sgen_clear_range (frag->fragment_next, frag->fragment_end);
#ifdef NALLOC_DEBUG
		add_alloc_record (frag->fragment_next, frag->fragment_end - frag->fragment_next, CLEAR_NURSERY_FRAGS);
#endif
	}
}

/* Clear all remaining nursery fragments */
void
sgen_clear_nursery_fragments (void)
{
	if (sgen_get_nursery_clear_policy () == CLEAR_AT_TLAB_CREATION || sgen_get_nursery_clear_policy () == CLEAR_AT_TLAB_CREATION_DEBUG) {
		sgen_clear_allocator_fragments (&mutator_allocator);
		sgen_minor_collector.clear_fragments ();
	}
}

/*
 * Mark a given range of memory as invalid.
 *
 * This can be done either by zeroing memory or by placing
 * a phony byte[] array. This keeps the heap forward walkable.
 *
 * This function ignores calls with a zero range, even if
 * both start and end are NULL.
 */
void
sgen_clear_range (char *start, char *end)
{
	MonoArray *o;
	size_t size = end - start;

	if ((start && !end) || (start > end))
		g_error ("Invalid range [%p %p]", start, end);

	if (size < sizeof (MonoArray)) {
		memset (start, 0, size);
		return;
	}

	o = (MonoArray*)start;
	o->obj.vtable = sgen_get_array_fill_vtable ();
	/* Mark this as not a real object */
	o->obj.synchronisation = GINT_TO_POINTER (-1);
	o->bounds = NULL;
	o->max_length = (mono_array_size_t)(size - sizeof (MonoArray));
	sgen_set_nursery_scan_start (start);
	g_assert (start + sgen_safe_object_get_size ((MonoObject*)o) == end);
}
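
/*
 * Worked example for the phony array trick above (sizes illustrative): for a
 * 64-byte hole, a byte[] header is written at `start' with max_length set to
 * 64 - sizeof (MonoArray), so a linear heap walk skips from `start' straight
 * to `end' instead of scanning dead bytes.
 */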

void
sgen_nursery_allocator_prepare_for_pinning (void)
{
	sgen_clear_allocator_fragments (&mutator_allocator);
	sgen_minor_collector.clear_fragments ();
}

static mword fragment_total = 0;

/*
 * We found a fragment of free memory in the nursery: memzero it and if
 * it is big enough, add it to the list of fragments that can be used for
 * allocation.
 */
static void
add_nursery_frag (SgenFragmentAllocator *allocator, size_t frag_size, char* frag_start, char* frag_end)
{
	SGEN_LOG (4, "Found empty fragment: %p-%p, size: %zd", frag_start, frag_end, frag_size);
	binary_protocol_empty (frag_start, frag_size);
	MONO_GC_NURSERY_SWEPT ((mword)frag_start, frag_end - frag_start);
	/* Not worth dealing with smaller fragments: need to tune */
	if (frag_size >= SGEN_MAX_NURSERY_WASTE) {
		/* memsetting just the first chunk start is bound to provide better cache locality */
		if (sgen_get_nursery_clear_policy () == CLEAR_AT_GC)
			memset (frag_start, 0, frag_size);
		else if (sgen_get_nursery_clear_policy () == CLEAR_AT_TLAB_CREATION_DEBUG)
			memset (frag_start, 0xff, frag_size);

#ifdef NALLOC_DEBUG
		/* XXX convert this into a flight record entry
		printf ("\tfragment [%p %p] size %zd\n", frag_start, frag_end, frag_size);
		*/
#endif
		sgen_fragment_allocator_add (allocator, frag_start, frag_end);
		fragment_total += frag_size;
	} else {
		/* Clear unused fragments, pinning depends on this */
		sgen_clear_range (frag_start, frag_end);
		HEAVY_STAT (InterlockedExchangeAdd (&stat_wasted_bytes_small_areas, frag_size));
	}
}

static void
fragment_list_reverse (SgenFragmentAllocator *allocator)
{
	SgenFragment *prev = NULL, *list = allocator->region_head;
	while (list) {
		SgenFragment *next = list->next;
		list->next = prev;
		list->next_in_order = prev;
		prev = list;
		list = next;
	}

	allocator->region_head = allocator->alloc_head = prev;
}
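
/*
 * Sweep the nursery from start to end, merging the address-sorted pin queue
 * with the minor collector's exclude ranges: each gap between two
 * pinned/excluded regions becomes a free fragment via add_nursery_frag ().
 * New fragments are pushed onto the list head, which is why the list is
 * reversed into ascending address order afterwards.
 */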
mword
sgen_build_nursery_fragments (GCMemSection *nursery_section, SgenGrayQueue *unpin_queue)
{
	char *frag_start, *frag_end;
	size_t frag_size;
	SgenFragment *frags_ranges;
	void **pin_start, **pin_entry, **pin_end;

#ifdef NALLOC_DEBUG
	reset_alloc_records ();
#endif
	/* The mutator fragments are done. We no longer need them. */
	sgen_fragment_allocator_release (&mutator_allocator);

	frag_start = sgen_nursery_start;
	fragment_total = 0;

	/* The current nursery might give us a fragment list to exclude [start, next) */
	frags_ranges = sgen_minor_collector.build_fragments_get_exclude_head ();

	/* clear scan starts */
	memset (nursery_section->scan_starts, 0, nursery_section->num_scan_start * sizeof (gpointer));

	pin_start = pin_entry = sgen_pinning_get_entry (nursery_section->pin_queue_first_entry);
	pin_end = sgen_pinning_get_entry (nursery_section->pin_queue_last_entry);

	while (pin_entry < pin_end || frags_ranges) {
		char *addr0, *addr1;
		size_t size;

		addr0 = addr1 = sgen_nursery_end;
		if (pin_entry < pin_end)
			addr0 = *pin_entry;
		if (frags_ranges)
			addr1 = frags_ranges->fragment_start;

		if (addr0 < addr1) {
			if (unpin_queue)
				GRAY_OBJECT_ENQUEUE (unpin_queue, addr0, sgen_obj_get_descriptor_safe (addr0));
			else
				SGEN_UNPIN_OBJECT (addr0);
			size = SGEN_ALIGN_UP (sgen_safe_object_get_size ((MonoObject*)addr0));
			CANARIFY_SIZE (size);
			sgen_set_nursery_scan_start (addr0);
			frag_end = addr0;
			++pin_entry;
		} else {
			frag_end = addr1;
			size = frags_ranges->fragment_next - addr1;
			frags_ranges = frags_ranges->next_in_order;
		}

		frag_size = frag_end - frag_start;

		if (size == 0)
			continue;

		g_assert (frag_size >= 0);
		g_assert (size > 0);
		if (frag_size && size)
			add_nursery_frag (&mutator_allocator, frag_size, frag_start, frag_end);

		frag_size = size;
#ifdef NALLOC_DEBUG
		add_alloc_record (*pin_entry, frag_size, PINNING);
#endif
		frag_start = frag_end + frag_size;
	}

	nursery_last_pinned_end = frag_start;
	frag_end = sgen_nursery_end;
	frag_size = frag_end - frag_start;
	if (frag_size)
		add_nursery_frag (&mutator_allocator, frag_size, frag_start, frag_end);

	/* Now it's safe to release the fragments exclude list. */
	sgen_minor_collector.build_fragments_release_exclude_head ();

	/* First we reorder the fragment list to be in ascending address order. This makes H/W prefetchers happier. */
	fragment_list_reverse (&mutator_allocator);

	/* The collector might want to do something with the final nursery fragment list. */
	sgen_minor_collector.build_fragments_finish (&mutator_allocator);

	if (!unmask (mutator_allocator.alloc_head)) {
		SGEN_LOG (1, "Nursery fully pinned");
		for (pin_entry = pin_start; pin_entry < pin_end; ++pin_entry) {
			void *p = *pin_entry;
			SGEN_LOG (3, "Bastard pinning obj %p (%s), size: %zd", p, sgen_safe_name (p), sgen_safe_object_get_size (p));
		}
	}
	return fragment_total;
}

char *
sgen_nursery_alloc_get_upper_alloc_bound (void)
{
	/* FIXME: we need to calculate the collector upper bound as well, but this must be done in the previous GC. */
	return sgen_nursery_end;
}

/*** Nursery memory allocation ***/
void
sgen_nursery_retire_region (void *address, ptrdiff_t size)
{
	HEAVY_STAT (InterlockedExchangeAdd (&stat_wasted_bytes_discarded_fragments, size));
}

gboolean
sgen_can_alloc_size (size_t size)
{
	SgenFragment *frag;

	if (!SGEN_CAN_ALIGN_UP (size))
		return FALSE;

	size = SGEN_ALIGN_UP (size);

	for (frag = unmask (mutator_allocator.alloc_head); frag; frag = unmask (frag->next)) {
		if ((size_t)(frag->fragment_end - frag->fragment_next) >= size)
			return TRUE;
	}
	return FALSE;
}
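
/*
 * Allocate a single object of `size' bytes straight from the mutator
 * fragment list; the size is aligned here and must already account for the
 * object header (and the canary, when enabled).
 */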
void*
sgen_nursery_alloc (size_t size)
{
	SGEN_ASSERT (1, size >= sizeof (MonoObject) && size <= (SGEN_MAX_SMALL_OBJ_SIZE + CANARY_SIZE), "Invalid nursery object size");

	SGEN_LOG (4, "Searching nursery for size: %zd", size);
	size = SGEN_ALIGN_UP (size);

	HEAVY_STAT (InterlockedIncrement (&stat_nursery_alloc_requests));

	return sgen_fragment_allocator_par_alloc (&mutator_allocator, size);
}

void*
sgen_nursery_alloc_range (size_t desired_size, size_t minimum_size, size_t *out_alloc_size)
{
	SGEN_LOG (4, "Searching for byte range desired size: %zd minimum size %zd", desired_size, minimum_size);

	HEAVY_STAT (InterlockedIncrement (&stat_nursery_alloc_range_requests));

	return sgen_fragment_allocator_par_range_alloc (&mutator_allocator, desired_size, minimum_size, out_alloc_size);
}

/*** Initialization ***/

#ifdef HEAVY_STATISTICS

void
sgen_nursery_allocator_init_heavy_stats (void)
{
	mono_counters_register ("bytes wasted trailer fragments", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wasted_bytes_trailer);
	mono_counters_register ("bytes wasted small areas", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wasted_bytes_small_areas);
	mono_counters_register ("bytes wasted discarded fragments", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wasted_bytes_discarded_fragments);

	mono_counters_register ("# nursery alloc requests", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_nursery_alloc_requests);
	mono_counters_register ("# nursery alloc iterations", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_alloc_iterations);
	mono_counters_register ("# nursery alloc retries", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_alloc_retries);

	mono_counters_register ("# nursery alloc range requests", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_nursery_alloc_range_requests);
	mono_counters_register ("# nursery alloc range iterations", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_alloc_range_iterations);
	mono_counters_register ("# nursery alloc range retries", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_alloc_range_retries);
}

#endif

void
sgen_init_nursery_allocator (void)
{
	sgen_register_fixed_internal_mem_type (INTERNAL_MEM_FRAGMENT, sizeof (SgenFragment));
#ifdef NALLOC_DEBUG
	alloc_records = sgen_alloc_os_memory (sizeof (AllocRecord) * ALLOC_RECORD_COUNT, SGEN_ALLOC_INTERNAL | SGEN_ALLOC_ACTIVATE, "debugging memory");
#endif
}

void
sgen_nursery_alloc_prepare_for_minor (void)
{
	sgen_minor_collector.prepare_to_space (sgen_space_bitmap, sgen_space_bitmap_size);
}

void
sgen_nursery_alloc_prepare_for_major (void)
{
	sgen_minor_collector.prepare_to_space (sgen_space_bitmap, sgen_space_bitmap_size);
}

void
sgen_nursery_allocator_set_nursery_bounds (char *start, char *end)
{
	sgen_nursery_start = start;
	sgen_nursery_end = end;

	/*
	 * Each bitmap byte covers 8 granules of SGEN_TO_SPACE_GRANULE_IN_BYTES each.
	 * This will not divide evenly for tiny nurseries (<4kb), so we make sure to
	 * be on the right side of things and round up. We could just do a MAX (1, x)
	 * instead, since the nursery size must be a power of 2.
	 */
	sgen_space_bitmap_size = (end - start + SGEN_TO_SPACE_GRANULE_IN_BYTES * 8 - 1) / (SGEN_TO_SPACE_GRANULE_IN_BYTES * 8);
	sgen_space_bitmap = g_malloc0 (sgen_space_bitmap_size);

	/* Set up the single first large fragment */
	sgen_minor_collector.init_nursery (&mutator_allocator, start, end);
}

#endif