[sgen] One internal allocator per worker thread, to get rid of locking.
[mono-project/dkf.git] / mono / metadata / sgen-marksweep.c
blob27b3a27bd96d4fe2c42f35c5800cb00e9948b9b5
1 /*
2 * sgen-marksweep.c: Simple generational GC.
4 * Author:
5 * Mark Probst <mark.probst@gmail.com>
7 * Copyright 2009-2010 Novell, Inc.
8 *
9 * Permission is hereby granted, free of charge, to any person obtaining
10 * a copy of this software and associated documentation files (the
11 * "Software"), to deal in the Software without restriction, including
12 * without limitation the rights to use, copy, modify, merge, publish,
13 * distribute, sublicense, and/or sell copies of the Software, and to
14 * permit persons to whom the Software is furnished to do so, subject to
15 * the following conditions:
17 * The above copyright notice and this permission notice shall be
18 * included in all copies or substantial portions of the Software.
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
21 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
23 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
24 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
25 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
26 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
29 #ifdef HAVE_SGEN_GC
31 #include <math.h>
33 #include "utils/mono-counters.h"
34 #include "metadata/object-internals.h"
35 #include "metadata/profiler-private.h"
37 #include "metadata/sgen-gc.h"
38 #include "metadata/sgen-protocol.h"
/* Debug output is compiled out in this configuration. */
#define DEBUG(l,x)

/* Size of one major-heap block.  Must be a power of two: block lookup
 * works by masking object addresses with MS_BLOCK_SIZE-1. */
#define MS_BLOCK_SIZE		(16*1024)
#define MAJOR_SECTION_SIZE	MS_BLOCK_SIZE

/*
 * Don't allocate single blocks, but alloc a contingent of this many
 * blocks in one swoop.
 */
#define MS_BLOCK_ALLOC_NUM	32

/*
 * Number of bytes before the first object in a block.  At the start
 * of a block is the MSBlockHeader, then optional padding, then come
 * the objects, so this must be >= sizeof (MSBlockHeader).
 */
#define MS_BLOCK_SKIP	16

/* Usable payload bytes in a block, after header and padding. */
#define MS_BLOCK_FREE	(MS_BLOCK_SIZE - MS_BLOCK_SKIP)

/* Number of mark-bitmap words needed to cover one block, with one bit
 * per allocation-alignment unit. */
#define MS_NUM_MARK_WORDS	((MS_BLOCK_SIZE / SGEN_ALLOC_ALIGN + sizeof (mword) * 8 - 1) / (sizeof (mword) * 8))

#if SGEN_MAX_SMALL_OBJ_SIZE > MS_BLOCK_FREE / 2
#error MAX_SMALL_OBJ_SIZE must be at most (MS_BLOCK_SIZE - MS_BLOCK_SKIP) / 2
#endif
66 typedef struct _MSBlockInfo MSBlockInfo;
67 struct _MSBlockInfo {
68 int obj_size;
69 gboolean pinned;
70 gboolean has_references;
71 char *block;
72 void **free_list;
73 MSBlockInfo *next_free;
74 MSBlockInfo *next;
75 void **pin_queue_start;
76 int pin_queue_num_entries;
77 mword mark_words [MS_NUM_MARK_WORDS];
80 #define MS_BLOCK_OBJ(b,i) ((b)->block + MS_BLOCK_SKIP + (b)->obj_size * (i))
82 typedef struct {
83 MSBlockInfo *info;
84 } MSBlockHeader;
/* Blocks are MS_BLOCK_SIZE-aligned, so masking an object address
 * yields the containing block's header, which points at its info. */
#define MS_BLOCK_FOR_OBJ(o)	(((MSBlockHeader*)((mword)(o) & ~(MS_BLOCK_SIZE-1)))->info)

/* Slot index of object o inside block b. */
#define MS_BLOCK_OBJ_INDEX(o,b)	(((char*)(o) - ((b)->block + MS_BLOCK_SKIP)) / (b)->obj_size)

/* Compute the mark-bitmap word (w) and bit (b) for object o in block bl. */
#define MS_CALC_MARK_BIT(w,b,o,bl)	do {				\
		int i = ((char*)(o) - (bl)->block) >> SGEN_ALLOC_ALIGN_BITS; \
		if (sizeof (mword) == 4) {				\
			(w) = i >> 5;					\
			(b) = i & 31;					\
		} else {						\
			(w) = i >> 6;					\
			(b) = i & 63;					\
		}							\
	} while (0)

#define MS_MARK_BIT(bl,w,b)	((bl)->mark_words [(w)] & (1L << (b)))
#define MS_SET_MARK_BIT(bl,w,b)	((bl)->mark_words [(w)] |= (1L << (b)))
/* CAS-loop variant for the parallel marker.  Sets was_marked to
 * whether the bit was already set when we got to it; retries on a
 * lost race with another marker thread. */
#define MS_PAR_SET_MARK_BIT(was_marked,bl,w,b)	do {			\
		mword __old = (bl)->mark_words [(w)];			\
		mword __bitmask = 1L << (b);				\
		if (__old & __bitmask) {				\
			was_marked = TRUE;				\
			break;						\
		}							\
		if (SGEN_CAS_PTR ((gpointer*)&(bl)->mark_words [(w)],	\
				(gpointer)(__old | __bitmask),		\
				(gpointer)__old) ==			\
				(gpointer)__old) {			\
			was_marked = FALSE;				\
			break;						\
		}							\
	} while (1)

/* A slot is allocated iff its first word (the vtable) is non-NULL and
 * does not point back into the block itself — free-list links do. */
#define MS_OBJ_ALLOCED(o,b)	(*(void**)(o) && (*(char**)(o) < (b)->block || *(char**)(o) >= (b)->block + MS_BLOCK_SIZE))
121 #define MS_BLOCK_OBJ_SIZE_FACTOR (sqrt (2.0))
124 * This way we can lookup block object size indexes for sizes up to
125 * 256 bytes with a single load.
127 #define MS_NUM_FAST_BLOCK_OBJ_SIZE_INDEXES 32
129 static int *block_obj_sizes;
130 static int num_block_obj_sizes;
131 static int fast_block_obj_size_indexes [MS_NUM_FAST_BLOCK_OBJ_SIZE_INDEXES];
133 #define MS_BLOCK_FLAG_PINNED 1
134 #define MS_BLOCK_FLAG_REFS 2
136 #define MS_BLOCK_TYPE_MAX 4
138 #ifdef SGEN_PARALLEL_MARK
139 static LOCK_DECLARE (ms_block_list_mutex);
140 #define LOCK_MS_BLOCK_LIST pthread_mutex_lock (&ms_block_list_mutex)
141 #define UNLOCK_MS_BLOCK_LIST pthread_mutex_unlock (&ms_block_list_mutex)
142 #else
143 #define LOCK_MS_BLOCK_LIST
144 #define UNLOCK_MS_BLOCK_LIST
145 #endif
147 /* we get this at init */
148 static int nursery_bits;
149 static char *nursery_start;
150 static char *nursery_end;
152 #define ptr_in_nursery(p) (SGEN_PTR_IN_NURSERY ((p), nursery_bits, nursery_start, nursery_end))
154 /* non-allocated block free-list */
155 static void *empty_blocks = NULL;
156 static int num_empty_blocks = 0;
157 /* all allocated blocks in the system */
158 static MSBlockInfo *all_blocks;
159 static int num_major_sections = 0;
160 /* one free block list for each block object size */
161 static MSBlockInfo **free_block_lists [MS_BLOCK_TYPE_MAX];
163 static long long stat_major_blocks_alloced = 0;
164 static long long stat_major_blocks_freed = 0;
166 static int
167 ms_find_block_obj_size_index (int size)
169 int i;
170 DEBUG (9, g_assert (size <= SGEN_MAX_SMALL_OBJ_SIZE));
171 for (i = 0; i < num_block_obj_sizes; ++i)
172 if (block_obj_sizes [i] >= size)
173 return i;
174 g_assert_not_reached ();
/* Free-block list for the given pinned/has-references combination. */
#define FREE_BLOCKS(p,r)	(free_block_lists [((p) ? MS_BLOCK_FLAG_PINNED : 0) | ((r) ? MS_BLOCK_FLAG_REFS : 0)])

/* Size-class index for an object of s bytes: one table load for sizes
 * up to 8*MS_NUM_FAST_BLOCK_OBJ_SIZE_INDEXES, linear search otherwise.
 * NOTE: evaluates s more than once — pass a side-effect-free expression. */
#define MS_BLOCK_OBJ_SIZE_INDEX(s)				\
	(((s)+7)>>3 < MS_NUM_FAST_BLOCK_OBJ_SIZE_INDEXES ?	\
	 fast_block_obj_size_indexes [((s)+7)>>3] :		\
	 ms_find_block_obj_size_index ((s)))
184 static void*
185 ms_get_empty_block (void)
187 char *p;
188 int i;
189 void *block, *empty, *next;
191 retry:
192 if (!empty_blocks) {
193 p = mono_sgen_alloc_os_memory_aligned (MS_BLOCK_SIZE * MS_BLOCK_ALLOC_NUM, MS_BLOCK_SIZE, TRUE);
195 for (i = 0; i < MS_BLOCK_ALLOC_NUM; ++i) {
196 block = p;
198 * We do the free list update one after the
199 * other so that other threads can use the new
200 * blocks as quickly as possible.
202 do {
203 empty = empty_blocks;
204 *(void**)block = empty;
205 } while (SGEN_CAS_PTR (&empty_blocks, block, empty) != empty);
206 p += MS_BLOCK_SIZE;
209 SGEN_ATOMIC_ADD (num_empty_blocks, MS_BLOCK_ALLOC_NUM);
211 stat_major_blocks_alloced += MS_BLOCK_ALLOC_NUM;
214 do {
215 empty = empty_blocks;
216 if (!empty)
217 goto retry;
218 block = empty;
219 next = *(void**)block;
220 } while (SGEN_CAS_PTR (&empty_blocks, next, empty) != empty);
222 SGEN_ATOMIC_ADD (num_empty_blocks, -1);
224 *(void**)block = NULL;
226 g_assert (!((mword)block & (MS_BLOCK_SIZE - 1)));
228 mono_sgen_update_heap_boundaries ((mword)block, (mword)block + MS_BLOCK_SIZE);
230 return block;
233 static void
234 ms_free_block (void *block)
236 void *empty;
238 memset (block, 0, MS_BLOCK_SIZE);
240 do {
241 empty = empty_blocks;
242 *(void**)block = empty;
243 } while (SGEN_CAS_PTR (&empty_blocks, block, empty) != empty);
245 SGEN_ATOMIC_ADD (num_empty_blocks, 1);
248 //#define MARKSWEEP_CONSISTENCY_CHECK
250 #ifdef MARKSWEEP_CONSISTENCY_CHECK
251 static void
252 check_block_free_list (MSBlockInfo *block, int size, gboolean pinned)
254 MSBlockInfo *b;
256 for (; block; block = block->next_free) {
257 g_assert (block->obj_size == size);
258 g_assert ((pinned && block->pinned) || (!pinned && !block->pinned));
260 /* blocks in the free lists must have at least
261 one free slot */
262 g_assert (block->free_list);
264 /* the block must be in the all_blocks list */
265 for (b = all_blocks; b; b = b->next) {
266 if (b == block)
267 break;
269 g_assert (b == block);
273 static void
274 check_empty_blocks (void)
276 void *p;
277 int i = 0;
278 for (p = empty_blocks; p; p = *(void**)p)
279 ++i;
280 g_assert (i == num_empty_blocks);
283 static void
284 consistency_check (void)
286 MSBlockInfo *block;
287 int i;
289 /* check all blocks */
290 for (block = all_blocks; block; block = block->next) {
291 int count = MS_BLOCK_FREE / block->obj_size;
292 int num_free = 0;
293 void **free;
295 /* check block header */
296 g_assert (((MSBlockHeader*)block->block)->info == block);
298 /* count number of free slots */
299 for (i = 0; i < count; ++i) {
300 void **obj = (void**) MS_BLOCK_OBJ (block, i);
301 if (!MS_OBJ_ALLOCED (obj, block))
302 ++num_free;
305 /* check free list */
306 for (free = block->free_list; free; free = (void**)*free) {
307 g_assert (MS_BLOCK_FOR_OBJ (free) == block);
308 --num_free;
310 g_assert (num_free == 0);
312 /* check all mark words are zero */
313 for (i = 0; i < MS_NUM_MARK_WORDS; ++i)
314 g_assert (block->mark_words [i] == 0);
317 /* check free blocks */
318 for (i = 0; i < num_block_obj_sizes; ++i) {
319 int j;
320 for (j = 0; j < MS_BLOCK_TYPE_MAX; ++j)
321 check_block_free_list (free_block_lists [j][i], block_obj_sizes [i], j & MS_BLOCK_FLAG_PINNED);
324 check_empty_blocks ();
326 #endif
328 static void
329 ms_alloc_block (int size_index, gboolean pinned, gboolean has_references)
331 int size = block_obj_sizes [size_index];
332 int count = MS_BLOCK_FREE / size;
333 MSBlockInfo *info = mono_sgen_alloc_internal (INTERNAL_MEM_MS_BLOCK_INFO);
334 MSBlockHeader *header;
335 MSBlockInfo **free_blocks = FREE_BLOCKS (pinned, has_references);
336 char *obj_start;
337 int i;
339 DEBUG (9, g_assert (count >= 2));
341 info->obj_size = size;
342 info->pinned = pinned;
343 info->has_references = has_references;
344 info->block = ms_get_empty_block ();
346 header = (MSBlockHeader*) info->block;
347 header->info = info;
349 /* build free list */
350 obj_start = info->block + MS_BLOCK_SKIP;
351 info->free_list = (void**)obj_start;
352 /* we're skipping the last one - it's already NULL */
353 for (i = 0; i < count - 1; ++i) {
354 char *next_obj_start = obj_start + size;
355 *(void**)obj_start = next_obj_start;
356 obj_start = next_obj_start;
359 info->next_free = free_blocks [size_index];
360 free_blocks [size_index] = info;
362 info->next = all_blocks;
363 all_blocks = info;
365 ++num_major_sections;
368 static gboolean
369 obj_is_from_pinned_alloc (char *obj)
371 MSBlockInfo *block = MS_BLOCK_FOR_OBJ (obj);
372 return block->pinned;
375 static void*
376 alloc_obj (int size, gboolean pinned, gboolean has_references)
378 int size_index = MS_BLOCK_OBJ_SIZE_INDEX (size);
379 MSBlockInfo **free_blocks = FREE_BLOCKS (pinned, has_references);
380 MSBlockInfo *block;
381 void *obj;
383 /* FIXME: try to do this without locking */
385 LOCK_MS_BLOCK_LIST;
387 if (!free_blocks [size_index])
388 ms_alloc_block (size_index, pinned, has_references);
390 block = free_blocks [size_index];
391 DEBUG (9, g_assert (block));
393 obj = block->free_list;
394 DEBUG (9, g_assert (obj));
396 block->free_list = *(void**)obj;
397 if (!block->free_list) {
398 free_blocks [size_index] = block->next_free;
399 block->next_free = NULL;
402 UNLOCK_MS_BLOCK_LIST;
405 * FIXME: This should not be necessary because it'll be
406 * overwritten by the vtable immediately.
408 *(void**)obj = NULL;
410 return obj;
413 static void*
414 major_alloc_object (int size, gboolean has_references)
416 return alloc_obj (size, FALSE, has_references);
420 * We're not freeing the block if it's empty. We leave that work for
421 * the next major collection.
423 * This is just called from the domain clearing code, which runs in a
424 * single thread and has the GC lock, so we don't need an extra lock.
426 static void
427 free_object (char *obj, size_t size, gboolean pinned)
429 MSBlockInfo *block = MS_BLOCK_FOR_OBJ (obj);
430 int word, bit;
431 DEBUG (9, g_assert ((pinned && block->pinned) || (!pinned && !block->pinned)));
432 DEBUG (9, g_assert (MS_OBJ_ALLOCED (obj, block)));
433 MS_CALC_MARK_BIT (word, bit, obj, block);
434 DEBUG (9, g_assert (!MS_MARK_BIT (block, word, bit)));
435 if (!block->free_list) {
436 MSBlockInfo **free_blocks = FREE_BLOCKS (pinned, block->has_references);
437 int size_index = MS_BLOCK_OBJ_SIZE_INDEX (size);
438 DEBUG (9, g_assert (!block->next_free));
439 block->next_free = free_blocks [size_index];
440 free_blocks [size_index] = block;
442 memset (obj, 0, size);
443 *(void**)obj = block->free_list;
444 block->free_list = (void**)obj;
447 static void
448 major_free_non_pinned_object (char *obj, size_t size)
450 free_object (obj, size, FALSE);
453 /* size is a multiple of SGEN_ALLOC_ALIGN */
454 static void*
455 major_alloc_small_pinned_obj (size_t size, gboolean has_references)
457 return alloc_obj (size, TRUE, has_references);
460 static void
461 free_pinned_object (char *obj, size_t size)
463 free_object (obj, size, TRUE);
467 * size is already rounded up and we hold the GC lock.
469 static void*
470 major_alloc_degraded (MonoVTable *vtable, size_t size)
472 void *obj;
473 int old_num_sections = num_major_sections;
474 obj = alloc_obj (size, FALSE, vtable->klass->has_references);
475 *(MonoVTable**)obj = vtable;
476 HEAVY_STAT (++stat_objects_alloced_degraded);
477 HEAVY_STAT (stat_bytes_alloced_degraded += size);
478 g_assert (num_major_sections >= old_num_sections);
479 mono_sgen_register_major_sections_alloced (num_major_sections - old_num_sections);
480 return obj;
483 #define MAJOR_OBJ_IS_IN_TO_SPACE(obj) FALSE
486 * obj is some object. If it's not in the major heap (i.e. if it's in
487 * the nursery or LOS), return FALSE. Otherwise return whether it's
488 * been marked or copied.
490 static gboolean
491 major_is_object_live (char *obj)
493 MSBlockInfo *block;
494 int word, bit;
495 mword objsize;
497 if (ptr_in_nursery (obj))
498 return FALSE;
500 objsize = SGEN_ALIGN_UP (mono_sgen_safe_object_get_size ((MonoObject*)obj));
502 /* LOS */
503 if (objsize > SGEN_MAX_SMALL_OBJ_SIZE)
504 return FALSE;
506 /* now we know it's in a major block */
507 block = MS_BLOCK_FOR_OBJ (obj);
508 DEBUG (9, g_assert (!block->pinned));
509 MS_CALC_MARK_BIT (word, bit, obj, block);
510 return MS_MARK_BIT (block, word, bit) ? TRUE : FALSE;
513 static gboolean
514 major_ptr_is_in_non_pinned_space (char *ptr)
516 g_assert_not_reached ();
519 static void
520 major_iterate_objects (gboolean non_pinned, gboolean pinned, IterateObjectCallbackFunc callback, void *data)
522 MSBlockInfo *block;
524 for (block = all_blocks; block; block = block->next) {
525 int count = MS_BLOCK_FREE / block->obj_size;
526 int i;
528 if (block->pinned && !pinned)
529 continue;
530 if (!block->pinned && !non_pinned)
531 continue;
533 for (i = 0; i < count; ++i) {
534 void **obj = (void**) MS_BLOCK_OBJ (block, i);
535 if (MS_OBJ_ALLOCED (obj, block))
536 callback ((char*)obj, block->obj_size, data);
/* Mark&sweep keeps no scan starts, so there is nothing to check. */
static void
major_check_scan_starts (void)
{
}
546 static void
547 major_dump_heap (FILE *heap_dump_file)
549 MSBlockInfo *block;
551 for (block = all_blocks; block; block = block->next) {
552 int count = MS_BLOCK_FREE / block->obj_size;
553 int i;
554 int start = -1;
556 fprintf (heap_dump_file, "<section type=\"%s\" size=\"%zu\">\n", "old", (size_t)MS_BLOCK_FREE);
558 for (i = 0; i <= count; ++i) {
559 if ((i < count) && MS_OBJ_ALLOCED (MS_BLOCK_OBJ (block, i), block)) {
560 if (start < 0)
561 start = i;
562 } else {
563 if (start >= 0) {
564 mono_sgen_dump_occupied (MS_BLOCK_OBJ (block, start), MS_BLOCK_OBJ (block, i), block->block);
565 start = -1;
570 fprintf (heap_dump_file, "</section>\n");
#define LOAD_VTABLE	SGEN_LOAD_VTABLE

/* Serial marker: test-and-set the mark bit and enqueue the object for
 * scanning if its block can contain references.  The ALLOCED check
 * makes this safe for pin-queue entries that point at free slots. */
#define MS_MARK_OBJECT_AND_ENQUEUE_CHECKED(obj,block,queue) do {	\
		int __word, __bit;					\
		MS_CALC_MARK_BIT (__word, __bit, (obj), (block));	\
		if (!MS_MARK_BIT ((block), __word, __bit) && MS_OBJ_ALLOCED ((obj), (block))) {	\
			MS_SET_MARK_BIT ((block), __word, __bit);	\
			if ((block)->has_references)			\
				GRAY_OBJECT_ENQUEUE ((queue), (obj));	\
			binary_protocol_mark ((obj), (gpointer)LOAD_VTABLE ((obj)), mono_sgen_safe_object_get_size ((MonoObject*)(obj)));	\
		}							\
	} while (0)
/* Parallel marker: atomically set the mark bit; only the thread that
 * actually flipped the bit enqueues the object. */
#define MS_MARK_OBJECT_AND_ENQUEUE(obj,block,queue) do {		\
		int __word, __bit;					\
		gboolean __was_marked;					\
		DEBUG (9, g_assert (MS_OBJ_ALLOCED ((obj), (block))));	\
		MS_CALC_MARK_BIT (__word, __bit, (obj), (block));	\
		MS_PAR_SET_MARK_BIT (__was_marked, (block), __word, __bit);	\
		if (!__was_marked) {					\
			if ((block)->has_references)			\
				GRAY_OBJECT_ENQUEUE ((queue), (obj));	\
			binary_protocol_mark ((obj), (gpointer)LOAD_VTABLE ((obj)), mono_sgen_safe_object_get_size ((MonoObject*)(obj)));	\
		}							\
	} while (0)
599 #include "sgen-major-copy-object.h"
601 static void
602 major_copy_or_mark_object (void **ptr, SgenGrayQueue *queue)
604 void *obj = *ptr;
605 mword vtable_word = *(mword*)obj;
606 MonoVTable *vt = (MonoVTable*)(vtable_word & ~SGEN_VTABLE_BITS_MASK);
607 mword objsize;
608 MSBlockInfo *block;
610 HEAVY_STAT (++stat_copy_object_called_major);
612 DEBUG (9, g_assert (obj));
613 DEBUG (9, g_assert (current_collection_generation == GENERATION_OLD));
615 if (ptr_in_nursery (obj)) {
616 int word, bit;
617 gboolean has_references;
618 void *destination;
620 if (vtable_word & SGEN_FORWARDED_BIT) {
621 *ptr = (void*)vt;
622 return;
625 if (vtable_word & SGEN_PINNED_BIT)
626 return;
628 HEAVY_STAT (++stat_objects_copied_major);
630 objsize = SGEN_ALIGN_UP (mono_sgen_par_object_get_size (vt, (MonoObject*)obj));
631 has_references = SGEN_VTABLE_HAS_REFERENCES (vt);
633 destination = major_alloc_object (objsize, has_references);
635 if (SGEN_CAS_PTR (obj, (void*)((mword)destination | SGEN_FORWARDED_BIT), vt) == vt) {
636 gboolean was_marked;
638 par_copy_object_no_checks (destination, vt, obj, objsize, has_references ? queue : NULL);
639 obj = destination;
640 *ptr = obj;
643 * FIXME: If we make major_alloc_object() give
644 * us the block info, too, we won't have to
645 * re-fetch it here.
647 block = MS_BLOCK_FOR_OBJ (obj);
648 MS_CALC_MARK_BIT (word, bit, obj, block);
649 DEBUG (9, g_assert (!MS_MARK_BIT (block, word, bit)));
650 MS_PAR_SET_MARK_BIT (was_marked, block, word, bit);
651 } else {
653 * FIXME: We have allocated destination, but
654 * we cannot use it. Give it back to the
655 * allocator.
657 *(void**)destination = NULL;
659 vtable_word = *(mword*)obj;
660 g_assert (vtable_word & SGEN_FORWARDED_BIT);
662 obj = (void*)(vtable_word & ~SGEN_VTABLE_BITS_MASK);
664 *ptr = obj;
666 return;
669 objsize = SGEN_ALIGN_UP (mono_sgen_par_object_get_size (vt, (MonoObject*)obj));
671 if (objsize > SGEN_MAX_SMALL_OBJ_SIZE) {
672 if (vtable_word & SGEN_PINNED_BIT)
673 return;
674 binary_protocol_pin (obj, vt, mono_sgen_safe_object_get_size ((MonoObject*)obj));
675 if (SGEN_CAS_PTR (obj, (void*)(vtable_word | SGEN_PINNED_BIT), (void*)vtable_word) == (void*)vtable_word) {
676 if (SGEN_VTABLE_HAS_REFERENCES (vt))
677 GRAY_OBJECT_ENQUEUE (queue, obj);
678 } else {
679 g_assert (SGEN_OBJECT_IS_PINNED (obj));
681 return;
684 block = MS_BLOCK_FOR_OBJ (obj);
685 MS_MARK_OBJECT_AND_ENQUEUE (obj, block, queue);
688 #include "sgen-major-scan-object.h"
690 static void
691 mark_pinned_objects_in_block (MSBlockInfo *block, SgenGrayQueue *queue)
693 int i;
694 int last_index = -1;
695 int count = MS_BLOCK_FREE / block->obj_size;
697 for (i = 0; i < block->pin_queue_num_entries; ++i) {
698 int index = MS_BLOCK_OBJ_INDEX (block->pin_queue_start [i], block);
699 DEBUG (9, g_assert (index >= 0 && index < count));
700 if (index == last_index)
701 continue;
702 MS_MARK_OBJECT_AND_ENQUEUE_CHECKED (MS_BLOCK_OBJ (block, index), block, queue);
703 last_index = index;
707 static void
708 major_sweep (void)
710 MSBlockInfo **iter;
711 int i;
713 /* clear all the free lists */
714 for (i = 0; i < MS_BLOCK_TYPE_MAX; ++i) {
715 MSBlockInfo **free_blocks = free_block_lists [i];
716 int j;
717 for (j = 0; j < num_block_obj_sizes; ++j)
718 free_blocks [j] = NULL;
721 /* traverse all blocks, free and zero unmarked objects */
722 iter = &all_blocks;
723 while (*iter) {
724 MSBlockInfo *block = *iter;
725 int count = MS_BLOCK_FREE / block->obj_size;
726 gboolean have_live = FALSE;
727 int obj_index;
729 block->free_list = NULL;
731 for (obj_index = 0; obj_index < count; ++obj_index) {
732 int word, bit;
733 void *obj = MS_BLOCK_OBJ (block, obj_index);
735 MS_CALC_MARK_BIT (word, bit, obj, block);
736 if (MS_MARK_BIT (block, word, bit)) {
737 DEBUG (9, g_assert (MS_OBJ_ALLOCED (obj, block)));
738 have_live = TRUE;
739 } else {
740 /* an unmarked object */
741 if (MS_OBJ_ALLOCED (obj, block)) {
742 binary_protocol_empty (obj, block->obj_size);
743 memset (obj, 0, block->obj_size);
745 *(void**)obj = block->free_list;
746 block->free_list = obj;
750 /* reset mark bits */
751 memset (block->mark_words, 0, sizeof (mword) * MS_NUM_MARK_WORDS);
754 * FIXME: reverse free list so that it's in address
755 * order
758 if (have_live) {
759 iter = &block->next;
762 * If there are free slots in the block, add
763 * the block to the corresponding free list.
765 if (block->free_list) {
766 MSBlockInfo **free_blocks = FREE_BLOCKS (block->pinned, block->has_references);
767 int index = MS_BLOCK_OBJ_SIZE_INDEX (block->obj_size);
768 block->next_free = free_blocks [index];
769 free_blocks [index] = block;
771 } else {
773 * Blocks without live objects are removed from the
774 * block list and freed.
776 *iter = block->next;
778 ms_free_block (block->block);
779 mono_sgen_free_internal (block, INTERNAL_MEM_MS_BLOCK_INFO);
781 --num_major_sections;
786 static int count_pinned_ref;
787 static int count_pinned_nonref;
788 static int count_nonpinned_ref;
789 static int count_nonpinned_nonref;
791 static void
792 count_nonpinned_callback (char *obj, size_t size, void *data)
794 MonoVTable *vtable = (MonoVTable*)LOAD_VTABLE (obj);
796 if (vtable->klass->has_references)
797 ++count_nonpinned_ref;
798 else
799 ++count_nonpinned_nonref;
802 static void
803 count_pinned_callback (char *obj, size_t size, void *data)
805 MonoVTable *vtable = (MonoVTable*)LOAD_VTABLE (obj);
807 if (vtable->klass->has_references)
808 ++count_pinned_ref;
809 else
810 ++count_pinned_nonref;
813 static void __attribute__ ((unused))
814 count_ref_nonref_objs (void)
816 int total;
818 count_pinned_ref = 0;
819 count_pinned_nonref = 0;
820 count_nonpinned_ref = 0;
821 count_nonpinned_nonref = 0;
823 major_iterate_objects (TRUE, FALSE, count_nonpinned_callback, NULL);
824 major_iterate_objects (FALSE, TRUE, count_pinned_callback, NULL);
826 total = count_pinned_nonref + count_nonpinned_nonref + count_pinned_ref + count_nonpinned_ref;
828 g_print ("ref: %d pinned %d non-pinned non-ref: %d pinned %d non-pinned -- %.1f\n",
829 count_pinned_ref, count_nonpinned_ref,
830 count_pinned_nonref, count_nonpinned_nonref,
831 (count_pinned_nonref + count_nonpinned_nonref) * 100.0 / total);
834 static int
835 ms_calculate_block_obj_sizes (double factor, int *arr)
837 double target_size = sizeof (MonoObject);
838 int num_sizes = 0;
839 int last_size = 0;
841 do {
842 int target_count = ceil (MS_BLOCK_FREE / target_size);
843 int size = MIN ((MS_BLOCK_FREE / target_count) & ~(SGEN_ALLOC_ALIGN - 1), SGEN_MAX_SMALL_OBJ_SIZE);
845 if (size != last_size) {
846 if (arr)
847 arr [num_sizes] = size;
848 ++num_sizes;
849 last_size = size;
852 target_size *= factor;
853 } while (last_size < SGEN_MAX_SMALL_OBJ_SIZE);
855 return num_sizes;
858 /* only valid during minor collections */
859 static int old_num_major_sections;
861 static void
862 major_start_nursery_collection (void)
864 #ifdef MARKSWEEP_CONSISTENCY_CHECK
865 consistency_check ();
866 #endif
868 old_num_major_sections = num_major_sections;
871 static void
872 major_finish_nursery_collection (void)
874 #ifdef MARKSWEEP_CONSISTENCY_CHECK
875 consistency_check ();
876 #endif
877 mono_sgen_register_major_sections_alloced (num_major_sections - old_num_major_sections);
880 static void
881 major_finish_major_collection (void)
883 int section_reserve = mono_sgen_get_minor_collection_allowance () / MS_BLOCK_SIZE;
886 * FIXME: We don't free blocks on 32 bit platforms because it
887 * can lead to address space fragmentation, since we're
888 * allocating blocks in larger contingents.
890 if (sizeof (mword) < 8)
891 return;
893 while (num_empty_blocks > section_reserve) {
894 void *next = *(void**)empty_blocks;
895 mono_sgen_free_os_memory (empty_blocks, MS_BLOCK_SIZE);
896 empty_blocks = next;
898 * Needs not be atomic because this is running
899 * single-threaded.
901 --num_empty_blocks;
903 ++stat_major_blocks_freed;
907 static void
908 major_find_pin_queue_start_ends (SgenGrayQueue *queue)
910 MSBlockInfo *block;
912 for (block = all_blocks; block; block = block->next) {
913 block->pin_queue_start = mono_sgen_find_optimized_pin_queue_area (block->block + MS_BLOCK_SKIP, block->block + MS_BLOCK_SIZE,
914 &block->pin_queue_num_entries);
918 static void
919 major_pin_objects (SgenGrayQueue *queue)
921 MSBlockInfo *block;
923 for (block = all_blocks; block; block = block->next)
924 mark_pinned_objects_in_block (block, queue);
927 static void
928 major_init_to_space (void)
932 static void
933 major_report_pinned_memory_usage (void)
935 g_assert_not_reached ();
938 static gint64
939 major_get_used_size (void)
941 gint64 size = 0;
942 MSBlockInfo *block;
944 for (block = all_blocks; block; block = block->next) {
945 int count = MS_BLOCK_FREE / block->obj_size;
946 void **iter;
947 size += count * block->obj_size;
948 for (iter = block->free_list; iter; iter = (void**)*iter)
949 size -= block->obj_size;
952 return size;
955 static int
956 get_num_major_sections (void)
958 return num_major_sections;
961 void
962 mono_sgen_marksweep_init (SgenMajorCollector *collector, int the_nursery_bits, char *the_nursery_start, char *the_nursery_end)
964 int i;
966 nursery_bits = the_nursery_bits;
967 nursery_start = the_nursery_start;
968 nursery_end = the_nursery_end;
970 mono_sgen_register_fixed_internal_mem_type (INTERNAL_MEM_MS_BLOCK_INFO, sizeof (MSBlockInfo));
972 num_block_obj_sizes = ms_calculate_block_obj_sizes (MS_BLOCK_OBJ_SIZE_FACTOR, NULL);
973 block_obj_sizes = mono_sgen_alloc_internal_dynamic (sizeof (int) * num_block_obj_sizes, INTERNAL_MEM_MS_TABLES);
974 ms_calculate_block_obj_sizes (MS_BLOCK_OBJ_SIZE_FACTOR, block_obj_sizes);
978 int i;
979 g_print ("block object sizes:\n");
980 for (i = 0; i < num_block_obj_sizes; ++i)
981 g_print ("%d\n", block_obj_sizes [i]);
985 for (i = 0; i < MS_BLOCK_TYPE_MAX; ++i)
986 free_block_lists [i] = mono_sgen_alloc_internal_dynamic (sizeof (MSBlockInfo*) * num_block_obj_sizes, INTERNAL_MEM_MS_TABLES);
988 for (i = 0; i < MS_NUM_FAST_BLOCK_OBJ_SIZE_INDEXES; ++i)
989 fast_block_obj_size_indexes [i] = ms_find_block_obj_size_index (i * 8);
990 for (i = 0; i < MS_NUM_FAST_BLOCK_OBJ_SIZE_INDEXES * 8; ++i)
991 g_assert (MS_BLOCK_OBJ_SIZE_INDEX (i) == ms_find_block_obj_size_index (i));
993 LOCK_INIT (ms_block_list_mutex);
995 mono_counters_register ("# major blocks allocated", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_blocks_alloced);
996 mono_counters_register ("# major blocks freed", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_blocks_freed);
998 collector->section_size = MAJOR_SECTION_SIZE;
1000 collector->is_object_live = major_is_object_live;
1001 collector->alloc_small_pinned_obj = major_alloc_small_pinned_obj;
1002 collector->alloc_degraded = major_alloc_degraded;
1003 collector->copy_or_mark_object = major_copy_or_mark_object;
1004 collector->alloc_object = major_alloc_object;
1005 collector->free_pinned_object = free_pinned_object;
1006 collector->iterate_objects = major_iterate_objects;
1007 collector->free_non_pinned_object = major_free_non_pinned_object;
1008 collector->find_pin_queue_start_ends = major_find_pin_queue_start_ends;
1009 collector->pin_objects = major_pin_objects;
1010 collector->init_to_space = major_init_to_space;
1011 collector->sweep = major_sweep;
1012 collector->check_scan_starts = major_check_scan_starts;
1013 collector->dump_heap = major_dump_heap;
1014 collector->get_used_size = major_get_used_size;
1015 collector->start_nursery_collection = major_start_nursery_collection;
1016 collector->finish_nursery_collection = major_finish_nursery_collection;
1017 collector->finish_major_collection = major_finish_major_collection;
1018 collector->ptr_is_in_non_pinned_space = major_ptr_is_in_non_pinned_space;
1019 collector->obj_is_from_pinned_alloc = obj_is_from_pinned_alloc;
1020 collector->report_pinned_memory_usage = major_report_pinned_memory_usage;
1021 collector->get_num_major_sections = get_num_major_sections;
1022 FILL_COLLECTOR_COPY_OBJECT (collector);
1023 FILL_COLLECTOR_SCAN_OBJECT (collector);
1026 #endif