3 * Internal lock-free memory allocator.
5 * Copyright (C) 2012 Xamarin Inc
7 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
16 #include "mono/sgen/sgen-gc.h"
17 #include "mono/utils/lock-free-alloc.h"
18 #include "mono/sgen/sgen-memory-governor.h"
19 #include "mono/sgen/sgen-client.h"
22 * When allocating sgen memory we choose the allocator with the smallest slot size
23 * that can fit our requested size. These slots are allocated within a block that
24 * can contain at least 2 slots of the specific size.
26 * Currently, slots from 8 to 2044/2040 are allocated inside 4096 sized blocks,
27 * 2728 to 4092/4088 inside 8192 sized blocks, and higher inside 16384 sized
28 * blocks. We also need to make sure the slots are pointer size aligned so we
29 * don't allocate unaligned memory.
 * The computation of these sizes stems from two basic rules:
32 * - if we use slots of size s1 that fit n times in a block, it is illogical
33 * to use another slot of size s2 which also fits the same n times in a block.
34 * - if we use slots of size s1 that fit n times in a block, there is no
35 * s2 > s1 that can fit n times in the block. That would mean we are wasting memory
36 * when allocating size S where s1 < S <= s2.
/*
 * Ascending slot sizes served by the lock-free allocators, one allocator per
 * entry.  Each size is a multiple of the pointer size chosen below, so slots
 * handed out are always pointer-aligned (see the derivation rules above).
 */
#if SIZEOF_VOID_P == 4
/* 32-bit targets: sizes are 4-byte aligned. */
static const int allocator_sizes [] = {
	8, 16, 24, 32, 40, 48, 64, 80,
	96, 124, 160, 192, 224, 252, 292, 340,
	408, 452, 508, 584, 680, 816, 1020,
	1364, 2044, 2728, 4092, 5460, 8188 };
#else
/* 64-bit targets: sizes are 8-byte aligned. */
static const int allocator_sizes [] = {
	8, 16, 24, 32, 40, 48, 64, 80,
	96, 128, 160, 192, 224, 248, 288, 336,
	368, 448, 504, 584, 680, 816, 1016,
	1360, 2040, 2728, 4088, 5456, 8184 };
#endif

/* Number of size classes, and therefore of allocators. */
#define NUM_ALLOCATORS	(sizeof (allocator_sizes) / sizeof (int))

/* Superblock size backing each size class; filled in by sgen_init_internal_allocator (). */
static int allocator_block_sizes [NUM_ALLOCATORS];

/* One lock-free size class + allocator per entry of allocator_sizes. */
static MonoLockFreeAllocSizeClass size_classes [NUM_ALLOCATORS];
static MonoLockFreeAllocator allocators [NUM_ALLOCATORS];

#ifdef HEAVY_STATISTICS
/* Per-size-class allocation counters, dumped by sgen_report_internal_mem_usage (). */
static int allocator_sizes_stats [NUM_ALLOCATORS];
#endif
/*
 * Compute the superblock size used for slots of @slot_size bytes: the
 * smallest power-of-two multiple of the page size whose usable area fits at
 * least two pointer-aligned slots, capped at LOCK_FREE_ALLOC_SB_MAX_SIZE.
 */
static size_t
block_size (size_t slot_size)
{
	/*
	 * Lazily cached page size.  NOTE(review): the unsynchronized init looks
	 * like a benign race — mono_pagesize () presumably always returns the
	 * same value — confirm before relying on concurrent first calls.
	 */
	static int pagesize = -1;

	int size;
	/* Round the slot up to pointer alignment before sizing the block. */
	size_t aligned_slot_size = SGEN_ALIGN_UP_TO (slot_size, SIZEOF_VOID_P);

	if (pagesize == -1)
		pagesize = mono_pagesize ();

	/* Try page size, 2 pages, 4 pages, ... until two slots fit. */
	for (size = pagesize; size < LOCK_FREE_ALLOC_SB_MAX_SIZE; size <<= 1) {
		if (aligned_slot_size * 2 <= LOCK_FREE_ALLOC_SB_USABLE_SIZE (size))
			return size;
	}
	/* Fall back to the largest supported superblock. */
	return LOCK_FREE_ALLOC_SB_MAX_SIZE;
}
82 * Find the allocator index for memory chunks that can contain @size
86 index_for_size (size_t size
)
89 /* do a binary search or lookup table later. */
90 for (slot
= 0; slot
< NUM_ALLOCATORS
; ++slot
) {
91 if (allocator_sizes
[slot
] >= size
)
94 g_assert_not_reached ();
/*
 * Allocator indexes for the fixed INTERNAL_MEM_XXX types. -1 if that
 * type has not been registered (i.e. is allocated dynamically).
 */
static int fixed_type_allocator_indexes [INTERNAL_MEM_MAX];

/*
 * Bind internal memory @type to the allocator size class that fits @size.
 * Re-registering a type is only allowed with a size mapping to the same
 * slot; a mismatched double registration is a fatal error.
 */
void
sgen_register_fixed_internal_mem_type (int type, size_t size)
{
	int slot;

	g_assert (type >= 0 && type < INTERNAL_MEM_MAX);
	/* Fixed types must fit in the largest lock-free slot. */
	g_assert (size <= allocator_sizes [NUM_ALLOCATORS - 1]);

	slot = index_for_size (size);
	g_assert (slot >= 0);

	if (fixed_type_allocator_indexes [type] == -1)
		fixed_type_allocator_indexes [type] = slot;
	else {
		if (fixed_type_allocator_indexes [type] != slot)
			g_error ("Invalid double registration of type %d old slot %d new slot %d", type, fixed_type_allocator_indexes [type], slot);
	}
}
/*
 * Human-readable name for an INTERNAL_MEM_XXX type, used in heap dumps and
 * allocation-failure messages.  Unknown values are forwarded to the client
 * (sgen_client_description_for_internal_mem_type), which must resolve them.
 */
static const char*
description_for_type (int type)
{
	switch (type) {
	case INTERNAL_MEM_PIN_QUEUE: return "pin-queue";
	case INTERNAL_MEM_FRAGMENT: return "fragment";
	case INTERNAL_MEM_SECTION: return "section";
	case INTERNAL_MEM_SCAN_STARTS: return "scan-starts";
	case INTERNAL_MEM_FIN_TABLE: return "fin-table";
	case INTERNAL_MEM_FINALIZE_ENTRY: return "finalize-entry";
	case INTERNAL_MEM_FINALIZE_READY: return "finalize-ready";
	case INTERNAL_MEM_DISLINK_TABLE: return "dislink-table";
	case INTERNAL_MEM_DISLINK: return "dislink";
	case INTERNAL_MEM_ROOTS_TABLE: return "roots-table";
	case INTERNAL_MEM_ROOT_RECORD: return "root-record";
	case INTERNAL_MEM_STATISTICS: return "statistics";
	case INTERNAL_MEM_STAT_PINNED_CLASS: return "pinned-class";
	case INTERNAL_MEM_STAT_REMSET_CLASS: return "remset-class";
	case INTERNAL_MEM_STAT_GCHANDLE_CLASS: return "gchandle-class";
	case INTERNAL_MEM_GRAY_QUEUE: return "gray-queue";
	case INTERNAL_MEM_MS_TABLES: return "marksweep-tables";
	case INTERNAL_MEM_MS_BLOCK_INFO: return "marksweep-block-info";
	case INTERNAL_MEM_MS_BLOCK_INFO_SORT: return "marksweep-block-info-sort";
	case INTERNAL_MEM_WORKER_DATA: return "worker-data";
	case INTERNAL_MEM_THREAD_POOL_JOB: return "thread-pool-job";
	case INTERNAL_MEM_BRIDGE_DATA: return "bridge-data";
	case INTERNAL_MEM_OLD_BRIDGE_HASH_TABLE: return "old-bridge-hash-table";
	case INTERNAL_MEM_OLD_BRIDGE_HASH_TABLE_ENTRY: return "old-bridge-hash-table-entry";
	case INTERNAL_MEM_BRIDGE_HASH_TABLE: return "bridge-hash-table";
	case INTERNAL_MEM_BRIDGE_HASH_TABLE_ENTRY: return "bridge-hash-table-entry";
	case INTERNAL_MEM_TARJAN_BRIDGE_HASH_TABLE: return "tarjan-bridge-hash-table";
	case INTERNAL_MEM_TARJAN_BRIDGE_HASH_TABLE_ENTRY: return "tarjan-bridge-hash-table-entry";
	case INTERNAL_MEM_TARJAN_OBJ_BUCKET: return "tarjan-bridge-object-buckets";
	case INTERNAL_MEM_BRIDGE_ALIVE_HASH_TABLE: return "bridge-alive-hash-table";
	case INTERNAL_MEM_BRIDGE_ALIVE_HASH_TABLE_ENTRY: return "bridge-alive-hash-table-entry";
	case INTERNAL_MEM_BRIDGE_DEBUG: return "bridge-debug";
	case INTERNAL_MEM_TOGGLEREF_DATA: return "toggleref-data";
	case INTERNAL_MEM_CARDTABLE_MOD_UNION: return "cardtable-mod-union";
	case INTERNAL_MEM_BINARY_PROTOCOL: return "binary-protocol";
	case INTERNAL_MEM_TEMPORARY: return "temporary";
	case INTERNAL_MEM_LOG_ENTRY: return "log-entry";
	case INTERNAL_MEM_COMPLEX_DESCRIPTORS: return "complex-descriptors";
	default: {
		const char *description = sgen_client_description_for_internal_mem_type (type);
		SGEN_ASSERT (0, description, "Unknown internal mem type");
		return description;
	}
	}
}
/*
 * Allocate @size bytes of internal memory of kind @type.  Sizes up to the
 * largest slot are served zero-initialized from the lock-free allocators;
 * larger requests go straight to the OS.  When @assert_on_failure is set an
 * allocation failure aborts with a diagnostic instead of returning NULL.
 * Free with sgen_free_internal_dynamic (), passing the same @size.
 */
void*
sgen_alloc_internal_dynamic (size_t size, int type, gboolean assert_on_failure)
{
	int index;
	void *p;

	if (size > allocator_sizes [NUM_ALLOCATORS - 1]) {
		/* Oversized: bypass the slot allocators and map OS memory. */
		p = sgen_alloc_os_memory (size, (SgenAllocFlags)(SGEN_ALLOC_INTERNAL | SGEN_ALLOC_ACTIVATE), NULL, MONO_MEM_ACCOUNT_SGEN_INTERNAL);
		if (!p && assert_on_failure)
			sgen_assert_memory_alloc (NULL, size, description_for_type (type));
	} else {
		index = index_for_size (size);

#ifdef HEAVY_STATISTICS
		++ allocator_sizes_stats [index];
#endif

		p = mono_lock_free_alloc (&allocators [index]);
		if (!p && assert_on_failure)
			sgen_assert_memory_alloc (NULL, size, description_for_type (type));
		/* Lock-free slots are recycled, so clear them before handing out. */
		if (p)
			memset (p, 0, size);
	}

	SGEN_ASSERT (0, !(((mword)p) & (sizeof(gpointer) - 1)), "Why do we allocate unaligned addresses ?");

	return p;
}
/*
 * Free memory obtained from sgen_alloc_internal_dynamic ().  @size must be
 * the size passed at allocation time: it selects between the OS path and
 * the lock-free allocator, and block_size (size) identifies the superblock.
 * NULL is accepted and ignored.
 */
void
sgen_free_internal_dynamic (void *addr, size_t size, int type)
{
	if (!addr)
		return;

	if (size > allocator_sizes [NUM_ALLOCATORS - 1])
		sgen_free_os_memory (addr, size, SGEN_ALLOC_INTERNAL, MONO_MEM_ACCOUNT_SGEN_INTERNAL);
	else
		mono_lock_free_free (addr, block_size (size));
}
/*
 * Allocate a zeroed slot for a fixed internal memory @type.  The type must
 * have been bound to a size class via sgen_register_fixed_internal_mem_type ()
 * beforehand (asserted below).  Free with sgen_free_internal ().
 */
void*
sgen_alloc_internal (int type)
{
	int index, size;
	void *p;

	index = fixed_type_allocator_indexes [type];
	g_assert (index >= 0 && index < NUM_ALLOCATORS);

#ifdef HEAVY_STATISTICS
	++ allocator_sizes_stats [index];
#endif

	size = allocator_sizes [index];

	p = mono_lock_free_alloc (&allocators [index]);
	/* Slots are recycled; clear the whole slot, not just the request. */
	memset (p, 0, size);

	SGEN_ASSERT (0, !(((mword)p) & (sizeof(gpointer) - 1)), "Why do we allocate unaligned addresses ?");

	return p;
}
/*
 * Free a slot obtained from sgen_alloc_internal () for the same fixed @type.
 * The registered size class supplies the superblock size needed by the
 * lock-free allocator.  NULL is accepted and ignored.
 */
void
sgen_free_internal (void *addr, int type)
{
	int index;

	if (!addr)
		return;

	index = fixed_type_allocator_indexes [type];
	g_assert (index >= 0 && index < NUM_ALLOCATORS);

	mono_lock_free_free (addr, allocator_block_sizes [index]);
}
/*
 * Dump per-type internal memory usage to @heap_dump_file.
 * NOTE(review): the body is disabled — it references accounting state
 * (large_internal_bytes_alloced, pinned_chunk_bytes_alloced,
 * unmanaged_allocator) that is not declared anywhere in this file; confirm
 * whether this should be re-implemented or removed.
 */
void
sgen_dump_internal_mem_usage (FILE *heap_dump_file)
{
	/*
	int i;

	fprintf (heap_dump_file, "<other-mem-usage type=\"large-internal\" size=\"%lld\"/>\n", large_internal_bytes_alloced);
	fprintf (heap_dump_file, "<other-mem-usage type=\"pinned-chunks\" size=\"%lld\"/>\n", pinned_chunk_bytes_alloced);
	for (i = 0; i < INTERNAL_MEM_MAX; ++i) {
		fprintf (heap_dump_file, "<other-mem-usage type=\"%s\" size=\"%ld\"/>\n",
				description_for_type (i), unmanaged_allocator.small_internal_mem_bytes [i]);
	}
	*/
}
/*
 * Print the per-size-class allocation counters to stdout.  Only does
 * anything in HEAVY_STATISTICS builds; otherwise it is a no-op.
 */
void
sgen_report_internal_mem_usage (void)
{
#ifdef HEAVY_STATISTICS
	/* Declared inside the conditional block so non-stats builds do not
	 * emit an unused-variable warning. */
	int i;

	printf ("size -> # allocations\n");
	for (i = 0; i < NUM_ALLOCATORS; ++i)
		printf ("%d -> %d\n", allocator_sizes [i], allocator_sizes_stats [i]);
#endif
}
/*
 * One-time initialization of the internal allocator: reset the fixed-type
 * index table, build one lock-free size class + allocator per entry of
 * allocator_sizes, then sanity-check the size table against the superblock
 * geometry.  Must run before any sgen_alloc_internal* call.
 */
void
sgen_init_internal_allocator (void)
{
	size_t size;
	int i;

	/* No fixed types are registered yet. */
	for (i = 0; i < INTERNAL_MEM_MAX; ++i)
		fixed_type_allocator_indexes [i] = -1;

	for (i = 0; i < NUM_ALLOCATORS; ++i) {
		allocator_block_sizes [i] = block_size (allocator_sizes [i]);
		mono_lock_free_allocator_init_size_class (&size_classes [i], allocator_sizes [i], allocator_block_sizes [i]);
		mono_lock_free_allocator_init_allocator (&allocators [i], &size_classes [i], MONO_MEM_ACCOUNT_SGEN_INTERNAL);
	}

	/* Verify the size table for every power-of-two superblock size. */
	for (size = mono_pagesize (); size <= LOCK_FREE_ALLOC_SB_MAX_SIZE; size <<= 1) {
		/* Largest aligned slot that still fits twice in this block. */
		int max_size = (LOCK_FREE_ALLOC_SB_USABLE_SIZE (size) / 2) & ~(SIZEOF_VOID_P - 1);
		/*
		 * we assert that allocator_sizes contains the biggest possible object size
		 * per block which has to be an aligned address.
		 * (4K => 2040, 8k => 4088, 16k => 8184 on 64bits),
		 * so that we do not get different block sizes for sizes that should go to the same one
		 */
		g_assert (allocator_sizes [index_for_size (max_size)] == max_size);
		g_assert (block_size (max_size) == size);
		if (size < LOCK_FREE_ALLOC_SB_MAX_SIZE)
			g_assert (block_size (max_size + 1) == size << 1);
	}
}