/*
 * Gray queue management.
 *
 * Copyright 2001-2003 Ximian, Inc
 * Copyright 2003-2010 Novell, Inc.
 * Copyright (C) 2012 Xamarin Inc
 *
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
 */

#include "mono/sgen/sgen-gc.h"
#include "mono/sgen/sgen-protocol.h"
#ifdef HEAVY_STATISTICS
guint64 stat_gray_queue_section_alloc;
guint64 stat_gray_queue_section_free;
guint64 stat_gray_queue_enqueue_fast_path;
guint64 stat_gray_queue_dequeue_fast_path;
guint64 stat_gray_queue_enqueue_slow_path;
guint64 stat_gray_queue_dequeue_slow_path;
#endif
#define GRAY_QUEUE_LENGTH_LIMIT	64
#ifdef SGEN_CHECK_GRAY_OBJECT_SECTIONS
#define STATE_TRANSITION(s,o,n)	do {					\
		int __old = (o);					\
		if (mono_atomic_cas_i32 ((volatile int*)&(s)->state, (n), __old) != __old) \
			g_assert_not_reached ();			\
	} while (0)
#define STATE_SET(s,v)		(s)->state = (v)
#define STATE_ASSERT(s,v)	g_assert ((s)->state == (v))
#else
#define STATE_TRANSITION(s,o,n)
#define STATE_SET(s,v)
#define STATE_ASSERT(s,v)
#endif
/*
 * Whenever we dispose a gray queue, we save its free list. Then, in the next collection,
 * we reuse that free list for the new gray queue.
 */
static GrayQueueSection *last_gray_queue_free_list;
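
/*
 * Pull a section from the queue's free list if one is available, otherwise allocate a
 * fresh one from the internal allocator, then link it in as the new head section and
 * reset the cursor to point just before its first entry.
 */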
void
sgen_gray_object_alloc_queue_section (SgenGrayQueue *queue, gboolean is_parallel)
{
	GrayQueueSection *section;

	if (queue->free_list) {
		/* Use the previously allocated queue sections if possible */
		section = queue->free_list;
		queue->free_list = section->next;
		STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_FREE_LIST, GRAY_QUEUE_SECTION_STATE_FLOATING);
	} else {
		HEAVY_STAT (stat_gray_queue_section_alloc ++);

		/* Allocate a new section */
		section = (GrayQueueSection *)sgen_alloc_internal (INTERNAL_MEM_GRAY_QUEUE);
		STATE_SET (section, GRAY_QUEUE_SECTION_STATE_FLOATING);
	}

	/* Section is empty */
	section->size = 0;

	STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_FLOATING, GRAY_QUEUE_SECTION_STATE_ENQUEUED);

	/* Link it with the others */
	section->next = queue->first;
	section->prev = NULL;
	if (queue->first)
		queue->first->prev = section;
	else
		queue->last = section;
	queue->first = section;
	queue->cursor = section->entries - 1;

	if (is_parallel) {
		mono_memory_write_barrier ();
		/*
		 * We could probably optimize the code to only rely on the write barrier
		 * for synchronization with the stealer thread. Additionally we could also
		 * do a write barrier once every other gray queue change, and request
		 * to have a minimum of sections before stealing, to keep consistency.
		 */
		mono_atomic_inc_i32 (&queue->num_sections);
	} else {
		queue->num_sections++;
	}
}
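
/*
 * Return a floating section's memory to the internal allocator. The section must no
 * longer be linked into any queue or free list.
 */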
void
sgen_gray_object_free_queue_section (GrayQueueSection *section)
{
	HEAVY_STAT (stat_gray_queue_section_free ++);

	STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_FLOATING, GRAY_QUEUE_SECTION_STATE_FREED);
	sgen_free_internal (section, INTERNAL_MEM_GRAY_QUEUE);
}
/*
 * The following two functions are called in the inner loops of the
 * collector, so they need to be as fast as possible. We have macros
 * for them in sgen-gc.h.
 */
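
/*
 * Slow-path enqueue: push an (object, descriptor) pair onto the gray queue. When the
 * current head section is full (or the queue has no section yet), a new head section
 * is allocated first.
 */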
void
sgen_gray_object_enqueue (SgenGrayQueue *queue, GCObject *obj, SgenDescriptor desc, gboolean is_parallel)
{
	GrayQueueEntry entry = SGEN_GRAY_QUEUE_ENTRY (obj, desc);

	HEAVY_STAT (stat_gray_queue_enqueue_slow_path ++);

	SGEN_ASSERT (9, obj, "enqueueing a null object");
	//sgen_check_objref (obj);

#ifdef SGEN_CHECK_GRAY_OBJECT_ENQUEUE
	if (queue->enqueue_check_func)
		queue->enqueue_check_func (obj);
#endif

	if (G_UNLIKELY (!queue->first || queue->cursor == GRAY_LAST_CURSOR_POSITION (queue->first))) {
		if (queue->first) {
			/*
			 * We don't actively update the section size with each push/pop. For the first
			 * section we determine the size from the cursor position. For the rest of the
			 * sections we need to have the size set.
			 */
			queue->first->size = SGEN_GRAY_QUEUE_SECTION_SIZE;
		}

		sgen_gray_object_alloc_queue_section (queue, is_parallel);
	}
	STATE_ASSERT (queue->first, GRAY_QUEUE_SECTION_STATE_ENQUEUED);
	SGEN_ASSERT (9, queue->cursor <= GRAY_LAST_CURSOR_POSITION (queue->first), "gray queue %p overflow, first %p, cursor %p", queue, queue->first, queue->cursor);
	*++queue->cursor = entry;

#ifdef SGEN_HEAVY_BINARY_PROTOCOL
	sgen_binary_protocol_gray_enqueue (queue, queue->cursor, obj);
#endif
}
/*
 * We attempt to spread the objects in the gray queue across a number
 * of sections. If the queue has more sections, then it's already spread;
 * if it doesn't have enough sections, then we allocate as many as we can.
 */
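/*
 * Illustrative example (the numbers are made up): with 1003 queued entries and
 * num_sections = 4, num_sections_final is 4 and num_entries_per_section is
 * 1003 / 4 = 250. The division remainder stays in the later sections, which is
 * why sections at the end come out fuller.
 */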
void
sgen_gray_object_spread (SgenGrayQueue *queue, int num_sections)
{
	GrayQueueSection *section_start, *section_end;
	int total_entries = 0, num_entries_per_section;
	int num_sections_final;

	if (queue->num_sections >= num_sections)
		return;

	if (!queue->first)
		return;

	/* Compute number of elements in the gray queue */
	queue->first->size = queue->cursor - queue->first->entries + 1;
	total_entries = queue->first->size;
	for (section_start = queue->first->next; section_start != NULL; section_start = section_start->next) {
		SGEN_ASSERT (0, section_start->size == SGEN_GRAY_QUEUE_SECTION_SIZE, "We expect all sections aside from the first one to be full");
		total_entries += section_start->size;
	}

	/* Compute how many sections we should have and elements per section */
	num_sections_final = (total_entries > num_sections) ? num_sections : total_entries;
	num_entries_per_section = total_entries / num_sections_final;

	/* Allocate all needed sections */
	while (queue->num_sections < num_sections_final)
		sgen_gray_object_alloc_queue_section (queue, TRUE);

	/* Spread out the elements in the sections. By design, sections at the end are fuller. */
	section_start = queue->first;
	section_end = queue->last;
	while (section_start != section_end) {
		/* We move entries from end to start, until they meet */
		while (section_start->size < num_entries_per_section) {
			GrayQueueEntry entry;
			if (section_end->size <= num_entries_per_section) {
				section_end = section_end->prev;
				if (section_end == section_start)
					break;
			}
			if (section_end->size <= num_entries_per_section)
				break;

			section_end->size--;
			entry = section_end->entries [section_end->size];
			section_start->entries [section_start->size] = entry;
			section_start->size++;
		}
		section_start = section_start->next;
	}

	queue->cursor = queue->first->entries + queue->first->size - 1;
	queue->num_sections = num_sections_final;
}
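
/*
 * Slow-path dequeue: pop the entry at the cursor. When the head section runs empty it
 * is unlinked and put on the free list; in parallel mode this may have to synchronize
 * with a stealing worker via num_sections and the steal mutex.
 */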
GrayQueueEntry
sgen_gray_object_dequeue (SgenGrayQueue *queue, gboolean is_parallel)
{
	GrayQueueEntry entry;

	HEAVY_STAT (stat_gray_queue_dequeue_slow_path ++);

	if (sgen_gray_object_queue_is_empty (queue)) {
		entry.obj = NULL;
		return entry;
	}

	STATE_ASSERT (queue->first, GRAY_QUEUE_SECTION_STATE_ENQUEUED);
	SGEN_ASSERT (9, queue->cursor >= GRAY_FIRST_CURSOR_POSITION (queue->first), "gray queue %p underflow", queue);

	entry = *queue->cursor--;

#ifdef SGEN_HEAVY_BINARY_PROTOCOL
	sgen_binary_protocol_gray_dequeue (queue, queue->cursor + 1, entry.obj);
#endif

	if (G_UNLIKELY (queue->cursor < GRAY_FIRST_CURSOR_POSITION (queue->first))) {
		GrayQueueSection *section;
		gint32 old_num_sections = 0;

		if (is_parallel)
			old_num_sections = mono_atomic_dec_i32 (&queue->num_sections);
		else
			queue->num_sections--;

		if (is_parallel && old_num_sections <= 0) {
			mono_os_mutex_lock (&queue->steal_mutex);
		}

		section = queue->first;
		queue->first = section->next;
		if (queue->first) {
			queue->first->prev = NULL;
		} else {
			queue->last = NULL;
			SGEN_ASSERT (0, !old_num_sections, "Why do we have an inconsistent number of sections?");
		}
		section->next = queue->free_list;

		STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_ENQUEUED, GRAY_QUEUE_SECTION_STATE_FREE_LIST);

		queue->free_list = section;
		queue->cursor = queue->first ? queue->first->entries + queue->first->size - 1 : NULL;

		if (is_parallel && old_num_sections <= 0) {
			mono_os_mutex_unlock (&queue->steal_mutex);
		}
	}

	return entry;
}
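
/*
 * Unlink and return the whole head section, or NULL if the queue is empty. This is used
 * on queues that are never stolen from, so num_sections is updated without atomics.
 */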
GrayQueueSection*
sgen_gray_object_dequeue_section (SgenGrayQueue *queue)
{
	GrayQueueSection *section;

	if (!queue->first)
		return NULL;

	/* We never steal from this queue */
	queue->num_sections--;

	section = queue->first;
	queue->first = section->next;
	if (queue->first)
		queue->first->prev = NULL;
	else
		queue->last = NULL;

	section->next = NULL;
	section->size = queue->cursor - section->entries + 1;

	queue->cursor = queue->first ? queue->first->entries + queue->first->size - 1 : NULL;

	STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_ENQUEUED, GRAY_QUEUE_SECTION_STATE_FLOATING);

	return section;
}
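
/*
 * Called by a worker other than the owner: try to detach the tail section of the queue.
 * Returns NULL if there is nothing safe to steal or if there is contention on the steal
 * mutex. The synchronization protocol is described in the comment inside the function.
 */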
GrayQueueSection*
sgen_gray_object_steal_section (SgenGrayQueue *queue)
{
	gint32 sections_remaining;
	GrayQueueSection *section = NULL;

	/*
	 * Each section push into the queue increments num_sections and each pop decrements it.
	 * There is only one thread accessing the top (the owner) and potentially
	 * multiple workers trying to steal sections from the bottom, so we need
	 * to lock. A num sections decrement from the owner means that the first
	 * section is reserved, while a decrement by the stealer means that the
	 * last section is reserved. If, after we decrement the num sections, we
	 * have at least one more section present, it means we can't race with
	 * the other thread. If this is not the case, the steal end abandons the
	 * pop, setting back the num_sections, while the owner end takes a
	 * lock to make sure we are not racing with the stealer (since the stealer
	 * might have popped an entry and be in the process of updating the entry
	 * that the owner is trying to pop).
	 */

	if (queue->num_sections <= 1)
		return NULL;

	/* Give up if there is contention on the last section */
	if (mono_os_mutex_trylock (&queue->steal_mutex) != 0)
		return NULL;

	sections_remaining = mono_atomic_dec_i32 (&queue->num_sections);
	if (sections_remaining <= 0) {
		/* The section that we tried to steal might be the head of the queue. */
		mono_atomic_inc_i32 (&queue->num_sections);
	} else {
		/* We have reserved the tail section of the queue for ourselves */
		section = queue->last;
		SGEN_ASSERT (0, section, "Why don't we have any sections to steal?");
		SGEN_ASSERT (0, !section->next, "Why aren't we stealing the tail?");
		queue->last = section->prev;
		section->prev = NULL;
		SGEN_ASSERT (0, queue->last, "Why are we stealing the last section?");
		queue->last->next = NULL;

		STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_ENQUEUED, GRAY_QUEUE_SECTION_STATE_FLOATING);
	}

	mono_os_mutex_unlock (&queue->steal_mutex);

	return section;
}
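
/*
 * Push a whole floating section onto the front of the queue and move the cursor to its
 * last entry. The section must be in the floating state (e.g. one previously returned
 * by sgen_gray_object_dequeue_section or sgen_gray_object_steal_section).
 */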
void
sgen_gray_object_enqueue_section (SgenGrayQueue *queue, GrayQueueSection *section, gboolean is_parallel)
{
	STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_FLOATING, GRAY_QUEUE_SECTION_STATE_ENQUEUED);

	if (queue->first)
		queue->first->size = queue->cursor - queue->first->entries + 1;

	section->next = queue->first;
	section->prev = NULL;
	if (queue->first)
		queue->first->prev = section;
	else
		queue->last = section;
	queue->first = section;
	queue->cursor = queue->first->entries + queue->first->size - 1;
#ifdef SGEN_CHECK_GRAY_OBJECT_ENQUEUE
	if (queue->enqueue_check_func) {
		int i;
		for (i = 0; i < section->size; ++i)
			queue->enqueue_check_func (section->entries [i].obj);
	}
#endif
	if (is_parallel) {
		mono_memory_write_barrier ();
		mono_atomic_inc_i32 (&queue->num_sections);
	} else {
		queue->num_sections++;
	}
}
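
/*
 * Keep at most GRAY_QUEUE_LENGTH_LIMIT sections on the free list and release the rest
 * back to the internal allocator.
 */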
static void
sgen_gray_object_queue_trim_free_list (SgenGrayQueue *queue)
{
	GrayQueueSection *section, *next;
	int i = 0;

	for (section = queue->free_list; section && i < GRAY_QUEUE_LENGTH_LIMIT - 1; section = section->next) {
		STATE_ASSERT (section, GRAY_QUEUE_SECTION_STATE_FREE_LIST);
		i++;
	}
	if (!section)
		return;
	while (section->next) {
		next = section->next;
		section->next = next->next;
		STATE_TRANSITION (next, GRAY_QUEUE_SECTION_STATE_FREE_LIST, GRAY_QUEUE_SECTION_STATE_FLOATING);
		sgen_gray_object_free_queue_section (next);
	}
}
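
/*
 * Zero-initialize a gray queue. When reuse_free_list is set, the free list saved by the
 * previous queue's dispose is adopted instead of starting with an empty one.
 */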
void
sgen_gray_object_queue_init (SgenGrayQueue *queue, GrayQueueEnqueueCheckFunc enqueue_check_func, gboolean reuse_free_list)
{
	memset (queue, 0, sizeof (SgenGrayQueue));

#ifdef SGEN_CHECK_GRAY_OBJECT_ENQUEUE
	queue->enqueue_check_func = enqueue_check_func;
#endif

	mono_os_mutex_init (&queue->steal_mutex);

	if (reuse_free_list) {
		queue->free_list = last_gray_queue_free_list;
		last_gray_queue_free_list = NULL;
	}
}
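
/*
 * Dispose an (empty) gray queue: trim its free list and hand what remains over to
 * last_gray_queue_free_list so the next collection's queue can reuse it.
 */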
void
sgen_gray_object_queue_dispose (SgenGrayQueue *queue)
{
	SGEN_ASSERT (0, sgen_gray_object_queue_is_empty (queue), "Why are we disposing a gray queue that's not empty?");

	/* Free the extra sections allocated during the last collection */
	sgen_gray_object_queue_trim_free_list (queue);

	SGEN_ASSERT (0, !last_gray_queue_free_list, "Are we disposing two gray queues after another?");
	last_gray_queue_free_list = queue->free_list;

	mono_os_mutex_destroy (&queue->steal_mutex);

	/* just to make sure */
	memset (queue, 0, sizeof (SgenGrayQueue));
}
void
sgen_gray_object_queue_deinit (SgenGrayQueue *queue)
{
	g_assert (!queue->first);
	while (queue->free_list) {
		GrayQueueSection *next = queue->free_list->next;
		STATE_TRANSITION (queue->free_list, GRAY_QUEUE_SECTION_STATE_FREE_LIST, GRAY_QUEUE_SECTION_STATE_FLOATING);
		sgen_gray_object_free_queue_section (queue->free_list);
		queue->free_list = next;
	}
}
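
/*
 * The section gray queue is optionally locked: its lock is only initialized and taken
 * when the queue was created with locked == TRUE.
 */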
static void
lock_section_queue (SgenSectionGrayQueue *queue)
{
	if (!queue->locked)
		return;

	mono_os_mutex_lock (&queue->lock);
}

static void
unlock_section_queue (SgenSectionGrayQueue *queue)
{
	if (!queue->locked)
		return;

	mono_os_mutex_unlock (&queue->lock);
}
void
sgen_section_gray_queue_init (SgenSectionGrayQueue *queue, gboolean locked, GrayQueueEnqueueCheckFunc enqueue_check_func)
{
	g_assert (sgen_section_gray_queue_is_empty (queue));

	queue->locked = locked;
	if (locked) {
		mono_os_mutex_init_recursive (&queue->lock);
	}

#ifdef SGEN_CHECK_GRAY_OBJECT_ENQUEUE
	queue->enqueue_check_func = enqueue_check_func;
#endif
}
gboolean
sgen_section_gray_queue_is_empty (SgenSectionGrayQueue *queue)
{
	return !queue->first;
}
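
/*
 * Pop the first section off the section gray queue, or return NULL if it is empty.
 * Takes the queue lock if the queue is a locked one.
 */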
GrayQueueSection*
sgen_section_gray_queue_dequeue (SgenSectionGrayQueue *queue)
{
	GrayQueueSection *section;

	lock_section_queue (queue);

	if (queue->first) {
		section = queue->first;
		queue->first = section->next;

		STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_ENQUEUED, GRAY_QUEUE_SECTION_STATE_FLOATING);

		section->next = NULL;
	} else {
		section = NULL;
	}

	unlock_section_queue (queue);

	return section;
}
void
sgen_section_gray_queue_enqueue (SgenSectionGrayQueue *queue, GrayQueueSection *section)
{
	STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_FLOATING, GRAY_QUEUE_SECTION_STATE_ENQUEUED);

	lock_section_queue (queue);

	section->next = queue->first;
	queue->first = section;
#ifdef SGEN_CHECK_GRAY_OBJECT_ENQUEUE
	if (queue->enqueue_check_func) {
		int i;
		for (i = 0; i < section->size; ++i)
			queue->enqueue_check_func (section->entries [i].obj);
	}
#endif

	unlock_section_queue (queue);
}
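
/*
 * Register the heavy-statistics counters for the gray queue (only compiled in with
 * HEAVY_STATISTICS).
 */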
void
sgen_init_gray_queues (void)
{
#ifdef HEAVY_STATISTICS
	mono_counters_register ("Gray Queue alloc section", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_gray_queue_section_alloc);
	mono_counters_register ("Gray Queue free section", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_gray_queue_section_free);
	mono_counters_register ("Gray Queue enqueue fast path", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_gray_queue_enqueue_fast_path);
	mono_counters_register ("Gray Queue dequeue fast path", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_gray_queue_dequeue_fast_path);
	mono_counters_register ("Gray Queue enqueue slow path", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_gray_queue_enqueue_slow_path);
	mono_counters_register ("Gray Queue dequeue slow path", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_gray_queue_dequeue_slow_path);
#endif
}