[WinForms] Fix #18506 ActiveTracker, do not propagate message to control when it...
[mono-project.git] / mono / sgen / sgen-pinning.c
blob6776c576372e1544f4c6345e1b4c10a4c71d685e
/**
 * \file
 * The pin queue.
 *
 * Copyright 2001-2003 Ximian, Inc
 * Copyright 2003-2010 Novell, Inc.
 * Copyright (C) 2012 Xamarin Inc
 *
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
 */
12 #include "config.h"
13 #ifdef HAVE_SGEN_GC
15 #include <string.h>
17 #include "mono/sgen/sgen-gc.h"
18 #include "mono/sgen/sgen-pinning.h"
19 #include "mono/sgen/sgen-protocol.h"
20 #include "mono/sgen/sgen-pointer-queue.h"
21 #include "mono/sgen/sgen-client.h"
/* Addresses staged for pinning during the current collection. */
static SgenPointerQueue pin_queue;
/* Number of pin queue entries at the end of the last collection. */
static size_t last_num_pinned = 0;
/*
 * While we hold the pin_queue_mutex, all objects in pin_queue_objs will
 * stay pinned, which means they can't move, therefore they can be scanned.
 */
static SgenPointerQueue pin_queue_objs;
static mono_mutex_t pin_queue_mutex;

/* Duplicate-suppression filter for sgen_pin_stage_ptr (); size must be a power of two. */
#define PIN_HASH_SIZE 1024
static void *pin_hash_filter [PIN_HASH_SIZE];
35 void
36 sgen_pinning_init (void)
38 mono_os_mutex_init (&pin_queue_mutex);
41 void
42 sgen_init_pinning (void)
44 memset (pin_hash_filter, 0, sizeof (pin_hash_filter));
45 pin_queue.mem_type = INTERNAL_MEM_PIN_QUEUE;
46 sgen_client_pinning_start ();
49 void
50 sgen_init_pinning_for_conc (void)
52 mono_os_mutex_lock (&pin_queue_mutex);
53 sgen_pointer_queue_clear (&pin_queue_objs);
56 void
57 sgen_finish_pinning (void)
59 last_num_pinned = pin_queue.next_slot;
60 sgen_pointer_queue_clear (&pin_queue);
63 void
64 sgen_finish_pinning_for_conc (void)
66 mono_os_mutex_unlock (&pin_queue_mutex);
69 void
70 sgen_pinning_register_pinned_in_nursery (GCObject *obj)
72 sgen_pointer_queue_add (&pin_queue_objs, obj);
75 void
76 sgen_scan_pin_queue_objects (ScanCopyContext ctx)
78 int i;
79 ScanObjectFunc scan_func = ctx.ops->scan_object;
81 mono_os_mutex_lock (&pin_queue_mutex);
82 for (i = 0; i < pin_queue_objs.next_slot; ++i) {
83 GCObject *obj = (GCObject *)pin_queue_objs.data [i];
84 scan_func (obj, sgen_obj_get_descriptor_safe (obj), ctx.queue);
86 mono_os_mutex_unlock (&pin_queue_mutex);
89 void
90 sgen_pin_stage_ptr (void *ptr)
92 /*very simple multiplicative hash function, tons better than simple and'ng */
93 int hash_idx = ((mword)ptr * 1737350767) & (PIN_HASH_SIZE - 1);
94 if (pin_hash_filter [hash_idx] == ptr)
95 return;
97 pin_hash_filter [hash_idx] = ptr;
99 sgen_pointer_queue_add (&pin_queue, ptr);
102 gboolean
103 sgen_find_optimized_pin_queue_area (void *start, void *end, size_t *first_out, size_t *last_out)
105 size_t first = sgen_pointer_queue_search (&pin_queue, start);
106 size_t last = sgen_pointer_queue_search (&pin_queue, end);
107 SGEN_ASSERT (0, last == pin_queue.next_slot || pin_queue.data [last] >= end, "Pin queue search gone awry");
108 *first_out = first;
109 *last_out = last;
110 return first != last;
113 void**
114 sgen_pinning_get_entry (size_t index)
116 SGEN_ASSERT (0, index <= pin_queue.next_slot, "Pin queue entry out of range");
117 return &pin_queue.data [index];
120 void
121 sgen_find_section_pin_queue_start_end (GCMemSection *section)
123 SGEN_LOG (6, "Pinning from section %p (%p-%p)", section, section->data, section->end_data);
125 sgen_find_optimized_pin_queue_area (section->data, section->end_data,
126 &section->pin_queue_first_entry, &section->pin_queue_last_entry);
128 SGEN_LOG (6, "Found %" G_GSIZE_FORMAT "d pinning addresses in section %p",
129 section->pin_queue_last_entry - section->pin_queue_first_entry, section);
132 /*This will setup the given section for the while pin queue. */
133 void
134 sgen_pinning_setup_section (GCMemSection *section)
136 section->pin_queue_first_entry = 0;
137 section->pin_queue_last_entry = pin_queue.next_slot;
140 void
141 sgen_pinning_trim_queue_to_section (GCMemSection *section)
143 SGEN_ASSERT (0, section->pin_queue_first_entry == 0, "Pin queue trimming assumes the whole pin queue is used by the nursery");
144 pin_queue.next_slot = section->pin_queue_last_entry;
/*
 * This is called when we've run out of memory during a major collection.
 *
 * After collecting potential pin entries and sorting the array, this is what it looks like:
 *
 * +--------------------+---------------------------------------------+--------------------+
 * | major heap entries | nursery entries                             | major heap entries |
 * +--------------------+---------------------------------------------+--------------------+
 *
 * Of course there might not be major heap entries before and/or after the nursery entries,
 * depending on where the major heap sections are in the address space, and whether there
 * were any potential pointers there.
 *
 * When we pin nursery objects, we compact the nursery part of the pin array, which leaves
 * discarded entries after the ones that actually pointed to nursery objects:
 *
 * +--------------------+-----------------+---------------------------+--------------------+
 * | major heap entries | nursery entries | discarded nursery entries | major heap entries |
 * +--------------------+-----------------+---------------------------+--------------------+
 *
 * When, due to being out of memory, we late pin more objects, the pin array looks like
 * this:
 *
 * +--------------------+-----------------+---------------------------+--------------------+--------------+
 * | major heap entries | nursery entries | discarded nursery entries | major heap entries | late entries |
 * +--------------------+-----------------+---------------------------+--------------------+--------------+
 *
 * This function gets rid of the discarded nursery entries by nulling them out. Note that
 * we can late pin objects not only in the nursery but also in the major heap, which happens
 * when evacuation fails.
 */
178 void
179 sgen_pin_queue_clear_discarded_entries (GCMemSection *section, size_t max_pin_slot)
181 void **start = sgen_pinning_get_entry (section->pin_queue_last_entry);
182 void **end = sgen_pinning_get_entry (max_pin_slot);
183 void *addr;
185 for (; start < end; ++start) {
186 addr = *start;
187 if ((char*)addr < section->data || (char*)addr > section->end_data)
188 break;
189 *start = NULL;
193 /* reduce the info in the pin queue, removing duplicate pointers and sorting them */
194 void
195 sgen_optimize_pin_queue (void)
197 sgen_pointer_queue_sort_uniq (&pin_queue);
200 size_t
201 sgen_get_pinned_count (void)
203 return pin_queue.next_slot;
206 void
207 sgen_dump_pin_queue (void)
209 int i;
211 for (i = 0; i < last_num_pinned; ++i) {
212 GCObject *ptr = (GCObject *)pin_queue.data [i];
213 SGEN_LOG (3, "Bastard pinning obj %p (%s), size: %ld", ptr, sgen_client_vtable_get_name (SGEN_LOAD_VTABLE (ptr)), (long)sgen_safe_object_get_size (ptr));
/*
 * Cementing: nursery objects that keep getting pinned are "cemented" so
 * later collections can skip re-discovering them. One fixed-size hash
 * table slot per object; collisions simply fail to cement (best effort).
 */
typedef struct _CementHashEntry CementHashEntry;
struct _CementHashEntry {
	GCObject *obj;
	unsigned int count;
	gboolean forced; /* if it should stay cemented after the finishing pause */
};

static CementHashEntry cement_hash [SGEN_CEMENT_HASH_SIZE];

static gboolean cement_enabled = TRUE;
228 void
229 sgen_cement_init (gboolean enabled)
231 cement_enabled = enabled;
234 void
235 sgen_cement_reset (void)
237 int i;
238 for (i = 0; i < SGEN_CEMENT_HASH_SIZE; i++) {
239 if (cement_hash [i].forced) {
240 cement_hash [i].forced = FALSE;
241 } else {
242 cement_hash [i].obj = NULL;
243 cement_hash [i].count = 0;
246 sgen_binary_protocol_cement_reset ();
251 * The pin_queue should be full and sorted, without entries from the cemented
252 * objects. We traverse the cement hash and check if each object is pinned in
253 * the pin_queue (the pin_queue contains entries between obj and obj+obj_len)
255 void
256 sgen_cement_force_pinned (void)
258 int i;
260 if (!cement_enabled)
261 return;
263 for (i = 0; i < SGEN_CEMENT_HASH_SIZE; i++) {
264 GCObject *obj = cement_hash [i].obj;
265 size_t index;
266 if (!obj)
267 continue;
268 if (cement_hash [i].count < SGEN_CEMENT_THRESHOLD)
269 continue;
270 SGEN_ASSERT (0, !cement_hash [i].forced, "Why do we have a forced cemented object before forcing ?");
272 /* Returns the index of the target or of the first element greater than it */
273 index = sgen_pointer_queue_search (&pin_queue, obj);
274 if (index == pin_queue.next_slot)
275 continue;
276 SGEN_ASSERT (0, pin_queue.data [index] >= (gpointer)obj, "Binary search should return a pointer greater than the search target");
277 if (pin_queue.data [index] < (gpointer)((char*)obj + sgen_safe_object_get_size (obj)))
278 cement_hash [i].forced = TRUE;
282 gboolean
283 sgen_cement_is_forced (GCObject *obj)
285 guint hv = sgen_aligned_addr_hash (obj);
286 int i = SGEN_CEMENT_HASH (hv);
288 SGEN_ASSERT (5, sgen_ptr_in_nursery (obj), "Looking up cementing for non-nursery objects makes no sense");
290 if (!cement_enabled)
291 return FALSE;
293 if (!cement_hash [i].obj)
294 return FALSE;
295 if (cement_hash [i].obj != obj)
296 return FALSE;
298 return cement_hash [i].forced;
301 gboolean
302 sgen_cement_lookup (GCObject *obj)
304 guint hv = sgen_aligned_addr_hash (obj);
305 int i = SGEN_CEMENT_HASH (hv);
307 SGEN_ASSERT (5, sgen_ptr_in_nursery (obj), "Looking up cementing for non-nursery objects makes no sense");
309 if (!cement_enabled)
310 return FALSE;
312 if (!cement_hash [i].obj)
313 return FALSE;
314 if (cement_hash [i].obj != obj)
315 return FALSE;
317 return cement_hash [i].count >= SGEN_CEMENT_THRESHOLD;
/*
 * Claim (or bump) the cement slot for `obj` and report whether it is already
 * cemented. Returns TRUE only when the object had already crossed the
 * cementing threshold before this call; a FALSE return covers both "not yet
 * cemented" and "slot taken by another object".
 *
 * Lock-free: the slot is claimed with a CAS and the count bumped with an
 * atomic increment, so exactly one caller observes the transition to
 * SGEN_CEMENT_THRESHOLD and performs the cementing side effects.
 * NOTE(review): presumably callable from multiple workers during a
 * collection pause — the atomics suggest so; confirm against callers.
 */
gboolean
sgen_cement_lookup_or_register (GCObject *obj)
{
	guint hv;
	int i;
	CementHashEntry *hash = cement_hash;

	if (!cement_enabled)
		return FALSE;

	hv = sgen_aligned_addr_hash (obj);
	i = SGEN_CEMENT_HASH (hv);

	SGEN_ASSERT (5, sgen_ptr_in_nursery (obj), "Can only cement pointers to nursery objects");

	if (!hash [i].obj) {
		GCObject *old_obj;
		/* Race to claim the empty slot; losing to `obj` itself is fine. */
		old_obj = (GCObject*)mono_atomic_cas_ptr ((gpointer*)&hash [i].obj, obj, NULL);
		/* Check if the slot was occupied by some other object */
		if (old_obj != NULL && old_obj != obj)
			return FALSE;
	} else if (hash [i].obj != obj) {
		/* Slot collision: some other object owns this bucket. */
		return FALSE;
	}

	/* Already over the threshold: report cemented without another bump. */
	if (hash [i].count >= SGEN_CEMENT_THRESHOLD)
		return TRUE;

	/* Exactly one thread sees the increment land on the threshold and
	 * performs the actual cementing. */
	if (mono_atomic_inc_i32 ((gint32*)&hash [i].count) == SGEN_CEMENT_THRESHOLD) {
		SGEN_ASSERT (9, sgen_get_current_collection_generation () >= 0, "We can only cement objects when we're in a collection pause.");
		SGEN_ASSERT (9, SGEN_OBJECT_IS_PINNED (obj), "Can only cement pinned objects");
		SGEN_CEMENT_OBJECT (obj);

		sgen_binary_protocol_cement (obj, (gpointer)SGEN_LOAD_VTABLE (obj),
				(int)sgen_safe_object_get_size (obj));
	}

	return FALSE;
}
360 static void
361 pin_from_hash (CementHashEntry *hash, gboolean has_been_reset)
363 int i;
364 for (i = 0; i < SGEN_CEMENT_HASH_SIZE; ++i) {
365 if (!hash [i].count)
366 continue;
368 if (has_been_reset)
369 SGEN_ASSERT (5, hash [i].count >= SGEN_CEMENT_THRESHOLD, "Cementing hash inconsistent");
371 sgen_client_pinned_cemented_object (hash [i].obj);
372 sgen_pin_stage_ptr (hash [i].obj);
373 sgen_binary_protocol_cement_stage (hash [i].obj);
374 /* FIXME: do pin stats if enabled */
376 SGEN_CEMENT_OBJECT (hash [i].obj);
380 void
381 sgen_pin_cemented_objects (void)
383 pin_from_hash (cement_hash, TRUE);
386 void
387 sgen_cement_clear_below_threshold (void)
389 int i;
390 for (i = 0; i < SGEN_CEMENT_HASH_SIZE; ++i) {
391 if (cement_hash [i].count < SGEN_CEMENT_THRESHOLD) {
392 cement_hash [i].obj = NULL;
393 cement_hash [i].count = 0;
398 #endif /* HAVE_SGEN_GC */