/**
 * \file
 * Hazard pointer related code.
 *
 * (C) Copyright 2011 Novell, Inc
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
 */
#include <config.h>

#include <string.h>

#include <mono/utils/hazard-pointer.h>
#include <mono/utils/mono-membar.h>
#include <mono/utils/mono-memory-model.h>
#include <mono/utils/monobitset.h>
#include <mono/utils/lock-free-array-queue.h>
#include <mono/utils/atomic.h>
#include <mono/utils/mono-os-mutex.h>
#ifdef SGEN_WITHOUT_MONO
#include <mono/sgen/sgen-gc.h>
#include <mono/sgen/sgen-client.h>
#else
#include <mono/utils/mono-mmap.h>
#include <mono/utils/mono-threads.h>
#include <mono/utils/mono-counters.h>
#endif

#if _MSC_VER
#pragma warning(disable:4312) // FIXME pointer cast to different size
#endif
typedef struct {
	gpointer p;
	MonoHazardousFreeFunc free_func;
} DelayedFreeItem;
/* The hazard table */
#if MONO_SMALL_CONFIG
#define HAZARD_TABLE_MAX_SIZE	256
#define HAZARD_TABLE_OVERFLOW	4
#else
#define HAZARD_TABLE_MAX_SIZE	16384 /* There cannot be more threads than this number. */
#define HAZARD_TABLE_OVERFLOW	64
#endif
static volatile int hazard_table_size = 0;
static MonoThreadHazardPointers * volatile hazard_table = NULL;
static MonoHazardFreeQueueSizeCallback queue_size_cb;

/*
 * Each entry is either 0 or 1, indicating whether that overflow small
 * ID is busy.
 */
static volatile gint32 overflow_busy [HAZARD_TABLE_OVERFLOW];
/* The table where we keep pointers to blocks to be freed but that
   have to wait because they're guarded by a hazard pointer. */
static MonoLockFreeArrayQueue delayed_free_queue = MONO_LOCK_FREE_ARRAY_QUEUE_INIT (sizeof (DelayedFreeItem), MONO_MEM_ACCOUNT_HAZARD_POINTERS);

/* The table for small ID assignment */
static mono_mutex_t small_id_mutex;
static int small_id_next;
static int highest_small_id = -1;
static MonoBitSet *small_id_table;
static int hazardous_pointer_count;
/*
 * Allocate a small thread id.
 *
 * FIXME: The biggest part of this function is very similar to
 * domain_id_alloc() in domain.c and should be merged.
 */
int
mono_thread_small_id_alloc (void)
{
	int i, id = -1;

	mono_os_mutex_lock (&small_id_mutex);

	if (!small_id_table)
		small_id_table = mono_bitset_new (1, 0);

	id = mono_bitset_find_first_unset (small_id_table, small_id_next - 1);
	if (id == -1)
		id = mono_bitset_find_first_unset (small_id_table, -1);

	if (id == -1) {
		MonoBitSet *new_table;
		if (small_id_table->size * 2 >= (1 << 16))
			g_assert_not_reached ();
		new_table = mono_bitset_clone (small_id_table, small_id_table->size * 2);
		id = mono_bitset_find_first_unset (new_table, small_id_table->size - 1);

		mono_bitset_free (small_id_table);
		small_id_table = new_table;
	}

	g_assert (!mono_bitset_test_fast (small_id_table, id));
	mono_bitset_set_fast (small_id_table, id);

	small_id_next++;
	if (small_id_next >= small_id_table->size)
		small_id_next = 0;

	g_assert (id < HAZARD_TABLE_MAX_SIZE);
	if (id >= hazard_table_size) {
#if MONO_SMALL_CONFIG
		hazard_table = g_malloc0 (sizeof (MonoThreadHazardPointers) * HAZARD_TABLE_MAX_SIZE);
		hazard_table_size = HAZARD_TABLE_MAX_SIZE;
#else
		gpointer page_addr;
#if defined(__PASE__)
		/*
		 * HACK: allocating the table with none prot will cause i 7.1
		 * to segfault when accessing or protecting it
		 */
		int table_prot = MONO_MMAP_READ | MONO_MMAP_WRITE;
#else
		int table_prot = MONO_MMAP_NONE;
#endif
		int pagesize = mono_pagesize ();
		int num_pages = (hazard_table_size * sizeof (MonoThreadHazardPointers) + pagesize - 1) / pagesize;

		if (hazard_table == NULL) {
			hazard_table = (MonoThreadHazardPointers*) mono_valloc (NULL,
				sizeof (MonoThreadHazardPointers) * HAZARD_TABLE_MAX_SIZE,
				table_prot, MONO_MEM_ACCOUNT_HAZARD_POINTERS);
		}

		g_assert (hazard_table != NULL);
		page_addr = (guint8*)hazard_table + num_pages * pagesize;

		mono_mprotect (page_addr, pagesize, MONO_MMAP_READ | MONO_MMAP_WRITE);

		++num_pages;
		hazard_table_size = num_pages * pagesize / sizeof (MonoThreadHazardPointers);
#endif
		g_assert (id < hazard_table_size);
		for (i = 0; i < HAZARD_POINTER_COUNT; ++i)
			hazard_table [id].hazard_pointers [i] = NULL;
	}

	if (id > highest_small_id) {
		highest_small_id = id;
		mono_memory_write_barrier ();
	}

	mono_os_mutex_unlock (&small_id_mutex);

	return id;
}
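/*
 * Illustrative sketch (not part of the original file): a thread acquires a
 * small id when it registers with the runtime and releases it on detach.
 * The id indexes that thread's row in hazard_table.
 *
 *   int small_id = mono_thread_small_id_alloc ();
 *   ... thread runs; hazard_table [small_id] holds its hazard pointers ...
 *   mono_thread_small_id_free (small_id);
 */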
void
mono_thread_small_id_free (int id)
{
	/* MonoBitSet operations are not atomic. */
	mono_os_mutex_lock (&small_id_mutex);

	g_assert (id >= 0 && id < small_id_table->size);
	g_assert (mono_bitset_test_fast (small_id_table, id));
	mono_bitset_clear_fast (small_id_table, id);

	mono_os_mutex_unlock (&small_id_mutex);
}
static gboolean
is_pointer_hazardous (gpointer p)
{
	int i, j;
	int highest = highest_small_id;

	g_assert (highest < hazard_table_size);

	for (i = 0; i <= highest; ++i) {
		for (j = 0; j < HAZARD_POINTER_COUNT; ++j) {
			if (hazard_table [i].hazard_pointers [j] == p)
				return TRUE;
			LOAD_LOAD_FENCE;
		}
	}

	return FALSE;
}
MonoThreadHazardPointers*
mono_hazard_pointer_get (void)
{
	int small_id = mono_thread_info_get_small_id ();

	if (small_id < 0) {
		static MonoThreadHazardPointers emerg_hazard_table;
		g_warning ("Thread %p may have been prematurely finalized", (gpointer) (gsize) mono_native_thread_id_get ());
		return &emerg_hazard_table;
	}

	return &hazard_table [small_id];
}
/* Can be called with hp==NULL, in which case it acts as an ordinary
   pointer fetch. It's used that way indirectly from
   mono_jit_info_table_add(), which doesn't have to care about hazards
   because it holds the respective domain lock. */
gpointer
mono_get_hazardous_pointer (gpointer volatile *pp, MonoThreadHazardPointers *hp, int hazard_index)
{
	gpointer p;

	for (;;) {
		/* Get the pointer */
		p = *pp;
		/* If we don't have hazard pointers just return the
		   pointer. */
		if (!hp)
			return p;
		/* Make it hazardous */
		mono_hazard_pointer_set (hp, hazard_index, p);
		/* Check that it's still the same. If not, try
		   again. */
		if (*pp != p) {
			mono_hazard_pointer_clear (hp, hazard_index);
			continue;
		}
		break;
	}

	return p;
}
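/*
 * Illustrative sketch (not part of the original file): how a reader
 * typically uses the protocol above to dereference a shared pointer
 * safely. `shared_node' and `Node' are hypothetical names.
 *
 *   MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();
 *   Node *node = (Node *) mono_get_hazardous_pointer ((gpointer volatile *) &shared_node, hp, 0);
 *   // node cannot be reclaimed while hazard slot 0 points at it
 *   ... read from node ...
 *   mono_hazard_pointer_clear (hp, 0);
 */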
int
mono_hazard_pointer_save_for_signal_handler (void)
{
	int small_id, i;
	MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();
	MonoThreadHazardPointers *hp_overflow;

	for (i = 0; i < HAZARD_POINTER_COUNT; ++i)
		if (hp->hazard_pointers [i])
			goto search;
	return -1;

search:
	for (small_id = 0; small_id < HAZARD_TABLE_OVERFLOW; ++small_id) {
		if (!overflow_busy [small_id])
			break;
	}

	/*
	 * If this assert fails we don't have enough overflow slots.
	 * We should contemplate adding them dynamically. If we can
	 * make mono_thread_small_id_alloc() lock-free we can just
	 * allocate them on-demand.
	 */
	g_assert (small_id < HAZARD_TABLE_OVERFLOW);

	if (mono_atomic_cas_i32 (&overflow_busy [small_id], 1, 0) != 0)
		goto search;

	hp_overflow = &hazard_table [small_id];

	for (i = 0; i < HAZARD_POINTER_COUNT; ++i)
		g_assert (!hp_overflow->hazard_pointers [i]);
	*hp_overflow = *hp;

	mono_memory_write_barrier ();

	memset (hp, 0, sizeof (MonoThreadHazardPointers));

	return small_id;
}
void
mono_hazard_pointer_restore_for_signal_handler (int small_id)
{
	MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();
	MonoThreadHazardPointers *hp_overflow;
	int i;

	if (small_id < 0)
		return;

	g_assert (small_id < HAZARD_TABLE_OVERFLOW);
	g_assert (overflow_busy [small_id]);

	for (i = 0; i < HAZARD_POINTER_COUNT; ++i)
		g_assert (!hp->hazard_pointers [i]);

	hp_overflow = &hazard_table [small_id];

	*hp = *hp_overflow;

	mono_memory_write_barrier ();

	memset (hp_overflow, 0, sizeof (MonoThreadHazardPointers));

	mono_memory_write_barrier ();

	overflow_busy [small_id] = 0;
}
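/*
 * Illustrative sketch (not part of the original file): a signal handler
 * brackets its own hazard pointer use with the save/restore pair above so
 * it does not clobber the interrupted thread's slots. `handle_signal_body'
 * is a hypothetical helper.
 *
 *   int saved = mono_hazard_pointer_save_for_signal_handler ();
 *   handle_signal_body (); // may use mono_hazard_pointer_get () freely
 *   mono_hazard_pointer_restore_for_signal_handler (saved);
 */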
/**
 * mono_thread_hazardous_try_free:
 * \param p the pointer to free
 * \param free_func the function that can free the pointer
 *
 * If \p p is not a hazardous pointer it will be immediately freed by calling \p free_func.
 * Otherwise it will be queued for later.
 *
 * Use this function if \p free_func can ALWAYS be called in the context where this function is being called.
 *
 * This function doesn't pump the free queue, so try to accommodate a call at an appropriate time.
 * See mono_thread_hazardous_try_free_some for when it's appropriate.
 *
 * \returns TRUE if \p p was freed or FALSE if it was queued.
 */
gboolean
mono_thread_hazardous_try_free (gpointer p, MonoHazardousFreeFunc free_func)
{
	if (!is_pointer_hazardous (p)) {
		free_func (p);
		return TRUE;
	} else {
		mono_thread_hazardous_queue_free (p, free_func);
		return FALSE;
	}
}
/**
 * mono_thread_hazardous_queue_free:
 * \param p the pointer to free
 * \param free_func the function that can free the pointer
 * Queue \p p to be freed later. \p p will be freed once the hazard free queue is pumped.
 *
 * This function doesn't pump the free queue, so try to accommodate a call at an appropriate time.
 * See \c mono_thread_hazardous_try_free_some for when it's appropriate.
 */
void
mono_thread_hazardous_queue_free (gpointer p, MonoHazardousFreeFunc free_func)
{
	DelayedFreeItem item = { p, free_func };

	mono_atomic_inc_i32 (&hazardous_pointer_count);

	mono_lock_free_array_queue_push (&delayed_free_queue, &item);

	guint32 queue_size = delayed_free_queue.num_used_entries;
	if (queue_size && queue_size_cb)
		queue_size_cb (queue_size);
}
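/*
 * Illustrative sketch (not part of the original file): retiring a node that
 * other threads may still hold hazardous references to. `node' and
 * `node_free' are hypothetical names; `node_free' is a MonoHazardousFreeFunc.
 *
 *   mono_thread_hazardous_try_free (node, node_free); // frees now if safe, else queues
 *   ...
 *   mono_thread_hazardous_try_free_some ();           // pump the queue at a safe point
 */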
void
mono_hazard_pointer_install_free_queue_size_callback (MonoHazardFreeQueueSizeCallback cb)
{
	queue_size_cb = cb;
}
static void
try_free_delayed_free_items (guint32 limit)
{
	GArray *hazardous = NULL;
	DelayedFreeItem item;
	guint32 freed = 0;

	// Free all the items we can and re-add the ones we can't to the queue.
	while (mono_lock_free_array_queue_pop (&delayed_free_queue, &item)) {
		if (is_pointer_hazardous (item.p)) {
			if (!hazardous)
				hazardous = g_array_sized_new (FALSE, FALSE, sizeof (DelayedFreeItem), delayed_free_queue.num_used_entries);

			g_array_append_val (hazardous, item);
			continue;
		}

		item.free_func (item.p);
		freed++;

		if (limit && freed == limit)
			break;
	}

	if (hazardous) {
		for (gint i = 0; i < hazardous->len; i++)
			mono_lock_free_array_queue_push (&delayed_free_queue, &g_array_index (hazardous, DelayedFreeItem, i));

		g_array_free (hazardous, TRUE);
	}
}
void
mono_thread_hazardous_try_free_all (void)
{
	try_free_delayed_free_items (0);
}

void
mono_thread_hazardous_try_free_some (void)
{
	try_free_delayed_free_items (10);
}
void
mono_thread_smr_init (void)
{
	int i;

	mono_os_mutex_init (&small_id_mutex);
	mono_counters_register ("Hazardous pointers", MONO_COUNTER_JIT | MONO_COUNTER_INT, &hazardous_pointer_count);

	for (i = 0; i < HAZARD_TABLE_OVERFLOW; ++i) {
		int small_id = mono_thread_small_id_alloc ();
		g_assert (small_id == i);
	}
}
void
mono_thread_smr_cleanup (void)
{
	mono_thread_hazardous_try_free_all ();

	mono_lock_free_array_queue_cleanup (&delayed_free_queue);

	/* FIXME: can't we release the small id table here? */
}