/*
 * hazard-pointer.c: Hazard pointer related code.
 *
 * (C) Copyright 2011 Novell, Inc
 */

#include <config.h>

#include <mono/metadata/class-internals.h>
#include <mono/utils/hazard-pointer.h>
#include <mono/utils/mono-membar.h>
#include <mono/utils/mono-memory-model.h>
#include <mono/utils/mono-mmap.h>
#include <mono/utils/monobitset.h>
#include <mono/utils/mono-threads.h>
#include <mono/utils/lock-free-array-queue.h>
#include <mono/io-layer/io-layer.h>
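
/*
 * Hazard pointers are Maged Michael's safe memory reclamation scheme
 * for lock-free data structures: each thread owns a small, fixed set
 * of slots into which it publishes any pointer it is about to
 * dereference.  A block may only be freed once no thread's slot holds
 * its address; until then the free is deferred.  The code below
 * provides the per-thread slots (indexed by a "small id"), the scan
 * that decides whether a pointer is still hazardous, and the queue of
 * deferred frees.
 */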

typedef struct {
	gpointer p;
	MonoHazardousFreeFunc free_func;
	gboolean might_lock;
} DelayedFreeItem;

/* The hazard table */
#if MONO_SMALL_CONFIG
#define HAZARD_TABLE_MAX_SIZE	256
#else
#define HAZARD_TABLE_MAX_SIZE	16384 /* There cannot be more threads than this number. */
#endif

static volatile int hazard_table_size = 0;
static MonoThreadHazardPointers * volatile hazard_table = NULL;

/* The table where we keep pointers to blocks to be freed but that
   have to wait because they're guarded by a hazard pointer. */
static MonoLockFreeArrayQueue delayed_free_queue = MONO_LOCK_FREE_ARRAY_QUEUE_INIT (sizeof (DelayedFreeItem));
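
/* The queue itself is lock-free, so retiring a pointer is safe even
   from contexts that must not block; whether the eventual free_func
   may take locks is tracked per item (see DelayedFreeItem.might_lock). */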

/* The table for small ID assignment */
static CRITICAL_SECTION small_id_mutex;
static int small_id_next;
static int highest_small_id = -1;
static MonoBitSet *small_id_table;

/*
 * Allocate a small thread id.
 *
 * FIXME: The biggest part of this function is very similar to
 * domain_id_alloc() in domain.c and should be merged.
 */
int
mono_thread_small_id_alloc (void)
{
	int i, id = -1;

	EnterCriticalSection (&small_id_mutex);

	if (!small_id_table)
		small_id_table = mono_bitset_new (1, 0);

	id = mono_bitset_find_first_unset (small_id_table, small_id_next);
	if (id == -1)
		id = mono_bitset_find_first_unset (small_id_table, -1);

	if (id == -1) {
		/* All ids are in use: double the bitset (hard cap of 2^16 ids). */
		MonoBitSet *new_table;
		if (small_id_table->size * 2 >= (1 << 16))
			g_assert_not_reached ();
		new_table = mono_bitset_clone (small_id_table, small_id_table->size * 2);
		/* The first unset bit is in the newly added half. */
		id = mono_bitset_find_first_unset (new_table, small_id_table->size - 1);

		mono_bitset_free (small_id_table);
		small_id_table = new_table;
	}

	g_assert (!mono_bitset_test_fast (small_id_table, id));
	mono_bitset_set_fast (small_id_table, id);

	small_id_next++;
	if (small_id_next >= small_id_table->size)
		small_id_next = 0;

	g_assert (id < HAZARD_TABLE_MAX_SIZE);
	if (id >= hazard_table_size) {
#if MONO_SMALL_CONFIG
		/* The table is small enough to allocate in full right away. */
		hazard_table = g_malloc0 (sizeof (MonoThreadHazardPointers) * HAZARD_TABLE_MAX_SIZE);
		hazard_table_size = HAZARD_TABLE_MAX_SIZE;
#else
		gpointer page_addr;
		int pagesize = mono_pagesize ();
		int num_pages = (hazard_table_size * sizeof (MonoThreadHazardPointers) + pagesize - 1) / pagesize;

		if (hazard_table == NULL) {
			/* Reserve address space for the whole table up
			   front; pages are committed on demand below. */
			hazard_table = mono_valloc (NULL,
				sizeof (MonoThreadHazardPointers) * HAZARD_TABLE_MAX_SIZE,
				MONO_MMAP_NONE);
		}

		g_assert (hazard_table != NULL);
		page_addr = (guint8*)hazard_table + num_pages * pagesize;

		/* Commit the next page of the table. */
		mono_mprotect (page_addr, pagesize, MONO_MMAP_READ | MONO_MMAP_WRITE);

		++num_pages;
		hazard_table_size = num_pages * pagesize / sizeof (MonoThreadHazardPointers);
#endif
		g_assert (id < hazard_table_size);
		for (i = 0; i < HAZARD_POINTER_COUNT; ++i)
			hazard_table [id].hazard_pointers [i] = NULL;
	}

	if (id > highest_small_id) {
		highest_small_id = id;
		mono_memory_write_barrier ();
	}

	LeaveCriticalSection (&small_id_mutex);

	return id;
}
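
/* Design note: the hazard table is reserved at its maximum size up
   front and only committed page by page.  Growing it in place (e.g.
   via realloc) would not be safe, because is_pointer_hazardous()
   reads hazard_table without taking small_id_mutex, so rows must
   never move. */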

void
mono_thread_small_id_free (int id)
{
	/* MonoBitSet operations are not atomic. */
	EnterCriticalSection (&small_id_mutex);

	g_assert (id >= 0 && id < small_id_table->size);
	g_assert (mono_bitset_test_fast (small_id_table, id));
	mono_bitset_clear_fast (small_id_table, id);

	LeaveCriticalSection (&small_id_mutex);
}

/* Scan the hazard pointer slots of every live thread; return TRUE if
   any of them currently holds P. */
static gboolean
is_pointer_hazardous (gpointer p)
{
	int i, j;
	int highest = highest_small_id;

	g_assert (highest < hazard_table_size);

	for (i = 0; i <= highest; ++i) {
		for (j = 0; j < HAZARD_POINTER_COUNT; ++j) {
			if (hazard_table [i].hazard_pointers [j] == p)
				return TRUE;
			LOAD_LOAD_FENCE;
		}
	}

	return FALSE;
}

MonoThreadHazardPointers*
mono_hazard_pointer_get (void)
{
	int small_id = mono_thread_info_get_small_id ();

	if (small_id < 0) {
		static MonoThreadHazardPointers emerg_hazard_table;
		g_warning ("Thread %p may have been prematurely finalized", (gpointer)mono_native_thread_id_get ());
		return &emerg_hazard_table;
	}

	return &hazard_table [small_id];
}

/* Can be called with hp==NULL, in which case it acts as an ordinary
   pointer fetch.  It's used that way indirectly from
   mono_jit_info_table_add(), which doesn't have to care about hazards
   because it holds the respective domain lock. */
gpointer
get_hazardous_pointer (gpointer volatile *pp, MonoThreadHazardPointers *hp, int hazard_index)
{
	gpointer p;

	for (;;) {
		/* Get the pointer */
		p = *pp;
		/* If we don't have hazard pointers just return the
		   pointer. */
		if (!hp)
			return p;

		/* Make it hazardous */
		mono_hazard_pointer_set (hp, hazard_index, p);

		/* Check that it's still the same.  If not, try
		   again. */
		if (*pp != p) {
			mono_hazard_pointer_clear (hp, hazard_index);
			continue;
		}

		break;
	}

	return p;
}
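
/*
 * Typical reader-side pattern (an illustrative sketch; `table_root'
 * is hypothetical and not part of this file):
 *
 *	static gpointer volatile table_root;	(hypothetical shared root)
 *
 *	MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();
 *	gpointer table = get_hazardous_pointer (&table_root, hp, 0);
 *	... read through `table'; it cannot be freed while slot 0 holds it ...
 *	mono_hazard_pointer_clear (hp, 0);
 */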

/* Pop one item off the delayed-free queue and free it, unless its
   pointer is still hazardous, or its free function might take a lock
   while we are in a lock-free context; in those cases push the item
   back and return FALSE. */
static gboolean
try_free_delayed_free_item (gboolean lock_free_context)
{
	DelayedFreeItem item;
	gboolean popped = mono_lock_free_array_queue_pop (&delayed_free_queue, &item);

	if (!popped)
		return FALSE;

	if ((lock_free_context && item.might_lock) || is_pointer_hazardous (item.p)) {
		mono_lock_free_array_queue_push (&delayed_free_queue, &item);
		return FALSE;
	}

	item.free_func (item.p);

	return TRUE;
}

void
mono_thread_hazardous_free_or_queue (gpointer p, MonoHazardousFreeFunc free_func,
		gboolean free_func_might_lock, gboolean lock_free_context)
{
	int i;

	/* A free function that might lock must never be used from a
	   lock-free context. */
	if (lock_free_context)
		g_assert (!free_func_might_lock);
	if (free_func_might_lock)
		g_assert (!lock_free_context);

	/* First try to free a few entries in the delayed free
	   table. */
	for (i = 0; i < 3; ++i)
		try_free_delayed_free_item (lock_free_context);

	/* Now see if the pointer we're freeing is hazardous.  If it
	   isn't, free it.  Otherwise put it in the delay list. */
	if (is_pointer_hazardous (p)) {
		DelayedFreeItem item = { p, free_func, free_func_might_lock };

		++mono_stats.hazardous_pointer_count;

		mono_lock_free_array_queue_push (&delayed_free_queue, &item);
	} else {
		free_func (p);
	}
}
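
/*
 * Typical writer-side pattern (an illustrative sketch; `unlink_node'
 * and `node' are hypothetical):
 *
 *	Node *node = unlink_node (&list);	(no longer reachable)
 *	mono_thread_hazardous_free_or_queue (node, g_free, FALSE, FALSE);
 *
 * If no reader currently holds `node' in a hazard pointer slot it is
 * freed on the spot; otherwise it sits in delayed_free_queue until a
 * later try_free pass finds it unguarded.
 */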

void
mono_thread_hazardous_try_free_all (void)
{
	while (try_free_delayed_free_item (FALSE))
		;
}

void
mono_thread_hazardous_try_free_some (void)
{
	int i;

	for (i = 0; i < 10; ++i)
		try_free_delayed_free_item (FALSE);
}
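
/* Presumably capped at ten items so that periodic callers can drain
   the queue incrementally without unbounded pauses; use
   mono_thread_hazardous_try_free_all () when a full drain is safe. */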

void
mono_thread_smr_init (void)
{
	InitializeCriticalSection (&small_id_mutex);
}

void
mono_thread_smr_cleanup (void)
{
	mono_thread_hazardous_try_free_all ();

	mono_lock_free_array_queue_cleanup (&delayed_free_queue);

	/* FIXME: can't we release the small id table here? */
}