mono/utils/mono-conc-hashtable.c

/**
 * \file
 * A mostly concurrent hashtable
 *
 * Author:
 *	Rodrigo Kumpera (kumpera@gmail.com)
 *
 * (C) 2014 Xamarin
 */

#include "mono-conc-hashtable.h"
#include <mono/utils/hazard-pointer.h>

/* Configuration knobs. */

#define INITIAL_SIZE 32
#define LOAD_FACTOR 0.75f
#define TOMBSTONE ((gpointer)(ssize_t)-1)

typedef struct {
	gpointer key;
	gpointer value;
} key_value_pair;

typedef struct {
	int table_size;
	key_value_pair *kvs;
} conc_table;

/*
Design notes:

This is a single-writer, lock-free-reader hash table. It is implemented using classical linear open addressing.

Reads are made concurrent by employing hazard pointers to avoid dangling pointers and by carefully coordinating
table access between the writer and readers - the writer stores the value before the key and readers check keys before values.

External locking/synchronization is required for all write operations (see the usage sketch below). Additionally, this
data structure doesn't try to provide any coordination/guarantee of key/value liveness outside of the data structure
itself. This means that a search can see dangling memory if a concurrent thread removes and frees an entry after the
search succeeded.

Deletion is done using tombstones, which increase the number of non-null elements and can lead to slow or infinite
searches, since null keys are the termination condition used by lookup. We handle this by rehashing when the number of
free (null) slots drops below what the load factor allows.

Possible improvements:

Experiment with KVP relocation during lookup, as it would reduce search length. The trick is to coordinate which thread
wins a tombstone and which thread wins the relocation of a given key.
*/

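/*
 * For illustration only - a minimal sketch of the locking contract described above, assuming a
 * caller-provided mutex; the names `cache_lock`, `cache`, `my_key` and `my_value` are hypothetical:
 *
 *     static mono_mutex_t cache_lock;           // external lock, owned by the caller
 *     static MonoConcurrentHashTable *cache;    // created with mono_conc_hashtable_new ()
 *
 *     // Writers (insert/remove/foreach*) must serialize through the external lock.
 *     mono_os_mutex_lock (&cache_lock);
 *     mono_conc_hashtable_insert (cache, my_key, my_value);
 *     mono_os_mutex_unlock (&cache_lock);
 *
 *     // Readers take no lock; lookup is safe against a concurrent, properly locked writer,
 *     // but the caller still owns key/value lifetime (see the liveness caveat above).
 *     gpointer v = mono_conc_hashtable_lookup (cache, my_key);
 */
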
struct _MonoConcurrentHashTable {
	volatile conc_table *table; /* goes to HP0 */
	GHashFunc hash_func;
	GEqualFunc equal_func;
	int element_count; //KVPs + tombstones
	int tombstone_count; //just tombstones
	int overflow_count;
	GDestroyNotify key_destroy_func;
	GDestroyNotify value_destroy_func;
};

static conc_table*
conc_table_new (int size)
{
	conc_table *res = g_new (conc_table, 1);
	res->table_size = size;
	res->kvs = g_new0 (key_value_pair, size);
	return res;
}

static void
conc_table_free (gpointer ptr)
{
	conc_table *table = (conc_table *)ptr;
	g_free (table->kvs);
	g_free (table);
}

static void
conc_table_lf_free (conc_table *table)
{
	mono_thread_hazardous_try_free (table, conc_table_free);
}

/*
A common problem with power-of-two hash tables is that they lead to bad clustering when dealing
with aligned numbers.

The solution here is to mix the hash with bits derived from two primes, which produces a better
spread than using the raw hash alone.
*/
static MONO_ALWAYS_INLINE int
mix_hash (int hash)
{
	return ((hash * 215497) >> 16) ^ (hash * 1823231 + hash);
}

static MONO_ALWAYS_INLINE void
insert_one_local (conc_table *table, GHashFunc hash_func, gpointer key, gpointer value)
{
	key_value_pair *kvs = table->kvs;
	int table_mask = table->table_size - 1;
	int hash = mix_hash (hash_func (key));
	int i = hash & table_mask;

	while (table->kvs [i].key)
		i = (i + 1) & table_mask;

	kvs [i].key = key;
	kvs [i].value = value;
}

/* LOCKING: Must be called holding the caller's external write lock */
static void
rehash_table (MonoConcurrentHashTable *hash_table, int multiplier)
{
	conc_table *old_table = (conc_table*)hash_table->table;
	conc_table *new_table = conc_table_new (old_table->table_size * multiplier);
	key_value_pair *kvs = old_table->kvs;
	int i;

	for (i = 0; i < old_table->table_size; ++i) {
		if (kvs [i].key && kvs [i].key != TOMBSTONE)
			insert_one_local (new_table, hash_table->hash_func, kvs [i].key, kvs [i].value);
	}

	/* Rehashing drops all tombstones, so account for them in the counters
	 * (element_count tracks live KVPs plus tombstones). */
	hash_table->element_count -= hash_table->tombstone_count;
	hash_table->tombstone_count = 0;

	mono_memory_barrier ();
	hash_table->table = new_table;
	hash_table->overflow_count = (int)(new_table->table_size * LOAD_FACTOR);
	conc_table_lf_free (old_table);
}

static void
check_table_size (MonoConcurrentHashTable *hash_table)
{
	if (hash_table->element_count >= hash_table->overflow_count) {
		//if we have more tombstones than live KVPs we rehash to the same size
		if (hash_table->tombstone_count > hash_table->element_count / 2)
			rehash_table (hash_table, 1);
		else
			rehash_table (hash_table, 2);
	}
}

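/*
 * Worked example, using only the constants defined above: with INITIAL_SIZE 32 and LOAD_FACTOR
 * 0.75f, overflow_count starts at 24. Once element_count (live KVPs plus tombstones) reaches 24
 * the table is rehashed: if more than 12 of those entries are tombstones it is rebuilt at the
 * same size, which simply drops the tombstones; otherwise it grows to 64 and the threshold
 * becomes 48.
 */
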
MonoConcurrentHashTable*
mono_conc_hashtable_new (GHashFunc hash_func, GEqualFunc key_equal_func)
{
	MonoConcurrentHashTable *res = g_new0 (MonoConcurrentHashTable, 1);
	res->hash_func = hash_func ? hash_func : g_direct_hash;
	res->equal_func = key_equal_func;
	//	res->equal_func = g_direct_equal;
	res->table = conc_table_new (INITIAL_SIZE);
	res->element_count = 0;
	res->overflow_count = (int)(INITIAL_SIZE * LOAD_FACTOR);
	return res;
}

MonoConcurrentHashTable*
mono_conc_hashtable_new_full (GHashFunc hash_func, GEqualFunc key_equal_func, GDestroyNotify key_destroy_func, GDestroyNotify value_destroy_func)
{
	MonoConcurrentHashTable *res = mono_conc_hashtable_new (hash_func, key_equal_func);
	res->key_destroy_func = key_destroy_func;
	res->value_destroy_func = value_destroy_func;
	return res;
}

void
mono_conc_hashtable_destroy (MonoConcurrentHashTable *hash_table)
{
	if (hash_table->key_destroy_func || hash_table->value_destroy_func) {
		int i;
		conc_table *table = (conc_table*)hash_table->table;
		key_value_pair *kvs = table->kvs;

		for (i = 0; i < table->table_size; ++i) {
			if (kvs [i].key && kvs [i].key != TOMBSTONE) {
				if (hash_table->key_destroy_func)
					(hash_table->key_destroy_func) (kvs [i].key);
				if (hash_table->value_destroy_func)
					(hash_table->value_destroy_func) (kvs [i].value);
			}
		}
	}

	conc_table_free ((gpointer)hash_table->table);
	g_free (hash_table);
}

gpointer
mono_conc_hashtable_lookup (MonoConcurrentHashTable *hash_table, gpointer key)
{
	MonoThreadHazardPointers* hp;
	conc_table *table;
	int hash, i, table_mask;
	key_value_pair *kvs;
	hash = mix_hash (hash_table->hash_func (key));
	hp = mono_hazard_pointer_get ();

retry:
	table = (conc_table *)mono_get_hazardous_pointer ((gpointer volatile*)&hash_table->table, hp, 0);
	table_mask = table->table_size - 1;
	kvs = table->kvs;
	i = hash & table_mask;

	if (G_LIKELY (!hash_table->equal_func)) {
		while (kvs [i].key) {
			if (key == kvs [i].key) {
				gpointer value;
				/* The read of the key must happen before the read of the value */
				mono_memory_barrier ();
				value = kvs [i].value;

				/* We just read a value that is being deleted, try again. */
				if (G_UNLIKELY (!value))
					goto retry;

				mono_hazard_pointer_clear (hp, 0);
				return value;
			}
			i = (i + 1) & table_mask;
		}
	} else {
		GEqualFunc equal = hash_table->equal_func;

		while (kvs [i].key) {
			if (kvs [i].key != TOMBSTONE && equal (key, kvs [i].key)) {
				gpointer value;
				/* The read of the key must happen before the read of the value */
				mono_memory_barrier ();
				value = kvs [i].value;

				/* We just read a value that is being deleted, try again. */
				if (G_UNLIKELY (!value))
					goto retry;

				mono_hazard_pointer_clear (hp, 0);
				return value;
			}
			i = (i + 1) & table_mask;
		}
	}

	/* The table might have expanded and the value is now in the newer table */
	mono_memory_barrier ();
	if (hash_table->table != table)
		goto retry;

	mono_hazard_pointer_clear (hp, 0);
	return NULL;
}

/**
 * mono_conc_hashtable_remove:
 * Remove a value from the hashtable. Requires external locking.
 * \returns the value that was associated with \p key, or NULL if the key was not present
 */
gpointer
mono_conc_hashtable_remove (MonoConcurrentHashTable *hash_table, gpointer key)
{
	conc_table *table;
	key_value_pair *kvs;
	int hash, i, table_mask;

	g_assert (key != NULL && key != TOMBSTONE);

	hash = mix_hash (hash_table->hash_func (key));

	table = (conc_table*)hash_table->table;
	kvs = table->kvs;
	table_mask = table->table_size - 1;
	i = hash & table_mask;

	if (!hash_table->equal_func) {
		for (;;) {
			if (!kvs [i].key) {
				return NULL; /*key not found*/
			}

			if (key == kvs [i].key) {
				gpointer value = kvs [i].value;
				kvs [i].value = NULL;
				mono_memory_barrier ();
				kvs [i].key = TOMBSTONE;
				++hash_table->tombstone_count;

				if (hash_table->key_destroy_func != NULL)
					(*hash_table->key_destroy_func) (key);
				if (hash_table->value_destroy_func != NULL)
					(*hash_table->value_destroy_func) (value);

				check_table_size (hash_table);
				return value;
			}
			i = (i + 1) & table_mask;
		}
	} else {
		GEqualFunc equal = hash_table->equal_func;
		for (;;) {
			if (!kvs [i].key) {
				return NULL; /*key not found*/
			}

			if (kvs [i].key != TOMBSTONE && equal (key, kvs [i].key)) {
				gpointer old_key = kvs [i].key;
				gpointer value = kvs [i].value;
				kvs [i].value = NULL;
				mono_memory_barrier ();
				kvs [i].key = TOMBSTONE;
				++hash_table->tombstone_count;

				if (hash_table->key_destroy_func != NULL)
					(*hash_table->key_destroy_func) (old_key);
				if (hash_table->value_destroy_func != NULL)
					(*hash_table->value_destroy_func) (value);

				check_table_size (hash_table);
				return value;
			}
			i = (i + 1) & table_mask;
		}
	}
}

/**
 * mono_conc_hashtable_insert:
 * Insert a value into the hashtable. Requires external locking.
 * \returns the old value if \p key is already present (the table is left unchanged) or NULL
 */
gpointer
mono_conc_hashtable_insert (MonoConcurrentHashTable *hash_table, gpointer key, gpointer value)
{
	conc_table *table;
	key_value_pair *kvs;
	int hash, i, table_mask;

	g_assert (key != NULL && key != TOMBSTONE);
	g_assert (value != NULL);

	hash = mix_hash (hash_table->hash_func (key));

	check_table_size (hash_table);

	table = (conc_table*)hash_table->table;
	kvs = table->kvs;
	table_mask = table->table_size - 1;
	i = hash & table_mask;

	if (!hash_table->equal_func) {
		for (;;) {
			if (!kvs [i].key || kvs [i].key == TOMBSTONE) {
				gboolean was_tombstone = kvs [i].key == TOMBSTONE;
				kvs [i].value = value;
				/* The write to the value must happen before the write to the key */
				mono_memory_barrier ();
				kvs [i].key = key;
				if (was_tombstone)
					--hash_table->tombstone_count;
				else
					++hash_table->element_count;

				return NULL;
			}
			if (key == kvs [i].key) {
				gpointer old_value = kvs [i].value;
				return old_value;
			}
			i = (i + 1) & table_mask;
		}
	} else {
		GEqualFunc equal = hash_table->equal_func;
		for (;;) {
			if (!kvs [i].key || kvs [i].key == TOMBSTONE) {
				gboolean was_tombstone = kvs [i].key == TOMBSTONE;
				kvs [i].value = value;
				/* The write to the value must happen before the write to the key */
				mono_memory_barrier ();
				kvs [i].key = key;
				if (was_tombstone)
					--hash_table->tombstone_count;
				else
					++hash_table->element_count;
				return NULL;
			}
			if (equal (key, kvs [i].key)) {
				gpointer old_value = kvs [i].value;
				return old_value;
			}
			i = (i + 1) & table_mask;
		}
	}
}

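/*
 * For illustration only (`h`, `k`, `v1`, `v2` are hypothetical): insert never replaces an existing
 * mapping, it just reports it.
 *
 *     // under the caller's external write lock
 *     mono_conc_hashtable_insert (h, k, v1);                  // returns NULL, k -> v1
 *     gpointer old = mono_conc_hashtable_insert (h, k, v2);   // returns v1, k still -> v1
 *     mono_conc_hashtable_remove (h, k);                      // drop the old mapping first...
 *     mono_conc_hashtable_insert (h, k, v2);                  // ...then store the new one
 */
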
/**
 * mono_conc_hashtable_foreach:
 * Calls \p func for each value in the hashtable. Requires external locking.
 */
void
mono_conc_hashtable_foreach (MonoConcurrentHashTable *hash_table, GHFunc func, gpointer userdata)
{
	int i;
	conc_table *table = (conc_table*)hash_table->table;
	key_value_pair *kvs = table->kvs;

	for (i = 0; i < table->table_size; ++i) {
		if (kvs [i].key && kvs [i].key != TOMBSTONE) {
			func (kvs [i].key, kvs [i].value, userdata);
		}
	}
}

/**
 * mono_conc_hashtable_foreach_steal:
 *
 * Calls \p func for each entry in the hashtable; if \p func returns TRUE, the entry is removed from the hashtable. Requires external locking.
 * Same semantics as g_hash_table_foreach_steal.
 */
void
mono_conc_hashtable_foreach_steal (MonoConcurrentHashTable *hash_table, GHRFunc func, gpointer userdata)
{
	int i;
	conc_table *table = (conc_table*)hash_table->table;
	key_value_pair *kvs = table->kvs;

	for (i = 0; i < table->table_size; ++i) {
		if (kvs [i].key && kvs [i].key != TOMBSTONE) {
			if (func (kvs [i].key, kvs [i].value, userdata)) {
				kvs [i].value = NULL;
				mono_memory_barrier ();
				kvs [i].key = TOMBSTONE;
				++hash_table->tombstone_count;
			}
		}
	}

	check_table_size (hash_table);
}

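/*
 * For illustration only - a minimal foreach_steal sketch; `evict_matching_value`, `cache` and
 * `stale_value` are hypothetical. Returning TRUE "steals" the entry: it is tombstoned without
 * running the destroy functions, matching g_hash_table_foreach_steal.
 *
 *     static gboolean
 *     evict_matching_value (gpointer key, gpointer value, gpointer user_data)
 *     {
 *         return value == user_data;
 *     }
 *
 *     // under the caller's external write lock
 *     mono_conc_hashtable_foreach_steal (cache, evict_matching_value, stale_value);
 */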