exp2l: Work around a NetBSD 10.0/i386 bug.
[gnulib.git] / lib / hamt.c
blobc8ce3f26ff09cd5e2c261103e8e21c449cd2da50
1 /* (Persistent) hash array mapped tries.
2 Copyright (C) 2021-2024 Free Software Foundation, Inc.
4 This program is free software: you can redistribute it and/or modify
5 it under the terms of the GNU General Public License as published by
6 the Free Software Foundation, either version 3 of the License, or
7 (at your option) any later version.
9 This program is distributed in the hope that it will be useful,
10 but WITHOUT ANY WARRANTY; without even the implied warranty of
11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 GNU General Public License for more details.
14 You should have received a copy of the GNU General Public License
15 along with this program. If not, see <https://www.gnu.org/licenses/>. */
17 /* Written by Marc Nieper-Wißkirchen <marc@nieper-wisskirchen.de>, 2021. */
19 #include <config.h>
20 #define _GL_HAMT_INLINE _GL_EXTERN_INLINE
21 #include "hamt.h"
23 #include <flexmember.h>
24 #include <inttypes.h>
25 #include <stdlib.h>
26 #include "count-one-bits.h"
27 #include "verify.h"
28 #include "xalloc.h"
/* Reference counters can be shared between different threads if the
   entry they belong to is shared between different threads.
   Operations on them therefore have to be atomic so that no invalid
   state is observable.

   A thread must not modify an entry or its children (!) if its
   reference count implies that the entry is shared by at least two
   hamts.  */
typedef
#if GL_HAMT_THREAD_SAFE
_Atomic
#endif
size_t ref_counter;
44 /***************/
45 /* Entry Types */
46 /***************/
48 /* Leaf nodes are of type element. Non-leaf nodes are either subtries
49 or, if at maximal depth, buckets. The entry type is stored in the
50 lower two bits of the reference counter, whereas reference counters
51 for entries are incremented and decremented in multiples of 4. */
52 enum entry_type
54 element_entry = 0,
55 subtrie_entry = 1,
56 bucket_entry = 2
59 /* Return the type an entry. */
60 static enum entry_type
61 entry_type (const Hamt_entry *entry)
63 return entry->ref_count & 3;
66 /********************/
67 /* Reference Counts */
68 /********************/
70 /* Initialize the reference counter, storing its type. */
71 static void
72 init_ref_counter (ref_counter *counter, enum entry_type type)
74 *counter = 4 + type;
77 /* Increase the reference counter of an entry. */
78 static void
79 inc_ref_counter (ref_counter *counter)
81 *counter += 4;
84 /* Decrease the entry reference counter. Return false if the entry
85 can be deleted. */
86 static bool
87 dec_ref_counter (ref_counter *counter)
89 *counter -= 4;
90 return *counter >= 4;
93 /**************/
94 /* Structures */
95 /**************/
97 /* Different generations of a hamt share a function table. */
98 struct function_table
100 Hamt_hasher *hasher;
101 Hamt_comparator *comparator;
102 Hamt_freer *freer;
103 ref_counter ref_count;
106 /* Different generations of a hamt share subtries. A singleton
107 subtrie is modelled as a single element. */
108 struct subtrie
110 ref_counter ref_count;
111 /* Nodes carry labels from 0 to 31. The i-th bit in MAP is set if
112 the node labelled i is present. */
113 uint32_t map;
114 /* The length of the NODES array is the population count of MAP.
115 The order of the nodes corresponds to the order of the 1-bits in
116 MAP. */
117 Hamt_entry *nodes[FLEXIBLE_ARRAY_MEMBER];
120 /* Buckets are used when different elements have the same hash values. */
121 struct bucket
123 ref_counter ref_counter;
124 size_t elt_count;
125 Hamt_entry *elts[FLEXIBLE_ARRAY_MEMBER];
128 /* A hamt consists of its function table and the root entry. */
129 struct hamt
131 struct function_table *functions;
132 /* The root entry is NULL for an empty HAMT. */
133 Hamt_entry *root;
136 /*******************/
137 /* Function Tables */
138 /*******************/
140 /* Allocate and initialize a function table. */
141 static struct function_table *
142 create_function_table (Hamt_hasher *hasher, Hamt_comparator *comparator,
143 Hamt_freer *freer)
145 struct function_table *functions = XMALLOC (struct function_table);
146 functions->hasher = hasher;
147 functions->comparator = comparator;
148 functions->freer = freer;
149 functions->ref_count = 1;
150 return functions;
153 /* Increment the reference count and return the function table. */
154 static struct function_table *
155 copy_function_table (struct function_table *function_table)
157 ++function_table->ref_count;
158 return function_table;
161 /* Decrease the reference count and free the function table if the
162 reference count drops to zero. */
163 static void
164 free_function_table (struct function_table *function_table)
166 if (--function_table->ref_count)
167 return;
168 free (function_table);
171 /************/
172 /* Elements */
173 /************/
175 /* Return an element's hash. */
176 static size_t
177 hash_element (const struct function_table *functions, const Hamt_entry *elt)
179 return functions->hasher (elt);
182 /* Compare two elements. */
183 static bool
184 compare_elements (const struct function_table *functions,
185 const Hamt_entry *elt1, const Hamt_entry *elt2)
187 return functions->comparator (elt1, elt2);
190 /* Free an element. */
191 static void
192 free_element (const struct function_table *functions, Hamt_entry *elt)
194 if (dec_ref_counter (&elt->ref_count))
195 return;
196 functions->freer (elt);
199 /* Return the initialized element. */
200 _GL_ATTRIBUTE_MAYBE_UNUSED
201 static Hamt_entry *
202 init_element (Hamt_entry *elt)
204 init_ref_counter (&elt->ref_count, element_entry);
205 return elt;
208 /***********/
209 /* Buckets */
210 /***********/
212 /* Allocate a partially initialized bucket with a given number of elements. */
213 static struct bucket *
214 alloc_bucket (size_t elt_count)
216 struct bucket *bucket
217 = xmalloc (FLEXNSIZEOF (struct bucket, elts, elt_count));
218 init_ref_counter (&bucket->ref_counter, bucket_entry);
219 bucket->elt_count = elt_count;
220 return bucket;
223 /***********/
224 /* Entries */
225 /***********/
227 /* Return true if the entry is shared between two or more hamts.
228 Otherwise, return false.
230 This procedure is used for destructive updates. If an entry and
231 all its parents are not shared, it can be updated destructively
232 without effecting other hamts. */
233 static bool
234 is_shared (const Hamt_entry *entry)
236 return entry->ref_count >= 8;
239 /* Calculate and return the number of nodes in a subtrie. */
240 static int
241 trienode_count (const struct subtrie *subtrie)
243 return count_one_bits (subtrie->map); /* In Gnulib, we assume that
244 an integer has at least 32
245 bits. */
248 /* Allocate a partially initialized subtrie with a given number of nodes. */
249 static struct subtrie *
250 alloc_subtrie (int node_count)
252 struct subtrie *subtrie
253 = xmalloc (FLEXNSIZEOF (struct subtrie, nodes, node_count));
254 init_ref_counter (&subtrie->ref_count, subtrie_entry);
255 return subtrie;
258 /* Return a conceptually copy of an entry. */
259 static Hamt_entry *
260 copy_entry (Hamt_entry *entry)
262 inc_ref_counter (&entry->ref_count);
263 return entry;
266 /* Return a new bucket that has the j-th element replaced. */
267 static struct bucket *
268 replace_bucket_element (struct bucket *bucket, int j, Hamt_entry *elt)
270 int n = bucket->elt_count;
271 struct bucket *new_bucket = alloc_bucket (n);
272 for (int k = 0; k < n; ++k)
273 if (k == j)
274 new_bucket->elts[k] = elt;
275 else
276 new_bucket->elts[k] = copy_entry (bucket->elts[k]);
277 return new_bucket;
280 /* Return a new subtrie that has the j-th node replaced. */
281 static struct subtrie *
282 replace_entry (struct subtrie *subtrie, int j, Hamt_entry *entry)
284 int n = trienode_count (subtrie);
285 struct subtrie *new_subtrie = alloc_subtrie (n);
286 new_subtrie->map = subtrie->map;
287 for (int k = 0; k < n; ++k)
288 if (k == j)
289 new_subtrie->nodes[k] = entry;
290 else
291 new_subtrie->nodes[k] = copy_entry (subtrie->nodes[k]);
292 return new_subtrie;
295 /* Return a new subtrie that has an entry labelled i inserted at
296 the j-th position. */
297 static struct subtrie *
298 insert_entry (struct subtrie *subtrie, int i, int j, Hamt_entry *entry)
300 int n = trienode_count (subtrie) + 1;
301 struct subtrie *new_subtrie = alloc_subtrie (n);
302 new_subtrie->map = subtrie->map | (1 << i);
303 for (int k = 0; k < n; ++k)
305 if (k < j)
306 new_subtrie->nodes[k] = copy_entry (subtrie->nodes[k]);
307 else if (k > j)
308 new_subtrie->nodes[k] = copy_entry (subtrie->nodes[k - 1]);
309 else
310 new_subtrie->nodes[k] = entry;
312 return new_subtrie;
315 /* Return a new entry that has the entry labelled i removed from
316 position j. */
317 static Hamt_entry *
318 remove_subtrie_entry (struct subtrie *subtrie, int i, int j)
320 int n = trienode_count (subtrie) - 1;
321 if (n == 1)
323 if (j == 0)
324 return copy_entry (subtrie->nodes[1]);
325 return copy_entry (subtrie->nodes[0]);
327 struct subtrie *new_subtrie = alloc_subtrie (n);
328 new_subtrie->map = subtrie->map & ~(1 << i);
329 for (int k = 0; k < n; ++k)
331 if (k < j)
332 new_subtrie->nodes[k] = copy_entry (subtrie->nodes[k]);
333 else if (k >= j)
334 new_subtrie->nodes[k] = copy_entry (subtrie->nodes[k + 1]);
336 return (Hamt_entry *) new_subtrie;
339 /* Return a new entry that has the entry at position j removed. */
340 static Hamt_entry *
341 remove_bucket_entry (struct bucket *bucket, int j)
343 int n = bucket->elt_count - 1;
344 if (n == 1)
346 if (j == 0)
347 return copy_entry (bucket->elts[1]);
348 return copy_entry (bucket->elts[0]);
350 struct bucket *new_bucket = alloc_bucket (n);
351 for (int k = 0; k < n; ++k)
353 if (k < j)
354 new_bucket->elts[k] = copy_entry (bucket->elts[k]);
355 else if (k >= j)
356 new_bucket->elts[k] = copy_entry (bucket->elts[k + 1]);
358 return (Hamt_entry *) new_bucket;
361 /****************************/
362 /* Creation and Destruction */
363 /****************************/
365 /* Create a new, empty hash array mapped trie. */
366 Hamt *
367 hamt_create (Hamt_hasher *hasher, Hamt_comparator *comparator,
368 Hamt_freer *freer)
370 struct function_table *functions
371 = create_function_table (hasher, comparator, freer);
372 Hamt *hamt = XMALLOC (Hamt);
373 hamt->functions = functions;
374 hamt->root = NULL;
375 return hamt;
378 /* Return a copy of the hamt. */
379 Hamt *
380 hamt_copy (Hamt *hamt)
382 Hamt *new_hamt = XMALLOC (Hamt);
383 new_hamt->functions = copy_function_table (hamt->functions);
384 new_hamt->root = hamt->root == NULL ? NULL : copy_entry (hamt->root);
385 return new_hamt;
388 /* Free a bucket. */
389 static void
390 free_bucket (struct function_table const *functions, struct bucket *bucket)
392 if (dec_ref_counter (&bucket->ref_counter))
393 return;
394 size_t elt_count = bucket->elt_count;
395 Hamt_entry *const *elts = bucket->elts;
396 for (size_t i = 0; i < elt_count; ++i)
397 free_element (functions, elts[i]);
398 free (bucket);
401 /* Forward declaration. */
402 static void free_subtrie (struct function_table const *functions,
403 struct subtrie *subtrie);
405 /* Free an entry. */
406 static void
407 free_entry (struct function_table const *functions, Hamt_entry *entry)
409 switch (entry_type (entry))
411 case element_entry:
412 free_element (functions, entry);
413 break;
414 case subtrie_entry:
415 free_subtrie (functions, (struct subtrie *) entry);
416 break;
417 case bucket_entry:
418 free_bucket (functions, (struct bucket *) entry);
419 break;
420 default:
421 assume (0);
425 /* Free a trie recursively. */
426 static void
427 free_subtrie (struct function_table const *functions, struct subtrie *subtrie)
429 if (dec_ref_counter (&subtrie->ref_count))
430 return;
431 int n = trienode_count (subtrie);
432 Hamt_entry **node_ptr = subtrie->nodes;
433 for (int j = 0; j < n; ++j)
434 free_entry (functions, *node_ptr++);
435 free (subtrie);
438 /* Free a hamt. */
439 void
440 hamt_free (Hamt *hamt)
442 if (hamt->root != NULL)
443 free_entry (hamt->functions, hamt->root);
444 free_function_table (hamt->functions);
445 free (hamt);
448 /**********/
449 /* Lookup */
450 /**********/
452 /* Lookup an element in a bucket. */
453 static Hamt_entry *
454 bucket_lookup (const struct function_table *functions,
455 const struct bucket *bucket, const void *elt)
457 size_t elt_count = bucket->elt_count;
458 Hamt_entry *const *elts = bucket->elts;
459 for (size_t i = 0; i < elt_count; ++i)
461 if (compare_elements (functions, elt, elts[i]))
462 return *elts;
464 return NULL;
467 /* Forward declaration. */
468 static Hamt_entry *entry_lookup (const struct function_table *functions,
469 Hamt_entry *entry,
470 const void *elt, size_t hash);
472 /* Lookup an element in a bucket. */
473 static Hamt_entry *
474 subtrie_lookup (const struct function_table *functions,
475 const struct subtrie *subtrie, const void *elt,
476 size_t hash)
478 uint32_t map = subtrie->map;
479 int i = hash & 31;
481 if (! (map & (1 << i)))
482 return NULL;
484 int j = i == 0 ? 0 : count_one_bits (map << (32 - i));
485 return entry_lookup (functions, subtrie->nodes[j], elt, hash >> 5);
488 /* Lookup an element in an entry. */
489 static Hamt_entry *
490 entry_lookup (const struct function_table *functions, Hamt_entry *entry,
491 const void *elt, size_t hash)
493 switch (entry_type (entry))
495 case element_entry:
496 if (compare_elements (functions, elt, entry))
497 return entry;
498 return NULL;
499 case subtrie_entry:
500 return subtrie_lookup (functions, (struct subtrie *) entry, elt, hash);
501 case bucket_entry:
502 return bucket_lookup (functions, (struct bucket *) entry, elt);
503 default:
504 assume (0);
508 /* If ELT matches an entry in HAMT, return this entry. Otherwise,
509 return NULL. */
510 Hamt_entry *
511 hamt_lookup (const Hamt *hamt, const void *elt)
513 if (hamt->root == NULL)
514 return NULL;
516 return entry_lookup (hamt->functions, hamt->root, elt,
517 hash_element (hamt->functions, elt));
520 /**************************/
521 /* Insertion and Deletion */
522 /**************************/
524 /* Create a bucket populated with two elements. */
525 static struct bucket *
526 create_populated_bucket (Hamt_entry *elt1, Hamt_entry *elt2)
528 struct bucket *bucket = alloc_bucket (2);
529 bucket->elts[0] = elt1;
530 bucket->elts[1] = elt2;
531 return bucket;
534 /* Create a chain of subtrie nodes so that the resulting trie is
535 populated with exactly two elements. */
536 static Hamt_entry *
537 create_populated_subtrie (Hamt_entry *elt1, Hamt_entry *elt2, size_t hash1,
538 size_t hash2, int depth)
540 if (depth >= _GL_HAMT_MAX_DEPTH)
541 return (Hamt_entry *) create_populated_bucket (elt1, elt2);
543 struct subtrie *subtrie;
544 int i1 = hash1 & 31;
545 int i2 = hash2 & 31;
546 if (i1 != i2)
548 subtrie = alloc_subtrie (2);
549 subtrie->map = (1 << i1) | (1 << i2);
550 if (i1 < i2)
552 subtrie->nodes[0] = elt1;
553 subtrie->nodes[1] = elt2;
555 else
557 subtrie->nodes[0] = elt2;
558 subtrie->nodes[1] = elt1;
561 else
563 subtrie = alloc_subtrie (1);
564 subtrie->map = 1 << i1;
565 subtrie->nodes[0]
566 = create_populated_subtrie (elt1, elt2, hash1 >> 5, hash2 >> 5,
567 depth + 1);
569 return (Hamt_entry *) subtrie;
572 /* Insert or replace an element in a bucket. */
573 static struct bucket *
574 bucket_insert (const struct function_table *functions, struct bucket *bucket,
575 Hamt_entry **elt_ptr, bool replace, bool shared)
577 size_t elt_count = bucket->elt_count;
578 Hamt_entry **elts = bucket->elts;
579 for (size_t i = 0; i < elt_count; ++i)
581 if (compare_elements (functions, *elt_ptr, elts[i]))
583 if (replace)
585 if (shared)
587 struct bucket *new_bucket
588 = replace_bucket_element (bucket, i,
589 copy_entry (*elt_ptr));
590 *elt_ptr = elts[i];
591 return new_bucket;
593 free_element (functions, elts[i]);
594 elts[i] = copy_entry (*elt_ptr);
595 return bucket;
597 *elt_ptr = *elt_ptr == elts[i] ? NULL : elts[i];
598 return bucket;
601 struct bucket *new_bucket = alloc_bucket (elt_count + 1);
602 new_bucket->elts[0] = copy_entry (*elt_ptr);
603 for (size_t i = 0; i < elt_count; ++i)
605 new_bucket->elts[i + 1] = copy_entry (bucket->elts[i]);
607 if (replace)
608 *elt_ptr = NULL;
609 return new_bucket;
612 /* Forward declaration. */
613 static Hamt_entry *entry_insert (const struct function_table *functions,
614 Hamt_entry *subtrie, Hamt_entry **elt_ptr,
615 size_t hash, int depth, bool replace,
616 bool shared);
618 /* Insert or replace an element in a subtrie. */
619 static struct subtrie *
620 subtrie_insert (const struct function_table *functions, struct subtrie *subtrie,
621 Hamt_entry **elt_ptr, size_t hash, int depth, bool replace,
622 bool shared)
624 uint32_t map = subtrie->map;
625 int i = hash & 31;
626 int j = i == 0 ? 0 : count_one_bits (map << (32 - i));
627 if (map & (1 << i))
629 Hamt_entry *entry = subtrie->nodes[j];
630 Hamt_entry *new_entry
631 = entry_insert (functions, entry, elt_ptr, hash >> 5, depth + 1,
632 replace, shared);
633 if (new_entry != entry)
635 if (shared)
636 return replace_entry (subtrie, j, new_entry);
637 free_entry (functions, entry);
638 subtrie->nodes[j] = new_entry;
640 return subtrie;
642 Hamt_entry *entry = copy_entry (*elt_ptr);
643 if (replace)
644 *elt_ptr = NULL;
645 return insert_entry (subtrie, i, j, entry);
648 /* Insert or replace an element in an entry.
650 REPLACE is true if we want replace instead of insert semantics.
651 SHARED is false if a destructive update has been requested and none
652 of the parent nodes are shared. If an entry cannot be inserted
653 because the same entry with respect to pointer equality is already
654 present, *ELT_PTR is set to NULL to mark this special case. */
655 static Hamt_entry *
656 entry_insert (const struct function_table *functions, Hamt_entry *entry,
657 Hamt_entry **elt_ptr, size_t hash, int depth, bool replace,
658 bool shared)
660 shared |= is_shared (entry);
661 switch (entry_type (entry))
663 case element_entry:
664 if (compare_elements (functions, *elt_ptr, entry))
666 if (replace)
668 if (shared)
670 Hamt_entry *new_entry = copy_entry (*elt_ptr);
671 *elt_ptr = entry;
672 return new_entry;
674 return copy_entry (*elt_ptr);
676 *elt_ptr = *elt_ptr == entry ? NULL : entry;
677 return entry;
679 Hamt_entry *new_entry = copy_entry (*elt_ptr);
680 if (replace)
681 *elt_ptr = NULL;
682 /* We have to take this shortcut as shifting an integer of N
683 bits by N or more bits triggers undefined behavior.
684 See: https://lists.gnu.org/archive/html/bug-gnulib/2022-04/msg00023.html. */
685 if (depth >= _GL_HAMT_MAX_DEPTH)
686 return (Hamt_entry *) create_populated_bucket (new_entry, copy_entry (entry));
687 return create_populated_subtrie (new_entry, copy_entry (entry), hash,
688 (hash_element (functions, entry)
689 >> (5 * depth)), depth);
690 case subtrie_entry:
691 return (Hamt_entry *)
692 subtrie_insert (functions, (struct subtrie *) entry, elt_ptr, hash,
693 depth, replace, shared);
694 case bucket_entry:
695 return (Hamt_entry *)
696 bucket_insert (functions, (struct bucket *) entry, elt_ptr, replace,
697 shared);
698 default:
699 assume (0);
703 /* Insert or replace an element in the root. */
704 static Hamt_entry *
705 root_insert (const struct function_table *functions, Hamt_entry *root,
706 Hamt_entry **elt_ptr, bool replace, bool shared)
708 if (root == NULL)
709 return copy_entry (*elt_ptr);
711 return entry_insert (functions, root, elt_ptr,
712 hash_element (functions, *elt_ptr), 0, replace, shared);
715 /* If *ELT_PTR matches an element already in HAMT, set *ELT_PTR to the
716 element from the table and return HAMT. Otherwise, insert *ELT_PTR
717 into a copy of the HAMT and return the copy. */
718 Hamt *
719 hamt_insert (Hamt *hamt, Hamt_entry **elt_ptr)
721 Hamt_entry *elt = *elt_ptr;
722 Hamt_entry *new_entry = root_insert (hamt->functions, hamt->root,
723 elt_ptr, false, true);
724 if (*elt_ptr == NULL)
725 *elt_ptr = elt;
727 if (new_entry == hamt->root)
728 return hamt;
730 Hamt *new_hamt = XMALLOC (Hamt);
731 new_hamt->functions = copy_function_table (hamt->functions);
732 new_hamt->root = new_entry;
733 return new_hamt;
736 /* Insert *ELT_PTR into a copy of HAMT and return the copy. If an
737 existing element was replaced, set *ELT_PTR to this element, and to
738 NULL otherwise. */
739 Hamt *
740 hamt_replace (Hamt *hamt, Hamt_entry **elt_ptr)
742 Hamt *new_hamt = XMALLOC (Hamt);
743 new_hamt->functions = copy_function_table (hamt->functions);
744 new_hamt->root = root_insert (hamt->functions, hamt->root, elt_ptr, true,
745 true);
746 return new_hamt;
749 /* Remove an element in a bucket if found. */
750 static Hamt_entry *
751 bucket_remove (const struct function_table *functions, struct bucket *bucket,
752 Hamt_entry **elt_ptr)
754 size_t elt_count = bucket->elt_count;
755 Hamt_entry *const *elts = bucket->elts;
756 for (size_t i = 0; i < elt_count; ++i)
758 if (compare_elements (functions, *elt_ptr, elts[i]))
760 *elt_ptr = elts[i];
761 return remove_bucket_entry (bucket, i);
764 *elt_ptr = NULL;
765 return (Hamt_entry *) bucket;
768 /* Forward declaration. */
769 static Hamt_entry *entry_remove (const struct function_table *functions,
770 Hamt_entry *entry, Hamt_entry **elt_ptr,
771 size_t hash, int depth, bool shared);
773 /* Remove an element in a subtrie if found. */
774 static Hamt_entry *
775 subtrie_remove (const struct function_table *functions, struct subtrie *subtrie,
776 Hamt_entry **elt_ptr, size_t hash, int depth, bool shared)
778 uint32_t map = subtrie->map;
779 int i = hash & 31;
780 int j = i == 0 ? 0 : count_one_bits (map << (32 - i));
781 if (map & (1 << i))
783 Hamt_entry *entry = subtrie->nodes[j];
784 Hamt_entry *new_entry
785 = entry_remove (functions, entry, elt_ptr, hash >> 5, depth + 1,
786 shared);
787 if (new_entry == NULL)
788 return remove_subtrie_entry (subtrie, i, j);
789 if (new_entry != entry)
791 if (shared)
792 return (Hamt_entry *) replace_entry (subtrie, j, new_entry);
793 free_entry (functions, entry);
794 subtrie->nodes[j] = new_entry;
796 return (Hamt_entry *) subtrie;
798 *elt_ptr = NULL;
799 return (Hamt_entry *) subtrie;
802 /* Remove an element in an entry if found.
804 SHARED is false if a destructive update has been requested and none
805 of the parent nodes are shared. If an entry cannot be
806 removed, *ELT_PTR is set to NULL. */
807 static Hamt_entry *
808 entry_remove (const struct function_table *functions, Hamt_entry *entry,
809 Hamt_entry **elt_ptr, size_t hash, int depth, bool shared)
811 shared |= is_shared (entry);
812 switch (entry_type (entry))
814 case element_entry:
815 if (compare_elements (functions, *elt_ptr, entry))
817 *elt_ptr = entry;
818 return NULL;
820 *elt_ptr = NULL;
821 return entry;
822 case subtrie_entry:
823 return subtrie_remove (functions, (struct subtrie *) entry, elt_ptr, hash,
824 depth, shared);
825 case bucket_entry:
826 return bucket_remove (functions, (struct bucket *) entry, elt_ptr);
827 default:
828 assume (0);
832 /* Remove an element in the root. */
833 static Hamt_entry *
834 root_remove (const struct function_table *functions, Hamt_entry *root,
835 Hamt_entry **elt_ptr, bool shared)
837 if (root == NULL)
838 return NULL;
840 return entry_remove (functions, root, elt_ptr,
841 hash_element (functions, *elt_ptr), 0, shared);
844 /* If *ELT_PTR matches an element already in HAMT, set *ELT_PTR to the
845 element from the table, remove the element from a copy of the hamt and
846 return the copy. Otherwise, return HAMT. */
847 Hamt *
848 hamt_remove (Hamt *hamt, Hamt_entry **elt_ptr)
850 Hamt_entry *elt = *elt_ptr;
851 Hamt_entry *new_entry = root_remove (hamt->functions, hamt->root, elt_ptr,
852 true);
853 if (*elt_ptr == NULL)
854 *elt_ptr = elt;
856 if (new_entry == hamt->root)
857 return hamt;
859 Hamt *new_hamt = XMALLOC (Hamt);
860 new_hamt->functions = copy_function_table (hamt->functions);
861 new_hamt->root = new_entry;
862 return new_hamt;
865 /*************/
866 /* Iteration */
867 /*************/
869 /* Walk a bucket. */
870 static size_t
871 bucket_do_while (const struct bucket *bucket, Hamt_processor *proc, void *data,
872 bool *success)
874 size_t cnt = 0;
875 size_t elt_count = bucket->elt_count;
876 Hamt_entry *const *elts = bucket->elts;
877 for (size_t i = 0; i < elt_count; ++i)
879 *success = proc (elts[i], data);
880 if (*success == false)
881 return cnt;
882 ++cnt;
884 return cnt;
887 /* Forward declaration. */
888 static size_t entry_do_while (Hamt_entry *entry, Hamt_processor *proc,
889 void *data, bool *success);
891 /* Walk a subtrie. */
892 static size_t subtrie_do_while (const struct subtrie *subtrie,
893 Hamt_processor *proc, void *data, bool *success)
895 size_t cnt = 0;
896 int n = trienode_count (subtrie);
897 Hamt_entry *const *node_ptr = subtrie->nodes;
898 for (int j = 0; j < n; ++j)
900 cnt += entry_do_while (*node_ptr++, proc, data, success);
901 if (!success)
902 return cnt;
904 return cnt;
907 /* Walk an entry. */
908 static size_t
909 entry_do_while (Hamt_entry *entry, Hamt_processor *proc, void *data,
910 bool *success)
912 switch (entry_type (entry))
914 case element_entry:
915 *success = proc (entry, data);
916 return *success ? 1 : 0;
917 case subtrie_entry:
918 return subtrie_do_while ((struct subtrie *) entry, proc, data, success);
919 case bucket_entry:
920 return bucket_do_while ((struct bucket *) entry, proc, data, success);
921 default:
922 assume (0);
926 /* Call PROC for every entry of the hamt until it returns false. The
927 first argument of PROC is the entry, the second argument is the value
928 of DATA as received. Return the number of calls that returned
929 true. */
930 size_t
931 hamt_do_while (const Hamt *hamt, Hamt_processor *proc, void *data)
933 if (hamt->root == NULL)
934 return 0;
936 bool success = true;
937 return entry_do_while (hamt->root, proc, data, &success);
940 /* Create an iterator with a copy of the hamt.
942 For a valid iterator state the following is true: If DEPTH is
943 negative, the iterator is exhausted. Otherwise, ENTRY[DEPTH] is
944 either the element that is produced next, or a bucket such that the
945 element at POSITION of the bucket is produced next.
947 Hamt_iterator
948 hamt_iterator (Hamt *hamt)
950 Hamt_iterator iter;
951 iter.hamt = hamt_copy (hamt);
952 Hamt_entry *entry = hamt->root;
953 iter.path = 0;
954 iter.position = 0;
955 if (entry == NULL)
957 iter.depth = -1;
958 return iter;
960 iter.depth = 0;
961 while (iter.entry[iter.depth] = entry, entry_type (entry) == subtrie_entry)
963 const struct subtrie *subtrie = (const struct subtrie *) entry;
964 ++iter.depth;
965 entry = subtrie->nodes[0];
967 return iter;
970 /* Free the iterator and the associated hamt copy. */
971 void
972 hamt_iterator_free (Hamt_iterator *iter)
974 hamt_free (iter->hamt);
977 /* Create a copy of the complete iterator state, including the
978 hamt. */
979 Hamt_iterator
980 hamt_iterator_copy (Hamt_iterator *iter)
982 Hamt_iterator new_iter = *iter;
983 new_iter.hamt = hamt_copy (iter->hamt);
984 return new_iter;
987 /* Return the number of significant bits at DEPTH. */
988 static int
989 bit_width (int depth)
991 if (depth < _GL_HAMT_MAX_DEPTH - 1)
992 return 5;
993 return SIZE_WIDTH - 5 * (_GL_HAMT_MAX_DEPTH - 1);
996 /* The actual iteration. */
997 bool
998 hamt_iterator_next (Hamt_iterator *iter, Hamt_entry **elt_ptr)
1000 int depth = iter->depth;
1001 if (depth < 0)
1002 return false;
1004 Hamt_entry *entry = iter->entry[depth];
1005 if (entry_type (entry) == bucket_entry)
1007 struct bucket *bucket = (struct bucket *) entry;
1008 *elt_ptr = bucket->elts[iter->position];
1009 if (++iter->position < bucket->elt_count)
1010 return true;
1012 else
1013 *elt_ptr = entry;
1015 struct subtrie *subtrie;
1016 while (iter->depth-- > 0)
1018 int width = bit_width (iter->depth);
1019 int j = (iter->path & ((1 << width) - 1)) + 1;
1020 subtrie = (struct subtrie *) iter->entry[iter->depth];
1021 if (j < trienode_count (subtrie))
1023 ++iter->path;
1024 while (iter->entry[++iter->depth] = subtrie->nodes[j],
1025 entry_type (iter->entry[iter->depth]) == subtrie_entry)
1027 width = bit_width (iter->depth);
1028 iter->path <<= width;
1029 j = 0;
1030 subtrie = (struct subtrie *) iter->entry[iter->depth];
1032 iter->position = 0;
1033 break;
1035 iter->path >>= width;
1038 return true;
1041 /***********************/
1042 /* Destructive Updates */
1043 /***********************/
1045 /* If *ELT_PTR matches an element already in HAMT, set *ELT_PTR to the
1046 element from the table and return false. Otherwise, insert *ELT_PTR
1047 destructively into the hamt and return true. */
1048 bool
1049 hamt_insert_x (Hamt *hamt, Hamt_entry **elt_ptr)
1051 Hamt_entry *elt = *elt_ptr;
1052 Hamt_entry *old_root = hamt->root;
1053 hamt->root = root_insert (hamt->functions, old_root, elt_ptr, false, false);
1054 if (old_root != hamt->root && old_root != NULL)
1055 free_entry (hamt->functions, old_root);
1056 if (*elt_ptr == NULL)
1058 *elt_ptr = elt;
1059 return false;
1061 return *elt_ptr == elt;
1064 /* Insert ELT destructively into HAMT. If an existing element was
1065 replaced, return true. Otherwise, return false. */
1066 bool
1067 hamt_replace_x (Hamt *hamt, Hamt_entry *elt)
1069 Hamt_entry *old_root = hamt->root;
1070 hamt->root = root_insert (hamt->functions, old_root, &elt, true, false);
1071 if (old_root != hamt->root && old_root != NULL)
1072 free_entry (hamt->functions, old_root);
1073 return elt != NULL;
1076 /* If ELT matches an element already in HAMT, remove the element
1077 destructively from the hamt and return true. Otherwise, return
1078 false. */
1079 bool
1080 hamt_remove_x (Hamt *hamt, Hamt_entry *elt)
1082 Hamt_entry *old_root = hamt->root;
1083 hamt->root = root_remove (hamt->functions, old_root, &elt, false);
1084 if (old_root != hamt->root)
1085 free_entry (hamt->functions, old_root);
1086 return elt != NULL;