/* Lock-free btree for manually registered unwind frames. */
/* Copyright (C) 2022-2023 Free Software Foundation, Inc.
   Contributed by Thomas Neumann

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it under
   the terms of the GNU General Public License as published by the Free
   Software Foundation; either version 3, or (at your option) any later
   version.

   GCC is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or
   FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
   for more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
   <http://www.gnu.org/licenses/>. */

#ifndef GCC_UNWIND_DW2_BTREE_H
#define GCC_UNWIND_DW2_BTREE_H

#include <stdbool.h>

// Common logic for version locks.
struct version_lock
{
  // The lock itself. The lowest bit indicates an exclusive lock,
  // the second bit indicates waiting threads. All other bits are
  // used as counter to recognize changes.
  // Overflows are okay here, we must only prevent overflow to the
  // same value within one lock_optimistic/validate
  // range. Even on 32 bit platforms that would require 1 billion
  // frame registrations within the time span of a few assembler
  // instructions.
  uintptr_type version_lock;
};

#ifdef __GTHREAD_HAS_COND
// We should never get contention within the tree as it rarely changes.
// But if we ever do get contention we use these for waiting.
static __gthread_mutex_t version_lock_mutex = __GTHREAD_MUTEX_INIT;
static __gthread_cond_t version_lock_cond = __GTHREAD_COND_INIT;
#endif

// Initialize in locked state.
static inline void
version_lock_initialize_locked_exclusive (struct version_lock *vl)
{
  vl->version_lock = 1;
}

// Try to lock the node exclusive.
static inline bool
version_lock_try_lock_exclusive (struct version_lock *vl)
{
  uintptr_type state = __atomic_load_n (&(vl->version_lock), __ATOMIC_SEQ_CST);
  if (state & 1)
    return false;
  return __atomic_compare_exchange_n (&(vl->version_lock), &state, state | 1,
                                      false, __ATOMIC_SEQ_CST,
                                      __ATOMIC_SEQ_CST);
}

// Lock the node exclusive, blocking as needed.
static void
version_lock_lock_exclusive (struct version_lock *vl)
{
#ifndef __GTHREAD_HAS_COND
restart:
#endif

  // We should virtually never get contention here, as frame
  // changes are rare.
  uintptr_type state = __atomic_load_n (&(vl->version_lock), __ATOMIC_SEQ_CST);
  if (!(state & 1))
    {
      if (__atomic_compare_exchange_n (&(vl->version_lock), &state, state | 1,
                                       false, __ATOMIC_SEQ_CST,
                                       __ATOMIC_SEQ_CST))
        return;
    }

  // We did get contention, wait properly.
#ifdef __GTHREAD_HAS_COND
  __gthread_mutex_lock (&version_lock_mutex);
  state = __atomic_load_n (&(vl->version_lock), __ATOMIC_SEQ_CST);
  while (true)
    {
      // Check if the lock is still held.
      if (!(state & 1))
        {
          if (__atomic_compare_exchange_n (&(vl->version_lock), &state,
                                           state | 1, false, __ATOMIC_SEQ_CST,
                                           __ATOMIC_SEQ_CST))
            {
              __gthread_mutex_unlock (&version_lock_mutex);
              return;
            }
          else
            {
              continue;
            }
        }

      // Register waiting thread.
      if (!(state & 2))
        {
          if (!__atomic_compare_exchange_n (&(vl->version_lock), &state,
                                            state | 2, false, __ATOMIC_SEQ_CST,
                                            __ATOMIC_SEQ_CST))
            continue;
        }

      // And sleep.
      __gthread_cond_wait (&version_lock_cond, &version_lock_mutex);
      state = __atomic_load_n (&(vl->version_lock), __ATOMIC_SEQ_CST);
    }
#else
  // Spin if we do not have condition variables available.
  // We expect no contention here, spinning should be okay.
  goto restart;
#endif
}

// Release a locked node and increase the version lock.
static void
version_lock_unlock_exclusive (struct version_lock *vl)
{
  // increase version, reset exclusive lock bits
  uintptr_type state = __atomic_load_n (&(vl->version_lock), __ATOMIC_SEQ_CST);
  uintptr_type ns = (state + 4) & (~((uintptr_type) 3));
  state = __atomic_exchange_n (&(vl->version_lock), ns, __ATOMIC_SEQ_CST);

#ifdef __GTHREAD_HAS_COND
  if (state & 2)
    {
      // Wake up waiting threads. This should be extremely rare.
      __gthread_mutex_lock (&version_lock_mutex);
      __gthread_cond_broadcast (&version_lock_cond);
      __gthread_mutex_unlock (&version_lock_mutex);
    }
#endif
}

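// For exposition only: the unlock above bumps the version counter (bits >= 2)
// and clears the lock/waiter bits (bits 0 and 1) in a single step. A worked
// example with an arbitrary state value:
//
//   state = ...0001011   (version 2, waiter bit set, exclusively locked)
//   ns    = (state + 4) & ~3
//         = ...0001100   (version 3, unlocked, no waiters)
//
// Any optimistic reader that captured the old version will fail validation
// against the new value and restart.
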
// Acquire an optimistic "lock". Note that this does not lock at all, it
// only allows for validation later.
static inline bool
version_lock_lock_optimistic (const struct version_lock *vl, uintptr_type *lock)
{
  uintptr_type state = __atomic_load_n (&(vl->version_lock), __ATOMIC_SEQ_CST);
  *lock = state;

  // Acquiring the lock fails when there is currently an exclusive lock.
  return !(state & 1);
}

// Validate a previously acquired "lock".
static inline bool
version_lock_validate (const struct version_lock *vl, uintptr_type lock)
{
  // Prevent the reordering of non-atomic loads behind the atomic load.
  // Hans Boehm, Can Seqlocks Get Along with Programming Language Memory
  // Models?, Section 4.
  __atomic_thread_fence (__ATOMIC_ACQUIRE);

  // Check that the node is still in the same state.
  uintptr_type state = __atomic_load_n (&(vl->version_lock), __ATOMIC_SEQ_CST);
  return (state == lock);
}

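// For exposition only: together the two functions above form a seqlock-style
// read protocol. A reader copies shared data into locals between
// lock_optimistic and validate and restarts on failure. In the sketch below,
// `node`, `some_field` and `copy` are placeholders, not names from this file:
//
//   restart:
//     uintptr_type lock;
//     if (!version_lock_lock_optimistic (&node->version_lock, &lock))
//       goto restart;                  // exclusively locked, try again
//     copy = node->some_field;         // copy into a local, do not act on it
//     if (!version_lock_validate (&node->version_lock, lock))
//       goto restart;                  // node changed meanwhile, try again
//     // Only now is `copy` known to be a consistent snapshot.
//
// btree_lookup below follows exactly this pattern while walking the tree.
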
// The largest possible separator value.
static const uintptr_type max_separator = ~((uintptr_type) (0));

struct btree_node;

// Inner entry. The child tree contains all entries <= separator.
struct inner_entry
{
  uintptr_type separator;
  struct btree_node *child;
};

// Leaf entry. Stores an object entry.
struct leaf_entry
{
  uintptr_type base, size;
  struct object *ob;
};

// Node types.
enum node_type
{
  btree_node_inner,
  btree_node_leaf,
  btree_node_free
};

// Node sizes. Chosen such that the result size is roughly 256 bytes.
#define max_fanout_inner 15
#define max_fanout_leaf 10

// A btree node.
struct btree_node
{
  // The version lock used for optimistic lock coupling.
  struct version_lock version_lock;
  // The number of entries.
  unsigned entry_count;
  // The type.
  enum node_type type;
  // The payload.
  union
  {
    // The inner nodes have fence keys, i.e., the right-most entry includes a
    // separator.
    struct inner_entry children[max_fanout_inner];
    struct leaf_entry entries[max_fanout_leaf];
  } content;
};

// Is an inner node?
static inline bool
btree_node_is_inner (const struct btree_node *n)
{
  return n->type == btree_node_inner;
}

// Is a leaf node?
static inline bool
btree_node_is_leaf (const struct btree_node *n)
{
  return n->type == btree_node_leaf;
}

// Should the node be merged?
static inline bool
btree_node_needs_merge (const struct btree_node *n)
{
  return n->entry_count < (btree_node_is_inner (n) ? (max_fanout_inner / 2)
                                                   : (max_fanout_leaf / 2));
}

// Get the fence key for inner nodes.
static inline uintptr_type
btree_node_get_fence_key (const struct btree_node *n)
{
  // For inner nodes we just return our right-most entry.
  return n->content.children[n->entry_count - 1].separator;
}

// Find the position for a slot in an inner node.
static unsigned
btree_node_find_inner_slot (const struct btree_node *n, uintptr_type value)
{
  for (unsigned index = 0, ec = n->entry_count; index != ec; ++index)
    if (n->content.children[index].separator >= value)
      return index;
  return n->entry_count;
}

// Find the position for a slot in a leaf node.
static unsigned
btree_node_find_leaf_slot (const struct btree_node *n, uintptr_type value)
{
  for (unsigned index = 0, ec = n->entry_count; index != ec; ++index)
    if (n->content.entries[index].base + n->content.entries[index].size > value)
      return index;
  return n->entry_count;
}

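// For exposition only: the leaf slot search returns the first entry whose
// half-open range [base, base + size) does not lie entirely below the probed
// value. With hypothetical entries {base 0x1000, size 0x100} and
// {base 0x2000, size 0x100}, probing 0x1000 or 0x1050 yields slot 0, probing
// 0x1100 (one past the first range) yields slot 1, and probing 0x2100 yields
// entry_count, i.e. no matching slot. The inner search is analogous but
// compares against the separators.
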
// Try to lock the node exclusive.
static inline bool
btree_node_try_lock_exclusive (struct btree_node *n)
{
  return version_lock_try_lock_exclusive (&(n->version_lock));
}

// Lock the node exclusive, blocking as needed.
static inline void
btree_node_lock_exclusive (struct btree_node *n)
{
  version_lock_lock_exclusive (&(n->version_lock));
}

// Release a locked node and increase the version lock.
static inline void
btree_node_unlock_exclusive (struct btree_node *n)
{
  version_lock_unlock_exclusive (&(n->version_lock));
}

// Acquire an optimistic "lock". Note that this does not lock at all, it
// only allows for validation later.
static inline bool
btree_node_lock_optimistic (const struct btree_node *n, uintptr_type *lock)
{
  return version_lock_lock_optimistic (&(n->version_lock), lock);
}

// Validate a previously acquired lock.
static inline bool
btree_node_validate (const struct btree_node *n, uintptr_type lock)
{
  return version_lock_validate (&(n->version_lock), lock);
}

// Insert a new separator after splitting.
static void
btree_node_update_separator_after_split (struct btree_node *n,
                                         uintptr_type old_separator,
                                         uintptr_type new_separator,
                                         struct btree_node *new_right)
{
  unsigned slot = btree_node_find_inner_slot (n, old_separator);
  for (unsigned index = n->entry_count; index > slot; --index)
    n->content.children[index] = n->content.children[index - 1];
  n->content.children[slot].separator = new_separator;
  n->content.children[slot + 1].child = new_right;
  n->entry_count++;
}

// A btree. Suitable for static initialization, all members are zero at the
// beginning.
struct btree
{
  // The root of the btree.
  struct btree_node *root;
  // The free list of released nodes.
  struct btree_node *free_list;
  // The version lock used to protect the root.
  struct version_lock root_lock;
};

// Initialize a btree. Not actually used, just for exposition.
static inline void
btree_init (struct btree *t)
{
  t->root = NULL;
  t->free_list = NULL;
  t->root_lock.version_lock = 0;
}

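// For exposition only: because all members are zero when zero-initialized, a
// tree can simply be defined as a static object and used immediately, without
// calling btree_init. The name below is just an example:
//
//   static struct btree frame_tree;   // empty, ready for insert/lookup/remove
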
static void
btree_release_tree_recursively (struct btree *t, struct btree_node *n);

// Destroy a tree and release all nodes.
static void
btree_destroy (struct btree *t)
{
  // Disable the mechanism before cleaning up.
  struct btree_node *old_root
    = __atomic_exchange_n (&(t->root), NULL, __ATOMIC_SEQ_CST);
  if (old_root)
    btree_release_tree_recursively (t, old_root);

  // Release all free nodes.
  while (t->free_list)
    {
      struct btree_node *next = t->free_list->content.children[0].child;
      free (t->free_list);
      t->free_list = next;
    }
}

// Allocate a node. This node will be returned in locked exclusive state.
static struct btree_node *
btree_allocate_node (struct btree *t, bool inner)
{
  while (true)
    {
      // Try the free list first.
      struct btree_node *next_free
        = __atomic_load_n (&(t->free_list), __ATOMIC_SEQ_CST);
      if (next_free)
        {
          if (!btree_node_try_lock_exclusive (next_free))
            continue;
          // The node might no longer be free, check that again after acquiring
          // the exclusive lock.
          if (next_free->type == btree_node_free)
            {
              struct btree_node *ex = next_free;
              if (__atomic_compare_exchange_n (
                    &(t->free_list), &ex, next_free->content.children[0].child,
                    false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
                {
                  next_free->entry_count = 0;
                  next_free->type = inner ? btree_node_inner : btree_node_leaf;
                  return next_free;
                }
            }
          btree_node_unlock_exclusive (next_free);
          continue;
        }

      // No free node available, allocate a new one.
      struct btree_node *new_node
        = (struct btree_node *) (malloc (sizeof (struct btree_node)));
      version_lock_initialize_locked_exclusive (
        &(new_node->version_lock)); // initialize the node in locked state.
      new_node->entry_count = 0;
      new_node->type = inner ? btree_node_inner : btree_node_leaf;
      return new_node;
    }
}

// Release a node. This node must be currently locked exclusively and will
// be placed in the free list.
static void
btree_release_node (struct btree *t, struct btree_node *node)
{
  // We cannot release the memory immediately because there might still be
  // concurrent readers on that node. Put it in the free list instead.
  node->type = btree_node_free;
  struct btree_node *next_free
    = __atomic_load_n (&(t->free_list), __ATOMIC_SEQ_CST);
  do
    {
      node->content.children[0].child = next_free;
    } while (!__atomic_compare_exchange_n (&(t->free_list), &next_free, node,
                                           false, __ATOMIC_SEQ_CST,
                                           __ATOMIC_SEQ_CST));
  btree_node_unlock_exclusive (node);
}

// Recursively release a tree. The btree is by design very shallow, thus
// we can risk recursion here.
static void
btree_release_tree_recursively (struct btree *t, struct btree_node *node)
{
  btree_node_lock_exclusive (node);
  if (btree_node_is_inner (node))
    {
      for (unsigned index = 0; index < node->entry_count; ++index)
        btree_release_tree_recursively (t, node->content.children[index].child);
    }
  btree_release_node (t, node);
}

// Check if we are splitting the root.
static void
btree_handle_root_split (struct btree *t, struct btree_node **node,
                         struct btree_node **parent)
{
  // We want to keep the root pointer stable to allow for contention
  // free reads. Thus, we split the root by first moving the content
  // of the root node to a new node, and then split that new node.
  if (!*parent)
    {
      // Allocate a new node, this guarantees us that we will have a parent
      // afterwards.
      struct btree_node *new_node
        = btree_allocate_node (t, btree_node_is_inner (*node));
      struct btree_node *old_node = *node;
      new_node->entry_count = old_node->entry_count;
      new_node->content = old_node->content;
      old_node->content.children[0].separator = max_separator;
      old_node->content.children[0].child = new_node;
      old_node->entry_count = 1;
      old_node->type = btree_node_inner;

      *parent = old_node;
      *node = new_node;
    }
}

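// For exposition only, the root split above in pictures: the node at the
// stable root address keeps its identity and becomes an inner node with a
// single max_separator entry pointing at the copy, which is then split as
// usual by the caller.
//
//   before:  root -> [old_node: e0 e1 ... en]
//   after:   root -> [old_node: (max_separator -> new_node)]
//                     new_node: [e0 e1 ... en]
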
// Split an inner node.
static void
btree_split_inner (struct btree *t, struct btree_node **inner,
                   struct btree_node **parent, uintptr_type target)
{
  // Check for the root.
  btree_handle_root_split (t, inner, parent);

  // Create two inner nodes.
  uintptr_type right_fence = btree_node_get_fence_key (*inner);
  struct btree_node *left_inner = *inner;
  struct btree_node *right_inner = btree_allocate_node (t, true);
  unsigned split = left_inner->entry_count / 2;
  right_inner->entry_count = left_inner->entry_count - split;
  for (unsigned index = 0; index < right_inner->entry_count; ++index)
    right_inner->content.children[index]
      = left_inner->content.children[split + index];
  left_inner->entry_count = split;
  uintptr_type left_fence = btree_node_get_fence_key (left_inner);
  btree_node_update_separator_after_split (*parent, right_fence, left_fence,
                                           right_inner);
  if (target <= left_fence)
    {
      *inner = left_inner;
      btree_node_unlock_exclusive (right_inner);
    }
  else
    {
      *inner = right_inner;
      btree_node_unlock_exclusive (left_inner);
    }
}

// Split a leaf node.
static void
btree_split_leaf (struct btree *t, struct btree_node **leaf,
                  struct btree_node **parent, uintptr_type fence,
                  uintptr_type target)
{
  // Check for the root.
  btree_handle_root_split (t, leaf, parent);

  // Create two leaf nodes.
  uintptr_type right_fence = fence;
  struct btree_node *left_leaf = *leaf;
  struct btree_node *right_leaf = btree_allocate_node (t, false);
  unsigned split = left_leaf->entry_count / 2;
  right_leaf->entry_count = left_leaf->entry_count - split;
  for (unsigned index = 0; index != right_leaf->entry_count; ++index)
    right_leaf->content.entries[index]
      = left_leaf->content.entries[split + index];
  left_leaf->entry_count = split;
  uintptr_type left_fence = right_leaf->content.entries[0].base - 1;
  btree_node_update_separator_after_split (*parent, right_fence, left_fence,
                                           right_leaf);
  if (target <= left_fence)
    {
      *leaf = left_leaf;
      btree_node_unlock_exclusive (right_leaf);
    }
  else
    {
      *leaf = right_leaf;
      btree_node_unlock_exclusive (left_leaf);
    }
}

// Merge (or balance) child nodes.
static struct btree_node *
btree_merge_node (struct btree *t, unsigned child_slot,
                  struct btree_node *parent, uintptr_type target)
{
  // Choose the emptiest neighbor and lock both. The target child is already
  // locked.
  unsigned left_slot;
  struct btree_node *left_node, *right_node;
  if ((child_slot == 0)
      || (((child_slot + 1) < parent->entry_count)
          && (parent->content.children[child_slot + 1].child->entry_count
              < parent->content.children[child_slot - 1].child->entry_count)))
    {
      left_slot = child_slot;
      left_node = parent->content.children[left_slot].child;
      right_node = parent->content.children[left_slot + 1].child;
      btree_node_lock_exclusive (right_node);
    }
  else
    {
      left_slot = child_slot - 1;
      left_node = parent->content.children[left_slot].child;
      right_node = parent->content.children[left_slot + 1].child;
      btree_node_lock_exclusive (left_node);
    }

  // Can we merge both nodes into one node?
  unsigned total_count = left_node->entry_count + right_node->entry_count;
  unsigned max_count
    = btree_node_is_inner (left_node) ? max_fanout_inner : max_fanout_leaf;
  if (total_count <= max_count)
    {
      // Merge into the parent?
      if (parent->entry_count == 2)
        {
          // Merge children into parent. This can only happen at the root.
          if (btree_node_is_inner (left_node))
            {
              for (unsigned index = 0; index != left_node->entry_count; ++index)
                parent->content.children[index]
                  = left_node->content.children[index];
              for (unsigned index = 0; index != right_node->entry_count;
                   ++index)
                parent->content.children[index + left_node->entry_count]
                  = right_node->content.children[index];
            }
          else
            {
              parent->type = btree_node_leaf;
              for (unsigned index = 0; index != left_node->entry_count; ++index)
                parent->content.entries[index]
                  = left_node->content.entries[index];
              for (unsigned index = 0; index != right_node->entry_count;
                   ++index)
                parent->content.entries[index + left_node->entry_count]
                  = right_node->content.entries[index];
            }
          parent->entry_count = total_count;
          btree_release_node (t, left_node);
          btree_release_node (t, right_node);
          return parent;
        }
      else
        {
          // Regular merge.
          if (btree_node_is_inner (left_node))
            {
              for (unsigned index = 0; index != right_node->entry_count;
                   ++index)
                left_node->content.children[left_node->entry_count++]
                  = right_node->content.children[index];
            }
          else
            {
              for (unsigned index = 0; index != right_node->entry_count;
                   ++index)
                left_node->content.entries[left_node->entry_count++]
                  = right_node->content.entries[index];
            }
          parent->content.children[left_slot].separator
            = parent->content.children[left_slot + 1].separator;
          for (unsigned index = left_slot + 1; index + 1 < parent->entry_count;
               ++index)
            parent->content.children[index]
              = parent->content.children[index + 1];
          parent->entry_count--;
          btree_release_node (t, right_node);
          btree_node_unlock_exclusive (parent);
          return left_node;
        }
    }

  // No merge possible, rebalance instead.
  if (left_node->entry_count > right_node->entry_count)
    {
      // Shift from left to right.
      unsigned to_shift
        = (left_node->entry_count - right_node->entry_count) / 2;
      if (btree_node_is_inner (left_node))
        {
          for (unsigned index = 0; index != right_node->entry_count; ++index)
            {
              unsigned pos = right_node->entry_count - 1 - index;
              right_node->content.children[pos + to_shift]
                = right_node->content.children[pos];
            }
          for (unsigned index = 0; index != to_shift; ++index)
            right_node->content.children[index]
              = left_node->content
                  .children[left_node->entry_count - to_shift + index];
        }
      else
        {
          for (unsigned index = 0; index != right_node->entry_count; ++index)
            {
              unsigned pos = right_node->entry_count - 1 - index;
              right_node->content.entries[pos + to_shift]
                = right_node->content.entries[pos];
            }
          for (unsigned index = 0; index != to_shift; ++index)
            right_node->content.entries[index]
              = left_node->content
                  .entries[left_node->entry_count - to_shift + index];
        }
      left_node->entry_count -= to_shift;
      right_node->entry_count += to_shift;
    }
  else
    {
      // Shift from right to left.
      unsigned to_shift
        = (right_node->entry_count - left_node->entry_count) / 2;
      if (btree_node_is_inner (left_node))
        {
          for (unsigned index = 0; index != to_shift; ++index)
            left_node->content.children[left_node->entry_count + index]
              = right_node->content.children[index];
          for (unsigned index = 0; index != right_node->entry_count - to_shift;
               ++index)
            right_node->content.children[index]
              = right_node->content.children[index + to_shift];
        }
      else
        {
          for (unsigned index = 0; index != to_shift; ++index)
            left_node->content.entries[left_node->entry_count + index]
              = right_node->content.entries[index];
          for (unsigned index = 0; index != right_node->entry_count - to_shift;
               ++index)
            right_node->content.entries[index]
              = right_node->content.entries[index + to_shift];
        }
      left_node->entry_count += to_shift;
      right_node->entry_count -= to_shift;
    }
  uintptr_type left_fence;
  if (btree_node_is_leaf (left_node))
    {
      left_fence = right_node->content.entries[0].base - 1;
    }
  else
    {
      left_fence = btree_node_get_fence_key (left_node);
    }
  parent->content.children[left_slot].separator = left_fence;
  btree_node_unlock_exclusive (parent);
  if (target <= left_fence)
    {
      btree_node_unlock_exclusive (right_node);
      return left_node;
    }
  else
    {
      btree_node_unlock_exclusive (left_node);
      return right_node;
    }
}

// Insert an entry.
static bool
btree_insert (struct btree *t, uintptr_type base, uintptr_type size,
              struct object *ob)
{
  // Sanity check.
  if (!size)
    return false;

  // Access the root.
  struct btree_node *iter, *parent = NULL;
  {
    version_lock_lock_exclusive (&(t->root_lock));
    iter = t->root;
    if (iter)
      {
        btree_node_lock_exclusive (iter);
      }
    else
      {
        t->root = iter = btree_allocate_node (t, false);
      }
    version_lock_unlock_exclusive (&(t->root_lock));
  }

  // Walk down the btree with classic lock coupling and eager splits.
  // Strictly speaking this is not performance optimal, we could use
  // optimistic lock coupling until we hit a node that has to be modified.
  // But that is more difficult to implement and frame registration is
  // rare anyway, we use simple locking for now.

  uintptr_type fence = max_separator;
  while (btree_node_is_inner (iter))
    {
      // Use eager splits to avoid lock coupling up.
      if (iter->entry_count == max_fanout_inner)
        btree_split_inner (t, &iter, &parent, base);

      unsigned slot = btree_node_find_inner_slot (iter, base);
      if (parent)
        btree_node_unlock_exclusive (parent);
      parent = iter;
      fence = iter->content.children[slot].separator;
      iter = iter->content.children[slot].child;
      btree_node_lock_exclusive (iter);
    }

  // Make sure we have space.
  if (iter->entry_count == max_fanout_leaf)
    btree_split_leaf (t, &iter, &parent, fence, base);
  if (parent)
    btree_node_unlock_exclusive (parent);

  // Insert in node.
  unsigned slot = btree_node_find_leaf_slot (iter, base);
  if ((slot < iter->entry_count) && (iter->content.entries[slot].base == base))
    {
      // Duplicate entry, this should never happen.
      btree_node_unlock_exclusive (iter);
      return false;
    }
  for (unsigned index = iter->entry_count; index > slot; --index)
    iter->content.entries[index] = iter->content.entries[index - 1];
  struct leaf_entry *e = &(iter->content.entries[slot]);
  e->base = base;
  e->size = size;
  e->ob = ob;
  iter->entry_count++;
  btree_node_unlock_exclusive (iter);
  return true;
}

// Remove an entry.
static struct object *
btree_remove (struct btree *t, uintptr_type base)
{
  // Access the root.
  version_lock_lock_exclusive (&(t->root_lock));
  struct btree_node *iter = t->root;
  if (iter)
    btree_node_lock_exclusive (iter);
  version_lock_unlock_exclusive (&(t->root_lock));
  if (!iter)
    return NULL;

  // Same strategy as with insert, walk down with lock coupling and
  // merge eagerly.
  while (btree_node_is_inner (iter))
    {
      unsigned slot = btree_node_find_inner_slot (iter, base);
      struct btree_node *next = iter->content.children[slot].child;
      btree_node_lock_exclusive (next);
      if (btree_node_needs_merge (next))
        {
          // Use eager merges to avoid lock coupling up.
          iter = btree_merge_node (t, slot, iter, base);
        }
      else
        {
          btree_node_unlock_exclusive (iter);
          iter = next;
        }
    }

  // Remove existing entry.
  unsigned slot = btree_node_find_leaf_slot (iter, base);
  if ((slot >= iter->entry_count) || (iter->content.entries[slot].base != base))
    {
      // Not found, this should never happen.
      btree_node_unlock_exclusive (iter);
      return NULL;
    }
  struct object *ob = iter->content.entries[slot].ob;
  for (unsigned index = slot; index + 1 < iter->entry_count; ++index)
    iter->content.entries[index] = iter->content.entries[index + 1];
  iter->entry_count--;
  btree_node_unlock_exclusive (iter);
  return ob;
}

// Find the corresponding entry for the given address.
static struct object *
btree_lookup (const struct btree *t, uintptr_type target_addr)
{
  // Within this function many loads are relaxed atomic loads.
  // Use a macro to keep the code reasonable.
#define RLOAD(x) __atomic_load_n (&(x), __ATOMIC_RELAXED)

  // For targets where unwind info is usually not registered through these
  // APIs anymore, avoid any sequentially consistent atomics.
  // Use relaxed MO here, it is up to the app to ensure that the library
  // loading/initialization happens-before using that library in other
  // threads (in particular unwinding with that library's functions
  // appearing in the backtraces). Calling that library's functions
  // without waiting for the library to initialize would be racy.
  if (__builtin_expect (!RLOAD (t->root), 1))
    return NULL;

  // The unwinding tables are mostly static, they only change when
  // frames are added or removed. This makes it extremely unlikely that they
  // change during a given unwinding sequence. Thus, we optimize for the
  // contention free case and use optimistic lock coupling. This does not
  // require any writes to shared state, instead we validate every read. It is
  // important that we do not trust any value that we have read until we call
  // validate again. Data can change at arbitrary points in time, thus we
  // always copy something into a local variable and validate again before
  // acting on the read. In the unlikely event that we encounter a concurrent
  // change we simply restart and try again.

restart:
  struct btree_node *iter;
  uintptr_type lock;
  {
    // Accessing the root node requires defending against concurrent pointer
    // changes. Thus we couple root_lock -> lock on root node -> validate
    // root_lock.
    if (!version_lock_lock_optimistic (&(t->root_lock), &lock))
      goto restart;
    iter = RLOAD (t->root);
    if (!version_lock_validate (&(t->root_lock), lock))
      goto restart;
    if (!iter)
      return NULL;
    uintptr_type child_lock;
    if ((!btree_node_lock_optimistic (iter, &child_lock))
        || (!version_lock_validate (&(t->root_lock), lock)))
      goto restart;
    lock = child_lock;
  }

  // Now we can walk down towards the right leaf node.
  while (true)
    {
      enum node_type type = RLOAD (iter->type);
      unsigned entry_count = RLOAD (iter->entry_count);
      if (!btree_node_validate (iter, lock))
        goto restart;
      if (!entry_count)
        return NULL;

      if (type == btree_node_inner)
        {
          // We cannot call find_inner_slot here because we need (relaxed)
          // atomic reads here.
          unsigned slot = 0;
          while (
            ((slot + 1) < entry_count)
            && (RLOAD (iter->content.children[slot].separator) < target_addr))
            ++slot;
          struct btree_node *child = RLOAD (iter->content.children[slot].child);
          if (!btree_node_validate (iter, lock))
            goto restart;

          // The node content can change at any point in time, thus we must
          // interleave parent and child checks.
          uintptr_type child_lock;
          if (!btree_node_lock_optimistic (child, &child_lock))
            goto restart;
          if (!btree_node_validate (iter, lock))
            goto restart; // make sure we still point to the correct node after
                          // acquiring the optimistic lock.

          // Go down
          iter = child;
          lock = child_lock;
        }
      else
        {
          // We cannot call find_leaf_slot here because we need (relaxed)
          // atomic reads here.
          unsigned slot = 0;
          while (((slot + 1) < entry_count)
                 && (RLOAD (iter->content.entries[slot].base)
                       + RLOAD (iter->content.entries[slot].size)
                     <= target_addr))
            ++slot;
          struct leaf_entry entry;
          entry.base = RLOAD (iter->content.entries[slot].base);
          entry.size = RLOAD (iter->content.entries[slot].size);
          entry.ob = RLOAD (iter->content.entries[slot].ob);
          if (!btree_node_validate (iter, lock))
            goto restart;

          // Check if we have a hit.
          if ((entry.base <= target_addr)
              && (target_addr < entry.base + entry.size))
            {
              return entry.ob;
            }
          return NULL;
        }
    }
#undef RLOAD
}

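// For exposition only: a typical life cycle of one registered frame range as
// caller code might drive it. The names below (frames, ob, base, size, pc)
// are placeholders, not names from this file:
//
//   static struct btree frames;                 // zero-initialized, empty
//
//   // Registration: remember that [base, base + size) belongs to ob.
//   btree_insert (&frames, base, size, ob);
//
//   // Unwinding: find the object covering a program counter, without
//   // taking any lock (optimistic lock coupling, see btree_lookup).
//   struct object *hit = btree_lookup (&frames, pc);
//
//   // Deregistration: remove the range again and get the object back.
//   struct object *removed = btree_remove (&frames, base);
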
#endif /* unwind-dw2-btree.h */