/* Lock-free btree for manually registered unwind frames.  */
/* Copyright (C) 2022-2023 Free Software Foundation, Inc.
   Contributed by Thomas Neumann

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
<http://www.gnu.org/licenses/>.  */
#ifndef GCC_UNWIND_DW2_BTREE_H
#define GCC_UNWIND_DW2_BTREE_H

#include <stdbool.h>
// Common logic for version locks.
struct version_lock
{
  // The lock itself. The lowest bit indicates an exclusive lock,
  // the second bit indicates waiting threads. All other bits are
  // used as counter to recognize changes.
  // Overflows are okay here, we must only prevent overflow to the
  // same value within one lock_optimistic/validate
  // range. Even on 32 bit platforms that would require 1 billion
  // frame registrations within the time span of a few assembler
  // instructions.
  uintptr_type version_lock;
};
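// Illustrative sketch (not part of this header): with that layout a reader
// only has to distinguish a handful of states of the version word, e.g.
//
//   ...0100  version 1, unlocked            -> optimistic readers may proceed
//   ...0101  version 1, locked exclusively  -> optimistic readers must retry
//   ...0110  version 1, unlocked, a waiter is registered
//
// A successful lock_optimistic/validate pair therefore proves both that no
// exclusive lock was held and that the counter bits did not change in
// between.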
#ifdef __GTHREAD_HAS_COND
// We should never get contention within the tree as it rarely changes.
// But if we ever do get contention we use these for waiting.
static __gthread_mutex_t version_lock_mutex = __GTHREAD_MUTEX_INIT;
static __gthread_cond_t version_lock_cond = __GTHREAD_COND_INIT;
#endif
// Initialize in locked state.
static inline void
version_lock_initialize_locked_exclusive (struct version_lock *vl)
{
  vl->version_lock = 1;
}
// Try to lock the node exclusive.
static inline bool
version_lock_try_lock_exclusive (struct version_lock *vl)
{
  uintptr_type state = __atomic_load_n (&(vl->version_lock), __ATOMIC_SEQ_CST);
  if (state & 1)
    return false;
  return __atomic_compare_exchange_n (&(vl->version_lock), &state, state | 1,
                                      false, __ATOMIC_SEQ_CST,
                                      __ATOMIC_SEQ_CST);
}
// Lock the node exclusive, blocking as needed.
static void
version_lock_lock_exclusive (struct version_lock *vl)
{
#ifndef __GTHREAD_HAS_COND
restart:
#endif

  // We should virtually never get contention here, as frame
  // registration and deregistration are rare.
  uintptr_type state = __atomic_load_n (&(vl->version_lock), __ATOMIC_SEQ_CST);
  if (!(state & 1))
    {
      if (__atomic_compare_exchange_n (&(vl->version_lock), &state, state | 1,
                                       false, __ATOMIC_SEQ_CST,
                                       __ATOMIC_SEQ_CST))
        return;
    }

    // We did get contention, wait properly.
#ifdef __GTHREAD_HAS_COND
  __gthread_mutex_lock (&version_lock_mutex);
  state = __atomic_load_n (&(vl->version_lock), __ATOMIC_SEQ_CST);
  while (true)
    {
      // Check if the lock is still held.
      if (!(state & 1))
        {
          // Try to acquire it ourselves.
          if (__atomic_compare_exchange_n (&(vl->version_lock), &state,
                                           state | 1, false, __ATOMIC_SEQ_CST,
                                           __ATOMIC_SEQ_CST))
            {
              __gthread_mutex_unlock (&version_lock_mutex);
              return;
            }
          // Somebody else was faster, re-check the new state.
          continue;
        }

      // Register waiting thread.
      if (!(state & 2))
        {
          if (!__atomic_compare_exchange_n (&(vl->version_lock), &state,
                                            state | 2, false, __ATOMIC_SEQ_CST,
                                            __ATOMIC_SEQ_CST))
            continue;
        }

      // And sleep until the lock is released.
      __gthread_cond_wait (&version_lock_cond, &version_lock_mutex);
      state = __atomic_load_n (&(vl->version_lock), __ATOMIC_SEQ_CST);
    }
#else
  // Spin if we do not have condition variables available.
  // We expect no contention here, spinning should be okay.
  goto restart;
#endif
}
// Release a locked node and increase the version lock.
static void
version_lock_unlock_exclusive (struct version_lock *vl)
{
  // Increase the version, reset the exclusive lock bits.
  uintptr_type state = __atomic_load_n (&(vl->version_lock), __ATOMIC_SEQ_CST);
  uintptr_type ns = (state + 4) & (~((uintptr_type) 3));
  state = __atomic_exchange_n (&(vl->version_lock), ns, __ATOMIC_SEQ_CST);

#ifdef __GTHREAD_HAS_COND
  if (state & 2)
    {
      // Wake up waiting threads. This should be extremely rare.
      __gthread_mutex_lock (&version_lock_mutex);
      __gthread_cond_broadcast (&version_lock_cond);
      __gthread_mutex_unlock (&version_lock_mutex);
    }
#endif
}
// Acquire an optimistic "lock". Note that this does not lock at all, it
// only allows for validation later.
static inline bool
version_lock_lock_optimistic (const struct version_lock *vl, uintptr_type *lock)
{
  uintptr_type state = __atomic_load_n (&(vl->version_lock), __ATOMIC_SEQ_CST);
  *lock = state;

  // Acquiring the lock fails when there is currently an exclusive lock.
  return !(state & 1);
}
// Validate a previously acquired "lock".
static inline bool
version_lock_validate (const struct version_lock *vl, uintptr_type lock)
{
  // Prevent the reordering of non-atomic loads behind the atomic load.
  // Hans Boehm, Can Seqlocks Get Along with Programming Language Memory
  // Models?, Section 4.
  __atomic_thread_fence (__ATOMIC_ACQUIRE);

  // Check that the node is still in the same state.
  uintptr_type state = __atomic_load_n (&(vl->version_lock), __ATOMIC_SEQ_CST);
  return (state == lock);
}
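// The two helpers above combine into the usual seqlock-style read protocol.
// A minimal sketch (illustrative only; `field' stands for any data guarded
// by the version lock, it is not a member of this struct):
//
//   uintptr_type lock, snapshot;
//   do
//     {
//       while (!version_lock_lock_optimistic (&vl, &lock))
//         ;                          // exclusively locked right now, retry
//       snapshot = field;            // speculative, not yet trustworthy
//     }
//   while (!version_lock_validate (&vl, lock));  // a writer intervened: redo
//   // Only after a successful validate may `snapshot' be acted upon.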
// The largest possible separator value.
static const uintptr_type max_separator = ~((uintptr_type) (0));

struct btree_node;
// Inner entry. The child tree contains all entries <= separator.
struct inner_entry
{
  uintptr_type separator;
  struct btree_node *child;
};
// Leaf entry. Stores an object entry.
struct leaf_entry
{
  uintptr_type base, size;
  struct object *ob;
};

// Node types.
enum node_type
{
  btree_node_inner,
  btree_node_leaf,
  btree_node_free
};
// Node sizes. Chosen such that the result size is roughly 256 bytes.
#define max_fanout_inner 15
#define max_fanout_leaf 10
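// Back-of-envelope check of that size target (assuming a typical LP64
// target where pointers and uintptr_type are 8 bytes): an inner_entry is
// 16 bytes and a leaf_entry is 24 bytes, so the content union below takes
// max (15 * 16, 10 * 24) = 240 bytes; adding the version lock, entry count
// and node type gives roughly 256 bytes per node.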
// A btree node.
struct btree_node
{
  // The version lock used for optimistic lock coupling.
  struct version_lock version_lock;
  // The number of entries.
  unsigned entry_count;
  // The node type.
  enum node_type type;
  // The payload.
  union
  {
    // The inner nodes have fence keys, i.e., the right-most entry includes
    // a separator.
    struct inner_entry children[max_fanout_inner];
    struct leaf_entry entries[max_fanout_leaf];
  } content;
};
// Is the node an inner node?
static inline bool
btree_node_is_inner (const struct btree_node *n)
{
  return n->type == btree_node_inner;
}
// Is the node a leaf node?
static inline bool
btree_node_is_leaf (const struct btree_node *n)
{
  return n->type == btree_node_leaf;
}
// Should the node be merged?
static inline bool
btree_node_needs_merge (const struct btree_node *n)
{
  return n->entry_count < (btree_node_is_inner (n) ? (max_fanout_inner / 2)
                                                   : (max_fanout_leaf / 2));
}
// Get the fence key for inner nodes.
static inline uintptr_type
btree_node_get_fence_key (const struct btree_node *n)
{
  // For inner nodes we just return our right-most entry.
  return n->content.children[n->entry_count - 1].separator;
}
// Find the position for a slot in an inner node.
static unsigned
btree_node_find_inner_slot (const struct btree_node *n, uintptr_type value)
{
  for (unsigned index = 0, ec = n->entry_count; index != ec; ++index)
    if (n->content.children[index].separator >= value)
      return index;
  return n->entry_count;
}
// Find the position for a slot in a leaf node.
static unsigned
btree_node_find_leaf_slot (const struct btree_node *n, uintptr_type value)
{
  for (unsigned index = 0, ec = n->entry_count; index != ec; ++index)
    if (n->content.entries[index].base + n->content.entries[index].size
        > value)
      return index;
  return n->entry_count;
}
// Try to lock the node exclusive.
static inline bool
btree_node_try_lock_exclusive (struct btree_node *n)
{
  return version_lock_try_lock_exclusive (&(n->version_lock));
}
// Lock the node exclusive, blocking as needed.
static inline void
btree_node_lock_exclusive (struct btree_node *n)
{
  version_lock_lock_exclusive (&(n->version_lock));
}
// Release a locked node and increase the version lock.
static inline void
btree_node_unlock_exclusive (struct btree_node *n)
{
  version_lock_unlock_exclusive (&(n->version_lock));
}
// Acquire an optimistic "lock". Note that this does not lock at all, it
// only allows for validation later.
static inline bool
btree_node_lock_optimistic (const struct btree_node *n, uintptr_type *lock)
{
  return version_lock_lock_optimistic (&(n->version_lock), lock);
}
// Validate a previously acquired lock.
static inline bool
btree_node_validate (const struct btree_node *n, uintptr_type lock)
{
  return version_lock_validate (&(n->version_lock), lock);
}
// Insert a new separator after splitting.
static void
btree_node_update_separator_after_split (struct btree_node *n,
                                         uintptr_type old_separator,
                                         uintptr_type new_separator,
                                         struct btree_node *new_right)
{
  unsigned slot = btree_node_find_inner_slot (n, old_separator);
  for (unsigned index = n->entry_count; index > slot; --index)
    n->content.children[index] = n->content.children[index - 1];
  n->content.children[slot].separator = new_separator;
  n->content.children[slot + 1].child = new_right;
  n->entry_count++;
}
// A btree. Suitable for static initialization, all members are zero at the
// beginning.
struct btree
{
  // The root of the btree.
  struct btree_node *root;
  // The free list of released nodes.
  struct btree_node *free_list;
  // The version lock used to protect the root.
  struct version_lock root_lock;
};
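// For example (illustrative, not part of this header), a user can rely on
// static zero-initialization instead of calling btree_init:
//
//   static struct btree registered_frames;  // all-zero => valid empty btree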
// Initialize a btree. Not actually used, just for exposition.
static inline void
btree_init (struct btree *t)
{
  t->root = NULL;
  t->free_list = NULL;
  t->root_lock.version_lock = 0;
}
static void
btree_release_tree_recursively (struct btree *t, struct btree_node *n);
// Destroy a tree and release all nodes.
static void
btree_destroy (struct btree *t)
{
  // Disable the mechanism before cleaning up.
  struct btree_node *old_root
    = __atomic_exchange_n (&(t->root), NULL, __ATOMIC_SEQ_CST);
  if (old_root)
    btree_release_tree_recursively (t, old_root);

  // Release all free nodes.
  while (t->free_list)
    {
      struct btree_node *next = t->free_list->content.children[0].child;
      free (t->free_list);
      t->free_list = next;
    }
}
// Allocate a node. This node will be returned in locked exclusive state.
static struct btree_node *
btree_allocate_node (struct btree *t, bool inner)
{
  while (true)
    {
      // Try the free list first.
      struct btree_node *next_free
        = __atomic_load_n (&(t->free_list), __ATOMIC_SEQ_CST);
      if (next_free)
        {
          if (!btree_node_try_lock_exclusive (next_free))
            continue;
          // The node might no longer be free, check that again after acquiring
          // the exclusive lock.
          if (next_free->type == btree_node_free)
            {
              struct btree_node *ex = next_free;
              if (__atomic_compare_exchange_n (
                    &(t->free_list), &ex, next_free->content.children[0].child,
                    false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
                {
                  next_free->entry_count = 0;
                  next_free->type = inner ? btree_node_inner : btree_node_leaf;
                  return next_free;
                }
            }
          btree_node_unlock_exclusive (next_free);
          continue;
        }

      // No free node available, allocate a new one.
      struct btree_node *new_node
        = (struct btree_node *) (malloc (sizeof (struct btree_node)));
      version_lock_initialize_locked_exclusive (
        &(new_node->version_lock)); // initialize the node in locked state.
      new_node->entry_count = 0;
      new_node->type = inner ? btree_node_inner : btree_node_leaf;
      return new_node;
    }
}
// Release a node. This node must be currently locked exclusively and will
// be placed in the free list.
static void
btree_release_node (struct btree *t, struct btree_node *node)
{
  // We cannot release the memory immediately because there might still be
  // concurrent readers on that node. Put it in the free list instead.
  node->type = btree_node_free;
  struct btree_node *next_free
    = __atomic_load_n (&(t->free_list), __ATOMIC_SEQ_CST);
  do
    {
      node->content.children[0].child = next_free;
  } while (!__atomic_compare_exchange_n (&(t->free_list), &next_free, node,
                                         false, __ATOMIC_SEQ_CST,
                                         __ATOMIC_SEQ_CST));
  btree_node_unlock_exclusive (node);
}
// Recursively release a tree. The btree is by design very shallow, thus
// we can risk recursion here.
static void
btree_release_tree_recursively (struct btree *t, struct btree_node *node)
{
  btree_node_lock_exclusive (node);
  if (btree_node_is_inner (node))
    {
      for (unsigned index = 0; index < node->entry_count; ++index)
        btree_release_tree_recursively (t, node->content.children[index].child);
    }
  btree_release_node (t, node);
}
// Check if we are splitting the root.
static void
btree_handle_root_split (struct btree *t, struct btree_node **node,
                         struct btree_node **parent)
{
  // We want to keep the root pointer stable to allow for contention
  // free reads. Thus, we split the root by first moving the content
  // of the root node to a new node, and then split that new node.
  if (!*parent)
    {
      // Allocate a new node, this guarantees us that we will have a parent
      // afterwards.
      struct btree_node *new_node
        = btree_allocate_node (t, btree_node_is_inner (*node));
      struct btree_node *old_node = *node;
      new_node->entry_count = old_node->entry_count;
      new_node->content = old_node->content;
      old_node->content.children[0].separator = max_separator;
      old_node->content.children[0].child = new_node;
      old_node->entry_count = 1;
      old_node->type = btree_node_inner;

      *parent = old_node;
      *node = new_node;
    }
}
// Split an inner node.
static void
btree_split_inner (struct btree *t, struct btree_node **inner,
                   struct btree_node **parent, uintptr_type target)
{
  // Check for the root.
  btree_handle_root_split (t, inner, parent);

  // Create two inner nodes.
  uintptr_type right_fence = btree_node_get_fence_key (*inner);
  struct btree_node *left_inner = *inner;
  struct btree_node *right_inner = btree_allocate_node (t, true);
  unsigned split = left_inner->entry_count / 2;
  right_inner->entry_count = left_inner->entry_count - split;
  for (unsigned index = 0; index < right_inner->entry_count; ++index)
    right_inner->content.children[index]
      = left_inner->content.children[split + index];
  left_inner->entry_count = split;
  uintptr_type left_fence = btree_node_get_fence_key (left_inner);
  btree_node_update_separator_after_split (*parent, right_fence, left_fence,
                                           right_inner);
  if (target <= left_fence)
    {
      *inner = left_inner;
      btree_node_unlock_exclusive (right_inner);
    }
  else
    {
      *inner = right_inner;
      btree_node_unlock_exclusive (left_inner);
    }
}
// Split a leaf node.
static void
btree_split_leaf (struct btree *t, struct btree_node **leaf,
                  struct btree_node **parent, uintptr_type fence,
                  uintptr_type target)
{
  // Check for the root.
  btree_handle_root_split (t, leaf, parent);

  // Create two leaf nodes.
  uintptr_type right_fence = fence;
  struct btree_node *left_leaf = *leaf;
  struct btree_node *right_leaf = btree_allocate_node (t, false);
  unsigned split = left_leaf->entry_count / 2;
  right_leaf->entry_count = left_leaf->entry_count - split;
  for (unsigned index = 0; index != right_leaf->entry_count; ++index)
    right_leaf->content.entries[index]
      = left_leaf->content.entries[split + index];
  left_leaf->entry_count = split;
  uintptr_type left_fence = right_leaf->content.entries[0].base - 1;
  btree_node_update_separator_after_split (*parent, right_fence, left_fence,
                                           right_leaf);
  if (target <= left_fence)
    {
      *leaf = left_leaf;
      btree_node_unlock_exclusive (right_leaf);
    }
  else
    {
      *leaf = right_leaf;
      btree_node_unlock_exclusive (left_leaf);
    }
}
// Merge (or balance) child nodes.
static struct btree_node *
btree_merge_node (struct btree *t, unsigned child_slot,
                  struct btree_node *parent, uintptr_type target)
{
  // Choose the emptiest neighbor and lock both. The target child is already
  // locked exclusively.
  unsigned left_slot;
  struct btree_node *left_node, *right_node;
  if ((child_slot == 0)
      || (((child_slot + 1) < parent->entry_count)
          && (parent->content.children[child_slot + 1].child->entry_count
              < parent->content.children[child_slot - 1].child->entry_count)))
    {
      left_slot = child_slot;
      left_node = parent->content.children[left_slot].child;
      right_node = parent->content.children[left_slot + 1].child;
      btree_node_lock_exclusive (right_node);
    }
  else
    {
      left_slot = child_slot - 1;
      left_node = parent->content.children[left_slot].child;
      right_node = parent->content.children[left_slot + 1].child;
      btree_node_lock_exclusive (left_node);
    }
  // Can we merge both nodes into one node?
  unsigned total_count = left_node->entry_count + right_node->entry_count;
  unsigned max_count
    = btree_node_is_inner (left_node) ? max_fanout_inner : max_fanout_leaf;
  if (total_count <= max_count)
    {
      // Merge into the parent?
      if (parent->entry_count == 2)
        {
          // Merge children into parent. This can only happen at the root.
          if (btree_node_is_inner (left_node))
            {
              for (unsigned index = 0; index != left_node->entry_count;
                   ++index)
                parent->content.children[index]
                  = left_node->content.children[index];
              for (unsigned index = 0; index != right_node->entry_count;
                   ++index)
                parent->content.children[index + left_node->entry_count]
                  = right_node->content.children[index];
            }
          else
            {
              parent->type = btree_node_leaf;
              for (unsigned index = 0; index != left_node->entry_count;
                   ++index)
                parent->content.entries[index]
                  = left_node->content.entries[index];
              for (unsigned index = 0; index != right_node->entry_count;
                   ++index)
                parent->content.entries[index + left_node->entry_count]
                  = right_node->content.entries[index];
            }
          parent->entry_count = total_count;
          btree_release_node (t, left_node);
          btree_release_node (t, right_node);
          return parent;
        }
      else
        {
          // Merge the right node into the left node and drop it.
          if (btree_node_is_inner (left_node))
            {
              for (unsigned index = 0; index != right_node->entry_count;
                   ++index)
                left_node->content.children[left_node->entry_count++]
                  = right_node->content.children[index];
            }
          else
            {
              for (unsigned index = 0; index != right_node->entry_count;
                   ++index)
                left_node->content.entries[left_node->entry_count++]
                  = right_node->content.entries[index];
            }
          parent->content.children[left_slot].separator
            = parent->content.children[left_slot + 1].separator;
          for (unsigned index = left_slot + 1; index + 1 < parent->entry_count;
               ++index)
            parent->content.children[index]
              = parent->content.children[index + 1];
          parent->entry_count--;
          btree_release_node (t, right_node);
          btree_node_unlock_exclusive (parent);
          return left_node;
        }
    }
  // No merge possible, rebalance instead.
  if (left_node->entry_count > right_node->entry_count)
    {
      // Shift from left to right.
      unsigned to_shift
        = (left_node->entry_count - right_node->entry_count) / 2;
      if (btree_node_is_inner (left_node))
        {
          for (unsigned index = 0; index != right_node->entry_count; ++index)
            {
              unsigned pos = right_node->entry_count - 1 - index;
              right_node->content.children[pos + to_shift]
                = right_node->content.children[pos];
            }
          for (unsigned index = 0; index != to_shift; ++index)
            right_node->content.children[index]
              = left_node->content
                  .children[left_node->entry_count - to_shift + index];
        }
      else
        {
          for (unsigned index = 0; index != right_node->entry_count; ++index)
            {
              unsigned pos = right_node->entry_count - 1 - index;
              right_node->content.entries[pos + to_shift]
                = right_node->content.entries[pos];
            }
          for (unsigned index = 0; index != to_shift; ++index)
            right_node->content.entries[index]
              = left_node->content
                  .entries[left_node->entry_count - to_shift + index];
        }
      left_node->entry_count -= to_shift;
      right_node->entry_count += to_shift;
    }
  else
    {
      // Shift from right to left.
      unsigned to_shift
        = (right_node->entry_count - left_node->entry_count) / 2;
      if (btree_node_is_inner (left_node))
        {
          for (unsigned index = 0; index != to_shift; ++index)
            left_node->content.children[left_node->entry_count + index]
              = right_node->content.children[index];
          for (unsigned index = 0; index != right_node->entry_count - to_shift;
               ++index)
            right_node->content.children[index]
              = right_node->content.children[index + to_shift];
        }
      else
        {
          for (unsigned index = 0; index != to_shift; ++index)
            left_node->content.entries[left_node->entry_count + index]
              = right_node->content.entries[index];
          for (unsigned index = 0; index != right_node->entry_count - to_shift;
               ++index)
            right_node->content.entries[index]
              = right_node->content.entries[index + to_shift];
        }
      left_node->entry_count += to_shift;
      right_node->entry_count -= to_shift;
    }
  uintptr_type left_fence;
  if (btree_node_is_leaf (left_node))
    {
      left_fence = right_node->content.entries[0].base - 1;
    }
  else
    {
      left_fence = btree_node_get_fence_key (left_node);
    }
  parent->content.children[left_slot].separator = left_fence;
  btree_node_unlock_exclusive (parent);
  if (target <= left_fence)
    {
      btree_node_unlock_exclusive (right_node);
      return left_node;
    }
  else
    {
      btree_node_unlock_exclusive (left_node);
      return right_node;
    }
}
// Insert an entry.
static bool
btree_insert (struct btree *t, uintptr_type base, uintptr_type size,
              struct object *ob)
{
  // Sanity check.
  if (!size)
    return false;

  // Access the root.
  struct btree_node *iter, *parent = NULL;
  {
    version_lock_lock_exclusive (&(t->root_lock));
    iter = t->root;
    if (iter)
      {
        btree_node_lock_exclusive (iter);
      }
    else
      {
        t->root = iter = btree_allocate_node (t, false);
      }
    version_lock_unlock_exclusive (&(t->root_lock));
  }

  // Walk down the btree with classic lock coupling and eager splits.
  // Strictly speaking this is not performance optimal, we could use
  // optimistic lock coupling until we hit a node that has to be modified.
  // But that is more difficult to implement and frame registration is
  // rare anyway, we use simple locking for now.
  uintptr_type fence = max_separator;
  while (btree_node_is_inner (iter))
    {
      // Use eager splits to avoid lock coupling up.
      if (iter->entry_count == max_fanout_inner)
        btree_split_inner (t, &iter, &parent, base);

      unsigned slot = btree_node_find_inner_slot (iter, base);
      if (parent)
        btree_node_unlock_exclusive (parent);
      parent = iter;
      fence = iter->content.children[slot].separator;
      iter = iter->content.children[slot].child;
      btree_node_lock_exclusive (iter);
    }

  // Make sure we have space.
  if (iter->entry_count == max_fanout_leaf)
    btree_split_leaf (t, &iter, &parent, fence, base);
  if (parent)
    btree_node_unlock_exclusive (parent);

  // Insert in the leaf.
  unsigned slot = btree_node_find_leaf_slot (iter, base);
  if ((slot < iter->entry_count) && (iter->content.entries[slot].base == base))
    {
      // Duplicate entry, this should never happen.
      btree_node_unlock_exclusive (iter);
      return false;
    }
  for (unsigned index = iter->entry_count; index > slot; --index)
    iter->content.entries[index] = iter->content.entries[index - 1];
  struct leaf_entry *e = &(iter->content.entries[slot]);
  e->base = base;
  e->size = size;
  e->ob = ob;
  iter->entry_count++;
  btree_node_unlock_exclusive (iter);
  return true;
}
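// Illustrative usage sketch (hypothetical caller, not part of this file):
// a frame registered via btree_insert becomes findable by btree_lookup
// below, and btree_remove hands back the object stored with it.
//
//   static struct btree registered_frames;   // zero-initialized empty tree
//
//   void example (struct object *ob, uintptr_type base, uintptr_type size,
//                 uintptr_type pc)
//   {
//     btree_insert (&registered_frames, base, size, ob);
//     struct object *hit = btree_lookup (&registered_frames, pc);
//     // hit == ob whenever base <= pc < base + size
//     struct object *removed = btree_remove (&registered_frames, base);
//     // removed == ob; the released node goes onto the free list
//   }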
// Remove an entry.
static struct object *
btree_remove (struct btree *t, uintptr_type base)
{
  // Access the root.
  version_lock_lock_exclusive (&(t->root_lock));
  struct btree_node *iter = t->root;
  if (iter)
    btree_node_lock_exclusive (iter);
  version_lock_unlock_exclusive (&(t->root_lock));
  if (!iter)
    return NULL;

  // Same strategy as with insert, walk down with lock coupling and
  // merge eagerly.
  while (btree_node_is_inner (iter))
    {
      unsigned slot = btree_node_find_inner_slot (iter, base);
      struct btree_node *next = iter->content.children[slot].child;
      btree_node_lock_exclusive (next);
      if (btree_node_needs_merge (next))
        {
          // Use eager merges to avoid lock coupling up.
          iter = btree_merge_node (t, slot, iter, base);
        }
      else
        {
          btree_node_unlock_exclusive (iter);
          iter = next;
        }
    }

  // Remove existing entry.
  unsigned slot = btree_node_find_leaf_slot (iter, base);
  if ((slot >= iter->entry_count) || (iter->content.entries[slot].base != base))
    {
      // Not found, this should never happen.
      btree_node_unlock_exclusive (iter);
      return NULL;
    }
  struct object *ob = iter->content.entries[slot].ob;
  for (unsigned index = slot; index + 1 < iter->entry_count; ++index)
    iter->content.entries[index] = iter->content.entries[index + 1];
  iter->entry_count--;
  btree_node_unlock_exclusive (iter);
  return ob;
}
// Find the corresponding entry for the given address.
static struct object *
btree_lookup (const struct btree *t, uintptr_type target_addr)
{
  // Within this function many loads are relaxed atomic loads.
  // Use a macro to keep the code reasonable.
#define RLOAD(x) __atomic_load_n (&(x), __ATOMIC_RELAXED)

  // For targets where unwind info is usually not registered through these
  // APIs anymore, avoid any sequentially consistent atomics.
  // Use relaxed MO here, it is up to the app to ensure that the library
  // loading/initialization happens-before using that library in other
  // threads (in particular unwinding with that library's functions
  // appearing in the backtraces). Calling that library's functions
  // without waiting for the library to initialize would be racy.
  if (__builtin_expect (!RLOAD (t->root), 1))
    return NULL;

  // The unwinding tables are mostly static, they only change when
  // frames are added or removed. This makes it extremely unlikely that they
  // change during a given unwinding sequence. Thus, we optimize for the
  // contention free case and use optimistic lock coupling. This does not
  // require any writes to shared state, instead we validate every read. It is
  // important that we do not trust any value that we have read until we call
  // validate again. Data can change at arbitrary points in time, thus we always
  // copy something into a local variable and validate again before acting on
  // the read. In the unlikely event that we encounter a concurrent change we
  // simply restart and try again.
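  // Concretely: whenever a writer locks and later unlocks a node we are
  // reading, version_lock_unlock_exclusive advances that node's version word,
  // so the btree_node_validate calls below return false and we take the
  // restart path from the root.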
restart:
  struct btree_node *iter;
  uintptr_type lock;
  {
    // Accessing the root node requires defending against concurrent pointer
    // changes. Thus we couple root_lock -> lock on root node -> validate
    // root_lock.
    if (!version_lock_lock_optimistic (&(t->root_lock), &lock))
      goto restart;
    iter = RLOAD (t->root);
    if (!version_lock_validate (&(t->root_lock), lock))
      goto restart;
    if (!iter)
      return NULL;
    uintptr_type child_lock;
    if ((!btree_node_lock_optimistic (iter, &child_lock))
        || (!version_lock_validate (&(t->root_lock), lock)))
      goto restart;
    lock = child_lock;
  }
  // Now we can walk down towards the right leaf node.
  while (true)
    {
      enum node_type type = RLOAD (iter->type);
      unsigned entry_count = RLOAD (iter->entry_count);
      if (!btree_node_validate (iter, lock))
        goto restart;
      if (!entry_count)
        return NULL;

      if (type == btree_node_inner)
        {
          // We cannot call find_inner_slot here because we need (relaxed)
          // atomic reads here.
          unsigned slot = 0;
          while (
            ((slot + 1) < entry_count)
            && (RLOAD (iter->content.children[slot].separator) < target_addr))
            ++slot;
          struct btree_node *child = RLOAD (iter->content.children[slot].child);
          if (!btree_node_validate (iter, lock))
            goto restart;

          // The node content can change at any point in time, thus we must
          // interleave parent and child checks.
          uintptr_type child_lock;
          if (!btree_node_lock_optimistic (child, &child_lock))
            goto restart;
          if (!btree_node_validate (iter, lock))
            goto restart; // make sure we still point to the correct node after
                          // acquiring the optimistic lock.

          // Go down.
          iter = child;
          lock = child_lock;
        }
      else
        {
          // We cannot call find_leaf_slot here because we need (relaxed)
          // atomic reads here.
          unsigned slot = 0;
          while (((slot + 1) < entry_count)
                 && (RLOAD (iter->content.entries[slot].base)
                       + RLOAD (iter->content.entries[slot].size)
                     <= target_addr))
            ++slot;
          struct leaf_entry entry;
          entry.base = RLOAD (iter->content.entries[slot].base);
          entry.size = RLOAD (iter->content.entries[slot].size);
          entry.ob = RLOAD (iter->content.entries[slot].ob);
          if (!btree_node_validate (iter, lock))
            goto restart;

          // Check if we have a hit.
          if ((entry.base <= target_addr)
              && (target_addr < entry.base + entry.size))
            {
              return entry.ob;
            }
          return NULL;
        }
    }
}

#endif /* unwind-dw2-btree.h */