//===-- sanitizer_addrhashmap.h ---------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Concurrent uptr->T hashmap.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_ADDRHASHMAP_H
#define SANITIZER_ADDRHASHMAP_H

#include "sanitizer_common.h"
#include "sanitizer_mutex.h"
#include "sanitizer_atomic.h"
#include "sanitizer_allocator_internal.h"
namespace __sanitizer {
// Concurrent uptr->T hashmap.
// T must be a POD type; kSize is preferably a prime but can be any number.
// Usage example:
//
// typedef AddrHashMap<uptr, 11> Map;
// Map m;
// {
//   Map::Handle h(&m, addr);
//   use h.operator->() to access the data
//   if h.created() then the element was just created, and the current thread
//     has exclusive access to it
//   otherwise the current thread has only read access to the data
// }
// {
//   Map::Handle h(&m, addr, true);
//   this will remove the data from the map in Handle dtor
//   the current thread has exclusive access to the data
//   if !h.exists() then the element never existed
// }
// {
//   Map::Handle h(&m, addr, false, true);
//   this will create a new element or return a handle to an existing element
//   if !h.created() this thread does *not* have exclusive access to the data
// }
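//
// A minimal concrete sketch of the first pattern above (Metadata,
// MetadataMap, metadata_map and RecordAlloc are illustrative names, not part
// of this header):
//
//   struct Metadata { uptr size; };
//   typedef AddrHashMap<Metadata, 31051> MetadataMap;
//   static MetadataMap *metadata_map;  // assumed to be allocated at startup
//   void RecordAlloc(uptr addr, uptr size) {
//     MetadataMap::Handle h(metadata_map, addr);
//     if (h.created())
//       h->size = size;  // exclusive access until h goes out of scope
//   }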
template<typename T, uptr kSize>
class AddrHashMap {
 private:
  struct Cell {
    atomic_uintptr_t addr;
    T                val;
  };

  struct AddBucket {
    uptr cap;
    uptr size;
    Cell cells[1];  // variable len
  };

  static const uptr kBucketSize = 3;

  struct Bucket {
    Mutex mtx;
    atomic_uintptr_t add;
    Cell cells[kBucketSize];
  };
 public:
  AddrHashMap();

  class Handle {
   public:
    Handle(AddrHashMap<T, kSize> *map, uptr addr);
    Handle(AddrHashMap<T, kSize> *map, uptr addr, bool remove);
    Handle(AddrHashMap<T, kSize> *map, uptr addr, bool remove, bool create);

    ~Handle();
    T *operator->();
    T &operator*();
    const T &operator*() const;
    bool created() const;
    bool exists() const;

   private:
    friend AddrHashMap<T, kSize>;
    AddrHashMap<T, kSize> *map_;
    Bucket *bucket_;
    Cell *cell_;
    uptr addr_;
    uptr addidx_;
    bool created_;
    bool remove_;
    bool create_;
  };
  typedef void (*ForEachCallback)(const uptr key, const T &val, void *arg);
  // ForEach acquires a lock on each bucket while iterating over
  // elements. Note that this only ensures that the structure of the hashmap
  // is unchanged; there may still be a data race on the element itself.
  void ForEach(ForEachCallback cb, void *arg);
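  //
  // For example, counting all elements (an illustrative sketch; CountCb and
  // Payload are not part of this header):
  //
  //   static void CountCb(const uptr key, const Payload &val, void *arg) {
  //     ++*static_cast<uptr *>(arg);
  //   }
  //   uptr count = 0;
  //   map.ForEach(CountCb, &count);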
 private:
  friend class Handle;

  Bucket *table_;

  void acquire(Handle *h);
  void release(Handle *h);
  uptr calcHash(uptr addr);
};
template <typename T, uptr kSize>
void AddrHashMap<T, kSize>::ForEach(ForEachCallback cb, void *arg) {
  for (uptr n = 0; n < kSize; n++) {
    Bucket *bucket = &table_[n];

    ReadLock lock(&bucket->mtx);

    for (uptr i = 0; i < kBucketSize; i++) {
      Cell *c = &bucket->cells[i];
      uptr addr1 = atomic_load(&c->addr, memory_order_acquire);
      if (addr1 != 0)  // Skip empty cells.
        cb(addr1, c->val, arg);
    }

    // Iterate over any additional cells.
    if (AddBucket *add =
            (AddBucket *)atomic_load(&bucket->add, memory_order_acquire)) {
      for (uptr i = 0; i < add->size; i++) {
        Cell *c = &add->cells[i];
        uptr addr1 = atomic_load(&c->addr, memory_order_acquire);
        if (addr1 != 0)  // Skip cells that are empty or mid-insertion.
          cb(addr1, c->val, arg);
      }
    }
  }
}
template<typename T, uptr kSize>
AddrHashMap<T, kSize>::Handle::Handle(AddrHashMap<T, kSize> *map, uptr addr) {
  map_ = map;
  addr_ = addr;
  remove_ = false;
  create_ = true;
  map_->acquire(this);
}
template<typename T, uptr kSize>
AddrHashMap<T, kSize>::Handle::Handle(AddrHashMap<T, kSize> *map, uptr addr,
    bool remove) {
  map_ = map;
  addr_ = addr;
  remove_ = remove;
  create_ = true;
  map_->acquire(this);
}
template<typename T, uptr kSize>
AddrHashMap<T, kSize>::Handle::Handle(AddrHashMap<T, kSize> *map, uptr addr,
    bool remove, bool create) {
  map_ = map;
  addr_ = addr;
  remove_ = remove;
  create_ = create;
  map_->acquire(this);
}
template<typename T, uptr kSize>
AddrHashMap<T, kSize>::Handle::~Handle() {
  map_->release(this);
}
template <typename T, uptr kSize>
T *AddrHashMap<T, kSize>::Handle::operator->() {
  return &cell_->val;
}
template <typename T, uptr kSize>
const T &AddrHashMap<T, kSize>::Handle::operator*() const {
  return cell_->val;
}
template <typename T, uptr kSize>
T &AddrHashMap<T, kSize>::Handle::operator*() {
  return cell_->val;
}
template<typename T, uptr kSize>
bool AddrHashMap<T, kSize>::Handle::created() const {
  return created_;
}
template<typename T, uptr kSize>
bool AddrHashMap<T, kSize>::Handle::exists() const {
  return cell_ != nullptr;
}
template<typename T, uptr kSize>
AddrHashMap<T, kSize>::AddrHashMap() {
  // MmapOrDie returns zero-initialized memory, so all buckets start empty.
  table_ = (Bucket*)MmapOrDie(kSize * sizeof(table_[0]), "AddrHashMap");
}
template <typename T, uptr kSize>
void AddrHashMap<T, kSize>::acquire(Handle *h)
    SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  uptr addr = h->addr_;
  uptr hash = calcHash(addr);
  Bucket *b = &table_[hash];

  h->created_ = false;
  h->addidx_ = -1U;
  h->bucket_ = b;
  h->cell_ = nullptr;

  // If we want to remove the element, we need exclusive access to the bucket,
  // so skip the lock-free phase.
  if (h->remove_)
    goto locked;

 retry:
  // First try to find an existing element w/o read mutex.
  CHECK(!h->remove_);
  // Check the embed cells.
  for (uptr i = 0; i < kBucketSize; i++) {
    Cell *c = &b->cells[i];
    uptr addr1 = atomic_load(&c->addr, memory_order_acquire);
    if (addr1 == addr) {
      h->cell_ = c;
      return;
    }
  }

  // Check the add cells with read lock.
  if (atomic_load(&b->add, memory_order_relaxed)) {
    b->mtx.ReadLock();
    AddBucket *add = (AddBucket *)atomic_load(&b->add, memory_order_relaxed);
    for (uptr i = 0; i < add->size; i++) {
      Cell *c = &add->cells[i];
      uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
      if (addr1 == addr) {
        h->addidx_ = i;
        h->cell_ = c;
        return;
      }
    }
    b->mtx.ReadUnlock();
  }

 locked:
  // Re-check existence under write lock.
  b->mtx.Lock();
  for (uptr i = 0; i < kBucketSize; i++) {
    Cell *c = &b->cells[i];
    uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
    if (addr1 == addr) {
      if (h->remove_) {
        h->cell_ = c;
        return;
      }
      b->mtx.Unlock();
      goto retry;
    }
  }
  AddBucket *add = (AddBucket *)atomic_load(&b->add, memory_order_relaxed);
  if (add) {
    for (uptr i = 0; i < add->size; i++) {
      Cell *c = &add->cells[i];
      uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
      if (addr1 == addr) {
        if (h->remove_) {
          h->addidx_ = i;
          h->cell_ = c;
          return;
        }
        b->mtx.Unlock();
        goto retry;
      }
    }
  }

  // The element does not exist, no need to create it if we want to remove.
  if (h->remove_ || !h->create_) {
    b->mtx.Unlock();
    return;
  }

  // Now try to create it under the mutex.
  h->created_ = true;
  // See if we have a free embed cell.
  for (uptr i = 0; i < kBucketSize; i++) {
    Cell *c = &b->cells[i];
    uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
    if (addr1 == 0) {
      h->cell_ = c;
      return;
    }
  }

  // Store in the add cells.
  if (!add) {
    // Allocate a new add array.
    const uptr kInitSize = 64;
    add = (AddBucket *)InternalAlloc(kInitSize);
    internal_memset(add, 0, kInitSize);
    add->cap = (kInitSize - sizeof(*add)) / sizeof(add->cells[0]) + 1;
    add->size = 0;
    atomic_store(&b->add, (uptr)add, memory_order_relaxed);
  }
  if (add->size == add->cap) {
    // Grow existing add array.
    uptr oldsize = sizeof(*add) + (add->cap - 1) * sizeof(add->cells[0]);
    uptr newsize = oldsize * 2;
    AddBucket *add1 = (AddBucket *)InternalAlloc(newsize);
    internal_memset(add1, 0, newsize);
    add1->cap = (newsize - sizeof(*add)) / sizeof(add->cells[0]) + 1;
    add1->size = add->size;
    internal_memcpy(add1->cells, add->cells, add->size * sizeof(add->cells[0]));
    InternalFree(add);
    atomic_store(&b->add, (uptr)add1, memory_order_relaxed);
    add = add1;
  }
  uptr i = add->size++;
  Cell *c = &add->cells[i];
  CHECK_EQ(atomic_load(&c->addr, memory_order_relaxed), 0);
  h->addidx_ = i;
  h->cell_ = c;
}
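
// On return from acquire() the handle leaves the bucket in one of three
// states, which release() below must undo: write-locked (the element was
// created, or found with remove_ set), read-locked (the element was found in
// the add array), or unlocked (the element was found lock-free in the embed
// cells, or does not exist and create_ is false).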
template <typename T, uptr kSize>
void AddrHashMap<T, kSize>::release(Handle *h)
    SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  if (!h->cell_)
    return;
  Bucket *b = h->bucket_;
  Cell *c = h->cell_;
  uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
  if (h->created_) {
    // Denote completion of insertion.
    CHECK_EQ(addr1, 0);
    // After the following store, the element becomes available
    // for lock-free reads.
    atomic_store(&c->addr, h->addr_, memory_order_release);
    b->mtx.Unlock();
  } else if (h->remove_) {
    // Denote that the cell is empty now.
    CHECK_EQ(addr1, h->addr_);
    atomic_store(&c->addr, 0, memory_order_release);
    // See if we need to compact the bucket.
    AddBucket *add = (AddBucket *)atomic_load(&b->add, memory_order_relaxed);
    if (h->addidx_ == -1U) {
      // Removed from embed array, move an add element into the freed cell.
      if (add && add->size != 0) {
        uptr last = --add->size;
        Cell *c1 = &add->cells[last];
        c->val = c1->val;
        uptr addr1 = atomic_load(&c1->addr, memory_order_relaxed);
        atomic_store(&c->addr, addr1, memory_order_release);
        atomic_store(&c1->addr, 0, memory_order_release);
      }
    } else {
      // Removed from add array, compact it.
      uptr last = --add->size;
      Cell *c1 = &add->cells[last];
      if (c != c1) {
        *c = *c1;
        atomic_store(&c1->addr, 0, memory_order_relaxed);
      }
    }
    if (add && add->size == 0) {
      // FIXME(dvyukov): free add?
    }
    b->mtx.Unlock();
  } else {
    CHECK_EQ(addr1, h->addr_);
    if (h->addidx_ != -1U)
      b->mtx.ReadUnlock();
  }
}
template<typename T, uptr kSize>
uptr AddrHashMap<T, kSize>::calcHash(uptr addr) {
  // Cheap bit mixing before the modulo, so that nearby addresses
  // spread across different buckets.
  addr += addr << 10;
  addr ^= addr >> 4;
  return addr % kSize;
}
}  // namespace __sanitizer

#endif  // SANITIZER_ADDRHASHMAP_H