//===-- sanitizer_addrhashmap.h ---------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Concurrent uptr->T hashmap.
//
//===----------------------------------------------------------------------===//
13 #ifndef SANITIZER_ADDRHASHMAP_H
14 #define SANITIZER_ADDRHASHMAP_H
16 #include "sanitizer_common.h"
17 #include "sanitizer_mutex.h"
18 #include "sanitizer_atomic.h"
19 #include "sanitizer_allocator_internal.h"
21 namespace __sanitizer
{
// Concurrent uptr->T hashmap.
// T must be a POD type, kSize is preferably a prime but can be any number.
//
// Usage:
//   typedef AddrHashMap<uptr, 11> Map;
//
//   Map::Handle h(&m, addr);
//   use h.operator->() to access the data
//   if h.created() then the element was just created, and the current thread
//   has exclusive access to it
//   otherwise the current thread has only read access to the data
//
//   Map::Handle h(&m, addr, true);
//   this will remove the data from the map in Handle dtor
//   the current thread has exclusive access to the data
//   if !h.exists() then the element never existed
// NOTE(review): this extraction is incomplete -- the enclosing
// `class AddrHashMap` header and the nested `struct Cell` / `struct
// AddBucket` / `struct Bucket` declarations are missing between the
// fragments below; the leading numerals fused into each statement are
// extraction artifacts, not program text.
42 template<typename T
, uptr kSize
>
// Presumably Cell::addr: the slot key (0 == empty); it is atomic so that
// lookups can probe cells without holding the bucket lock -- TODO confirm
// against the full header.
46 atomic_uintptr_t addr
;
// Presumably AddBucket::cells: variable-length tail (declared [1],
// allocated larger), per the original "variable len" comment.
53 Cell cells
[1]; // variable len
// Number of cells embedded directly in each bucket; overflow goes to the
// bucket's add array.
56 static const uptr kBucketSize
= 3;
// Presumably Bucket::cells: the embedded cells probed first on lookup.
61 Cell cells
[kBucketSize
];
// Handle constructors: plain lookup-or-create (2 args), removal on
// destruction (3 args), and explicit create control (4 args) -- see the
// usage comment at the top of the file.
69 Handle(AddrHashMap
<T
, kSize
> *map
, uptr addr
);
70 Handle(AddrHashMap
<T
, kSize
> *map
, uptr addr
, bool remove
);
71 Handle(AddrHashMap
<T
, kSize
> *map
, uptr addr
, bool remove
, bool create
);
// Read-only access to the payload of the acquired cell.
76 const T
&operator*() const;
// The map needs access to Handle internals for acquire()/release().
81 friend AddrHashMap
<T
, kSize
>;
82 AddrHashMap
<T
, kSize
> *map_
;
// acquire() binds a Handle to its cell; release() publishes or removes the
// element when the Handle is destroyed; calcHash() maps an address to a
// bucket index.
96 void acquire(Handle
*h
);
97 void release(Handle
*h
);
98 uptr
calcHash(uptr addr
);
// Handle(map, addr): lookup-or-create accessor (see the usage comment at
// the top of the file). NOTE(review): the constructor body (original lines
// ~103-109) is missing from this extraction -- presumably it initializes
// the handle fields and calls map->acquire(this); confirm against the
// full header.
101 template<typename T
, uptr kSize
>
102 AddrHashMap
<T
, kSize
>::Handle::Handle(AddrHashMap
<T
, kSize
> *map
, uptr addr
) {
// Handle(map, addr, remove): like the 2-arg form, but removes the element
// in the Handle dtor when `remove` is true (per the usage comment above).
// NOTE(review): the signature is cut mid-parameter-list and the body
// (original lines ~112-119) is missing from this extraction.
110 template<typename T
, uptr kSize
>
111 AddrHashMap
<T
, kSize
>::Handle::Handle(AddrHashMap
<T
, kSize
> *map
, uptr addr
,
// Handle(map, addr, remove, create): full-control form -- `create` decides
// whether a missing element is created. NOTE(review): the body (original
// lines ~123-129) is missing from this extraction.
120 template<typename T
, uptr kSize
>
121 AddrHashMap
<T
, kSize
>::Handle::Handle(AddrHashMap
<T
, kSize
> *map
, uptr addr
,
122 bool remove
, bool create
) {
// ~Handle: releases the acquired cell back to the map (publish or remove,
// per the constructor flags) -- presumably via map_->release(this); the
// destructor body (original lines ~132-134) is missing from this
// extraction.
130 template<typename T
, uptr kSize
>
131 AddrHashMap
<T
, kSize
>::Handle::~Handle() {
// operator->: mutable access to the payload of the acquired cell. The body
// (original lines ~137-139) is missing from this extraction.
135 template <typename T
, uptr kSize
>
136 T
*AddrHashMap
<T
, kSize
>::Handle::operator->() {
// operator* (const): read-only access to the payload of the acquired cell.
// The body (original lines ~142-144) is missing from this extraction.
140 template <typename T
, uptr kSize
>
141 const T
&AddrHashMap
<T
, kSize
>::Handle::operator*() const {
// operator* (non-const): mutable reference to the payload of the acquired
// cell. The body (original lines ~147-149) is missing from this extraction.
145 template <typename T
, uptr kSize
>
146 T
&AddrHashMap
<T
, kSize
>::Handle::operator*() {
// created(): true when the element was just created by this handle, giving
// the current thread exclusive access (per the usage comment above). The
// body (original lines ~152-154) is missing from this extraction.
150 template<typename T
, uptr kSize
>
151 bool AddrHashMap
<T
, kSize
>::Handle::created() const {
155 template<typename T
, uptr kSize
>
156 bool AddrHashMap
<T
, kSize
>::Handle::exists() const {
157 return cell_
!= nullptr;
160 template<typename T
, uptr kSize
>
161 AddrHashMap
<T
, kSize
>::AddrHashMap() {
162 table_
= (Bucket
*)MmapOrDie(kSize
* sizeof(table_
[0]), "AddrHashMap");
// acquire: bind Handle h to the cell holding h->addr_, creating the
// element if requested. NOTE(review): many lines are missing from this
// extraction (visible as jumps in the fused original line numbers), so the
// control flow below is fragmentary; the leading numerals are extraction
// artifacts, not program text.
165 template<typename T
, uptr kSize
>
166 void AddrHashMap
<T
, kSize
>::acquire(Handle
*h
) {
167 uptr addr
= h
->addr_
;
168 uptr hash
= calcHash(addr
);
169 Bucket
*b
= &table_
[hash
];
// NOTE(review): handle-field initialization and the remove_ shortcut
// (original lines ~170-181) are missing here.
176 // If we want to remove the element, we need exclusive access to the bucket,
177 // so skip the lock-free phase.
182 // First try to find an existing element w/o read mutex.
184 // Check the embed cells.
185 for (uptr i
= 0; i
< kBucketSize
; i
++) {
186 Cell
*c
= &b
->cells
[i
];
// Acquire ordering pairs with the release store in release(): a reader
// that observes the address also observes the initialized payload.
187 uptr addr1
= atomic_load(&c
->addr
, memory_order_acquire
);
// NOTE(review): the hit-handling code (original lines ~188-193) is
// missing.
194 // Check the add cells with read lock.
195 if (atomic_load(&b
->add
, memory_order_relaxed
)) {
197 AddBucket
*add
= (AddBucket
*)atomic_load(&b
->add
, memory_order_relaxed
);
198 for (uptr i
= 0; i
< add
->size
; i
++) {
199 Cell
*c
= &add
->cells
[i
];
200 uptr addr1
= atomic_load(&c
->addr
, memory_order_relaxed
);
// NOTE(review): hit handling and the transition to the write lock
// (original lines ~201-213) are missing.
211 // Re-check existence under write lock.
214 for (uptr i
= 0; i
< kBucketSize
; i
++) {
215 Cell
*c
= &b
->cells
[i
];
216 uptr addr1
= atomic_load(&c
->addr
, memory_order_relaxed
);
// NOTE(review): original lines ~217-227 are missing.
228 AddBucket
*add
= (AddBucket
*)atomic_load(&b
->add
, memory_order_relaxed
);
230 for (uptr i
= 0; i
< add
->size
; i
++) {
231 Cell
*c
= &add
->cells
[i
];
232 uptr addr1
= atomic_load(&c
->addr
, memory_order_relaxed
);
// NOTE(review): original lines ~233-244 are missing.
245 // The element does not exist, no need to create it if we want to remove.
246 if (h
->remove_
|| !h
->create_
) {
251 // Now try to create it under the mutex.
253 // See if we have a free embed cell.
254 for (uptr i
= 0; i
< kBucketSize
; i
++) {
255 Cell
*c
= &b
->cells
[i
];
256 uptr addr1
= atomic_load(&c
->addr
, memory_order_relaxed
);
// NOTE(review): the free-cell claiming code (original lines ~257-262) is
// missing.
263 // Store in the add cells.
265 // Allocate a new add array.
266 const uptr kInitSize
= 64;
267 add
= (AddBucket
*)InternalAlloc(kInitSize
);
268 internal_memset(add
, 0, kInitSize
);
// Capacity = cells fitting in kInitSize bytes; the +1 compensates for the
// one cell already counted inside sizeof(AddBucket) via the cells[1]
// variable-length tail.
269 add
->cap
= (kInitSize
- sizeof(*add
)) / sizeof(add
->cells
[0]) + 1;
271 atomic_store(&b
->add
, (uptr
)add
, memory_order_relaxed
);
273 if (add
->size
== add
->cap
) {
274 // Grow existing add array.
275 uptr oldsize
= sizeof(*add
) + (add
->cap
- 1) * sizeof(add
->cells
[0]);
276 uptr newsize
= oldsize
* 2;
277 AddBucket
*add1
= (AddBucket
*)InternalAlloc(newsize
);
278 internal_memset(add1
, 0, newsize
);
279 add1
->cap
= (newsize
- sizeof(*add
)) / sizeof(add
->cells
[0]) + 1;
280 add1
->size
= add
->size
;
281 internal_memcpy(add1
->cells
, add
->cells
, add
->size
* sizeof(add
->cells
[0]));
// NOTE(review): the old array is not freed in the visible lines (original
// line ~282 is missing) -- check for an InternalFree in the full header.
283 atomic_store(&b
->add
, (uptr
)add1
, memory_order_relaxed
);
// Claim a fresh add cell; it must still be empty (addr == 0).
287 uptr i
= add
->size
++;
288 Cell
*c
= &add
->cells
[i
];
289 CHECK_EQ(atomic_load(&c
->addr
, memory_order_relaxed
), 0);
// release: called when a Handle is destroyed; publishes a newly created
// element, or removes/compacts on remove_. NOTE(review): several lines are
// missing from this extraction (see the jumps in the fused original line
// numbers); the leading numerals are extraction artifacts.
294 template<typename T
, uptr kSize
>
295 void AddrHashMap
<T
, kSize
>::release(Handle
*h
) {
// NOTE(review): original lines ~296-297 (early-out / cell lookup) are
// missing; `c` below is used without a visible declaration.
298 Bucket
*b
= h
->bucket_
;
300 uptr addr1
= atomic_load(&c
->addr
, memory_order_relaxed
);
// NOTE(review): the branch header (original lines ~301-303, presumably
// testing h->created_) is missing.
302 // Denote completion of insertion.
304 // After the following store, the element becomes available
305 // for lock-free reads.
// Release store pairs with the acquire load in acquire(): readers that see
// the address also see the fully initialized payload.
306 atomic_store(&c
->addr
, h
->addr_
, memory_order_release
);
308 } else if (h
->remove_
) {
309 // Denote that the cell is empty now.
310 CHECK_EQ(addr1
, h
->addr_
);
311 atomic_store(&c
->addr
, 0, memory_order_release
);
312 // See if we need to compact the bucket.
313 AddBucket
*add
= (AddBucket
*)atomic_load(&b
->add
, memory_order_relaxed
);
// addidx_ == -1U marks "stored in the embedded cells" (vs. an index into
// the add array), per the two branch comments below.
314 if (h
->addidx_
== -1U) {
315 // Removed from embed array, move an add element into the freed cell.
316 if (add
&& add
->size
!= 0) {
317 uptr last
= --add
->size
;
318 Cell
*c1
= &add
->cells
[last
];
// NOTE(review): original line ~319 is missing here.
320 uptr addr1
= atomic_load(&c1
->addr
, memory_order_relaxed
);
321 atomic_store(&c
->addr
, addr1
, memory_order_release
);
322 atomic_store(&c1
->addr
, 0, memory_order_release
);
// NOTE(review): original lines ~323-324 (closing braces / else branch
// header) are missing.
325 // Removed from add array, compact it.
326 uptr last
= --add
->size
;
327 Cell
*c1
= &add
->cells
[last
];
// NOTE(review): original lines ~328-329 (presumably moving the last
// element into the freed slot) are missing.
330 atomic_store(&c1
->addr
, 0, memory_order_relaxed
);
333 if (add
&& add
->size
== 0) {
334 // FIXME(dvyukov): free add?
// NOTE(review): original lines ~335-337 are missing.
338 CHECK_EQ(addr1
, h
->addr_
);
339 if (h
->addidx_
!= -1U)
// calcHash: maps an address to a bucket index in [0, kSize). The function
// body (original lines ~346-350) is missing from this extraction -- only
// the signature survives.
344 template<typename T
, uptr kSize
>
345 uptr AddrHashMap
<T
, kSize
>::calcHash(uptr addr
) {
351 } // namespace __sanitizer
353 #endif // SANITIZER_ADDRHASHMAP_H