//===-- sanitizer_addrhashmap.h ---------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Concurrent uptr->T hashmap.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ADDRHASHMAP_H
#define SANITIZER_ADDRHASHMAP_H

#include "sanitizer_common.h"
#include "sanitizer_mutex.h"
#include "sanitizer_atomic.h"
#include "sanitizer_allocator_internal.h"

namespace __sanitizer {

// Concurrent uptr->T hashmap.
// T must be a POD type, kSize is preferably a prime but can be any number.
// Usage example:
//
// typedef AddrHashMap<uptr, 11> Map;
// Map m;
// {
//   Map::Handle h(&m, addr);
//   use h.operator->() to access the data
//   if h.created() then the element was just created, and the current thread
//     has exclusive access to it
//   otherwise the current thread has only read access to the data
// }
// {
//   Map::Handle h(&m, addr, true);
//   this will remove the data from the map in the Handle dtor
//   the current thread has exclusive access to the data
//   if !h.exists() then the element never existed
// }
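//
// A fuller sketch of the intended pattern (the payload type and the function
// names below are illustrative only, not part of this header):
//
//   struct AllocInfo {
//     uptr size;
//   };
//   typedef AddrHashMap<AllocInfo, 31051> AllocMap;
//   static AllocMap alloc_map;
//
//   void OnAlloc(uptr addr, uptr size) {
//     AllocMap::Handle h(&alloc_map, addr);
//     // If h.created(), this thread has exclusive access until h dies and
//     // must initialize the element before the handle is destroyed.
//     h->size = size;
//   }
//
//   void OnFree(uptr addr) {
//     AllocMap::Handle h(&alloc_map, addr, /*remove*/true);
//     if (h.exists()) {
//       // Exclusive access; the element is unlinked in h's dtor.
//       uptr size = h->size;
//       (void)size;
//     }
//   }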

template<typename T, uptr kSize>
class AddrHashMap {
 private:
  struct Cell {
    atomic_uintptr_t addr;
    T                val;
  };

  struct AddBucket {
    uptr cap;
    uptr size;
    Cell cells[1];  // variable len
  };

  static const uptr kBucketSize = 3;

  struct Bucket {
    RWMutex          mtx;
    atomic_uintptr_t add;
    Cell             cells[kBucketSize];
  };

 public:
  AddrHashMap();

  class Handle {
   public:
    Handle(AddrHashMap<T, kSize> *map, uptr addr);
    Handle(AddrHashMap<T, kSize> *map, uptr addr, bool remove);
    Handle(AddrHashMap<T, kSize> *map, uptr addr, bool remove, bool create);

    ~Handle();
    T *operator->();
    T &operator*();
    const T &operator*() const;
    bool created() const;
    bool exists() const;

   private:
    friend AddrHashMap<T, kSize>;
    AddrHashMap<T, kSize> *map_;
    Bucket                *bucket_;
    Cell                  *cell_;
    uptr                   addr_;
    uptr                   addidx_;
    bool                   created_;
    bool                   remove_;
    bool                   create_;
  };

 private:
  friend class Handle;
  Bucket *table_;

  void acquire(Handle *h);
  void release(Handle *h);
  uptr calcHash(uptr addr);
};
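
// Implementation note (a summary of the declarations above, not new
// behavior): each bucket embeds kBucketSize cells that lookups probe without
// taking Bucket::mtx; a cell becomes visible to such lock-free readers only
// via the release-store of its address in release(), which pairs with the
// acquire-load in acquire(). Overflow elements live in a heap-allocated
// AddBucket that is scanned only under the mutex (read-locked for lookups,
// write-locked for insertion and removal).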

template<typename T, uptr kSize>
AddrHashMap<T, kSize>::Handle::Handle(AddrHashMap<T, kSize> *map, uptr addr) {
  map_ = map;
  addr_ = addr;
  remove_ = false;
  create_ = true;
  map_->acquire(this);
}

template<typename T, uptr kSize>
AddrHashMap<T, kSize>::Handle::Handle(AddrHashMap<T, kSize> *map, uptr addr,
    bool remove) {
  map_ = map;
  addr_ = addr;
  remove_ = remove;
  create_ = true;
  map_->acquire(this);
}

template<typename T, uptr kSize>
AddrHashMap<T, kSize>::Handle::Handle(AddrHashMap<T, kSize> *map, uptr addr,
    bool remove, bool create) {
  map_ = map;
  addr_ = addr;
  remove_ = remove;
  create_ = create;
  map_->acquire(this);
}

template<typename T, uptr kSize>
AddrHashMap<T, kSize>::Handle::~Handle() {
  map_->release(this);
}

template <typename T, uptr kSize>
T *AddrHashMap<T, kSize>::Handle::operator->() {
  return &cell_->val;
}

template <typename T, uptr kSize>
const T &AddrHashMap<T, kSize>::Handle::operator*() const {
  return cell_->val;
}

template <typename T, uptr kSize>
T &AddrHashMap<T, kSize>::Handle::operator*() {
  return cell_->val;
}

template<typename T, uptr kSize>
bool AddrHashMap<T, kSize>::Handle::created() const {
  return created_;
}

template<typename T, uptr kSize>
bool AddrHashMap<T, kSize>::Handle::exists() const {
  return cell_ != nullptr;
}

template<typename T, uptr kSize>
AddrHashMap<T, kSize>::AddrHashMap() {
  table_ = (Bucket*)MmapOrDie(kSize * sizeof(table_[0]), "AddrHashMap");
}

template<typename T, uptr kSize>
void AddrHashMap<T, kSize>::acquire(Handle *h) {
  uptr addr = h->addr_;
  uptr hash = calcHash(addr);
  Bucket *b = &table_[hash];

  h->created_ = false;
  h->addidx_ = -1U;
  h->bucket_ = b;
  h->cell_ = nullptr;

  // If we want to remove the element, we need exclusive access to the bucket,
  // so skip the lock-free phase.
  if (h->remove_)
    goto locked;

 retry:
  // First try to find an existing element w/o read mutex.
  CHECK(!h->remove_);
  // Check the embed cells.
  for (uptr i = 0; i < kBucketSize; i++) {
    Cell *c = &b->cells[i];
    uptr addr1 = atomic_load(&c->addr, memory_order_acquire);
    if (addr1 == addr) {
      h->cell_ = c;
      return;
    }
  }

  // Check the add cells with read lock.
  if (atomic_load(&b->add, memory_order_relaxed)) {
    b->mtx.ReadLock();
    AddBucket *add = (AddBucket*)atomic_load(&b->add, memory_order_relaxed);
    for (uptr i = 0; i < add->size; i++) {
      Cell *c = &add->cells[i];
      uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
      if (addr1 == addr) {
        h->addidx_ = i;
        h->cell_ = c;
        return;
      }
    }
    b->mtx.ReadUnlock();
  }

 locked:
  // Re-check existence under write lock.
  b->mtx.Lock();
  for (uptr i = 0; i < kBucketSize; i++) {
    Cell *c = &b->cells[i];
    uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
    if (addr1 == addr) {
      if (h->remove_) {
        h->cell_ = c;
        return;
      }
      b->mtx.Unlock();
      goto retry;
    }
  }

  // Check the add cells with write lock.
  AddBucket *add = (AddBucket*)atomic_load(&b->add, memory_order_relaxed);
  if (add) {
    for (uptr i = 0; i < add->size; i++) {
      Cell *c = &add->cells[i];
      uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
      if (addr1 == addr) {
        if (h->remove_) {
          h->addidx_ = i;
          h->cell_ = c;
          return;
        }
        b->mtx.Unlock();
        goto retry;
      }
    }
  }

  // The element does not exist; no need to create it if we want to remove.
  if (h->remove_ || !h->create_) {
    b->mtx.Unlock();
    return;
  }

  // Now try to create it under the mutex.
  h->created_ = true;
  // See if we have a free embed cell.
  for (uptr i = 0; i < kBucketSize; i++) {
    Cell *c = &b->cells[i];
    uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
    if (addr1 == 0) {
      h->cell_ = c;
      return;
    }
  }

  // Store in the add cells.
  if (!add) {
    // Allocate a new add array.
    const uptr kInitSize = 64;
    add = (AddBucket*)InternalAlloc(kInitSize);
    internal_memset(add, 0, kInitSize);
    add->cap = (kInitSize - sizeof(*add)) / sizeof(add->cells[0]) + 1;
    add->size = 0;
    atomic_store(&b->add, (uptr)add, memory_order_relaxed);
  }
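  // Note: AddBucket declares cells[1], so sizeof(*add) already includes one
  // cell; the "+ 1" in the cap computation credits that cell back.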
  if (add->size == add->cap) {
    // Grow existing add array.
    uptr oldsize = sizeof(*add) + (add->cap - 1) * sizeof(add->cells[0]);
    uptr newsize = oldsize * 2;
    AddBucket *add1 = (AddBucket*)InternalAlloc(newsize);
    internal_memset(add1, 0, newsize);
    add1->cap = (newsize - sizeof(*add)) / sizeof(add->cells[0]) + 1;
    add1->size = add->size;
    internal_memcpy(add1->cells, add->cells, add->size * sizeof(add->cells[0]));
    InternalFree(add);
    atomic_store(&b->add, (uptr)add1, memory_order_relaxed);
    add = add1;
  }

  // Store the new cell; it becomes visible to readers in release().
  uptr i = add->size++;
  Cell *c = &add->cells[i];
  CHECK_EQ(atomic_load(&c->addr, memory_order_relaxed), 0);
  h->addidx_ = i;
  h->cell_ = c;
}
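
// Descriptive note on the protocol above: acquire() leaves the bucket
// write-locked whenever the caller got exclusive access (element created or
// about to be removed), read-locked when an existing element was found in
// the add array, and unlocked when an embed cell was found lock-free or
// nothing was found; release() unwinds exactly the state acquire() left.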

template<typename T, uptr kSize>
void AddrHashMap<T, kSize>::release(Handle *h) {
  if (!h->cell_)
    return;
  Bucket *b = h->bucket_;
  Cell *c = h->cell_;
  uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
  if (h->created_) {
    // Denote completion of insertion.
    CHECK_EQ(addr1, 0);
    // After the following store, the element becomes available
    // for lock-free reads.
    atomic_store(&c->addr, h->addr_, memory_order_release);
    b->mtx.Unlock();
  } else if (h->remove_) {
    // Denote that the cell is empty now.
    CHECK_EQ(addr1, h->addr_);
    atomic_store(&c->addr, 0, memory_order_release);
    // See if we need to compact the bucket.
    AddBucket *add = (AddBucket*)atomic_load(&b->add, memory_order_relaxed);
    if (h->addidx_ == -1U) {
      // Removed from embed array, move an add element into the freed cell.
      if (add && add->size != 0) {
        uptr last = --add->size;
        Cell *c1 = &add->cells[last];
        c->val = c1->val;
        uptr addr1 = atomic_load(&c1->addr, memory_order_relaxed);
        atomic_store(&c->addr, addr1, memory_order_release);
        atomic_store(&c1->addr, 0, memory_order_release);
      }
    } else {
      // Removed from add array, compact it.
      uptr last = --add->size;
      Cell *c1 = &add->cells[last];
      if (c != c1)
        *c = *c1;  // Move the last add element into the freed slot.
      atomic_store(&c1->addr, 0, memory_order_relaxed);
    }
    if (add && add->size == 0) {
      // FIXME(dvyukov): free add?
    }
    b->mtx.Unlock();
  } else {
    CHECK_EQ(addr1, h->addr_);
    if (h->addidx_ != -1U)
      b->mtx.ReadUnlock();
  }
}

template<typename T, uptr kSize>
uptr AddrHashMap<T, kSize>::calcHash(uptr addr) {
  // Cheap shift-add/shift-xor mixing so that nearby addresses do not all
  // land in the same bucket; the exact constants are not critical.
  addr += addr << 10;
  addr ^= addr >> 4;
  return addr % kSize;
}

} // namespace __sanitizer

#endif // SANITIZER_ADDRHASHMAP_H