//===-- sanitizer_addrhashmap.h ---------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Concurrent uptr->T hashmap.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_ADDRHASHMAP_H
#define SANITIZER_ADDRHASHMAP_H

#include "sanitizer_common.h"
#include "sanitizer_mutex.h"
#include "sanitizer_atomic.h"
#include "sanitizer_allocator_internal.h"

namespace __sanitizer {

// Concurrent uptr->T hashmap.
// T must be a POD type; kSize is preferably a prime but can be any number.
//
// Usage example:
//
// typedef AddrHashMap<uptr, 11> Map;
// Map m;
// {
//   Map::Handle h(&m, addr);
//   Use h.operator->() to access the data.
//   If h.created(), the element was just created and the current thread
//   has exclusive access to it; otherwise the current thread has only
//   read access to the data.
// }
// {
//   Map::Handle h(&m, addr, true);
//   This removes the data from the map in the Handle dtor.
//   The current thread has exclusive access to the data;
//   if !h.exists(), the element never existed.
// }
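//
// A more concrete sketch of the same pattern (illustrative only: the value
// type AllocInfo, the table size 31 and the helper functions are hypothetical
// and not part of this header):
//
//   struct AllocInfo { uptr size; };
//   typedef AddrHashMap<AllocInfo, 31> AllocMap;
//   static AllocMap alloc_map;
//
//   void OnAlloc(uptr addr, uptr size) {
//     AllocMap::Handle h(&alloc_map, addr);
//     if (h.created())
//       h->size = size;  // freshly created cell, exclusive access
//   }
//
//   void OnFree(uptr addr) {
//     AllocMap::Handle h(&alloc_map, addr, /*remove=*/true);
//     if (h.exists())    // exclusive access; the cell is removed in the dtor
//       UseAllocSize(h->size);
//   }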
template<typename T, uptr kSize>
class AddrHashMap {
 private:
  struct Cell {
    atomic_uintptr_t addr;
    T                val;
  };

  struct AddBucket {
    uptr cap;
    uptr size;
    Cell cells[1];  // variable len
  };

  static const uptr kBucketSize = 3;
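
  // Each bucket holds kBucketSize cells inline ("embed" cells) plus an
  // optional, lazily allocated overflow array ("add" cells), all guarded by a
  // per-bucket reader-writer mutex.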
  struct Bucket {
    RWMutex          mtx;
    atomic_uintptr_t add;
    Cell             cells[kBucketSize];
  };

 public:
  AddrHashMap();

  class Handle {
   public:
    Handle(AddrHashMap<T, kSize> *map, uptr addr);
    Handle(AddrHashMap<T, kSize> *map, uptr addr, bool remove);
    Handle(AddrHashMap<T, kSize> *map, uptr addr, bool remove, bool create);
    ~Handle();

    T *operator->();
    bool created() const;
    bool exists() const;

   private:
    friend AddrHashMap<T, kSize>;
    AddrHashMap<T, kSize> *map_;
    Bucket                *bucket_;
    Cell                  *cell_;
    uptr                   addr_;
    uptr                   addidx_;
    bool                   created_;
    bool                   remove_;
    bool                   create_;
  };

 private:
  friend class Handle;
  Bucket *table_;

  void acquire(Handle *h);
  void release(Handle *h);
  uptr calcHash(uptr addr);
};

template<typename T, uptr kSize>
AddrHashMap<T, kSize>::Handle::Handle(AddrHashMap<T, kSize> *map, uptr addr) {
  map_ = map;
  addr_ = addr;
  remove_ = false;
  create_ = true;
  map_->acquire(this);
}

template<typename T, uptr kSize>
AddrHashMap<T, kSize>::Handle::Handle(AddrHashMap<T, kSize> *map, uptr addr,
    bool remove) {
  map_ = map;
  addr_ = addr;
  remove_ = remove;
  create_ = true;
  map_->acquire(this);
}

template<typename T, uptr kSize>
AddrHashMap<T, kSize>::Handle::Handle(AddrHashMap<T, kSize> *map, uptr addr,
    bool remove, bool create) {
  map_ = map;
  addr_ = addr;
  remove_ = remove;
  create_ = create;
  map_->acquire(this);
}

template<typename T, uptr kSize>
AddrHashMap<T, kSize>::Handle::~Handle() {
  map_->release(this);
}

template <typename T, uptr kSize>
T *AddrHashMap<T, kSize>::Handle::operator->() {
  return &cell_->val;
}

template<typename T, uptr kSize>
bool AddrHashMap<T, kSize>::Handle::created() const {
  return created_;
}

template<typename T, uptr kSize>
bool AddrHashMap<T, kSize>::Handle::exists() const {
  return cell_ != nullptr;
}

template<typename T, uptr kSize>
AddrHashMap<T, kSize>::AddrHashMap() {
  table_ = (Bucket*)MmapOrDie(kSize * sizeof(table_[0]), "AddrHashMap");
}

template<typename T, uptr kSize>
void AddrHashMap<T, kSize>::acquire(Handle *h) {
  uptr addr = h->addr_;
  uptr hash = calcHash(addr);
  Bucket *b = &table_[hash];

  h->created_ = false;
  h->addidx_ = -1U;
  h->bucket_ = b;
  h->cell_ = nullptr;

  // If we want to remove the element, we need exclusive access to the bucket,
  // so skip the lock-free phase.
  if (h->remove_)
    goto locked;

 retry:
  // First try to find an existing element w/o read mutex.
  CHECK(!h->remove_);
  // Check the embed cells.
  for (uptr i = 0; i < kBucketSize; i++) {
    Cell *c = &b->cells[i];
    uptr addr1 = atomic_load(&c->addr, memory_order_acquire);
    if (addr1 == addr) {
      h->cell_ = c;
      return;
    }
  }
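
  // Note: the acquire load above pairs with the release store in release(),
  // so a reader that observes the address also observes the value written by
  // the creating thread before it published the cell.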

  // Check the add cells with read lock.
  if (atomic_load(&b->add, memory_order_relaxed)) {
    b->mtx.ReadLock();
    AddBucket *add = (AddBucket*)atomic_load(&b->add, memory_order_relaxed);
    for (uptr i = 0; i < add->size; i++) {
      Cell *c = &add->cells[i];
      uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
      if (addr1 == addr) {
        h->addidx_ = i;
        h->cell_ = c;
        return;  // Keeps the read lock; released in release().
      }
    }
    b->mtx.ReadUnlock();
  }

 locked:
  // Re-check existence under write lock.
  b->mtx.Lock();
  for (uptr i = 0; i < kBucketSize; i++) {
    Cell *c = &b->cells[i];
    uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
    if (addr1 == addr) {
      if (h->remove_) {
        h->cell_ = c;
        return;  // Keeps the write lock; released in release().
      }
      b->mtx.Unlock();
      goto retry;
    }
  }

  AddBucket *add = (AddBucket*)atomic_load(&b->add, memory_order_relaxed);
  if (add) {
    for (uptr i = 0; i < add->size; i++) {
      Cell *c = &add->cells[i];
      uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
      if (addr1 == addr) {
        if (h->remove_) {
          h->addidx_ = i;
          h->cell_ = c;
          return;
        }
        b->mtx.Unlock();
        goto retry;
      }
    }
  }

  // The element does not exist, no need to create it if we want to remove.
  if (h->remove_ || !h->create_) {
    b->mtx.Unlock();
    return;
  }

  // Now try to create it under the mutex.
  h->created_ = true;
  // See if we have a free embed cell.
  for (uptr i = 0; i < kBucketSize; i++) {
    Cell *c = &b->cells[i];
    uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
    if (addr1 == 0) {
      h->cell_ = c;
      return;
    }
  }

  // Store in the add cells.
  if (add == nullptr) {
    // Allocate a new add array.
    const uptr kInitSize = 64;
    add = (AddBucket*)InternalAlloc(kInitSize);
    internal_memset(add, 0, kInitSize);
    // sizeof(AddBucket) already includes one Cell, hence the +1.
    add->cap = (kInitSize - sizeof(*add)) / sizeof(add->cells[0]) + 1;
    add->size = 0;
    atomic_store(&b->add, (uptr)add, memory_order_relaxed);
  }
  if (add->size == add->cap) {
    // Grow existing add array.
    uptr oldsize = sizeof(*add) + (add->cap - 1) * sizeof(add->cells[0]);
    uptr newsize = oldsize * 2;
    AddBucket *add1 = (AddBucket*)InternalAlloc(newsize);
    internal_memset(add1, 0, newsize);
    add1->cap = (newsize - sizeof(*add)) / sizeof(add->cells[0]) + 1;
    add1->size = add->size;
    internal_memcpy(add1->cells, add->cells, add->size * sizeof(add->cells[0]));
    InternalFree(add);
    atomic_store(&b->add, (uptr)add1, memory_order_relaxed);
    add = add1;
  }
  // Store the new element into a free add cell.
  uptr i = add->size++;
  Cell *c = &add->cells[i];
  CHECK_EQ(atomic_load(&c->addr, memory_order_relaxed), 0);
  h->addidx_ = i;
  h->cell_ = c;
}
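
// Lock hand-off: acquire() can return while still holding the bucket mutex
// (write-locked when the element was just created or is about to be removed,
// read-locked when it was found in the add array). release(), called from the
// Handle destructor, performs the matching unlock.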

template<typename T, uptr kSize>
void AddrHashMap<T, kSize>::release(Handle *h) {
  if (h->cell_ == nullptr)
    return;
  Bucket *b = h->bucket_;
  Cell *c = h->cell_;
  uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
  if (h->created_) {
    // Denote completion of insertion.
    CHECK_EQ(addr1, 0);
    // After the following store, the element becomes available
    // for lock-free reads.
    atomic_store(&c->addr, h->addr_, memory_order_release);
    b->mtx.Unlock();
  } else if (h->remove_) {
    // Denote that the cell is empty now.
    CHECK_EQ(addr1, h->addr_);
    atomic_store(&c->addr, 0, memory_order_release);
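    // Clearing the address first makes the removed cell invisible to
    // lock-free readers before the bucket is compacted below.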
    // See if we need to compact the bucket.
    AddBucket *add = (AddBucket*)atomic_load(&b->add, memory_order_relaxed);
    if (h->addidx_ == -1U) {
      // Removed from embed array, move an add element into the freed cell.
      if (add && add->size != 0) {
        uptr last = --add->size;
        Cell *c1 = &add->cells[last];
        c->val = c1->val;
        uptr addr1 = atomic_load(&c1->addr, memory_order_relaxed);
        atomic_store(&c->addr, addr1, memory_order_release);
        atomic_store(&c1->addr, 0, memory_order_release);
      }
    } else {
      // Removed from add array, compact it.
      uptr last = --add->size;
      Cell *c1 = &add->cells[last];
      if (c != c1) {
        *c = *c1;
        atomic_store(&c1->addr, 0, memory_order_relaxed);
      }
    }
    if (add && add->size == 0) {
      // FIXME(dvyukov): free add?
    }
    b->mtx.Unlock();
  } else {
    CHECK_EQ(addr1, h->addr_);
    if (h->addidx_ != -1U)
      b->mtx.ReadUnlock();
  }
}
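
// calcHash() mixes the address bits and reduces the result modulo kSize;
// a prime kSize (as suggested in the usage comment above) tends to spread
// addresses more evenly across buckets.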

template<typename T, uptr kSize>
uptr AddrHashMap<T, kSize>::calcHash(uptr addr) {
  addr += addr << 10;
  addr ^= addr >> 6;
  return addr % kSize;
}

}  // namespace __sanitizer

#endif  // SANITIZER_ADDRHASHMAP_H