Small ChangeLog tweak.
[official-gcc.git] / libsanitizer / sanitizer_common / sanitizer_addrhashmap.h
blob3bc40ef64f010220ea9c8475560947ad201f903e
1 //===-- sanitizer_addrhashmap.h ---------------------------------*- C++ -*-===//
2 //
3 // This file is distributed under the University of Illinois Open Source
4 // License. See LICENSE.TXT for details.
5 //
6 //===----------------------------------------------------------------------===//
7 //
8 // Concurrent uptr->T hashmap.
9 //
10 //===----------------------------------------------------------------------===//
12 #ifndef SANITIZER_ADDRHASHMAP_H
13 #define SANITIZER_ADDRHASHMAP_H
15 #include "sanitizer_common.h"
16 #include "sanitizer_mutex.h"
17 #include "sanitizer_atomic.h"
18 #include "sanitizer_allocator_internal.h"
20 namespace __sanitizer {
22 // Concurrent uptr->T hashmap.
23 // T must be a POD type, kSize is preferably a prime but can be any number.
24 // Usage example:
26 // typedef AddrHashMap<uptr, 11> Map;
27 // Map m;
28 // {
29 // Map::Handle h(&m, addr);
30 // use h.operator->() to access the data
31 // if h.created() then the element was just created, and the current thread
32 // has exclusive access to it
33 // otherwise the current thread has only read access to the data
34 // }
35 // {
36 // Map::Handle h(&m, addr, true);
37 // this will remove the data from the map in Handle dtor
38 // the current thread has exclusive access to the data
39 // if !h.exists() then the element never existed
40 // }
// Concurrent uptr->T hashmap (declaration; member definitions follow below).
// Each bucket holds kBucketSize inline cells plus an optional, dynamically
// grown "add" array for overflow. Lookup of existing elements is lock-free
// on the inline cells; creation/removal takes the per-bucket mutex.
template<typename T, uptr kSize>
class AddrHashMap {
 private:
  // One map entry: the address key (0 == empty slot) and the payload.
  // addr is atomic so readers can probe cells without the bucket lock.
  struct Cell {
    atomic_uintptr_t addr;
    T val;
  };

  // Overflow storage for a bucket, allocated on demand in acquire().
  struct AddBucket {
    uptr cap;       // number of cells allocated
    uptr size;      // number of cells in use
    Cell cells[1];  // variable len
  };

  // Number of cells embedded directly in every bucket.
  static const uptr kBucketSize = 3;

  struct Bucket {
    RWMutex mtx;           // guards the add array and all mutations
    atomic_uintptr_t add;  // AddBucket*, 0 until the bucket first overflows
    Cell cells[kBucketSize];
  };

 public:
  AddrHashMap();

  // RAII accessor: the constructor locates (and optionally creates or marks
  // for removal) the element for `addr`; the destructor finalizes the
  // operation and drops whatever lock acquire() left held.
  class Handle {
   public:
    Handle(AddrHashMap<T, kSize> *map, uptr addr);
    Handle(AddrHashMap<T, kSize> *map, uptr addr, bool remove);
    Handle(AddrHashMap<T, kSize> *map, uptr addr, bool remove, bool create);

    ~Handle();
    T *operator->();
    bool created() const;
    bool exists() const;

   private:
    friend AddrHashMap<T, kSize>;
    AddrHashMap<T, kSize> *map_;
    Bucket *bucket_;
    Cell *cell_;    // found/created cell, or nullptr if absent
    uptr addr_;
    uptr addidx_;   // index in the add array; -1U when cell_ is an embed cell
    bool created_;
    bool remove_;
    bool create_;
  };

 private:
  friend class Handle;
  Bucket *table_;  // kSize buckets, mmap-ed in the constructor

  void acquire(Handle *h);
  void release(Handle *h);
  uptr calcHash(uptr addr);
};
98 template<typename T, uptr kSize>
99 AddrHashMap<T, kSize>::Handle::Handle(AddrHashMap<T, kSize> *map, uptr addr) {
100 map_ = map;
101 addr_ = addr;
102 remove_ = false;
103 create_ = true;
104 map_->acquire(this);
107 template<typename T, uptr kSize>
108 AddrHashMap<T, kSize>::Handle::Handle(AddrHashMap<T, kSize> *map, uptr addr,
109 bool remove) {
110 map_ = map;
111 addr_ = addr;
112 remove_ = remove;
113 create_ = true;
114 map_->acquire(this);
117 template<typename T, uptr kSize>
118 AddrHashMap<T, kSize>::Handle::Handle(AddrHashMap<T, kSize> *map, uptr addr,
119 bool remove, bool create) {
120 map_ = map;
121 addr_ = addr;
122 remove_ = remove;
123 create_ = create;
124 map_->acquire(this);
127 template<typename T, uptr kSize>
128 AddrHashMap<T, kSize>::Handle::~Handle() {
129 map_->release(this);
132 template <typename T, uptr kSize>
133 T *AddrHashMap<T, kSize>::Handle::operator->() {
134 return &cell_->val;
137 template<typename T, uptr kSize>
138 bool AddrHashMap<T, kSize>::Handle::created() const {
139 return created_;
142 template<typename T, uptr kSize>
143 bool AddrHashMap<T, kSize>::Handle::exists() const {
144 return cell_ != nullptr;
147 template<typename T, uptr kSize>
148 AddrHashMap<T, kSize>::AddrHashMap() {
149 table_ = (Bucket*)MmapOrDie(kSize * sizeof(table_[0]), "AddrHashMap");
// Locates (or creates, or prepares removal of) the cell for h->addr_ and
// records the result in h. Locking state on return:
//  - found via the lock-free probe of embed cells: no lock held;
//  - found in the add array on the lock-free path: bucket read lock held;
//  - element created (h->created_ set): bucket write lock held until
//    release() publishes the address;
//  - remove requested and element found: bucket write lock held.
template<typename T, uptr kSize>
void AddrHashMap<T, kSize>::acquire(Handle *h) {
  uptr addr = h->addr_;
  uptr hash = calcHash(addr);
  Bucket *b = &table_[hash];

  h->created_ = false;
  h->addidx_ = -1U;  // -1U means "embed cell" (or no cell yet)
  h->bucket_ = b;
  h->cell_ = nullptr;

  // If we want to remove the element, we need exclusive access to the bucket,
  // so skip the lock-free phase.
  if (h->remove_)
    goto locked;

 retry:
  // First try to find an existing element w/o read mutex.
  CHECK(!h->remove_);
  // Check the embed cells.
  for (uptr i = 0; i < kBucketSize; i++) {
    Cell *c = &b->cells[i];
    // Acquire pairs with the release store in release() that publishes a
    // newly created element.
    uptr addr1 = atomic_load(&c->addr, memory_order_acquire);
    if (addr1 == addr) {
      h->cell_ = c;
      return;
    }
  }

  // Check the add cells with read lock.
  if (atomic_load(&b->add, memory_order_relaxed)) {
    b->mtx.ReadLock();
    AddBucket *add = (AddBucket*)atomic_load(&b->add, memory_order_relaxed);
    for (uptr i = 0; i < add->size; i++) {
      Cell *c = &add->cells[i];
      uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
      if (addr1 == addr) {
        h->addidx_ = i;
        h->cell_ = c;
        // NOTE: the read lock stays held; release() drops it.
        return;
      }
    }
    b->mtx.ReadUnlock();
  }

 locked:
  // Re-check existence under write lock.
  // Embed cells.
  b->mtx.Lock();
  for (uptr i = 0; i < kBucketSize; i++) {
    Cell *c = &b->cells[i];
    uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
    if (addr1 == addr) {
      if (h->remove_) {
        h->cell_ = c;
        // Write lock stays held for the removal done in release().
        return;
      }
      // Created concurrently by another thread; re-read it lock-free.
      b->mtx.Unlock();
      goto retry;
    }
  }

  // Add cells.
  AddBucket *add = (AddBucket*)atomic_load(&b->add, memory_order_relaxed);
  if (add) {
    for (uptr i = 0; i < add->size; i++) {
      Cell *c = &add->cells[i];
      uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
      if (addr1 == addr) {
        if (h->remove_) {
          h->addidx_ = i;
          h->cell_ = c;
          return;
        }
        b->mtx.Unlock();
        goto retry;
      }
    }
  }

  // The element does not exist, no need to create it if we want to remove.
  if (h->remove_ || !h->create_) {
    b->mtx.Unlock();
    return;
  }

  // Now try to create it under the mutex.
  h->created_ = true;
  // See if we have a free embed cell.
  for (uptr i = 0; i < kBucketSize; i++) {
    Cell *c = &b->cells[i];
    uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
    if (addr1 == 0) {
      // c->addr stays 0 for now; release() publishes it with a release store.
      h->cell_ = c;
      return;
    }
  }

  // Store in the add cells.
  if (!add) {
    // Allocate a new add array.
    const uptr kInitSize = 64;
    add = (AddBucket*)InternalAlloc(kInitSize);
    internal_memset(add, 0, kInitSize);
    // sizeof(*add) already accounts for one cell, hence the +1.
    add->cap = (kInitSize - sizeof(*add)) / sizeof(add->cells[0]) + 1;
    add->size = 0;
    atomic_store(&b->add, (uptr)add, memory_order_relaxed);
  }
  if (add->size == add->cap) {
    // Grow existing add array.
    uptr oldsize = sizeof(*add) + (add->cap - 1) * sizeof(add->cells[0]);
    uptr newsize = oldsize * 2;
    AddBucket *add1 = (AddBucket*)InternalAlloc(newsize);
    internal_memset(add1, 0, newsize);
    add1->cap = (newsize - sizeof(*add)) / sizeof(add->cells[0]) + 1;
    add1->size = add->size;
    internal_memcpy(add1->cells, add->cells, add->size * sizeof(add->cells[0]));
    InternalFree(add);
    atomic_store(&b->add, (uptr)add1, memory_order_relaxed);
    add = add1;
  }
  // Store.
  uptr i = add->size++;
  Cell *c = &add->cells[i];
  CHECK_EQ(atomic_load(&c->addr, memory_order_relaxed), 0);
  h->addidx_ = i;
  h->cell_ = c;
}
// Finalizes the operation set up by acquire() and drops the lock that
// acquire() left held for this handle (write lock for create/remove, read
// lock for a lock-free hit in the add array, none for an embed-cell hit).
template<typename T, uptr kSize>
void AddrHashMap<T, kSize>::release(Handle *h) {
  if (!h->cell_)
    return;
  Bucket *b = h->bucket_;
  Cell *c = h->cell_;
  uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
  if (h->created_) {
    // Denote completion of insertion.
    CHECK_EQ(addr1, 0);
    // After the following store, the element becomes available
    // for lock-free reads.
    atomic_store(&c->addr, h->addr_, memory_order_release);
    b->mtx.Unlock();
  } else if (h->remove_) {
    // Denote that the cell is empty now.
    CHECK_EQ(addr1, h->addr_);
    atomic_store(&c->addr, 0, memory_order_release);
    // See if we need to compact the bucket.
    AddBucket *add = (AddBucket*)atomic_load(&b->add, memory_order_relaxed);
    if (h->addidx_ == -1U) {
      // Removed from embed array, move an add element into the freed cell.
      if (add && add->size != 0) {
        uptr last = --add->size;
        Cell *c1 = &add->cells[last];
        // Copy the payload first, then publish the key with release order so
        // lock-free readers never see the address before the value.
        c->val = c1->val;
        uptr addr1 = atomic_load(&c1->addr, memory_order_relaxed);
        atomic_store(&c->addr, addr1, memory_order_release);
        atomic_store(&c1->addr, 0, memory_order_release);
      }
    } else {
      // Removed from add array, compact it.
      // (addidx_ != -1U implies add is non-null.)
      uptr last = --add->size;
      Cell *c1 = &add->cells[last];
      if (c != c1) {
        *c = *c1;
        atomic_store(&c1->addr, 0, memory_order_relaxed);
      }
    }
    if (add && add->size == 0) {
      // FIXME(dvyukov): free add?
    }
    b->mtx.Unlock();
  } else {
    // Plain read access: only the add-array path holds the read lock here.
    CHECK_EQ(addr1, h->addr_);
    if (h->addidx_ != -1U)
      b->mtx.ReadUnlock();
  }
}
331 template<typename T, uptr kSize>
332 uptr AddrHashMap<T, kSize>::calcHash(uptr addr) {
333 addr += addr << 10;
334 addr ^= addr >> 6;
335 return addr % kSize;
338 } // namespace __sanitizer
340 #endif // SANITIZER_ADDRHASHMAP_H