// NOTE(review): the three lines below are web-scrape residue (an unrelated
// ChangeLog fragment plus git-viewer metadata), commented out so the header
// remains valid C++; the content is preserved for provenance.
// * elf.c (codes) [GENERATE_FIXED_HUFFMAN_TABLE]: Fix size to be
// [official-gcc.git] / libsanitizer / sanitizer_common / sanitizer_addrhashmap.h
// blob a67ec84373ef4a784e809e34b7cd74793f52f04b
//===-- sanitizer_addrhashmap.h ---------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Concurrent uptr->T hashmap.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ADDRHASHMAP_H
#define SANITIZER_ADDRHASHMAP_H

#include "sanitizer_common.h"
#include "sanitizer_mutex.h"
#include "sanitizer_atomic.h"
#include "sanitizer_allocator_internal.h"
namespace __sanitizer {

// Concurrent uptr->T hashmap.
// T must be a POD type, kSize is preferably a prime but can be any number.
// Usage example:
//
// typedef AddrHashMap<uptr, 11> Map;
// Map m;
// {
//   Map::Handle h(&m, addr);
//   use h.operator->() to access the data
//   if h.created() then the element was just created, and the current thread
//   has exclusive access to it
//   otherwise the current thread has only read access to the data
// }
// {
//   Map::Handle h(&m, addr, true);
//   this will remove the data from the map in Handle dtor
//   the current thread has exclusive access to the data
//   if !h.exists() then the element never existed
// }
41 template<typename T, uptr kSize>
42 class AddrHashMap {
43 private:
44 struct Cell {
45 atomic_uintptr_t addr;
46 T val;
49 struct AddBucket {
50 uptr cap;
51 uptr size;
52 Cell cells[1]; // variable len
55 static const uptr kBucketSize = 3;
57 struct Bucket {
58 RWMutex mtx;
59 atomic_uintptr_t add;
60 Cell cells[kBucketSize];
63 public:
64 AddrHashMap();
66 class Handle {
67 public:
68 Handle(AddrHashMap<T, kSize> *map, uptr addr);
69 Handle(AddrHashMap<T, kSize> *map, uptr addr, bool remove);
70 Handle(AddrHashMap<T, kSize> *map, uptr addr, bool remove, bool create);
72 ~Handle();
73 T *operator->();
74 T &operator*();
75 const T &operator*() const;
76 bool created() const;
77 bool exists() const;
79 private:
80 friend AddrHashMap<T, kSize>;
81 AddrHashMap<T, kSize> *map_;
82 Bucket *bucket_;
83 Cell *cell_;
84 uptr addr_;
85 uptr addidx_;
86 bool created_;
87 bool remove_;
88 bool create_;
91 private:
92 friend class Handle;
93 Bucket *table_;
95 void acquire(Handle *h);
96 void release(Handle *h);
97 uptr calcHash(uptr addr);
100 template<typename T, uptr kSize>
101 AddrHashMap<T, kSize>::Handle::Handle(AddrHashMap<T, kSize> *map, uptr addr) {
102 map_ = map;
103 addr_ = addr;
104 remove_ = false;
105 create_ = true;
106 map_->acquire(this);
109 template<typename T, uptr kSize>
110 AddrHashMap<T, kSize>::Handle::Handle(AddrHashMap<T, kSize> *map, uptr addr,
111 bool remove) {
112 map_ = map;
113 addr_ = addr;
114 remove_ = remove;
115 create_ = true;
116 map_->acquire(this);
119 template<typename T, uptr kSize>
120 AddrHashMap<T, kSize>::Handle::Handle(AddrHashMap<T, kSize> *map, uptr addr,
121 bool remove, bool create) {
122 map_ = map;
123 addr_ = addr;
124 remove_ = remove;
125 create_ = create;
126 map_->acquire(this);
129 template<typename T, uptr kSize>
130 AddrHashMap<T, kSize>::Handle::~Handle() {
131 map_->release(this);
134 template <typename T, uptr kSize>
135 T *AddrHashMap<T, kSize>::Handle::operator->() {
136 return &cell_->val;
139 template <typename T, uptr kSize>
140 const T &AddrHashMap<T, kSize>::Handle::operator*() const {
141 return cell_->val;
144 template <typename T, uptr kSize>
145 T &AddrHashMap<T, kSize>::Handle::operator*() {
146 return cell_->val;
149 template<typename T, uptr kSize>
150 bool AddrHashMap<T, kSize>::Handle::created() const {
151 return created_;
154 template<typename T, uptr kSize>
155 bool AddrHashMap<T, kSize>::Handle::exists() const {
156 return cell_ != nullptr;
159 template<typename T, uptr kSize>
160 AddrHashMap<T, kSize>::AddrHashMap() {
161 table_ = (Bucket*)MmapOrDie(kSize * sizeof(table_[0]), "AddrHashMap");
164 template<typename T, uptr kSize>
165 void AddrHashMap<T, kSize>::acquire(Handle *h) {
166 uptr addr = h->addr_;
167 uptr hash = calcHash(addr);
168 Bucket *b = &table_[hash];
170 h->created_ = false;
171 h->addidx_ = -1U;
172 h->bucket_ = b;
173 h->cell_ = nullptr;
175 // If we want to remove the element, we need exclusive access to the bucket,
176 // so skip the lock-free phase.
177 if (h->remove_)
178 goto locked;
180 retry:
181 // First try to find an existing element w/o read mutex.
182 CHECK(!h->remove_);
183 // Check the embed cells.
184 for (uptr i = 0; i < kBucketSize; i++) {
185 Cell *c = &b->cells[i];
186 uptr addr1 = atomic_load(&c->addr, memory_order_acquire);
187 if (addr1 == addr) {
188 h->cell_ = c;
189 return;
193 // Check the add cells with read lock.
194 if (atomic_load(&b->add, memory_order_relaxed)) {
195 b->mtx.ReadLock();
196 AddBucket *add = (AddBucket*)atomic_load(&b->add, memory_order_relaxed);
197 for (uptr i = 0; i < add->size; i++) {
198 Cell *c = &add->cells[i];
199 uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
200 if (addr1 == addr) {
201 h->addidx_ = i;
202 h->cell_ = c;
203 return;
206 b->mtx.ReadUnlock();
209 locked:
210 // Re-check existence under write lock.
211 // Embed cells.
212 b->mtx.Lock();
213 for (uptr i = 0; i < kBucketSize; i++) {
214 Cell *c = &b->cells[i];
215 uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
216 if (addr1 == addr) {
217 if (h->remove_) {
218 h->cell_ = c;
219 return;
221 b->mtx.Unlock();
222 goto retry;
226 // Add cells.
227 AddBucket *add = (AddBucket*)atomic_load(&b->add, memory_order_relaxed);
228 if (add) {
229 for (uptr i = 0; i < add->size; i++) {
230 Cell *c = &add->cells[i];
231 uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
232 if (addr1 == addr) {
233 if (h->remove_) {
234 h->addidx_ = i;
235 h->cell_ = c;
236 return;
238 b->mtx.Unlock();
239 goto retry;
244 // The element does not exist, no need to create it if we want to remove.
245 if (h->remove_ || !h->create_) {
246 b->mtx.Unlock();
247 return;
250 // Now try to create it under the mutex.
251 h->created_ = true;
252 // See if we have a free embed cell.
253 for (uptr i = 0; i < kBucketSize; i++) {
254 Cell *c = &b->cells[i];
255 uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
256 if (addr1 == 0) {
257 h->cell_ = c;
258 return;
262 // Store in the add cells.
263 if (!add) {
264 // Allocate a new add array.
265 const uptr kInitSize = 64;
266 add = (AddBucket*)InternalAlloc(kInitSize);
267 internal_memset(add, 0, kInitSize);
268 add->cap = (kInitSize - sizeof(*add)) / sizeof(add->cells[0]) + 1;
269 add->size = 0;
270 atomic_store(&b->add, (uptr)add, memory_order_relaxed);
272 if (add->size == add->cap) {
273 // Grow existing add array.
274 uptr oldsize = sizeof(*add) + (add->cap - 1) * sizeof(add->cells[0]);
275 uptr newsize = oldsize * 2;
276 AddBucket *add1 = (AddBucket*)InternalAlloc(newsize);
277 internal_memset(add1, 0, newsize);
278 add1->cap = (newsize - sizeof(*add)) / sizeof(add->cells[0]) + 1;
279 add1->size = add->size;
280 internal_memcpy(add1->cells, add->cells, add->size * sizeof(add->cells[0]));
281 InternalFree(add);
282 atomic_store(&b->add, (uptr)add1, memory_order_relaxed);
283 add = add1;
285 // Store.
286 uptr i = add->size++;
287 Cell *c = &add->cells[i];
288 CHECK_EQ(atomic_load(&c->addr, memory_order_relaxed), 0);
289 h->addidx_ = i;
290 h->cell_ = c;
293 template<typename T, uptr kSize>
294 void AddrHashMap<T, kSize>::release(Handle *h) {
295 if (!h->cell_)
296 return;
297 Bucket *b = h->bucket_;
298 Cell *c = h->cell_;
299 uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
300 if (h->created_) {
301 // Denote completion of insertion.
302 CHECK_EQ(addr1, 0);
303 // After the following store, the element becomes available
304 // for lock-free reads.
305 atomic_store(&c->addr, h->addr_, memory_order_release);
306 b->mtx.Unlock();
307 } else if (h->remove_) {
308 // Denote that the cell is empty now.
309 CHECK_EQ(addr1, h->addr_);
310 atomic_store(&c->addr, 0, memory_order_release);
311 // See if we need to compact the bucket.
312 AddBucket *add = (AddBucket*)atomic_load(&b->add, memory_order_relaxed);
313 if (h->addidx_ == -1U) {
314 // Removed from embed array, move an add element into the freed cell.
315 if (add && add->size != 0) {
316 uptr last = --add->size;
317 Cell *c1 = &add->cells[last];
318 c->val = c1->val;
319 uptr addr1 = atomic_load(&c1->addr, memory_order_relaxed);
320 atomic_store(&c->addr, addr1, memory_order_release);
321 atomic_store(&c1->addr, 0, memory_order_release);
323 } else {
324 // Removed from add array, compact it.
325 uptr last = --add->size;
326 Cell *c1 = &add->cells[last];
327 if (c != c1) {
328 *c = *c1;
329 atomic_store(&c1->addr, 0, memory_order_relaxed);
332 if (add && add->size == 0) {
333 // FIXME(dvyukov): free add?
335 b->mtx.Unlock();
336 } else {
337 CHECK_EQ(addr1, h->addr_);
338 if (h->addidx_ != -1U)
339 b->mtx.ReadUnlock();
343 template<typename T, uptr kSize>
344 uptr AddrHashMap<T, kSize>::calcHash(uptr addr) {
345 addr += addr << 10;
346 addr ^= addr >> 6;
347 return addr % kSize;
}  // namespace __sanitizer

#endif  // SANITIZER_ADDRHASHMAP_H