//===-- tsan_dense_alloc.h --------------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// A DenseSlabAlloc is a freelist-based allocator of fixed-size objects.
// DenseSlabAllocCache is a thread-local cache for DenseSlabAlloc.
// The only difference from traditional slab allocators is that DenseSlabAlloc
// allocates/frees indices of objects and provides functionality to map
// an index onto the real pointer. The index is u32, that is, half the size
// of uptr (hence the Dense prefix).
//===----------------------------------------------------------------------===//
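
// A minimal usage sketch (illustrative only; `Foo` and the level sizes are
// hypothetical, not part of this header):
//
//   typedef DenseSlabAlloc<Foo, /*kL1Size=*/1 << 10, /*kL2Size=*/1 << 10> FooAlloc;
//   FooAlloc alloc;
//   DenseSlabAllocCache cache;
//   alloc.InitCache(&cache);
//   u32 idx = alloc.Alloc(&cache);   // grab a free index (never 0)
//   Foo *p = alloc.Map(idx);         // translate the index to a pointer
//   alloc.Free(&cache, idx);         // return the index
//   alloc.FlushCache(&cache);        // give cached indices back, e.g. on thread exit
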
#ifndef TSAN_DENSE_ALLOC_H
#define TSAN_DENSE_ALLOC_H

#include "sanitizer_common/sanitizer_common.h"
#include "tsan_defs.h"
#include "tsan_mutex.h"

namespace __tsan {

class DenseSlabAllocCache {
  static const uptr kSize = 128;
  typedef u32 IndexT;
  uptr pos;
  IndexT cache[kSize];
  template<typename T, uptr kL1Size, uptr kL2Size> friend class DenseSlabAlloc;
};
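
// Two-level allocator: map_ (the L1 array) holds pointers to mmap'ed blocks
// of kL2Size objects each, and an index splits as
// map_[idx / kL2Size][idx % kL2Size]. Since both sizes are required to be
// powers of two, the division and modulo compile down to a shift and a mask.
// For example (with the hypothetical sizes above), kL2Size == 1 << 10 maps
// idx == 2050 to object 2 of block 2.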
template<typename T, uptr kL1Size, uptr kL2Size>
class DenseSlabAlloc {
 public:
  typedef DenseSlabAllocCache Cache;
  typedef typename Cache::IndexT IndexT;

  DenseSlabAlloc() {
    // Check that kL1Size and kL2Size are sane.
    CHECK_EQ(kL1Size & (kL1Size - 1), 0);
    CHECK_EQ(kL2Size & (kL2Size - 1), 0);
    CHECK_GE(1ull << (sizeof(IndexT) * 8), kL1Size * kL2Size);
    // Check that it makes sense to use the dense alloc.
    CHECK_GE(sizeof(T), sizeof(IndexT));
    internal_memset(map_, 0, sizeof(map_));
    freelist_ = 0;
    fillpos_ = 0;
  }

  ~DenseSlabAlloc() {
    for (uptr i = 0; i < kL1Size; i++) {
      if (map_[i] != 0)
        UnmapOrDie(map_[i], kL2Size * sizeof(T));
    }
  }

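  // Alloc() and Free() operate on the thread-local cache and touch the
  // shared freelist (under mtx_) only when the cache runs empty or full,
  // so the common path takes no lock.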
  IndexT Alloc(Cache *c) {
    if (c->pos == 0)
      Refill(c);
    return c->cache[--c->pos];
  }

  void Free(Cache *c, IndexT idx) {
    DCHECK_NE(idx, 0);
    if (c->pos == Cache::kSize)
      Drain(c);
    c->cache[c->pos++] = idx;
  }

  T *Map(IndexT idx) {
    DCHECK_NE(idx, 0);
    DCHECK_LE(idx, kL1Size * kL2Size);
    return &map_[idx / kL2Size][idx % kL2Size];
  }

  // Returns all cached indices to the shared freelist.
  void FlushCache(Cache *c) {
    SpinMutexLock lock(&mtx_);
    while (c->pos) {
      IndexT idx = c->cache[--c->pos];
      *(IndexT*)Map(idx) = freelist_;
      freelist_ = idx;
    }
  }

  void InitCache(Cache *c) {
    c->pos = 0;
    internal_memset(c->cache, 0, sizeof(c->cache));
  }

 private:
  T *map_[kL1Size];
  SpinMutex mtx_;
  IndexT freelist_;
  uptr fillpos_;

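  // The freelist is intrusive: a free object's first sizeof(IndexT) bytes
  // store the index of the next free object (hence the constructor's
  // CHECK_GE(sizeof(T), sizeof(IndexT))). Refill() mmaps a fresh L2 block
  // when the shared freelist is empty, threads it onto the freelist, and
  // then moves up to Cache::kSize / 2 indices into the thread-local cache.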
  void Refill(Cache *c) {
    SpinMutexLock lock(&mtx_);
    if (freelist_ == 0) {
      if (fillpos_ == kL1Size) {
        Printf("ThreadSanitizer: DenseSlabAllocator overflow. Dying.\n");
        Die();
      }
      T *batch = (T*)MmapOrDie(kL2Size * sizeof(T), "DenseSlabAllocator");
      // Reserve 0 as invalid index.
      IndexT start = fillpos_ == 0 ? 1 : 0;
      for (IndexT i = start; i < kL2Size; i++) {
        new(batch + i) T();
        *(IndexT*)(batch + i) = i + 1 + fillpos_ * kL2Size;
      }
      // The last object of the batch terminates the freelist.
      *(IndexT*)(batch + kL2Size - 1) = 0;
      freelist_ = fillpos_ * kL2Size + start;
      map_[fillpos_++] = batch;
    }
    // Move up to half a cache worth of indices into the thread-local cache.
    for (uptr i = 0; i < Cache::kSize / 2 && freelist_ != 0; i++) {
      IndexT idx = freelist_;
      c->cache[c->pos++] = idx;
      freelist_ = *(IndexT*)Map(idx);
    }
  }

  // Returns half of the cached indices to the shared freelist.
  void Drain(Cache *c) {
    SpinMutexLock lock(&mtx_);
    for (uptr i = 0; i < Cache::kSize / 2; i++) {
      IndexT idx = c->cache[--c->pos];
      *(IndexT*)Map(idx) = freelist_;
      freelist_ = idx;
    }
  }
};

} // namespace __tsan

#endif // TSAN_DENSE_ALLOC_H