[TSan] Revert r212531 and r212532.
[blocksruntime.git] / lib/tsan/rtl/tsan_sync.cc
//===-- tsan_sync.cc ------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_sync.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"

namespace __tsan {

void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s);

SyncVar::SyncVar()
    : mtx(MutexTypeSyncVar, StatMtxSyncVar) {
  Reset();
}

void SyncVar::Init(ThreadState *thr, uptr pc, uptr addr, u64 uid) {
  this->addr = addr;
  this->uid = uid;
  creation_stack_id = 0;
  if (kCppMode)  // Go does not use them
    creation_stack_id = CurrentStackId(thr, pc);
  if (flags()->detect_deadlocks)
    DDMutexInit(thr, pc, this);
}
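
// Returns the object to a pristine state; called from the constructor
// and before the slot is handed back to the sync allocator for reuse.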
void SyncVar::Reset() {
  addr = 0;
  uid = 0;
  creation_stack_id = 0;
  owner_tid = kInvalidTid;
  last_lock = 0;
  recursion = 0;
  is_rw = 0;
  is_recursive = 0;
  is_broken = 0;
  is_linker_init = 0;
  next = 0;

  clock.Zero();
  read_clock.Reset();
}
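
// MetaMap associates application memory with heap block (MBlock) and
// synchronization object (SyncVar) metadata. Each meta shadow cell holds
// the head of a singly-linked chain of allocator indices tagged with
// kFlagBlock or kFlagSync; idx & ~kFlagMask recovers the raw index.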
MetaMap::MetaMap() {
  atomic_store(&uid_gen_, 0, memory_order_relaxed);
}
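
// Registers a heap block of sz bytes at p: records the size, allocating
// thread and allocation stack, and publishes the block index in the meta
// cell for p, which must be empty.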
void MetaMap::AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz) {
  u32 idx = block_alloc_.Alloc(&thr->block_cache);
  MBlock *b = block_alloc_.Map(idx);
  b->siz = sz;
  b->tid = thr->tid;
  b->stk = CurrentStackId(thr, pc);
  u32 *meta = MemToMeta(p);
  DCHECK_EQ(*meta, 0);
  *meta = idx | kFlagBlock;
}
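
// Unregisters the block at p along with any sync objects inside it.
// Returns the freed size rounded up to kMetaShadowCell, or 0 if no
// block is registered at p.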
uptr MetaMap::FreeBlock(ThreadState *thr, uptr pc, uptr p) {
  MBlock* b = GetBlock(p);
  if (b == 0)
    return 0;
  uptr sz = RoundUpTo(b->siz, kMetaShadowCell);
  FreeRange(thr, pc, p, sz);
  return sz;
}
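
// Clears every meta cell in [p, p+sz) and releases the chain hanging
// off it: sync objects are reset and returned to the sync allocator,
// and a block entry terminates its chain.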
void MetaMap::FreeRange(ThreadState *thr, uptr pc, uptr p, uptr sz) {
  u32 *meta = MemToMeta(p);
  u32 *end = MemToMeta(p + sz);
  if (end == meta)
    end++;
  for (; meta < end; meta++) {
    u32 idx = *meta;
    *meta = 0;
    for (;;) {
      if (idx == 0)
        break;
      if (idx & kFlagBlock) {
        block_alloc_.Free(&thr->block_cache, idx & ~kFlagMask);
        break;
      } else if (idx & kFlagSync) {
        SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
        u32 next = s->next;
        s->Reset();
        sync_alloc_.Free(&thr->sync_cache, idx & ~kFlagMask);
        idx = next;
      } else {
        CHECK(0);
      }
    }
  }
}
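
// Returns the MBlock describing the heap block at p, skipping any sync
// objects linked ahead of it in the chain, or null if none is registered.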
MBlock* MetaMap::GetBlock(uptr p) {
  u32 *meta = MemToMeta(p);
  u32 idx = *meta;
  for (;;) {
    if (idx == 0)
      return 0;
    if (idx & kFlagBlock)
      return block_alloc_.Map(idx & ~kFlagMask);
    DCHECK(idx & kFlagSync);
    SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
    idx = s->next;
  }
}
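
// Public lookup entry points; both are thin wrappers around GetAndLock,
// which does the actual chain walk and optional insertion.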
SyncVar* MetaMap::GetOrCreateAndLock(ThreadState *thr, uptr pc,
                                     uptr addr, bool write_lock) {
  return GetAndLock(thr, pc, addr, write_lock, true);
}

SyncVar* MetaMap::GetIfExistsAndLock(uptr addr) {
  return GetAndLock(0, 0, addr, true, false);
}
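
// Looks up the SyncVar for addr in the meta chain and returns it locked
// (read- or write-locked per write_lock). If it is absent and create is
// set, a SyncVar is allocated speculatively and published at the chain
// head with a compare-and-swap; on a lost race the scan restarts, and
// the speculative object is recycled if a match appears.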
SyncVar* MetaMap::GetAndLock(ThreadState *thr, uptr pc,
                             uptr addr, bool write_lock, bool create) {
  u32 *meta = MemToMeta(addr);
  u32 idx0 = *meta;
  u32 myidx = 0;
  SyncVar *mys = 0;
  for (;;) {
    u32 idx = *meta;
    for (;;) {
      if (idx == 0)
        break;
      if (idx & kFlagBlock)
        break;
      DCHECK(idx & kFlagSync);
      SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
      if (s->addr == addr) {
        if (myidx != 0) {
          // Lost the race to publish the speculative SyncVar; recycle it.
          mys->Reset();
          sync_alloc_.Free(&thr->sync_cache, myidx);
        }
        if (write_lock)
          s->mtx.Lock();
        else
          s->mtx.ReadLock();
        return s;
      }
      idx = s->next;
    }
    if (!create)
      return 0;
    if (*meta != idx0) {
      // The chain changed under us; refresh the snapshot and rescan.
      idx0 = *meta;
      continue;
    }

    if (myidx == 0) {
      const u64 uid = atomic_fetch_add(&uid_gen_, 1, memory_order_relaxed);
      myidx = sync_alloc_.Alloc(&thr->sync_cache);
      mys = sync_alloc_.Map(myidx);
      mys->Init(thr, pc, addr, uid);
    }
    mys->next = idx0;
    if (atomic_compare_exchange_strong((atomic_uint32_t*)meta, &idx0,
        myidx | kFlagSync, memory_order_release)) {
      if (write_lock)
        mys->mtx.Lock();
      else
        mys->mtx.ReadLock();
      return mys;
    }
  }
}
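
// Relocates metadata when a range of application memory moves by
// (dst - src) bytes. The caller guarantees exclusive access to both
// regions, so a plain cell-by-cell copy suffices.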
void MetaMap::MoveMemory(uptr src, uptr dst, uptr sz) {
  // src and dst can overlap; the caller guarantees that there are no
  // concurrent accesses to the regions (e.g. stop-the-world).
  CHECK_NE(src, dst);
  CHECK_NE(sz, 0);
  uptr diff = dst - src;
  u32 *src_meta = MemToMeta(src);
  u32 *dst_meta = MemToMeta(dst);
  u32 *src_meta_end = MemToMeta(src + sz);
  uptr inc = 1;
  if (dst > src) {
    // Copy backwards so that overlapping destination cells are not
    // clobbered before they are read.
    src_meta = MemToMeta(src + sz) - 1;
    dst_meta = MemToMeta(dst + sz) - 1;
    src_meta_end = MemToMeta(src) - 1;
    inc = -1;
  }
  for (; src_meta != src_meta_end; src_meta += inc, dst_meta += inc) {
    CHECK_EQ(*dst_meta, 0);
    u32 idx = *src_meta;
    *src_meta = 0;
    *dst_meta = idx;
    // Patch the addresses in sync objects.
    while (idx != 0) {
      if (idx & kFlagBlock)
        break;
      CHECK(idx & kFlagSync);
      SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
      s->addr += diff;
      idx = s->next;
    }
  }
}
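
// Flushes the thread-local allocator caches back to the central
// allocators so an idle thread does not pin freed metadata slots.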
void MetaMap::OnThreadIdle(ThreadState *thr) {
  block_alloc_.FlushCache(&thr->block_cache);
  sync_alloc_.FlushCache(&thr->sync_cache);
}

}  // namespace __tsan