[PR67828] don't unswitch on default defs of non-parms
official-gcc.git: libsanitizer/tsan/tsan_sync.cc
//===-- tsan_sync.cc ------------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_sync.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"

namespace __tsan {

void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s);

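// SyncVar describes a single user synchronization object (mutex, atomic,
// etc.). The constructor calls Reset(0) with a null ThreadState: at this
// point there are no clock resources to release, so Reset only zeroes the
// fields and verifies that the clocks are empty.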
SyncVar::SyncVar()
    : mtx(MutexTypeSyncVar, StatMtxSyncVar) {
  Reset(0);
}

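// Binds the sync object to its user address and a fresh unique id.
// The creation stack is only collected in C++ mode (Go does not use it),
// and the object is registered with the deadlock detector when enabled.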
void SyncVar::Init(ThreadState *thr, uptr pc, uptr addr, u64 uid) {
  this->addr = addr;
  this->uid = uid;
  this->next = 0;

  creation_stack_id = 0;
  if (kCppMode)  // Go does not use them
    creation_stack_id = CurrentStackId(thr, pc);
  if (common_flags()->detect_deadlocks)
    DDMutexInit(thr, pc, this);
}

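// Returns the object to a pristine state. When called with thr == 0
// (from the constructor) there are no clock resources to release, so the
// clocks are only checked to be empty; otherwise the vector clocks are
// returned to the calling thread's clock cache.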
void SyncVar::Reset(ThreadState *thr) {
  uid = 0;
  creation_stack_id = 0;
  owner_tid = kInvalidTid;
  last_lock = 0;
  recursion = 0;
  is_rw = 0;
  is_recursive = 0;
  is_broken = 0;
  is_linker_init = 0;

  if (thr == 0) {
    CHECK_EQ(clock.size(), 0);
    CHECK_EQ(read_clock.size(), 0);
  } else {
    clock.Reset(&thr->clock_cache);
    read_clock.Reset(&thr->clock_cache);
  }
}

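// MetaMap maps application memory to metadata. Each meta cell holds either
// 0 (no metadata), an index into the block allocator tagged with kFlagBlock
// (a heap block descriptor), or the head of a singly linked list of sync
// objects tagged with kFlagSync and chained through SyncVar::next, with an
// optional block entry terminating the chain. uid_gen_ hands out the unique
// ids passed to SyncVar::Init.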
MetaMap::MetaMap() {
  atomic_store(&uid_gen_, 0, memory_order_relaxed);
}

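// Publishes a heap block descriptor (size, allocating thread, allocation
// stack) for the block of sz bytes at p. The meta cell must be empty.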
void MetaMap::AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz) {
  u32 idx = block_alloc_.Alloc(&thr->block_cache);
  MBlock *b = block_alloc_.Map(idx);
  b->siz = sz;
  b->tid = thr->tid;
  b->stk = CurrentStackId(thr, pc);
  u32 *meta = MemToMeta(p);
  DCHECK_EQ(*meta, 0);
  *meta = idx | kFlagBlock;
}

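// Frees the block descriptor covering p together with any sync objects in
// its range. Returns the size of the freed range rounded up to the meta
// shadow granularity, or 0 if p does not point to a registered block.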
uptr MetaMap::FreeBlock(ThreadState *thr, uptr pc, uptr p) {
  MBlock* b = GetBlock(p);
  if (b == 0)
    return 0;
  uptr sz = RoundUpTo(b->siz, kMetaShadowCell);
  FreeRange(thr, pc, p, sz);
  return sz;
}

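// Releases all metadata for [p, p+sz): every sync object in each cell's
// chain is Reset and returned to the sync allocator, and a block entry
// (which always terminates a chain) is returned to the block allocator.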
void MetaMap::FreeRange(ThreadState *thr, uptr pc, uptr p, uptr sz) {
  u32 *meta = MemToMeta(p);
  u32 *end = MemToMeta(p + sz);
  if (end == meta)
    end++;
  for (; meta < end; meta++) {
    u32 idx = *meta;
    *meta = 0;
    for (;;) {
      if (idx == 0)
        break;
      if (idx & kFlagBlock) {
        block_alloc_.Free(&thr->block_cache, idx & ~kFlagMask);
        break;
      } else if (idx & kFlagSync) {
        SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
        u32 next = s->next;
        s->Reset(thr);
        sync_alloc_.Free(&thr->sync_cache, idx & ~kFlagMask);
        idx = next;
      } else {
        CHECK(0);
      }
    }
  }
}

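// Looks up the heap block descriptor for p, skipping over any sync objects
// at the head of the cell's chain. Returns 0 if there is no block.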
MBlock* MetaMap::GetBlock(uptr p) {
  u32 *meta = MemToMeta(p);
  u32 idx = *meta;
  for (;;) {
    if (idx == 0)
      return 0;
    if (idx & kFlagBlock)
      return block_alloc_.Map(idx & ~kFlagMask);
    DCHECK(idx & kFlagSync);
    SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
    idx = s->next;
  }
}

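// The two public lookup entry points are thin wrappers around GetAndLock.
// GetIfExistsAndLock passes thr == 0, which is safe because no allocation
// happens on the create == false path.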
SyncVar* MetaMap::GetOrCreateAndLock(ThreadState *thr, uptr pc,
                                     uptr addr, bool write_lock) {
  return GetAndLock(thr, pc, addr, write_lock, true);
}

SyncVar* MetaMap::GetIfExistsAndLock(uptr addr) {
  return GetAndLock(0, 0, addr, true, false);
}

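// Lock-free lookup/insert. Scans the cell's chain for a SyncVar with a
// matching address; if none is found and create is set, a new SyncVar is
// linked at the head of the chain and published with a CAS on the meta
// cell. On CAS failure the scan restarts; the pre-allocated SyncVar
// (myidx/mys) is kept across retries and released if another thread
// installs the same address first.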
SyncVar* MetaMap::GetAndLock(ThreadState *thr, uptr pc,
                             uptr addr, bool write_lock, bool create) {
  u32 *meta = MemToMeta(addr);
  u32 idx0 = *meta;
  u32 myidx = 0;
  SyncVar *mys = 0;
  for (;;) {
    u32 idx = idx0;
    for (;;) {
      if (idx == 0)
        break;
      if (idx & kFlagBlock)
        break;
      DCHECK(idx & kFlagSync);
      SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
      if (s->addr == addr) {
        // Another thread created this SyncVar while we were allocating
        // ours; free the now-unneeded pre-allocated object.
        if (myidx != 0) {
          mys->Reset(thr);
          sync_alloc_.Free(&thr->sync_cache, myidx);
        }
        if (write_lock)
          s->mtx.Lock();
        else
          s->mtx.ReadLock();
        return s;
      }
      idx = s->next;
    }
    if (!create)
      return 0;
    // The chain head changed under us; rescan.
    if (*meta != idx0) {
      idx0 = *meta;
      continue;
    }

    if (myidx == 0) {
      const u64 uid = atomic_fetch_add(&uid_gen_, 1, memory_order_relaxed);
      myidx = sync_alloc_.Alloc(&thr->sync_cache);
      mys = sync_alloc_.Map(myidx);
      mys->Init(thr, pc, addr, uid);
    }
    mys->next = idx0;
    if (atomic_compare_exchange_strong((atomic_uint32_t*)meta, &idx0,
        myidx | kFlagSync, memory_order_release)) {
      if (write_lock)
        mys->mtx.Lock();
      else
        mys->mtx.ReadLock();
      return mys;
    }
  }
}

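// Moves all metadata for the range [src, src+sz) to [dst, dst+sz).
// When dst > src the ranges may overlap, so the copy runs backwards;
// the cached address in every moved sync object is patched by diff.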
void MetaMap::MoveMemory(uptr src, uptr dst, uptr sz) {
  // src and dst can overlap,
  // there are no concurrent accesses to the regions (e.g. stop-the-world).
  CHECK_NE(src, dst);
  CHECK_NE(sz, 0);
  uptr diff = dst - src;
  u32 *src_meta = MemToMeta(src);
  u32 *dst_meta = MemToMeta(dst);
  u32 *src_meta_end = MemToMeta(src + sz);
  uptr inc = 1;
  if (dst > src) {
    src_meta = MemToMeta(src + sz) - 1;
    dst_meta = MemToMeta(dst + sz) - 1;
    src_meta_end = MemToMeta(src) - 1;
    inc = -1;
  }
  for (; src_meta != src_meta_end; src_meta += inc, dst_meta += inc) {
    CHECK_EQ(*dst_meta, 0);
    u32 idx = *src_meta;
    *src_meta = 0;
    *dst_meta = idx;
    // Patch the addresses in sync objects.
    while (idx != 0) {
      if (idx & kFlagBlock)
        break;
      CHECK(idx & kFlagSync);
      SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
      s->addr += diff;
      idx = s->next;
    }
  }
}

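// Flushes the thread's allocator caches back to the shared pools so that
// an idle thread does not hold on to cached blocks and sync objects.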
void MetaMap::OnThreadIdle(ThreadState *thr) {
  block_alloc_.FlushCache(&thr->block_cache);
  sync_alloc_.FlushCache(&thr->sync_cache);
}

}  // namespace __tsan