//===-- tsan_sync.cc ------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_sync.h"
#include "tsan_mman.h"
20 void DDMutexInit(ThreadState
*thr
, uptr pc
, SyncVar
*s
);
23 : mtx(MutexTypeSyncVar
, StatMtxSyncVar
) {
27 void SyncVar::Init(ThreadState
*thr
, uptr pc
, uptr addr
, u64 uid
) {
31 creation_stack_id
= 0;
32 if (kCppMode
) // Go does not use them
33 creation_stack_id
= CurrentStackId(thr
, pc
);
34 if (flags()->detect_deadlocks
)
35 DDMutexInit(thr
, pc
, this);
38 void SyncVar::Reset() {
41 creation_stack_id
= 0;
42 owner_tid
= kInvalidTid
;
56 atomic_store(&uid_gen_
, 0, memory_order_relaxed
);
59 void MetaMap::AllocBlock(ThreadState
*thr
, uptr pc
, uptr p
, uptr sz
) {
60 u32 idx
= block_alloc_
.Alloc(&thr
->block_cache
);
61 MBlock
*b
= block_alloc_
.Map(idx
);
64 b
->stk
= CurrentStackId(thr
, pc
);
65 u32
*meta
= MemToMeta(p
);
67 *meta
= idx
| kFlagBlock
;
70 uptr
MetaMap::FreeBlock(ThreadState
*thr
, uptr pc
, uptr p
) {
71 MBlock
* b
= GetBlock(p
);
74 uptr sz
= RoundUpTo(b
->siz
, kMetaShadowCell
);
75 FreeRange(thr
, pc
, p
, sz
);
79 void MetaMap::FreeRange(ThreadState
*thr
, uptr pc
, uptr p
, uptr sz
) {
80 u32
*meta
= MemToMeta(p
);
81 u32
*end
= MemToMeta(p
+ sz
);
84 for (; meta
< end
; meta
++) {
90 if (idx
& kFlagBlock
) {
91 block_alloc_
.Free(&thr
->block_cache
, idx
& ~kFlagMask
);
93 } else if (idx
& kFlagSync
) {
94 DCHECK(idx
& kFlagSync
);
95 SyncVar
*s
= sync_alloc_
.Map(idx
& ~kFlagMask
);
98 sync_alloc_
.Free(&thr
->sync_cache
, idx
& ~kFlagMask
);
107 MBlock
* MetaMap::GetBlock(uptr p
) {
108 u32
*meta
= MemToMeta(p
);
113 if (idx
& kFlagBlock
)
114 return block_alloc_
.Map(idx
& ~kFlagMask
);
115 DCHECK(idx
& kFlagSync
);
116 SyncVar
* s
= sync_alloc_
.Map(idx
& ~kFlagMask
);
121 SyncVar
* MetaMap::GetOrCreateAndLock(ThreadState
*thr
, uptr pc
,
122 uptr addr
, bool write_lock
) {
123 return GetAndLock(thr
, pc
, addr
, write_lock
, true);
126 SyncVar
* MetaMap::GetIfExistsAndLock(uptr addr
) {
127 return GetAndLock(0, 0, addr
, true, false);
130 SyncVar
* MetaMap::GetAndLock(ThreadState
*thr
, uptr pc
,
131 uptr addr
, bool write_lock
, bool create
) {
132 u32
*meta
= MemToMeta(addr
);
141 if (idx
& kFlagBlock
)
143 DCHECK(idx
& kFlagSync
);
144 SyncVar
* s
= sync_alloc_
.Map(idx
& ~kFlagMask
);
145 if (s
->addr
== addr
) {
148 sync_alloc_
.Free(&thr
->sync_cache
, myidx
);
164 const u64 uid
= atomic_fetch_add(&uid_gen_
, 1, memory_order_relaxed
);
165 myidx
= sync_alloc_
.Alloc(&thr
->sync_cache
);
166 mys
= sync_alloc_
.Map(myidx
);
167 mys
->Init(thr
, pc
, addr
, uid
);
170 if (atomic_compare_exchange_strong((atomic_uint32_t
*)meta
, &idx0
,
171 myidx
| kFlagSync
, memory_order_release
)) {
181 void MetaMap::MoveMemory(uptr src
, uptr dst
, uptr sz
) {
182 // src and dst can overlap,
183 // there are no concurrent accesses to the regions (e.g. stop-the-world).
186 uptr diff
= dst
- src
;
187 u32
*src_meta
= MemToMeta(src
);
188 u32
*dst_meta
= MemToMeta(dst
);
189 u32
*src_meta_end
= MemToMeta(src
+ sz
);
192 src_meta
= MemToMeta(src
+ sz
) - 1;
193 dst_meta
= MemToMeta(dst
+ sz
) - 1;
194 src_meta_end
= MemToMeta(src
) - 1;
197 for (; src_meta
!= src_meta_end
; src_meta
+= inc
, dst_meta
+= inc
) {
198 CHECK_EQ(*dst_meta
, 0);
202 // Patch the addresses in sync objects.
204 if (idx
& kFlagBlock
)
206 CHECK(idx
& kFlagSync
);
207 SyncVar
*s
= sync_alloc_
.Map(idx
& ~kFlagMask
);
214 void MetaMap::OnThreadIdle(ThreadState
*thr
) {
215 block_alloc_
.FlushCache(&thr
->block_cache
);
216 sync_alloc_
.FlushCache(&thr
->sync_cache
);
219 } // namespace __tsan