//===-- tsan_sync.cc ------------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_sync.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"

namespace __tsan {
18 void DDMutexInit(ThreadState
*thr
, uptr pc
, SyncVar
*s
);
21 : mtx(MutexTypeSyncVar
, StatMtxSyncVar
) {
25 void SyncVar::Init(ThreadState
*thr
, uptr pc
, uptr addr
, u64 uid
) {
30 creation_stack_id
= 0;
31 if (kCppMode
) // Go does not use them
32 creation_stack_id
= CurrentStackId(thr
, pc
);
33 if (common_flags()->detect_deadlocks
)
34 DDMutexInit(thr
, pc
, this);
37 void SyncVar::Reset(ThreadState
*thr
) {
39 creation_stack_id
= 0;
40 owner_tid
= kInvalidTid
;
49 CHECK_EQ(clock
.size(), 0);
50 CHECK_EQ(read_clock
.size(), 0);
52 clock
.Reset(&thr
->clock_cache
);
53 read_clock
.Reset(&thr
->clock_cache
);
58 atomic_store(&uid_gen_
, 0, memory_order_relaxed
);
61 void MetaMap::AllocBlock(ThreadState
*thr
, uptr pc
, uptr p
, uptr sz
) {
62 u32 idx
= block_alloc_
.Alloc(&thr
->block_cache
);
63 MBlock
*b
= block_alloc_
.Map(idx
);
66 b
->stk
= CurrentStackId(thr
, pc
);
67 u32
*meta
= MemToMeta(p
);
69 *meta
= idx
| kFlagBlock
;
72 uptr
MetaMap::FreeBlock(ThreadState
*thr
, uptr pc
, uptr p
) {
73 MBlock
* b
= GetBlock(p
);
76 uptr sz
= RoundUpTo(b
->siz
, kMetaShadowCell
);
77 FreeRange(thr
, pc
, p
, sz
);
81 void MetaMap::FreeRange(ThreadState
*thr
, uptr pc
, uptr p
, uptr sz
) {
82 u32
*meta
= MemToMeta(p
);
83 u32
*end
= MemToMeta(p
+ sz
);
86 for (; meta
< end
; meta
++) {
92 if (idx
& kFlagBlock
) {
93 block_alloc_
.Free(&thr
->block_cache
, idx
& ~kFlagMask
);
95 } else if (idx
& kFlagSync
) {
96 DCHECK(idx
& kFlagSync
);
97 SyncVar
*s
= sync_alloc_
.Map(idx
& ~kFlagMask
);
100 sync_alloc_
.Free(&thr
->sync_cache
, idx
& ~kFlagMask
);
109 MBlock
* MetaMap::GetBlock(uptr p
) {
110 u32
*meta
= MemToMeta(p
);
115 if (idx
& kFlagBlock
)
116 return block_alloc_
.Map(idx
& ~kFlagMask
);
117 DCHECK(idx
& kFlagSync
);
118 SyncVar
* s
= sync_alloc_
.Map(idx
& ~kFlagMask
);
123 SyncVar
* MetaMap::GetOrCreateAndLock(ThreadState
*thr
, uptr pc
,
124 uptr addr
, bool write_lock
) {
125 return GetAndLock(thr
, pc
, addr
, write_lock
, true);
128 SyncVar
* MetaMap::GetIfExistsAndLock(uptr addr
) {
129 return GetAndLock(0, 0, addr
, true, false);
132 SyncVar
* MetaMap::GetAndLock(ThreadState
*thr
, uptr pc
,
133 uptr addr
, bool write_lock
, bool create
) {
134 u32
*meta
= MemToMeta(addr
);
143 if (idx
& kFlagBlock
)
145 DCHECK(idx
& kFlagSync
);
146 SyncVar
* s
= sync_alloc_
.Map(idx
& ~kFlagMask
);
147 if (s
->addr
== addr
) {
150 sync_alloc_
.Free(&thr
->sync_cache
, myidx
);
168 const u64 uid
= atomic_fetch_add(&uid_gen_
, 1, memory_order_relaxed
);
169 myidx
= sync_alloc_
.Alloc(&thr
->sync_cache
);
170 mys
= sync_alloc_
.Map(myidx
);
171 mys
->Init(thr
, pc
, addr
, uid
);
174 if (atomic_compare_exchange_strong((atomic_uint32_t
*)meta
, &idx0
,
175 myidx
| kFlagSync
, memory_order_release
)) {
185 void MetaMap::MoveMemory(uptr src
, uptr dst
, uptr sz
) {
186 // src and dst can overlap,
187 // there are no concurrent accesses to the regions (e.g. stop-the-world).
190 uptr diff
= dst
- src
;
191 u32
*src_meta
= MemToMeta(src
);
192 u32
*dst_meta
= MemToMeta(dst
);
193 u32
*src_meta_end
= MemToMeta(src
+ sz
);
196 src_meta
= MemToMeta(src
+ sz
) - 1;
197 dst_meta
= MemToMeta(dst
+ sz
) - 1;
198 src_meta_end
= MemToMeta(src
) - 1;
201 for (; src_meta
!= src_meta_end
; src_meta
+= inc
, dst_meta
+= inc
) {
202 CHECK_EQ(*dst_meta
, 0);
206 // Patch the addresses in sync objects.
208 if (idx
& kFlagBlock
)
210 CHECK(idx
& kFlagSync
);
211 SyncVar
*s
= sync_alloc_
.Map(idx
& ~kFlagMask
);
218 void MetaMap::OnThreadIdle(ThreadState
*thr
) {
219 block_alloc_
.FlushCache(&thr
->block_cache
);
220 sync_alloc_
.FlushCache(&thr
->sync_cache
);
223 } // namespace __tsan