Daily bump.
[official-gcc.git] / libsanitizer / tsan / tsan_sync.cc
blob f6f2cb731e7ae7d2d02de9f8d64224de57c6f456
1 //===-- tsan_sync.cc ------------------------------------------------------===//
2 //
3 // This file is distributed under the University of Illinois Open Source
4 // License. See LICENSE.TXT for details.
5 //
6 //===----------------------------------------------------------------------===//
7 //
8 // This file is a part of ThreadSanitizer (TSan), a race detector.
9 //
10 //===----------------------------------------------------------------------===//
11 #include "sanitizer_common/sanitizer_placement_new.h"
12 #include "tsan_sync.h"
13 #include "tsan_rtl.h"
14 #include "tsan_mman.h"
16 namespace __tsan {
18 void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s);
20 SyncVar::SyncVar(uptr addr, u64 uid)
21 : mtx(MutexTypeSyncVar, StatMtxSyncVar)
22 , addr(addr)
23 , uid(uid)
24 , creation_stack_id()
25 , owner_tid(kInvalidTid)
26 , last_lock()
27 , recursion()
28 , is_rw()
29 , is_recursive()
30 , is_broken()
31 , is_linker_init() {
34 SyncTab::Part::Part()
35 : mtx(MutexTypeSyncTab, StatMtxSyncTab)
36 , val() {
39 SyncTab::SyncTab() {
42 SyncTab::~SyncTab() {
43 for (int i = 0; i < kPartCount; i++) {
44 while (tab_[i].val) {
45 SyncVar *tmp = tab_[i].val;
46 tab_[i].val = tmp->next;
47 DestroyAndFree(tmp);
52 SyncVar* SyncTab::GetOrCreateAndLock(ThreadState *thr, uptr pc,
53 uptr addr, bool write_lock) {
54 return GetAndLock(thr, pc, addr, write_lock, true);
57 SyncVar* SyncTab::GetIfExistsAndLock(uptr addr, bool write_lock) {
58 return GetAndLock(0, 0, addr, write_lock, false);
61 SyncVar* SyncTab::Create(ThreadState *thr, uptr pc, uptr addr) {
62 StatInc(thr, StatSyncCreated);
63 void *mem = internal_alloc(MBlockSync, sizeof(SyncVar));
64 const u64 uid = atomic_fetch_add(&uid_gen_, 1, memory_order_relaxed);
65 SyncVar *res = new(mem) SyncVar(addr, uid);
66 res->creation_stack_id = 0;
67 if (!kGoMode) // Go does not use them
68 res->creation_stack_id = CurrentStackId(thr, pc);
69 if (flags()->detect_deadlocks)
70 DDMutexInit(thr, pc, res);
71 return res;
74 SyncVar* SyncTab::GetAndLock(ThreadState *thr, uptr pc,
75 uptr addr, bool write_lock, bool create) {
76 #ifndef TSAN_GO
77 { // NOLINT
78 SyncVar *res = GetJavaSync(thr, pc, addr, write_lock, create);
79 if (res)
80 return res;
83 // Here we ask only PrimaryAllocator, because
84 // SecondaryAllocator::PointerIsMine() is slow and we have fallback on
85 // the hashmap anyway.
86 if (PrimaryAllocator::PointerIsMine((void*)addr)) {
87 MBlock *b = user_mblock(thr, (void*)addr);
88 CHECK_NE(b, 0);
89 MBlock::ScopedLock l(b);
90 SyncVar *res = 0;
91 for (res = b->ListHead(); res; res = res->next) {
92 if (res->addr == addr)
93 break;
95 if (res == 0) {
96 if (!create)
97 return 0;
98 res = Create(thr, pc, addr);
99 b->ListPush(res);
101 if (write_lock)
102 res->mtx.Lock();
103 else
104 res->mtx.ReadLock();
105 return res;
107 #endif
109 Part *p = &tab_[PartIdx(addr)];
111 ReadLock l(&p->mtx);
112 for (SyncVar *res = p->val; res; res = res->next) {
113 if (res->addr == addr) {
114 if (write_lock)
115 res->mtx.Lock();
116 else
117 res->mtx.ReadLock();
118 return res;
122 if (!create)
123 return 0;
125 Lock l(&p->mtx);
126 SyncVar *res = p->val;
127 for (; res; res = res->next) {
128 if (res->addr == addr)
129 break;
131 if (res == 0) {
132 res = Create(thr, pc, addr);
133 res->next = p->val;
134 p->val = res;
136 if (write_lock)
137 res->mtx.Lock();
138 else
139 res->mtx.ReadLock();
140 return res;
144 SyncVar* SyncTab::GetAndRemove(ThreadState *thr, uptr pc, uptr addr) {
145 #ifndef TSAN_GO
146 { // NOLINT
147 SyncVar *res = GetAndRemoveJavaSync(thr, pc, addr);
148 if (res)
149 return res;
151 if (PrimaryAllocator::PointerIsMine((void*)addr)) {
152 MBlock *b = user_mblock(thr, (void*)addr);
153 CHECK_NE(b, 0);
154 SyncVar *res = 0;
156 MBlock::ScopedLock l(b);
157 res = b->ListHead();
158 if (res) {
159 if (res->addr == addr) {
160 if (res->is_linker_init)
161 return 0;
162 b->ListPop();
163 } else {
164 SyncVar **prev = &res->next;
165 res = *prev;
166 while (res) {
167 if (res->addr == addr) {
168 if (res->is_linker_init)
169 return 0;
170 *prev = res->next;
171 break;
173 prev = &res->next;
174 res = *prev;
177 if (res) {
178 StatInc(thr, StatSyncDestroyed);
179 res->mtx.Lock();
180 res->mtx.Unlock();
184 return res;
186 #endif
188 Part *p = &tab_[PartIdx(addr)];
189 SyncVar *res = 0;
191 Lock l(&p->mtx);
192 SyncVar **prev = &p->val;
193 res = *prev;
194 while (res) {
195 if (res->addr == addr) {
196 if (res->is_linker_init)
197 return 0;
198 *prev = res->next;
199 break;
201 prev = &res->next;
202 res = *prev;
205 if (res) {
206 StatInc(thr, StatSyncDestroyed);
207 res->mtx.Lock();
208 res->mtx.Unlock();
210 return res;
213 int SyncTab::PartIdx(uptr addr) {
214 return (addr >> 3) % kPartCount;
217 StackTrace::StackTrace()
218 : n_()
219 , s_()
220 , c_() {
223 StackTrace::StackTrace(uptr *buf, uptr cnt)
224 : n_()
225 , s_(buf)
226 , c_(cnt) {
227 CHECK_NE(buf, 0);
228 CHECK_NE(cnt, 0);
231 StackTrace::~StackTrace() {
232 Reset();
235 void StackTrace::Reset() {
236 if (s_ && !c_) {
237 CHECK_NE(n_, 0);
238 internal_free(s_);
239 s_ = 0;
241 n_ = 0;
244 void StackTrace::Init(const uptr *pcs, uptr cnt) {
245 Reset();
246 if (cnt == 0)
247 return;
248 if (c_) {
249 CHECK_NE(s_, 0);
250 CHECK_LE(cnt, c_);
251 } else {
252 s_ = (uptr*)internal_alloc(MBlockStackTrace, cnt * sizeof(s_[0]));
254 n_ = cnt;
255 internal_memcpy(s_, pcs, cnt * sizeof(s_[0]));
258 void StackTrace::ObtainCurrent(ThreadState *thr, uptr toppc) {
259 Reset();
260 n_ = thr->shadow_stack_pos - thr->shadow_stack;
261 if (n_ + !!toppc == 0)
262 return;
263 uptr start = 0;
264 if (c_) {
265 CHECK_NE(s_, 0);
266 if (n_ + !!toppc > c_) {
267 start = n_ - c_ + !!toppc;
268 n_ = c_ - !!toppc;
270 } else {
271 // Cap potentially huge stacks.
272 if (n_ + !!toppc > kTraceStackSize) {
273 start = n_ - kTraceStackSize + !!toppc;
274 n_ = kTraceStackSize - !!toppc;
276 s_ = (uptr*)internal_alloc(MBlockStackTrace,
277 (n_ + !!toppc) * sizeof(s_[0]));
279 for (uptr i = 0; i < n_; i++)
280 s_[i] = thr->shadow_stack[start + i];
281 if (toppc) {
282 s_[n_] = toppc;
283 n_++;
287 void StackTrace::CopyFrom(const StackTrace& other) {
288 Reset();
289 Init(other.Begin(), other.Size());
292 bool StackTrace::IsEmpty() const {
293 return n_ == 0;
296 uptr StackTrace::Size() const {
297 return n_;
300 uptr StackTrace::Get(uptr i) const {
301 CHECK_LT(i, n_);
302 return s_[i];
305 const uptr *StackTrace::Begin() const {
306 return s_;
309 } // namespace __tsan