//===-- tsan_sync.cc ------------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_sync.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"

namespace __tsan {
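
// A SyncVar carries the per-address synchronization state TSan tracks for a
// user sync object: the shadowed user address, a unique id, the owner thread
// of a held mutex, the most recent lock event, and the recursion counter plus
// rw/recursive/broken/linker-init flags (see the initializer list below).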
SyncVar::SyncVar(uptr addr, u64 uid)
  : mtx(MutexTypeSyncVar, StatMtxSyncVar)
  , addr(addr)
  , uid(uid)
  , owner_tid(kInvalidTid)
  , last_lock()
  , recursion()
  , is_rw()
  , is_recursive()
  , is_broken()
  , is_linker_init() {
}

SyncTab::Part::Part()
  : mtx(MutexTypeSyncTab, StatMtxSyncTab)
  , val() {
}

SyncTab::SyncTab() {
}

SyncTab::~SyncTab() {
  for (int i = 0; i < kPartCount; i++) {
    while (tab_[i].val) {
      SyncVar *tmp = tab_[i].val;
      tab_[i].val = tmp->next;
      DestroyAndFree(tmp);
    }
  }
}
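
// The two lookup entry points below differ only in whether a missing SyncVar
// is created on demand. On success the returned SyncVar has its mtx held:
// exclusively when write_lock is true, shared otherwise; the caller is
// responsible for unlocking it. A hypothetical caller (thr, pc and the
// synctab instance are assumptions, not part of this file):
//   SyncVar *s = synctab->GetOrCreateAndLock(thr, pc, addr, true);
//   ... inspect or update *s ...
//   s->mtx.Unlock();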
SyncVar* SyncTab::GetOrCreateAndLock(ThreadState *thr, uptr pc,
                                     uptr addr, bool write_lock) {
  return GetAndLock(thr, pc, addr, write_lock, true);
}

SyncVar* SyncTab::GetIfExistsAndLock(uptr addr, bool write_lock) {
  return GetAndLock(0, 0, addr, write_lock, false);
}
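
// Allocates and constructs a fresh SyncVar. The relaxed fetch-add hands out
// monotonically increasing uids; presumably this lets the runtime tell apart
// distinct SyncVars that reincarnate at the same address after a free.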
SyncVar* SyncTab::Create(ThreadState *thr, uptr pc, uptr addr) {
  StatInc(thr, StatSyncCreated);
  void *mem = internal_alloc(MBlockSync, sizeof(SyncVar));
  const u64 uid = atomic_fetch_add(&uid_gen_, 1, memory_order_relaxed);
  SyncVar *res = new(mem) SyncVar(addr, uid);
#ifndef TSAN_GO
  res->creation_stack_id = CurrentStackId(thr, pc);
#endif
  return res;
}
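
// Looks up (and optionally creates) the SyncVar for addr, trying three
// storage tiers in turn: Java-heap sync objects, the per-MBlock list for
// addresses owned by the primary allocator, and finally the sharded hashmap.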
SyncVar* SyncTab::GetAndLock(ThreadState *thr, uptr pc,
                             uptr addr, bool write_lock, bool create) {
#ifndef TSAN_GO
  {  // NOLINT
    SyncVar *res = GetJavaSync(thr, pc, addr, write_lock, create);
    if (res)
      return res;
  }

  // Here we ask only PrimaryAllocator, because
  // SecondaryAllocator::PointerIsMine() is slow and we have a fallback on
  // the hashmap anyway.
  if (PrimaryAllocator::PointerIsMine((void*)addr)) {
    MBlock *b = user_mblock(thr, (void*)addr);
    CHECK_NE(b, 0);
    MBlock::ScopedLock l(b);
    SyncVar *res = 0;
    for (res = b->ListHead(); res; res = res->next) {
      if (res->addr == addr)
        break;
    }
    if (res == 0) {
      if (!create)
        return 0;
      res = Create(thr, pc, addr);
      b->ListPush(res);
    }
    if (write_lock)
      res->mtx.Lock();
    else
      res->mtx.ReadLock();
    return res;
  }
#endif

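  // Hashmap fallback. The first pass only read-locks the part, which is
  // enough for the common case of an existing SyncVar; on a miss the part
  // mutex is retaken exclusively and the list is re-checked before inserting,
  // so concurrent creators cannot produce duplicates.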
  Part *p = &tab_[PartIdx(addr)];
  {
    ReadLock l(&p->mtx);
    for (SyncVar *res = p->val; res; res = res->next) {
      if (res->addr == addr) {
        if (write_lock)
          res->mtx.Lock();
        else
          res->mtx.ReadLock();
        return res;
      }
    }
  }
  if (!create)
    return 0;
  {
    Lock l(&p->mtx);
    SyncVar *res = p->val;
    for (; res; res = res->next) {
      if (res->addr == addr)
        break;
    }
    if (res == 0) {
      res = Create(thr, pc, addr);
      res->next = p->val;
      p->val = res;
    }
    if (write_lock)
      res->mtx.Lock();
    else
      res->mtx.ReadLock();
    return res;
  }
}
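
// Detaches and returns the SyncVar for addr (e.g. when the underlying memory
// is freed), or returns 0 if there is none or it is linker-initialized (those
// are never removed). The lock/unlock pair on the detached SyncVar waits for
// any thread still holding its mutex to drain before the caller destroys it.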
SyncVar* SyncTab::GetAndRemove(ThreadState *thr, uptr pc, uptr addr) {
#ifndef TSAN_GO
  {  // NOLINT
    SyncVar *res = GetAndRemoveJavaSync(thr, pc, addr);
    if (res)
      return res;
  }
  if (PrimaryAllocator::PointerIsMine((void*)addr)) {
    MBlock *b = user_mblock(thr, (void*)addr);
    CHECK_NE(b, 0);
    SyncVar *res = 0;
    {
      MBlock::ScopedLock l(b);
      res = b->ListHead();
      if (res) {
        if (res->addr == addr) {
          if (res->is_linker_init)
            return 0;
          b->ListPop();
        } else {
          SyncVar **prev = &res->next;
          res = *prev;
          while (res) {
            if (res->addr == addr) {
              if (res->is_linker_init)
                return 0;
              *prev = res->next;
              break;
            }
            prev = &res->next;
            res = *prev;
          }
        }
        if (res) {
          StatInc(thr, StatSyncDestroyed);
          res->mtx.Lock();
          res->mtx.Unlock();
        }
      }
    }
    return res;
  }
#endif

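  // Not an allocator-owned address: unlink the SyncVar from its hashmap part.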
  Part *p = &tab_[PartIdx(addr)];
  SyncVar *res = 0;
  {
    Lock l(&p->mtx);
    SyncVar **prev = &p->val;
    res = *prev;
    while (res) {
      if (res->addr == addr) {
        if (res->is_linker_init)
          return 0;
        *prev = res->next;
        break;
      }
      prev = &res->next;
      res = *prev;
    }
  }
  if (res) {
    StatInc(thr, StatSyncDestroyed);
    res->mtx.Lock();
    res->mtx.Unlock();
  }
  return res;
}
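
// Maps an address to its hashmap part. The low 3 bits are dropped first,
// presumably because sync addresses are at least 8-byte aligned and those
// bits would contribute no entropy to the shard index.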
int SyncTab::PartIdx(uptr addr) {
  return (addr >> 3) % kPartCount;
}
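
// A StackTrace either owns a heap buffer (default constructor: c_ == 0 and
// s_ is freed in Reset) or wraps a caller-provided fixed buffer of capacity
// cnt (second constructor: the buffer is never freed here).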
StackTrace::StackTrace()
  : n_()
  , s_()
  , c_() {
}

StackTrace::StackTrace(uptr *buf, uptr cnt)
  : n_()
  , s_(buf)
  , c_(cnt) {
  CHECK_NE(buf, 0);
  CHECK_NE(cnt, 0);
}

StackTrace::~StackTrace() {
  Reset();
}

void StackTrace::Reset() {
  if (s_ && !c_) {
    CHECK_NE(n_, 0);
    internal_free(s_);
    s_ = 0;
  }
  n_ = 0;
}

void StackTrace::Init(const uptr *pcs, uptr cnt) {
  Reset();
  if (cnt == 0)
    return;
  if (c_) {
    CHECK_NE(s_, 0);
    CHECK_LE(cnt, c_);
  } else {
    s_ = (uptr*)internal_alloc(MBlockStackTrace, cnt * sizeof(s_[0]));
  }
  n_ = cnt;
  internal_memcpy(s_, pcs, cnt * sizeof(s_[0]));
}
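
// Snapshots the current shadow stack of thr, optionally appending toppc as
// the innermost frame (!!toppc accounts for that extra slot). If a fixed
// buffer is too small, start is advanced so the outermost frames are dropped
// and the innermost frames survive.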
void StackTrace::ObtainCurrent(ThreadState *thr, uptr toppc) {
  Reset();
  n_ = thr->shadow_stack_pos - thr->shadow_stack;
  if (n_ + !!toppc == 0)
    return;
  uptr start = 0;
  if (c_) {
    CHECK_NE(s_, 0);
    if (n_ + !!toppc > c_) {
      start = n_ - c_ + !!toppc;
      n_ = c_ - !!toppc;
    }
  } else {
    s_ = (uptr*)internal_alloc(MBlockStackTrace,
                               (n_ + !!toppc) * sizeof(s_[0]));
  }
  for (uptr i = 0; i < n_; i++)
    s_[i] = thr->shadow_stack[start + i];
  if (toppc) {
    s_[n_] = toppc;
    n_++;
  }
}

void StackTrace::CopyFrom(const StackTrace& other) {
  Reset();
  Init(other.Begin(), other.Size());
}

bool StackTrace::IsEmpty() const {
  return n_ == 0;
}

uptr StackTrace::Size() const {
  return n_;
}

uptr StackTrace::Get(uptr i) const {
  CHECK_LT(i, n_);
  return s_[i];
}

const uptr *StackTrace::Begin() const {
  return s_;
}

}  // namespace __tsan