//===-- tsan_sync.cc ------------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_sync.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"

namespace __tsan {

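// SyncVar holds per-address synchronization state (vector clocks, owner
// thread, mutex flags); SyncTab maps application addresses to SyncVars
// via a sharded hash table; StackTrace stores captured call stacks.
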
SyncVar::SyncVar(uptr addr, u64 uid)
  : mtx(MutexTypeSyncVar, StatMtxSyncVar)
  , addr(addr)
  , uid(uid)
  , owner_tid(kInvalidTid)
  , last_lock()
  , recursion()
  , is_rw()
  , is_recursive()
  , is_broken()
  , is_linker_init() {
}

SyncTab::Part::Part()
  : mtx(MutexTypeSyncTab, StatMtxSyncTab)
  , val() {
}

SyncTab::SyncTab() {
}

SyncTab::~SyncTab() {
  for (int i = 0; i < kPartCount; i++) {
    while (tab_[i].val) {
      SyncVar *tmp = tab_[i].val;
      tab_[i].val = tmp->next;
      DestroyAndFree(tmp);
    }
  }
}

SyncVar* SyncTab::GetOrCreateAndLock(ThreadState *thr, uptr pc,
                                     uptr addr, bool write_lock) {
  return GetAndLock(thr, pc, addr, write_lock, true);
}

SyncVar* SyncTab::GetIfExistsAndLock(uptr addr, bool write_lock) {
  return GetAndLock(0, 0, addr, write_lock, false);
}

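// Allocates a fresh SyncVar with a unique id drawn from the atomic
// uid_gen_ counter; native (non-Go) builds also record the current stack
// as the creation stack.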
SyncVar* SyncTab::Create(ThreadState *thr, uptr pc, uptr addr) {
  StatInc(thr, StatSyncCreated);
  void *mem = internal_alloc(MBlockSync, sizeof(SyncVar));
  const u64 uid = atomic_fetch_add(&uid_gen_, 1, memory_order_relaxed);
  SyncVar *res = new(mem) SyncVar(addr, uid);
#ifndef TSAN_GO
  res->creation_stack.ObtainCurrent(thr, pc);
#endif
  return res;
}

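// Lookup proceeds in tiers: Java-heap addresses are delegated to
// GetJavaSync(); addresses owned by the primary allocator keep their
// SyncVars on the owning memory block's list; everything else goes to
// the sharded hashmap. A typical caller (a sketch, modeled on the mutex
// hooks elsewhere in the runtime) looks like:
//   SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
//   ... read or update the sync state ...
//   s->mtx.Unlock();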
SyncVar* SyncTab::GetAndLock(ThreadState *thr, uptr pc,
                             uptr addr, bool write_lock, bool create) {
#ifndef TSAN_GO
  { // NOLINT
    SyncVar *res = GetJavaSync(thr, pc, addr, write_lock, create);
    if (res)
      return res;
  }

  // Here we ask only PrimaryAllocator, because
  // SecondaryAllocator::PointerIsMine() is slow and we fall back to
  // the hashmap anyway.
  if (PrimaryAllocator::PointerIsMine((void*)addr)) {
    MBlock *b = user_mblock(thr, (void*)addr);
    Lock l(&b->mtx);
    SyncVar *res = 0;
    for (res = b->head; res; res = res->next) {
      if (res->addr == addr)
        break;
    }
    if (res == 0) {
      if (!create)
        return 0;
      res = Create(thr, pc, addr);
      res->next = b->head;
      b->head = res;
    }
    if (write_lock)
      res->mtx.Lock();
    else
      res->mtx.ReadLock();
    return res;
  }
#endif

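  // Fall back to the sharded hashmap for all other addresses.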
  Part *p = &tab_[PartIdx(addr)];
  {
    ReadLock l(&p->mtx);
    for (SyncVar *res = p->val; res; res = res->next) {
      if (res->addr == addr) {
        if (write_lock)
          res->mtx.Lock();
        else
          res->mtx.ReadLock();
        return res;
      }
    }
  }
  if (!create)
    return 0;
  {
    Lock l(&p->mtx);
    SyncVar *res = p->val;
    for (; res; res = res->next) {
      if (res->addr == addr)
        break;
    }
    if (res == 0) {
      res = Create(thr, pc, addr);
      res->next = p->val;
      p->val = res;
    }
    if (write_lock)
      res->mtx.Lock();
    else
      res->mtx.ReadLock();
    return res;
  }
}

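// Detaches the SyncVar for addr from its list and returns it, or returns
// 0 for linker-initialized variables, which are never removed. The
// Lock()/Unlock() pair on the detached variable presumably waits for
// concurrent holders to drain before the caller destroys it.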
SyncVar* SyncTab::GetAndRemove(ThreadState *thr, uptr pc, uptr addr) {
#ifndef TSAN_GO
  { // NOLINT
    SyncVar *res = GetAndRemoveJavaSync(thr, pc, addr);
    if (res)
      return res;
  }
  if (PrimaryAllocator::PointerIsMine((void*)addr)) {
    MBlock *b = user_mblock(thr, (void*)addr);
    SyncVar *res = 0;
    {
      Lock l(&b->mtx);
      SyncVar **prev = &b->head;
      res = *prev;
      while (res) {
        if (res->addr == addr) {
          if (res->is_linker_init)
            return 0;
          *prev = res->next;
          break;
        }
        prev = &res->next;
        res = *prev;
      }
    }
    if (res) {
      StatInc(thr, StatSyncDestroyed);
      res->mtx.Lock();
      res->mtx.Unlock();
    }
    return res;
  }
#endif

  Part *p = &tab_[PartIdx(addr)];
  SyncVar *res = 0;
  {
    Lock l(&p->mtx);
    SyncVar **prev = &p->val;
    res = *prev;
    while (res) {
      if (res->addr == addr) {
        if (res->is_linker_init)
          return 0;
        *prev = res->next;
        break;
      }
      prev = &res->next;
      res = *prev;
    }
  }
  if (res) {
    StatInc(thr, StatSyncDestroyed);
    res->mtx.Lock();
    res->mtx.Unlock();
  }
  return res;
}

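// Approximate footprint of one SyncVar: the object itself plus its two
// vector clocks and its creation stack.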
uptr SyncVar::GetMemoryConsumption() {
  return sizeof(*this)
      + clock.size() * sizeof(u64)
      + read_clock.size() * sizeof(u64)
      + creation_stack.Size() * sizeof(uptr);
}

uptr SyncTab::GetMemoryConsumption(uptr *nsync) {
  uptr mem = 0;
  for (int i = 0; i < kPartCount; i++) {
    Part *p = &tab_[i];
    Lock l(&p->mtx);
    for (SyncVar *s = p->val; s; s = s->next) {
      *nsync += 1;
      mem += s->GetMemoryConsumption();
    }
  }
  return mem;
}

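// Maps an address to one of the kPartCount shards. The low three bits
// are discarded first, presumably because distinct sync addresses are
// at least 8 bytes apart, so those bits carry no information.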
int SyncTab::PartIdx(uptr addr) {
  return (addr >> 3) % kPartCount;
}

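// StackTrace keeps its pcs in s_ (n_ entries). When constructed over a
// caller-supplied buffer, c_ records the fixed capacity and the buffer
// is never freed; when c_ is 0, storage is obtained with internal_alloc()
// and released in Reset().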
StackTrace::StackTrace()
    : n_()
    , s_()
    , c_() {
}

StackTrace::StackTrace(uptr *buf, uptr cnt)
    : n_()
    , s_(buf)
    , c_(cnt) {
  CHECK_NE(buf, 0);
  CHECK_NE(cnt, 0);
}

StackTrace::~StackTrace() {
  Reset();
}

void StackTrace::Reset() {
  if (s_ && !c_) {
    CHECK_NE(n_, 0);
    internal_free(s_);
    s_ = 0;
  }
  n_ = 0;
}

void StackTrace::Init(const uptr *pcs, uptr cnt) {
  Reset();
  if (cnt == 0)
    return;
  if (c_) {
    CHECK_NE(s_, 0);
    CHECK_LE(cnt, c_);
  } else {
    s_ = (uptr*)internal_alloc(MBlockStackTrace, cnt * sizeof(s_[0]));
  }
  n_ = cnt;
  internal_memcpy(s_, pcs, cnt * sizeof(s_[0]));
}

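// Captures the current shadow call stack of thr, optionally appending
// toppc as the innermost frame. With a fixed-capacity buffer, the oldest
// frames are dropped so that the innermost frames are kept.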
void StackTrace::ObtainCurrent(ThreadState *thr, uptr toppc) {
  Reset();
  n_ = thr->shadow_stack_pos - thr->shadow_stack;
  if (n_ + !!toppc == 0)
    return;
  uptr start = 0;
  if (c_) {
    CHECK_NE(s_, 0);
    if (n_ + !!toppc > c_) {
      start = n_ - c_ + !!toppc;
      n_ = c_ - !!toppc;
    }
  } else {
    s_ = (uptr*)internal_alloc(MBlockStackTrace,
                               (n_ + !!toppc) * sizeof(s_[0]));
  }
  for (uptr i = 0; i < n_; i++)
    s_[i] = thr->shadow_stack[start + i];
  if (toppc) {
    s_[n_] = toppc;
    n_++;
  }
}

void StackTrace::CopyFrom(const StackTrace& other) {
  Reset();
  Init(other.Begin(), other.Size());
}

bool StackTrace::IsEmpty() const {
  return n_ == 0;
}

uptr StackTrace::Size() const {
  return n_;
}

uptr StackTrace::Get(uptr i) const {
  CHECK_LT(i, n_);
  return s_[i];
}

const uptr *StackTrace::Begin() const {
  return s_;
}

}  // namespace __tsan