* config.sub: Merge from config repo.
[official-gcc.git] / libsanitizer / tsan / tsan_rtl_mutex.cc
blobe5b43be6a4972796a4ff81f1fcb3174f4e91ad34
1 //===-- tsan_rtl_mutex.cc -------------------------------------------------===//
2 //
3 // This file is distributed under the University of Illinois Open Source
4 // License. See LICENSE.TXT for details.
5 //
6 //===----------------------------------------------------------------------===//
7 //
8 // This file is a part of ThreadSanitizer (TSan), a race detector.
9 //
10 //===----------------------------------------------------------------------===//
12 #include "tsan_rtl.h"
13 #include "tsan_flags.h"
14 #include "tsan_sync.h"
15 #include "tsan_report.h"
16 #include "tsan_symbolize.h"
17 #include "tsan_platform.h"
19 namespace __tsan {
21 void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
22 bool rw, bool recursive, bool linker_init) {
23 Context *ctx = CTX();
24 CHECK_GT(thr->in_rtl, 0);
25 DPrintf("#%d: MutexCreate %zx\n", thr->tid, addr);
26 StatInc(thr, StatMutexCreate);
27 if (!linker_init && IsAppMem(addr))
28 MemoryWrite1Byte(thr, pc, addr);
29 SyncVar *s = ctx->synctab.GetAndLock(thr, pc, addr, true);
30 s->is_rw = rw;
31 s->is_recursive = recursive;
32 s->is_linker_init = linker_init;
33 s->mtx.Unlock();
// Handles destruction of the mutex at addr: removes its SyncVar from the
// sync table and, if enabled, reports destruction of a still-locked mutex.
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) {
  Context *ctx = CTX();
  CHECK_GT(thr->in_rtl, 0);
  DPrintf("#%d: MutexDestroy %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexDestroy);
#ifndef TSAN_GO
  // Global mutexes not marked as LINKER_INITIALIZED
  // cause tons of not interesting reports, so just ignore it.
  if (IsGlobalVar(addr))
    return;
#endif
  // GetAndRemove detaches the SyncVar so no other thread can find it;
  // we then own it exclusively until DestroyAndFree below.
  SyncVar *s = ctx->synctab.GetAndRemove(thr, pc, addr);
  if (s == 0)
    return;
  if (IsAppMem(addr))
    MemoryWrite1Byte(thr, pc, addr);
  // Optionally report "destroy of a locked mutex" (once per mutex:
  // is_broken suppresses duplicate reports for the same SyncVar).
  if (flags()->report_destroy_locked
      && s->owner_tid != SyncVar::kInvalidTid
      && !s->is_broken) {
    s->is_broken = true;
    ScopedReport rep(ReportTypeMutexDestroyLocked);
    rep.AddMutex(s);
    // First stack: where the destroy happens (current stack).
    StackTrace trace;
    trace.ObtainCurrent(thr, pc);
    rep.AddStack(&trace);
    // Second stack: where the last lock was taken, reconstructed from
    // the trace via the (tid, epoch) saved in s->last_lock.
    FastState last(s->last_lock);
    RestoreStack(last.tid(), last.epoch(), &trace);
    rep.AddStack(&trace);
    rep.AddLocation(s->addr, 1);
    OutputReport(ctx, rep);
  }
  DestroyAndFree(s);
}
// Models acquisition of a write (exclusive) lock on the mutex at addr:
// records the lock event in the trace and acquires the mutex's clocks.
void MutexLock(ThreadState *thr, uptr pc, uptr addr) {
  CHECK_GT(thr->in_rtl, 0);
  DPrintf("#%d: MutexLock %zx\n", thr->tid, addr);
  if (IsAppMem(addr))
    MemoryRead1Byte(thr, pc, addr);
  // The lock event must be traced under a fresh epoch so that
  // RestoreStack() can later reconstruct the locking stack from it.
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeLock, addr);
  SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, addr, true);
  if (s->owner_tid == SyncVar::kInvalidTid) {
    // First acquisition: take ownership and remember (tid, epoch)
    // of this lock for later report reconstruction.
    CHECK_EQ(s->recursion, 0);
    s->owner_tid = thr->tid;
    s->last_lock = thr->fast_state.raw();
  } else if (s->owner_tid == thr->tid) {
    // Re-acquisition by the current owner (recursive lock).
    CHECK_GT(s->recursion, 0);
  } else {
    // Locked by another thread — the app's mutex is broken/misused.
    Printf("ThreadSanitizer WARNING: double lock\n");
    PrintCurrentStack(thr, pc);
  }
  if (s->recursion == 0) {
    // Outermost acquisition: synchronize with previous writers
    // (s->clock) and previous readers (s->read_clock).
    StatInc(thr, StatMutexLock);
    thr->clock.set(thr->tid, thr->fast_state.epoch());
    thr->clock.acquire(&s->clock);
    StatInc(thr, StatSyncAcquire);
    thr->clock.acquire(&s->read_clock);
    StatInc(thr, StatSyncAcquire);
  } else if (!s->is_recursive) {
    StatInc(thr, StatMutexRecLock);
  }
  s->recursion++;
  s->mtx.Unlock();
}
// Models release of a write (exclusive) lock on the mutex at addr:
// records the unlock event and release-stores this thread's clock.
void MutexUnlock(ThreadState *thr, uptr pc, uptr addr) {
  CHECK_GT(thr->in_rtl, 0);
  DPrintf("#%d: MutexUnlock %zx\n", thr->tid, addr);
  if (IsAppMem(addr))
    MemoryRead1Byte(thr, pc, addr);
  // Trace the unlock under a fresh epoch (mirrors MutexLock).
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, addr);
  SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, addr, true);
  if (s->recursion == 0) {
    // Unlock of a mutex nobody holds; warn once per SyncVar.
    if (!s->is_broken) {
      s->is_broken = true;
      Printf("ThreadSanitizer WARNING: unlock of unlocked mutex\n");
      PrintCurrentStack(thr, pc);
    }
  } else if (s->owner_tid != thr->tid) {
    // Held, but by some other thread; warn once per SyncVar.
    if (!s->is_broken) {
      s->is_broken = true;
      Printf("ThreadSanitizer WARNING: mutex unlock by another thread\n");
      PrintCurrentStack(thr, pc);
    }
  } else {
    s->recursion--;
    if (s->recursion == 0) {
      // Outermost unlock: drop ownership and publish this thread's
      // clock to subsequent lockers via ReleaseStore.
      StatInc(thr, StatMutexUnlock);
      s->owner_tid = SyncVar::kInvalidTid;
      thr->clock.set(thr->tid, thr->fast_state.epoch());
      thr->fast_synch_epoch = thr->fast_state.epoch();
      thr->clock.ReleaseStore(&s->clock);
      StatInc(thr, StatSyncRelease);
    } else {
      // Inner unlock of a recursive lock: no synchronization effect.
      StatInc(thr, StatMutexRecUnlock);
    }
  }
  s->mtx.Unlock();
}
// Models acquisition of a read (shared) lock on the mutex at addr.
// Only acquires the writers' clock; read locks do not synchronize
// with each other. Takes the SyncVar lock in read mode since no
// owner/recursion state is mutated.
void MutexReadLock(ThreadState *thr, uptr pc, uptr addr) {
  CHECK_GT(thr->in_rtl, 0);
  DPrintf("#%d: MutexReadLock %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexReadLock);
  if (IsAppMem(addr))
    MemoryRead1Byte(thr, pc, addr);
  // Trace the read-lock event under a fresh epoch.
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeRLock, addr);
  SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, addr, false);
  if (s->owner_tid != SyncVar::kInvalidTid) {
    // A write owner exists — the app is misusing the rwlock.
    Printf("ThreadSanitizer WARNING: read lock of a write locked mutex\n");
    PrintCurrentStack(thr, pc);
  }
  // Synchronize with previous writers of this mutex.
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  thr->clock.acquire(&s->clock);
  s->last_lock = thr->fast_state.raw();
  StatInc(thr, StatSyncAcquire);
  s->mtx.ReadUnlock();
}
// Models release of a read (shared) lock on the mutex at addr.
// Releases into s->read_clock, which the next write-locker acquires;
// needs the SyncVar write lock because read_clock is mutated.
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
  CHECK_GT(thr->in_rtl, 0);
  DPrintf("#%d: MutexReadUnlock %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexReadUnlock);
  if (IsAppMem(addr))
    MemoryRead1Byte(thr, pc, addr);
  // Trace the read-unlock event under a fresh epoch.
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, addr);
  SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, addr, true);
  if (s->owner_tid != SyncVar::kInvalidTid) {
    // There is a write owner — a read unlock cannot be legal here.
    Printf("ThreadSanitizer WARNING: read unlock of a write "
               "locked mutex\n");
    PrintCurrentStack(thr, pc);
  }
  // Publish this thread's clock to the mutex's reader clock.
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  thr->fast_synch_epoch = thr->fast_state.epoch();
  thr->clock.release(&s->read_clock);
  StatInc(thr, StatSyncRelease);
  s->mtx.Unlock();
}
// Unlock when the caller does not know whether the lock was taken for
// read or write (e.g. pthread_rwlock_unlock): infers the mode from the
// SyncVar's owner state and dispatches to the matching release logic.
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
  CHECK_GT(thr->in_rtl, 0);
  DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr->tid, addr);
  if (IsAppMem(addr))
    MemoryRead1Byte(thr, pc, addr);
  SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, addr, true);
  if (s->owner_tid == SyncVar::kInvalidTid) {
    // Seems to be read unlock.
    StatInc(thr, StatMutexReadUnlock);
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, addr);
    // Mirrors MutexReadUnlock: release into the reader clock.
    thr->clock.set(thr->tid, thr->fast_state.epoch());
    thr->fast_synch_epoch = thr->fast_state.epoch();
    thr->clock.release(&s->read_clock);
    StatInc(thr, StatSyncRelease);
  } else if (s->owner_tid == thr->tid) {
    // Seems to be write unlock.
    CHECK_GT(s->recursion, 0);
    s->recursion--;
    if (s->recursion == 0) {
      StatInc(thr, StatMutexUnlock);
      s->owner_tid = SyncVar::kInvalidTid;
      // FIXME: Refactor me, plz.
      // The sequence of events is quite tricky and doubled in several places.
      // First, it's a bug to increment the epoch w/o writing to the trace.
      // Then, the acquire/release logic can be factored out as well.
      thr->fast_state.IncrementEpoch();
      TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, addr);
      // Mirrors MutexUnlock: release-store into the writer clock.
      thr->clock.set(thr->tid, thr->fast_state.epoch());
      thr->fast_synch_epoch = thr->fast_state.epoch();
      thr->clock.ReleaseStore(&s->clock);
      StatInc(thr, StatSyncRelease);
    } else {
      StatInc(thr, StatMutexRecUnlock);
    }
  } else if (!s->is_broken) {
    // Held by a different thread; warn once per SyncVar.
    s->is_broken = true;
    Printf("ThreadSanitizer WARNING: mutex unlock by another thread\n");
    PrintCurrentStack(thr, pc);
  }
  s->mtx.Unlock();
}
222 void Acquire(ThreadState *thr, uptr pc, uptr addr) {
223 CHECK_GT(thr->in_rtl, 0);
224 DPrintf("#%d: Acquire %zx\n", thr->tid, addr);
225 SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, addr, false);
226 thr->clock.set(thr->tid, thr->fast_state.epoch());
227 thr->clock.acquire(&s->clock);
228 StatInc(thr, StatSyncAcquire);
229 s->mtx.ReadUnlock();
232 void AcquireGlobal(ThreadState *thr, uptr pc) {
233 Context *ctx = CTX();
234 Lock l(&ctx->thread_mtx);
235 for (unsigned i = 0; i < kMaxTid; i++) {
236 ThreadContext *tctx = ctx->threads[i];
237 if (tctx == 0)
238 continue;
239 if (tctx->status == ThreadStatusRunning)
240 thr->clock.set(i, tctx->thr->fast_state.epoch());
241 else
242 thr->clock.set(i, tctx->epoch1);
246 void Release(ThreadState *thr, uptr pc, uptr addr) {
247 CHECK_GT(thr->in_rtl, 0);
248 DPrintf("#%d: Release %zx\n", thr->tid, addr);
249 SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, addr, true);
250 thr->clock.set(thr->tid, thr->fast_state.epoch());
251 thr->clock.release(&s->clock);
252 StatInc(thr, StatSyncRelease);
253 s->mtx.Unlock();
256 void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) {
257 CHECK_GT(thr->in_rtl, 0);
258 DPrintf("#%d: ReleaseStore %zx\n", thr->tid, addr);
259 SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, addr, true);
260 thr->clock.set(thr->tid, thr->fast_state.epoch());
261 thr->clock.ReleaseStore(&s->clock);
262 StatInc(thr, StatSyncRelease);
263 s->mtx.Unlock();
266 #ifndef TSAN_GO
267 void AfterSleep(ThreadState *thr, uptr pc) {
268 Context *ctx = CTX();
269 thr->last_sleep_stack_id = CurrentStackId(thr, pc);
270 Lock l(&ctx->thread_mtx);
271 for (unsigned i = 0; i < kMaxTid; i++) {
272 ThreadContext *tctx = ctx->threads[i];
273 if (tctx == 0)
274 continue;
275 if (tctx->status == ThreadStatusRunning)
276 thr->last_sleep_clock.set(i, tctx->thr->fast_state.epoch());
277 else
278 thr->last_sleep_clock.set(i, tctx->epoch1);
281 #endif
283 } // namespace __tsan