//===-- tsan_rtl_mutex.cc -------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include "tsan_rtl.h"
#include "tsan_flags.h"
#include "tsan_sync.h"
#include "tsan_report.h"
#include "tsan_symbolize.h"
#include "tsan_platform.h"

namespace __tsan {
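// MutexCreate: registers a new application mutex at 'addr'.
// Unless the mutex is linker-initialized, creation is modeled as a 1-byte
// write to 'addr', then the sync object is created and its
// rw/recursive/linker_init flags are recorded.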
void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
                 bool rw, bool recursive, bool linker_init) {
  Context *ctx = CTX();
  CHECK_GT(thr->in_rtl, 0);
  DPrintf("#%d: MutexCreate %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexCreate);
  if (!linker_init && IsAppMem(addr)) {
    CHECK(!thr->is_freeing);
    thr->is_freeing = true;
    MemoryWrite(thr, pc, addr, kSizeLog1);
    thr->is_freeing = false;
  }
  SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
  s->is_rw = rw;
  s->is_recursive = recursive;
  s->is_linker_init = linker_init;
  s->mtx.Unlock();
}

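// MutexDestroy: removes the sync object for the mutex at 'addr' and models
// the destruction as a 1-byte write. If report_destroy_locked is set and the
// mutex is still owned, a MutexDestroyLocked report is produced with the
// current stack and the stack of the last lock.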
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) {
  Context *ctx = CTX();
  CHECK_GT(thr->in_rtl, 0);
  DPrintf("#%d: MutexDestroy %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexDestroy);
#ifndef TSAN_GO
  // Global mutexes not marked as LINKER_INITIALIZED
  // cause tons of uninteresting reports, so just ignore them.
  if (IsGlobalVar(addr))
    return;
#endif
  SyncVar *s = ctx->synctab.GetAndRemove(thr, pc, addr);
  if (s == 0)
    return;
  if (IsAppMem(addr)) {
    CHECK(!thr->is_freeing);
    thr->is_freeing = true;
    MemoryWrite(thr, pc, addr, kSizeLog1);
    thr->is_freeing = false;
  }
  if (flags()->report_destroy_locked
      && s->owner_tid != SyncVar::kInvalidTid
      && !s->is_broken) {
    s->is_broken = true;
    Lock l(&ctx->thread_mtx);
    ScopedReport rep(ReportTypeMutexDestroyLocked);
    rep.AddMutex(s);
    StackTrace trace;
    trace.ObtainCurrent(thr, pc);
    rep.AddStack(&trace);
    FastState last(s->last_lock);
    RestoreStack(last.tid(), last.epoch(), &trace, 0);
    rep.AddStack(&trace);
    rep.AddLocation(s->addr, 1);
    OutputReport(ctx, rep);
  }
  thr->mset.Remove(s->GetId());
  DestroyAndFree(s);
}

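// MutexLock: write (exclusive) lock of the mutex at 'addr'. Adds an
// EventTypeLock event to the trace, warns on double locking, and on the
// first (non-recursive) acquisition acquires both the mutex write clock and
// its read clock.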
void MutexLock(ThreadState *thr, uptr pc, uptr addr) {
  CHECK_GT(thr->in_rtl, 0);
  DPrintf("#%d: MutexLock %zx\n", thr->tid, addr);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeLock, s->GetId());
  if (s->owner_tid == SyncVar::kInvalidTid) {
    CHECK_EQ(s->recursion, 0);
    s->owner_tid = thr->tid;
    s->last_lock = thr->fast_state.raw();
  } else if (s->owner_tid == thr->tid) {
    CHECK_GT(s->recursion, 0);
  } else {
    Printf("ThreadSanitizer WARNING: double lock\n");
    PrintCurrentStack(thr, pc);
  }
  if (s->recursion == 0) {
    StatInc(thr, StatMutexLock);
    thr->clock.set(thr->tid, thr->fast_state.epoch());
    thr->clock.acquire(&s->clock);
    StatInc(thr, StatSyncAcquire);
    thr->clock.acquire(&s->read_clock);
    StatInc(thr, StatSyncAcquire);
  } else if (!s->is_recursive) {
    StatInc(thr, StatMutexRecLock);
  }
  s->recursion++;
  thr->mset.Add(s->GetId(), true, thr->fast_state.epoch());
  s->mtx.Unlock();
}

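// MutexUnlock: write unlock. Warns about unlocking an unlocked mutex or a
// mutex owned by another thread; the last recursive unlock release-stores
// the thread clock into the mutex clock.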
void MutexUnlock(ThreadState *thr, uptr pc, uptr addr) {
  CHECK_GT(thr->in_rtl, 0);
  DPrintf("#%d: MutexUnlock %zx\n", thr->tid, addr);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
  if (s->recursion == 0) {
    if (!s->is_broken) {
      s->is_broken = true;
      Printf("ThreadSanitizer WARNING: unlock of unlocked mutex\n");
      PrintCurrentStack(thr, pc);
    }
  } else if (s->owner_tid != thr->tid) {
    if (!s->is_broken) {
      s->is_broken = true;
      Printf("ThreadSanitizer WARNING: mutex unlock by another thread\n");
      PrintCurrentStack(thr, pc);
    }
  } else {
    s->recursion--;
    if (s->recursion == 0) {
      StatInc(thr, StatMutexUnlock);
      s->owner_tid = SyncVar::kInvalidTid;
      thr->clock.set(thr->tid, thr->fast_state.epoch());
      thr->fast_synch_epoch = thr->fast_state.epoch();
      thr->clock.ReleaseStore(&s->clock);
      StatInc(thr, StatSyncRelease);
    } else {
      StatInc(thr, StatMutexRecUnlock);
    }
  }
  thr->mset.Del(s->GetId(), true);
  s->mtx.Unlock();
}

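// MutexReadLock: shared (reader) lock. Warns if the mutex is currently
// write-locked, then acquires the mutex write clock.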
void MutexReadLock(ThreadState *thr, uptr pc, uptr addr) {
  CHECK_GT(thr->in_rtl, 0);
  DPrintf("#%d: MutexReadLock %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexReadLock);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, false);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeRLock, s->GetId());
  if (s->owner_tid != SyncVar::kInvalidTid) {
    Printf("ThreadSanitizer WARNING: read lock of a write locked mutex\n");
    PrintCurrentStack(thr, pc);
  }
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  thr->clock.acquire(&s->clock);
  s->last_lock = thr->fast_state.raw();
  StatInc(thr, StatSyncAcquire);
  thr->mset.Add(s->GetId(), false, thr->fast_state.epoch());
  s->mtx.ReadUnlock();
}

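// MutexReadUnlock: shared (reader) unlock. Warns if the mutex is currently
// write-locked, then releases the thread clock into the mutex read clock.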
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
  CHECK_GT(thr->in_rtl, 0);
  DPrintf("#%d: MutexReadUnlock %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexReadUnlock);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
  if (s->owner_tid != SyncVar::kInvalidTid) {
    Printf("ThreadSanitizer WARNING: read unlock of a write "
           "locked mutex\n");
    PrintCurrentStack(thr, pc);
  }
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  thr->fast_synch_epoch = thr->fast_state.epoch();
  thr->clock.release(&s->read_clock);
  StatInc(thr, StatSyncRelease);
  s->mtx.Unlock();
  thr->mset.Del(s->GetId(), false);
}

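// MutexReadOrWriteUnlock: unlock when the caller does not know whether the
// mutex was held for reading or writing. An unowned mutex is treated as a
// read unlock, a mutex owned by the current thread as a write unlock.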
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
  CHECK_GT(thr->in_rtl, 0);
  DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr->tid, addr);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
  bool write = true;
  if (s->owner_tid == SyncVar::kInvalidTid) {
    // Seems to be read unlock.
    write = false;
    StatInc(thr, StatMutexReadUnlock);
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
    thr->clock.set(thr->tid, thr->fast_state.epoch());
    thr->fast_synch_epoch = thr->fast_state.epoch();
    thr->clock.release(&s->read_clock);
    StatInc(thr, StatSyncRelease);
  } else if (s->owner_tid == thr->tid) {
    // Seems to be write unlock.
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
    CHECK_GT(s->recursion, 0);
    s->recursion--;
    if (s->recursion == 0) {
      StatInc(thr, StatMutexUnlock);
      s->owner_tid = SyncVar::kInvalidTid;
      // FIXME: Refactor me, plz.
      // The sequence of events is quite tricky and doubled in several places.
      // First, it's a bug to increment the epoch w/o writing to the trace.
      // Then, the acquire/release logic can be factored out as well.
      thr->clock.set(thr->tid, thr->fast_state.epoch());
      thr->fast_synch_epoch = thr->fast_state.epoch();
      thr->clock.ReleaseStore(&s->clock);
      StatInc(thr, StatSyncRelease);
    } else {
      StatInc(thr, StatMutexRecUnlock);
    }
  } else if (!s->is_broken) {
    s->is_broken = true;
    Printf("ThreadSanitizer WARNING: mutex unlock by another thread\n");
    PrintCurrentStack(thr, pc);
  }
  thr->mset.Del(s->GetId(), write);
  s->mtx.Unlock();
}

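// Acquire: plain acquire operation on an arbitrary address; the thread clock
// acquires the clock of the sync object at 'addr'.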
void Acquire(ThreadState *thr, uptr pc, uptr addr) {
  CHECK_GT(thr->in_rtl, 0);
  DPrintf("#%d: Acquire %zx\n", thr->tid, addr);
  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, false);
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  thr->clock.acquire(&s->clock);
  StatInc(thr, StatSyncAcquire);
  s->mtx.ReadUnlock();
}

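// AcquireGlobal: sets the calling thread's clock to the current epoch of
// every thread (epoch1 for threads that have already finished), so the
// caller synchronizes with everything done by all threads so far.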
void AcquireGlobal(ThreadState *thr, uptr pc) {
  Context *ctx = CTX();
  Lock l(&ctx->thread_mtx);
  for (unsigned i = 0; i < kMaxTid; i++) {
    ThreadContext *tctx = ctx->threads[i];
    if (tctx == 0)
      continue;
    if (tctx->status == ThreadStatusRunning)
      thr->clock.set(i, tctx->thr->fast_state.epoch());
    else
      thr->clock.set(i, tctx->epoch1);
  }
}

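// Release: plain release operation on an arbitrary address; the thread clock
// is released into the clock of the sync object at 'addr'.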
void Release(ThreadState *thr, uptr pc, uptr addr) {
  CHECK_GT(thr->in_rtl, 0);
  DPrintf("#%d: Release %zx\n", thr->tid, addr);
  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  thr->clock.release(&s->clock);
  StatInc(thr, StatSyncRelease);
  s->mtx.Unlock();
}

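// ReleaseStore: like Release, but uses ReleaseStore on the thread clock,
// which overwrites the sync object's clock instead of merging into it.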
void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) {
  CHECK_GT(thr->in_rtl, 0);
  DPrintf("#%d: ReleaseStore %zx\n", thr->tid, addr);
  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  thr->clock.ReleaseStore(&s->clock);
  StatInc(thr, StatSyncRelease);
  s->mtx.Unlock();
}

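// AfterSleep: records the stack id of the sleep call and snapshots the
// current epoch of every thread into thr->last_sleep_clock (epoch1 for
// threads that have already finished).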
#ifndef TSAN_GO
void AfterSleep(ThreadState *thr, uptr pc) {
  Context *ctx = CTX();
  thr->last_sleep_stack_id = CurrentStackId(thr, pc);
  Lock l(&ctx->thread_mtx);
  for (unsigned i = 0; i < kMaxTid; i++) {
    ThreadContext *tctx = ctx->threads[i];
    if (tctx == 0)
      continue;
    if (tctx->status == ThreadStatusRunning)
      thr->last_sleep_clock.set(i, tctx->thr->fast_state.epoch());
    else
      thr->last_sleep_clock.set(i, tctx->epoch1);
  }
}
#endif

}  // namespace __tsan