//===-- tsan_interface_ann.cc ---------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "tsan_interface_ann.h"
#include "tsan_mutex.h"
#include "tsan_report.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"
#include "tsan_flags.h"
#include "tsan_platform.h"
#include "tsan_vector.h"

#define CALLERPC ((uptr)__builtin_return_address(0))

using namespace __tsan;  // NOLINT

namespace __tsan {

class ScopedAnnotation {
 public:
  ScopedAnnotation(ThreadState *thr, const char *aname, uptr pc)
      : thr_(thr) {
    FuncEntry(thr_, pc);
    DPrintf("#%d: annotation %s()\n", thr_->tid, aname);
  }

  ~ScopedAnnotation() {
    FuncExit(thr_);
    CheckNoLocks(thr_);
  }
 private:
  ThreadState *const thr_;
};

#define SCOPED_ANNOTATION_RET(typ, ret) \
    if (!flags()->enable_annotations) \
      return ret; \
    ThreadState *thr = cur_thread(); \
    const uptr caller_pc = (uptr)__builtin_return_address(0); \
    StatInc(thr, StatAnnotation); \
    StatInc(thr, Stat##typ); \
    ScopedAnnotation sa(thr, __func__, caller_pc); \
    const uptr pc = StackTrace::GetCurrentPc(); \
    (void)pc; \
/**/

#define SCOPED_ANNOTATION(typ) SCOPED_ANNOTATION_RET(typ, )

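// Illustrative sketch: after macro expansion, an annotation entry point such
// as AnnotateHappensBefore() below behaves roughly like this:
//
//   void AnnotateHappensBefore(char *f, int l, uptr addr) {
//     if (!flags()->enable_annotations)
//       return;                                    // annotations disabled
//     ThreadState *thr = cur_thread();
//     ScopedAnnotation sa(thr, __func__, CALLERPC);  // FuncEntry/FuncExit
//     const uptr pc = StackTrace::GetCurrentPc();
//     Release(thr, pc, addr);                      // the per-annotation body
//   }
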
static const int kMaxDescLen = 128;

struct ExpectRace {
  ExpectRace *next;
  ExpectRace *prev;
  atomic_uintptr_t hitcount;
  atomic_uintptr_t addcount;
  uptr addr;
  uptr size;
  char *file;
  int line;
  char desc[kMaxDescLen];
};

struct DynamicAnnContext {
  Mutex mtx;
  ExpectRace expect;
  ExpectRace benign;

  DynamicAnnContext()
      : mtx(MutexTypeAnnotations, StatMtxAnnotations) {
  }
};

static DynamicAnnContext *dyn_ann_ctx;
static char dyn_ann_ctx_placeholder[sizeof(DynamicAnnContext)] ALIGNED(64);
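// Note: dyn_ann_ctx is constructed lazily with placement new into the static
// placeholder above (see InitializeDynamicAnnotations()), so initialization
// does not depend on a global constructor running inside the runtime.
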
static void AddExpectRace(ExpectRace *list,
    char *f, int l, uptr addr, uptr size, char *desc) {
  ExpectRace *race = list->next;
  for (; race != list; race = race->next) {
    if (race->addr == addr && race->size == size) {
      atomic_store_relaxed(&race->addcount,
          atomic_load_relaxed(&race->addcount) + 1);
      return;
    }
  }
  race = (ExpectRace*)internal_alloc(MBlockExpectRace, sizeof(ExpectRace));
  race->addr = addr;
  race->size = size;
  race->file = f;
  race->line = l;
  race->desc[0] = 0;
  atomic_store_relaxed(&race->hitcount, 0);
  atomic_store_relaxed(&race->addcount, 1);
  if (desc) {
    int i = 0;
    for (; i < kMaxDescLen - 1 && desc[i]; i++)
      race->desc[i] = desc[i];
    race->desc[i] = 0;
  }
  race->prev = list;
  race->next = list->next;
  race->next->prev = race;
  list->next = race;
}

static ExpectRace *FindRace(ExpectRace *list, uptr addr, uptr size) {
  for (ExpectRace *race = list->next; race != list; race = race->next) {
    uptr maxbegin = max(race->addr, addr);
    uptr minend = min(race->addr + race->size, addr + size);
    if (maxbegin < minend)
      return race;
  }
  return 0;
}

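// Worked example for the overlap check in FindRace() above: an annotation
// registered at addr=0x1000 with size=8 matches a report at addr=0x1004 with
// size=4, because max(0x1000, 0x1004) = 0x1004 is below
// min(0x1008, 0x1008) = 0x1008, i.e. the two ranges overlap.
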
static bool CheckContains(ExpectRace *list, uptr addr, uptr size) {
  ExpectRace *race = FindRace(list, addr, size);
  if (race == 0)
    return false;
  DPrintf("Hit expected/benign race: %s addr=%zx:%d %s:%d\n",
      race->desc, race->addr, (int)race->size, race->file, race->line);
  atomic_fetch_add(&race->hitcount, 1, memory_order_relaxed);
  return true;
}

static void InitList(ExpectRace *list) {
  list->next = list;
  list->prev = list;
}

void InitializeDynamicAnnotations() {
  dyn_ann_ctx = new(dyn_ann_ctx_placeholder) DynamicAnnContext;
  InitList(&dyn_ann_ctx->expect);
  InitList(&dyn_ann_ctx->benign);
}

bool IsExpectedReport(uptr addr, uptr size) {
  ReadLock lock(&dyn_ann_ctx->mtx);
  if (CheckContains(&dyn_ann_ctx->expect, addr, size))
    return true;
  if (CheckContains(&dyn_ann_ctx->benign, addr, size))
    return true;
  return false;
}

static void CollectMatchedBenignRaces(Vector<ExpectRace> *matched,
    int *unique_count, int *hit_count, atomic_uintptr_t ExpectRace::*counter) {
  ExpectRace *list = &dyn_ann_ctx->benign;
  for (ExpectRace *race = list->next; race != list; race = race->next) {
    (*unique_count)++;
    const uptr cnt = atomic_load_relaxed(&(race->*counter));
    if (cnt == 0)
      continue;
    *hit_count += cnt;
    uptr i = 0;
    for (; i < matched->Size(); i++) {
      ExpectRace *race0 = &(*matched)[i];
      if (race->line == race0->line
          && internal_strcmp(race->file, race0->file) == 0
          && internal_strcmp(race->desc, race0->desc) == 0) {
        atomic_fetch_add(&(race0->*counter), cnt, memory_order_relaxed);
        break;
      }
    }
    if (i == matched->Size())
      matched->PushBack(*race);
  }
}

void PrintMatchedBenignRaces() {
  Lock lock(&dyn_ann_ctx->mtx);
  int unique_count = 0;
  int hit_count = 0;
  int add_count = 0;
  Vector<ExpectRace> hit_matched(MBlockScopedBuf);
  CollectMatchedBenignRaces(&hit_matched, &unique_count, &hit_count,
      &ExpectRace::hitcount);
  Vector<ExpectRace> add_matched(MBlockScopedBuf);
  CollectMatchedBenignRaces(&add_matched, &unique_count, &add_count,
      &ExpectRace::addcount);
  if (hit_matched.Size()) {
    Printf("ThreadSanitizer: Matched %d \"benign\" races (pid=%d):\n",
        hit_count, (int)internal_getpid());
    for (uptr i = 0; i < hit_matched.Size(); i++) {
      Printf("%d %s:%d %s\n",
          atomic_load_relaxed(&hit_matched[i].hitcount),
          hit_matched[i].file, hit_matched[i].line, hit_matched[i].desc);
    }
  }
  if (hit_matched.Size()) {
    Printf("ThreadSanitizer: Annotated %d \"benign\" races, %d unique"
           " (pid=%d):\n",
        add_count, unique_count, (int)internal_getpid());
    for (uptr i = 0; i < add_matched.Size(); i++) {
      Printf("%d %s:%d %s\n",
          atomic_load_relaxed(&add_matched[i].addcount),
          add_matched[i].file, add_matched[i].line, add_matched[i].desc);
    }
  }
}

static void ReportMissedExpectedRace(ExpectRace *race) {
  Printf("==================\n");
  Printf("WARNING: ThreadSanitizer: missed expected data race\n");
  Printf("  %s addr=%zx %s:%d\n",
      race->desc, race->addr, race->file, race->line);
  Printf("==================\n");
}
}  // namespace __tsan

using namespace __tsan;  // NOLINT

extern "C" {
void INTERFACE_ATTRIBUTE AnnotateHappensBefore(char *f, int l, uptr addr) {
  SCOPED_ANNOTATION(AnnotateHappensBefore);
  Release(thr, pc, addr);
}

void INTERFACE_ATTRIBUTE AnnotateHappensAfter(char *f, int l, uptr addr) {
  SCOPED_ANNOTATION(AnnotateHappensAfter);
  Acquire(thr, pc, addr);
}

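// Illustrative client-side usage, assuming ANNOTATE_HAPPENS_BEFORE/AFTER
// macros from a dynamic-annotations header that forward here with
// __FILE__/__LINE__, and a hand-rolled queue whose internal synchronization
// TSan cannot see:
//
//   // producer thread                   // consumer thread
//   item->data = 42;                     item = queue.pop();
//   ANNOTATE_HAPPENS_BEFORE(item);       ANNOTATE_HAPPENS_AFTER(item);
//   queue.push(item);                    use(item->data);  // not reported
//
// The Release()/Acquire() pair above supplies the synchronization edge that
// orders the accesses to item->data.
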
void INTERFACE_ATTRIBUTE AnnotateCondVarSignal(char *f, int l, uptr cv) {
  SCOPED_ANNOTATION(AnnotateCondVarSignal);
}

void INTERFACE_ATTRIBUTE AnnotateCondVarSignalAll(char *f, int l, uptr cv) {
  SCOPED_ANNOTATION(AnnotateCondVarSignalAll);
}

void INTERFACE_ATTRIBUTE AnnotateMutexIsNotPHB(char *f, int l, uptr mu) {
  SCOPED_ANNOTATION(AnnotateMutexIsNotPHB);
}

void INTERFACE_ATTRIBUTE AnnotateCondVarWait(char *f, int l, uptr cv,
                                             uptr lock) {
  SCOPED_ANNOTATION(AnnotateCondVarWait);
}

void INTERFACE_ATTRIBUTE AnnotateRWLockCreate(char *f, int l, uptr m) {
  SCOPED_ANNOTATION(AnnotateRWLockCreate);
  MutexCreate(thr, pc, m, MutexFlagWriteReentrant);
}

void INTERFACE_ATTRIBUTE AnnotateRWLockCreateStatic(char *f, int l, uptr m) {
  SCOPED_ANNOTATION(AnnotateRWLockCreateStatic);
  MutexCreate(thr, pc, m, MutexFlagWriteReentrant | MutexFlagLinkerInit);
}

void INTERFACE_ATTRIBUTE AnnotateRWLockDestroy(char *f, int l, uptr m) {
  SCOPED_ANNOTATION(AnnotateRWLockDestroy);
  MutexDestroy(thr, pc, m);
}

void INTERFACE_ATTRIBUTE AnnotateRWLockAcquired(char *f, int l, uptr m,
                                                uptr is_w) {
  SCOPED_ANNOTATION(AnnotateRWLockAcquired);
  if (is_w)
    MutexPostLock(thr, pc, m, MutexFlagDoPreLockOnPostLock);
  else
    MutexPostReadLock(thr, pc, m, MutexFlagDoPreLockOnPostLock);
}

void INTERFACE_ATTRIBUTE AnnotateRWLockReleased(char *f, int l, uptr m,
                                                uptr is_w) {
  SCOPED_ANNOTATION(AnnotateRWLockReleased);
  if (is_w)
    MutexUnlock(thr, pc, m);
  else
    MutexReadUnlock(thr, pc, m);
}

void INTERFACE_ATTRIBUTE AnnotateTraceMemory(char *f, int l, uptr mem) {
  SCOPED_ANNOTATION(AnnotateTraceMemory);
}

void INTERFACE_ATTRIBUTE AnnotateFlushState(char *f, int l) {
  SCOPED_ANNOTATION(AnnotateFlushState);
}

void INTERFACE_ATTRIBUTE AnnotateNewMemory(char *f, int l, uptr mem,
                                           uptr size) {
  SCOPED_ANNOTATION(AnnotateNewMemory);
}

void INTERFACE_ATTRIBUTE AnnotateNoOp(char *f, int l, uptr mem) {
  SCOPED_ANNOTATION(AnnotateNoOp);
}

void INTERFACE_ATTRIBUTE AnnotateFlushExpectedRaces(char *f, int l) {
  SCOPED_ANNOTATION(AnnotateFlushExpectedRaces);
  Lock lock(&dyn_ann_ctx->mtx);
  while (dyn_ann_ctx->expect.next != &dyn_ann_ctx->expect) {
    ExpectRace *race = dyn_ann_ctx->expect.next;
    if (atomic_load_relaxed(&race->hitcount) == 0) {
      ctx->nmissed_expected++;
      ReportMissedExpectedRace(race);
    }
    race->prev->next = race->next;
    race->next->prev = race->prev;
    internal_free(race);
  }
}

void INTERFACE_ATTRIBUTE AnnotateEnableRaceDetection(
    char *f, int l, int enable) {
  SCOPED_ANNOTATION(AnnotateEnableRaceDetection);
  // FIXME: Reconsider this functionality later. It may be irrelevant.
}

void INTERFACE_ATTRIBUTE AnnotateMutexIsUsedAsCondVar(
    char *f, int l, uptr mu) {
  SCOPED_ANNOTATION(AnnotateMutexIsUsedAsCondVar);
}

void INTERFACE_ATTRIBUTE AnnotatePCQGet(
    char *f, int l, uptr pcq) {
  SCOPED_ANNOTATION(AnnotatePCQGet);
}

void INTERFACE_ATTRIBUTE AnnotatePCQPut(
    char *f, int l, uptr pcq) {
  SCOPED_ANNOTATION(AnnotatePCQPut);
}

void INTERFACE_ATTRIBUTE AnnotatePCQDestroy(
    char *f, int l, uptr pcq) {
  SCOPED_ANNOTATION(AnnotatePCQDestroy);
}

void INTERFACE_ATTRIBUTE AnnotatePCQCreate(
    char *f, int l, uptr pcq) {
  SCOPED_ANNOTATION(AnnotatePCQCreate);
}

void INTERFACE_ATTRIBUTE AnnotateExpectRace(
    char *f, int l, uptr mem, char *desc) {
  SCOPED_ANNOTATION(AnnotateExpectRace);
  Lock lock(&dyn_ann_ctx->mtx);
  AddExpectRace(&dyn_ann_ctx->expect,
                f, l, mem, 1, desc);
  DPrintf("Add expected race: %s addr=%zx %s:%d\n", desc, mem, f, l);
}

static void BenignRaceImpl(
    char *f, int l, uptr mem, uptr size, char *desc) {
  Lock lock(&dyn_ann_ctx->mtx);
  AddExpectRace(&dyn_ann_ctx->benign,
                f, l, mem, size, desc);
  DPrintf("Add benign race: %s addr=%zx %s:%d\n", desc, mem, f, l);
}

// FIXME: Turn it off later. What is a "benign" race anyway?
// Go talk to Hans Boehm.
void INTERFACE_ATTRIBUTE AnnotateBenignRaceSized(
    char *f, int l, uptr mem, uptr size, char *desc) {
  SCOPED_ANNOTATION(AnnotateBenignRaceSized);
  BenignRaceImpl(f, l, mem, size, desc);
}

void INTERFACE_ATTRIBUTE AnnotateBenignRace(
    char *f, int l, uptr mem, char *desc) {
  SCOPED_ANNOTATION(AnnotateBenignRace);
  BenignRaceImpl(f, l, mem, 1, desc);
}

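// Illustrative client-side usage, assuming an ANNOTATE_BENIGN_RACE_SIZED
// macro from a dynamic-annotations header that forwards to
// AnnotateBenignRaceSized() with __FILE__/__LINE__:
//
//   static int stats_counter;  // racy updates are tolerated by design
//   ANNOTATE_BENIGN_RACE_SIZED(&stats_counter, sizeof(stats_counter),
//                              "approximate stats counter");
//
// Reports that fall inside the registered range are then suppressed via
// IsExpectedReport() above.
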
void INTERFACE_ATTRIBUTE AnnotateIgnoreReadsBegin(char *f, int l) {
  SCOPED_ANNOTATION(AnnotateIgnoreReadsBegin);
  ThreadIgnoreBegin(thr, pc);
}

void INTERFACE_ATTRIBUTE AnnotateIgnoreReadsEnd(char *f, int l) {
  SCOPED_ANNOTATION(AnnotateIgnoreReadsEnd);
  ThreadIgnoreEnd(thr, pc);
}

void INTERFACE_ATTRIBUTE AnnotateIgnoreWritesBegin(char *f, int l) {
  SCOPED_ANNOTATION(AnnotateIgnoreWritesBegin);
  ThreadIgnoreBegin(thr, pc);
}

void INTERFACE_ATTRIBUTE AnnotateIgnoreWritesEnd(char *f, int l) {
  SCOPED_ANNOTATION(AnnotateIgnoreWritesEnd);
  ThreadIgnoreEnd(thr, pc);
}

void INTERFACE_ATTRIBUTE AnnotateIgnoreSyncBegin(char *f, int l) {
  SCOPED_ANNOTATION(AnnotateIgnoreSyncBegin);
  ThreadIgnoreSyncBegin(thr, pc);
}

void INTERFACE_ATTRIBUTE AnnotateIgnoreSyncEnd(char *f, int l) {
  SCOPED_ANNOTATION(AnnotateIgnoreSyncEnd);
  ThreadIgnoreSyncEnd(thr, pc);
}

void INTERFACE_ATTRIBUTE AnnotatePublishMemoryRange(
    char *f, int l, uptr addr, uptr size) {
  SCOPED_ANNOTATION(AnnotatePublishMemoryRange);
}

void INTERFACE_ATTRIBUTE AnnotateUnpublishMemoryRange(
    char *f, int l, uptr addr, uptr size) {
  SCOPED_ANNOTATION(AnnotateUnpublishMemoryRange);
}

void INTERFACE_ATTRIBUTE AnnotateThreadName(
    char *f, int l, char *name) {
  SCOPED_ANNOTATION(AnnotateThreadName);
  ThreadSetName(thr, name);
}

// We deliberately omit the implementation of WTFAnnotateHappensBefore() and
// WTFAnnotateHappensAfter(). Those are used by WebKit to annotate
// atomic operations, which should be handled by ThreadSanitizer correctly.
void INTERFACE_ATTRIBUTE WTFAnnotateHappensBefore(char *f, int l, uptr addr) {
  SCOPED_ANNOTATION(AnnotateHappensBefore);
}

void INTERFACE_ATTRIBUTE WTFAnnotateHappensAfter(char *f, int l, uptr addr) {
  SCOPED_ANNOTATION(AnnotateHappensAfter);
}

void INTERFACE_ATTRIBUTE WTFAnnotateBenignRaceSized(
    char *f, int l, uptr mem, uptr sz, char *desc) {
  SCOPED_ANNOTATION(AnnotateBenignRaceSized);
  BenignRaceImpl(f, l, mem, sz, desc);
}

int INTERFACE_ATTRIBUTE RunningOnValgrind() {
  return flags()->running_on_valgrind;
}

double __attribute__((weak)) INTERFACE_ATTRIBUTE ValgrindSlowdown(void) {
  return 10.0;
}

const char INTERFACE_ATTRIBUTE* ThreadSanitizerQuery(const char *query) {
  if (internal_strcmp(query, "pure_happens_before") == 0)
    return "1";
  else
    return "0";
}

void INTERFACE_ATTRIBUTE
AnnotateMemoryIsInitialized(char *f, int l, uptr mem, uptr sz) {}
void INTERFACE_ATTRIBUTE
AnnotateMemoryIsUninitialized(char *f, int l, uptr mem, uptr sz) {}

// Note: the parameter is called flagz, because flags is already taken
// by the global function that returns flags.
INTERFACE_ATTRIBUTE
void __tsan_mutex_create(void *m, unsigned flagz) {
  SCOPED_ANNOTATION(__tsan_mutex_create);
  MutexCreate(thr, pc, (uptr)m, flagz & MutexCreationFlagMask);
}

INTERFACE_ATTRIBUTE
void __tsan_mutex_destroy(void *m, unsigned flagz) {
  SCOPED_ANNOTATION(__tsan_mutex_destroy);
  MutexDestroy(thr, pc, (uptr)m, flagz);
}

INTERFACE_ATTRIBUTE
void __tsan_mutex_pre_lock(void *m, unsigned flagz) {
  SCOPED_ANNOTATION(__tsan_mutex_pre_lock);
  if (!(flagz & MutexFlagTryLock)) {
    if (flagz & MutexFlagReadLock)
      MutexPreReadLock(thr, pc, (uptr)m);
    else
      MutexPreLock(thr, pc, (uptr)m);
  }
  ThreadIgnoreBegin(thr, pc, /*save_stack=*/false);
  ThreadIgnoreSyncBegin(thr, pc, /*save_stack=*/false);
}

INTERFACE_ATTRIBUTE
void __tsan_mutex_post_lock(void *m, unsigned flagz, int rec) {
  SCOPED_ANNOTATION(__tsan_mutex_post_lock);
  ThreadIgnoreSyncEnd(thr, pc);
  ThreadIgnoreEnd(thr, pc);
  if (!(flagz & MutexFlagTryLockFailed)) {
    if (flagz & MutexFlagReadLock)
      MutexPostReadLock(thr, pc, (uptr)m, flagz);
    else
      MutexPostLock(thr, pc, (uptr)m, flagz, rec);
  }
}

INTERFACE_ATTRIBUTE
int __tsan_mutex_pre_unlock(void *m, unsigned flagz) {
  SCOPED_ANNOTATION_RET(__tsan_mutex_pre_unlock, 0);
  int ret = 0;
  if (flagz & MutexFlagReadLock) {
    CHECK(!(flagz & MutexFlagRecursiveUnlock));
    MutexReadUnlock(thr, pc, (uptr)m);
  } else {
    ret = MutexUnlock(thr, pc, (uptr)m, flagz);
  }
  ThreadIgnoreBegin(thr, pc, /*save_stack=*/false);
  ThreadIgnoreSyncBegin(thr, pc, /*save_stack=*/false);
  return ret;
}

INTERFACE_ATTRIBUTE
void __tsan_mutex_post_unlock(void *m, unsigned flagz) {
  SCOPED_ANNOTATION(__tsan_mutex_post_unlock);
  ThreadIgnoreSyncEnd(thr, pc);
  ThreadIgnoreEnd(thr, pc);
}

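// Illustrative sketch of how a custom mutex implementation is expected to
// drive the hooks above (MyMutex, LockImpl and UnlockImpl are placeholder
// names; try-lock and read-lock variants would pass the corresponding
// MutexFlag* bits instead of 0):
//
//   void MyMutex::Lock() {
//     __tsan_mutex_pre_lock(this, 0);
//     LockImpl();                          // the real acquisition
//     __tsan_mutex_post_lock(this, 0, 0);
//   }
//   void MyMutex::Unlock() {
//     __tsan_mutex_pre_unlock(this, 0);
//     UnlockImpl();                        // the real release
//     __tsan_mutex_post_unlock(this, 0);
//   }
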
INTERFACE_ATTRIBUTE
void __tsan_mutex_pre_signal(void *addr, unsigned flagz) {
  SCOPED_ANNOTATION(__tsan_mutex_pre_signal);
  ThreadIgnoreBegin(thr, pc, /*save_stack=*/false);
  ThreadIgnoreSyncBegin(thr, pc, /*save_stack=*/false);
}

INTERFACE_ATTRIBUTE
void __tsan_mutex_post_signal(void *addr, unsigned flagz) {
  SCOPED_ANNOTATION(__tsan_mutex_post_signal);
  ThreadIgnoreSyncEnd(thr, pc);
  ThreadIgnoreEnd(thr, pc);
}

INTERFACE_ATTRIBUTE
void __tsan_mutex_pre_divert(void *addr, unsigned flagz) {
  SCOPED_ANNOTATION(__tsan_mutex_pre_divert);
  // Exit from ignore region started in __tsan_mutex_pre_lock/unlock/signal.
  ThreadIgnoreSyncEnd(thr, pc);
  ThreadIgnoreEnd(thr, pc);
}

INTERFACE_ATTRIBUTE
void __tsan_mutex_post_divert(void *addr, unsigned flagz) {
  SCOPED_ANNOTATION(__tsan_mutex_post_divert);
  ThreadIgnoreBegin(thr, pc, /*save_stack=*/false);
  ThreadIgnoreSyncBegin(thr, pc, /*save_stack=*/false);
}
}  // extern "C"