//===-- tsan_interface_ann.cc ---------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
11 #include "sanitizer_common/sanitizer_libc.h"
12 #include "sanitizer_common/sanitizer_internal_defs.h"
13 #include "sanitizer_common/sanitizer_placement_new.h"
14 #include "sanitizer_common/sanitizer_stacktrace.h"
15 #include "tsan_interface_ann.h"
16 #include "tsan_mutex.h"
17 #include "tsan_report.h"
19 #include "tsan_mman.h"
20 #include "tsan_flags.h"
21 #include "tsan_platform.h"
22 #include "tsan_vector.h"
24 #define CALLERPC ((uptr)__builtin_return_address(0))
26 using namespace __tsan
; // NOLINT
30 class ScopedAnnotation
{
32 ScopedAnnotation(ThreadState
*thr
, const char *aname
, uptr pc
)
35 DPrintf("#%d: annotation %s()\n", thr_
->tid
, aname
);
43 ThreadState
*const thr_
;
// Common prologue for every annotation entry point.
// Bails out (returning 'ret') when annotations are disabled by flags; as
// written in the damaged source the 'if' had no body, so the enable check
// silently swallowed the next statement — restore the 'return ret;'.
// Declares 'thr' and 'pc' for use by the annotation body; '(void)pc'
// silences unused-variable warnings in bodies that ignore it.
#define SCOPED_ANNOTATION_RET(typ, ret) \
    if (!flags()->enable_annotations) \
      return ret; \
    ThreadState *thr = cur_thread(); \
    const uptr caller_pc = (uptr)__builtin_return_address(0); \
    StatInc(thr, StatAnnotation); \
    StatInc(thr, Stat##typ); \
    ScopedAnnotation sa(thr, __func__, caller_pc); \
    const uptr pc = StackTrace::GetCurrentPc(); \
    (void)pc; \
/**/

#define SCOPED_ANNOTATION(typ) SCOPED_ANNOTATION_RET(typ, )
60 static const int kMaxDescLen
= 128;
65 atomic_uintptr_t hitcount
;
66 atomic_uintptr_t addcount
;
71 char desc
[kMaxDescLen
];
74 struct DynamicAnnContext
{
80 : mtx(MutexTypeAnnotations
, StatMtxAnnotations
) {
84 static DynamicAnnContext
*dyn_ann_ctx
;
85 static char dyn_ann_ctx_placeholder
[sizeof(DynamicAnnContext
)] ALIGNED(64);
87 static void AddExpectRace(ExpectRace
*list
,
88 char *f
, int l
, uptr addr
, uptr size
, char *desc
) {
89 ExpectRace
*race
= list
->next
;
90 for (; race
!= list
; race
= race
->next
) {
91 if (race
->addr
== addr
&& race
->size
== size
) {
92 atomic_store_relaxed(&race
->addcount
,
93 atomic_load_relaxed(&race
->addcount
) + 1);
97 race
= (ExpectRace
*)internal_alloc(MBlockExpectRace
, sizeof(ExpectRace
));
103 atomic_store_relaxed(&race
->hitcount
, 0);
104 atomic_store_relaxed(&race
->addcount
, 1);
107 for (; i
< kMaxDescLen
- 1 && desc
[i
]; i
++)
108 race
->desc
[i
] = desc
[i
];
112 race
->next
= list
->next
;
113 race
->next
->prev
= race
;
117 static ExpectRace
*FindRace(ExpectRace
*list
, uptr addr
, uptr size
) {
118 for (ExpectRace
*race
= list
->next
; race
!= list
; race
= race
->next
) {
119 uptr maxbegin
= max(race
->addr
, addr
);
120 uptr minend
= min(race
->addr
+ race
->size
, addr
+ size
);
121 if (maxbegin
< minend
)
127 static bool CheckContains(ExpectRace
*list
, uptr addr
, uptr size
) {
128 ExpectRace
*race
= FindRace(list
, addr
, size
);
131 DPrintf("Hit expected/benign race: %s addr=%zx:%d %s:%d\n",
132 race
->desc
, race
->addr
, (int)race
->size
, race
->file
, race
->line
);
133 atomic_fetch_add(&race
->hitcount
, 1, memory_order_relaxed
);
137 static void InitList(ExpectRace
*list
) {
142 void InitializeDynamicAnnotations() {
143 dyn_ann_ctx
= new(dyn_ann_ctx_placeholder
) DynamicAnnContext
;
144 InitList(&dyn_ann_ctx
->expect
);
145 InitList(&dyn_ann_ctx
->benign
);
148 bool IsExpectedReport(uptr addr
, uptr size
) {
149 ReadLock
lock(&dyn_ann_ctx
->mtx
);
150 if (CheckContains(&dyn_ann_ctx
->expect
, addr
, size
))
152 if (CheckContains(&dyn_ann_ctx
->benign
, addr
, size
))
157 static void CollectMatchedBenignRaces(Vector
<ExpectRace
> *matched
,
158 int *unique_count
, int *hit_count
, atomic_uintptr_t
ExpectRace::*counter
) {
159 ExpectRace
*list
= &dyn_ann_ctx
->benign
;
160 for (ExpectRace
*race
= list
->next
; race
!= list
; race
= race
->next
) {
162 const uptr cnt
= atomic_load_relaxed(&(race
->*counter
));
167 for (; i
< matched
->Size(); i
++) {
168 ExpectRace
*race0
= &(*matched
)[i
];
169 if (race
->line
== race0
->line
170 && internal_strcmp(race
->file
, race0
->file
) == 0
171 && internal_strcmp(race
->desc
, race0
->desc
) == 0) {
172 atomic_fetch_add(&(race0
->*counter
), cnt
, memory_order_relaxed
);
176 if (i
== matched
->Size())
177 matched
->PushBack(*race
);
181 void PrintMatchedBenignRaces() {
182 Lock
lock(&dyn_ann_ctx
->mtx
);
183 int unique_count
= 0;
186 Vector
<ExpectRace
> hit_matched(MBlockScopedBuf
);
187 CollectMatchedBenignRaces(&hit_matched
, &unique_count
, &hit_count
,
188 &ExpectRace::hitcount
);
189 Vector
<ExpectRace
> add_matched(MBlockScopedBuf
);
190 CollectMatchedBenignRaces(&add_matched
, &unique_count
, &add_count
,
191 &ExpectRace::addcount
);
192 if (hit_matched
.Size()) {
193 Printf("ThreadSanitizer: Matched %d \"benign\" races (pid=%d):\n",
194 hit_count
, (int)internal_getpid());
195 for (uptr i
= 0; i
< hit_matched
.Size(); i
++) {
196 Printf("%d %s:%d %s\n",
197 atomic_load_relaxed(&hit_matched
[i
].hitcount
),
198 hit_matched
[i
].file
, hit_matched
[i
].line
, hit_matched
[i
].desc
);
201 if (hit_matched
.Size()) {
202 Printf("ThreadSanitizer: Annotated %d \"benign\" races, %d unique"
204 add_count
, unique_count
, (int)internal_getpid());
205 for (uptr i
= 0; i
< add_matched
.Size(); i
++) {
206 Printf("%d %s:%d %s\n",
207 atomic_load_relaxed(&add_matched
[i
].addcount
),
208 add_matched
[i
].file
, add_matched
[i
].line
, add_matched
[i
].desc
);
213 static void ReportMissedExpectedRace(ExpectRace
*race
) {
214 Printf("==================\n");
215 Printf("WARNING: ThreadSanitizer: missed expected data race\n");
216 Printf(" %s addr=%zx %s:%d\n",
217 race
->desc
, race
->addr
, race
->file
, race
->line
);
218 Printf("==================\n");
220 } // namespace __tsan
222 using namespace __tsan
; // NOLINT
225 void INTERFACE_ATTRIBUTE
AnnotateHappensBefore(char *f
, int l
, uptr addr
) {
226 SCOPED_ANNOTATION(AnnotateHappensBefore
);
227 Release(thr
, pc
, addr
);
230 void INTERFACE_ATTRIBUTE
AnnotateHappensAfter(char *f
, int l
, uptr addr
) {
231 SCOPED_ANNOTATION(AnnotateHappensAfter
);
232 Acquire(thr
, pc
, addr
);
235 void INTERFACE_ATTRIBUTE
AnnotateCondVarSignal(char *f
, int l
, uptr cv
) {
236 SCOPED_ANNOTATION(AnnotateCondVarSignal
);
239 void INTERFACE_ATTRIBUTE
AnnotateCondVarSignalAll(char *f
, int l
, uptr cv
) {
240 SCOPED_ANNOTATION(AnnotateCondVarSignalAll
);
243 void INTERFACE_ATTRIBUTE
AnnotateMutexIsNotPHB(char *f
, int l
, uptr mu
) {
244 SCOPED_ANNOTATION(AnnotateMutexIsNotPHB
);
247 void INTERFACE_ATTRIBUTE
AnnotateCondVarWait(char *f
, int l
, uptr cv
,
249 SCOPED_ANNOTATION(AnnotateCondVarWait
);
252 void INTERFACE_ATTRIBUTE
AnnotateRWLockCreate(char *f
, int l
, uptr m
) {
253 SCOPED_ANNOTATION(AnnotateRWLockCreate
);
254 MutexCreate(thr
, pc
, m
, MutexFlagWriteReentrant
);
257 void INTERFACE_ATTRIBUTE
AnnotateRWLockCreateStatic(char *f
, int l
, uptr m
) {
258 SCOPED_ANNOTATION(AnnotateRWLockCreateStatic
);
259 MutexCreate(thr
, pc
, m
, MutexFlagWriteReentrant
| MutexFlagLinkerInit
);
262 void INTERFACE_ATTRIBUTE
AnnotateRWLockDestroy(char *f
, int l
, uptr m
) {
263 SCOPED_ANNOTATION(AnnotateRWLockDestroy
);
264 MutexDestroy(thr
, pc
, m
);
267 void INTERFACE_ATTRIBUTE
AnnotateRWLockAcquired(char *f
, int l
, uptr m
,
269 SCOPED_ANNOTATION(AnnotateRWLockAcquired
);
271 MutexPostLock(thr
, pc
, m
, MutexFlagDoPreLockOnPostLock
);
273 MutexPostReadLock(thr
, pc
, m
, MutexFlagDoPreLockOnPostLock
);
276 void INTERFACE_ATTRIBUTE
AnnotateRWLockReleased(char *f
, int l
, uptr m
,
278 SCOPED_ANNOTATION(AnnotateRWLockReleased
);
280 MutexUnlock(thr
, pc
, m
);
282 MutexReadUnlock(thr
, pc
, m
);
285 void INTERFACE_ATTRIBUTE
AnnotateTraceMemory(char *f
, int l
, uptr mem
) {
286 SCOPED_ANNOTATION(AnnotateTraceMemory
);
289 void INTERFACE_ATTRIBUTE
AnnotateFlushState(char *f
, int l
) {
290 SCOPED_ANNOTATION(AnnotateFlushState
);
293 void INTERFACE_ATTRIBUTE
AnnotateNewMemory(char *f
, int l
, uptr mem
,
295 SCOPED_ANNOTATION(AnnotateNewMemory
);
298 void INTERFACE_ATTRIBUTE
AnnotateNoOp(char *f
, int l
, uptr mem
) {
299 SCOPED_ANNOTATION(AnnotateNoOp
);
302 void INTERFACE_ATTRIBUTE
AnnotateFlushExpectedRaces(char *f
, int l
) {
303 SCOPED_ANNOTATION(AnnotateFlushExpectedRaces
);
304 Lock
lock(&dyn_ann_ctx
->mtx
);
305 while (dyn_ann_ctx
->expect
.next
!= &dyn_ann_ctx
->expect
) {
306 ExpectRace
*race
= dyn_ann_ctx
->expect
.next
;
307 if (atomic_load_relaxed(&race
->hitcount
) == 0) {
308 ctx
->nmissed_expected
++;
309 ReportMissedExpectedRace(race
);
311 race
->prev
->next
= race
->next
;
312 race
->next
->prev
= race
->prev
;
317 void INTERFACE_ATTRIBUTE
AnnotateEnableRaceDetection(
318 char *f
, int l
, int enable
) {
319 SCOPED_ANNOTATION(AnnotateEnableRaceDetection
);
320 // FIXME: Reconsider this functionality later. It may be irrelevant.
323 void INTERFACE_ATTRIBUTE
AnnotateMutexIsUsedAsCondVar(
324 char *f
, int l
, uptr mu
) {
325 SCOPED_ANNOTATION(AnnotateMutexIsUsedAsCondVar
);
328 void INTERFACE_ATTRIBUTE
AnnotatePCQGet(
329 char *f
, int l
, uptr pcq
) {
330 SCOPED_ANNOTATION(AnnotatePCQGet
);
333 void INTERFACE_ATTRIBUTE
AnnotatePCQPut(
334 char *f
, int l
, uptr pcq
) {
335 SCOPED_ANNOTATION(AnnotatePCQPut
);
338 void INTERFACE_ATTRIBUTE
AnnotatePCQDestroy(
339 char *f
, int l
, uptr pcq
) {
340 SCOPED_ANNOTATION(AnnotatePCQDestroy
);
343 void INTERFACE_ATTRIBUTE
AnnotatePCQCreate(
344 char *f
, int l
, uptr pcq
) {
345 SCOPED_ANNOTATION(AnnotatePCQCreate
);
348 void INTERFACE_ATTRIBUTE
AnnotateExpectRace(
349 char *f
, int l
, uptr mem
, char *desc
) {
350 SCOPED_ANNOTATION(AnnotateExpectRace
);
351 Lock
lock(&dyn_ann_ctx
->mtx
);
352 AddExpectRace(&dyn_ann_ctx
->expect
,
354 DPrintf("Add expected race: %s addr=%zx %s:%d\n", desc
, mem
, f
, l
);
357 static void BenignRaceImpl(
358 char *f
, int l
, uptr mem
, uptr size
, char *desc
) {
359 Lock
lock(&dyn_ann_ctx
->mtx
);
360 AddExpectRace(&dyn_ann_ctx
->benign
,
361 f
, l
, mem
, size
, desc
);
362 DPrintf("Add benign race: %s addr=%zx %s:%d\n", desc
, mem
, f
, l
);
365 // FIXME: Turn it off later. WTF is benign race?1?? Go talk to Hans Boehm.
366 void INTERFACE_ATTRIBUTE
AnnotateBenignRaceSized(
367 char *f
, int l
, uptr mem
, uptr size
, char *desc
) {
368 SCOPED_ANNOTATION(AnnotateBenignRaceSized
);
369 BenignRaceImpl(f
, l
, mem
, size
, desc
);
372 void INTERFACE_ATTRIBUTE
AnnotateBenignRace(
373 char *f
, int l
, uptr mem
, char *desc
) {
374 SCOPED_ANNOTATION(AnnotateBenignRace
);
375 BenignRaceImpl(f
, l
, mem
, 1, desc
);
378 void INTERFACE_ATTRIBUTE
AnnotateIgnoreReadsBegin(char *f
, int l
) {
379 SCOPED_ANNOTATION(AnnotateIgnoreReadsBegin
);
380 ThreadIgnoreBegin(thr
, pc
);
383 void INTERFACE_ATTRIBUTE
AnnotateIgnoreReadsEnd(char *f
, int l
) {
384 SCOPED_ANNOTATION(AnnotateIgnoreReadsEnd
);
385 ThreadIgnoreEnd(thr
, pc
);
388 void INTERFACE_ATTRIBUTE
AnnotateIgnoreWritesBegin(char *f
, int l
) {
389 SCOPED_ANNOTATION(AnnotateIgnoreWritesBegin
);
390 ThreadIgnoreBegin(thr
, pc
);
393 void INTERFACE_ATTRIBUTE
AnnotateIgnoreWritesEnd(char *f
, int l
) {
394 SCOPED_ANNOTATION(AnnotateIgnoreWritesEnd
);
395 ThreadIgnoreEnd(thr
, pc
);
398 void INTERFACE_ATTRIBUTE
AnnotateIgnoreSyncBegin(char *f
, int l
) {
399 SCOPED_ANNOTATION(AnnotateIgnoreSyncBegin
);
400 ThreadIgnoreSyncBegin(thr
, pc
);
403 void INTERFACE_ATTRIBUTE
AnnotateIgnoreSyncEnd(char *f
, int l
) {
404 SCOPED_ANNOTATION(AnnotateIgnoreSyncEnd
);
405 ThreadIgnoreSyncEnd(thr
, pc
);
408 void INTERFACE_ATTRIBUTE
AnnotatePublishMemoryRange(
409 char *f
, int l
, uptr addr
, uptr size
) {
410 SCOPED_ANNOTATION(AnnotatePublishMemoryRange
);
413 void INTERFACE_ATTRIBUTE
AnnotateUnpublishMemoryRange(
414 char *f
, int l
, uptr addr
, uptr size
) {
415 SCOPED_ANNOTATION(AnnotateUnpublishMemoryRange
);
418 void INTERFACE_ATTRIBUTE
AnnotateThreadName(
419 char *f
, int l
, char *name
) {
420 SCOPED_ANNOTATION(AnnotateThreadName
);
421 ThreadSetName(thr
, name
);
424 // We deliberately omit the implementation of WTFAnnotateHappensBefore() and
425 // WTFAnnotateHappensAfter(). Those are being used by Webkit to annotate
426 // atomic operations, which should be handled by ThreadSanitizer correctly.
427 void INTERFACE_ATTRIBUTE
WTFAnnotateHappensBefore(char *f
, int l
, uptr addr
) {
428 SCOPED_ANNOTATION(AnnotateHappensBefore
);
431 void INTERFACE_ATTRIBUTE
WTFAnnotateHappensAfter(char *f
, int l
, uptr addr
) {
432 SCOPED_ANNOTATION(AnnotateHappensAfter
);
435 void INTERFACE_ATTRIBUTE
WTFAnnotateBenignRaceSized(
436 char *f
, int l
, uptr mem
, uptr sz
, char *desc
) {
437 SCOPED_ANNOTATION(AnnotateBenignRaceSized
);
438 BenignRaceImpl(f
, l
, mem
, sz
, desc
);
441 int INTERFACE_ATTRIBUTE
RunningOnValgrind() {
442 return flags()->running_on_valgrind
;
445 double __attribute__((weak
)) INTERFACE_ATTRIBUTE
ValgrindSlowdown(void) {
449 const char INTERFACE_ATTRIBUTE
* ThreadSanitizerQuery(const char *query
) {
450 if (internal_strcmp(query
, "pure_happens_before") == 0)
456 void INTERFACE_ATTRIBUTE
457 AnnotateMemoryIsInitialized(char *f
, int l
, uptr mem
, uptr sz
) {}
458 void INTERFACE_ATTRIBUTE
459 AnnotateMemoryIsUninitialized(char *f
, int l
, uptr mem
, uptr sz
) {}
461 // Note: the parameter is called flagz, because flags is already taken
462 // by the global function that returns flags.
464 void __tsan_mutex_create(void *m
, unsigned flagz
) {
465 SCOPED_ANNOTATION(__tsan_mutex_create
);
466 MutexCreate(thr
, pc
, (uptr
)m
, flagz
& MutexCreationFlagMask
);
470 void __tsan_mutex_destroy(void *m
, unsigned flagz
) {
471 SCOPED_ANNOTATION(__tsan_mutex_destroy
);
472 MutexDestroy(thr
, pc
, (uptr
)m
, flagz
);
476 void __tsan_mutex_pre_lock(void *m
, unsigned flagz
) {
477 SCOPED_ANNOTATION(__tsan_mutex_pre_lock
);
478 if (!(flagz
& MutexFlagTryLock
)) {
479 if (flagz
& MutexFlagReadLock
)
480 MutexPreReadLock(thr
, pc
, (uptr
)m
);
482 MutexPreLock(thr
, pc
, (uptr
)m
);
484 ThreadIgnoreBegin(thr
, pc
, /*save_stack=*/false);
485 ThreadIgnoreSyncBegin(thr
, pc
, /*save_stack=*/false);
489 void __tsan_mutex_post_lock(void *m
, unsigned flagz
, int rec
) {
490 SCOPED_ANNOTATION(__tsan_mutex_post_lock
);
491 ThreadIgnoreSyncEnd(thr
, pc
);
492 ThreadIgnoreEnd(thr
, pc
);
493 if (!(flagz
& MutexFlagTryLockFailed
)) {
494 if (flagz
& MutexFlagReadLock
)
495 MutexPostReadLock(thr
, pc
, (uptr
)m
, flagz
);
497 MutexPostLock(thr
, pc
, (uptr
)m
, flagz
, rec
);
502 int __tsan_mutex_pre_unlock(void *m
, unsigned flagz
) {
503 SCOPED_ANNOTATION_RET(__tsan_mutex_pre_unlock
, 0);
505 if (flagz
& MutexFlagReadLock
) {
506 CHECK(!(flagz
& MutexFlagRecursiveUnlock
));
507 MutexReadUnlock(thr
, pc
, (uptr
)m
);
509 ret
= MutexUnlock(thr
, pc
, (uptr
)m
, flagz
);
511 ThreadIgnoreBegin(thr
, pc
, /*save_stack=*/false);
512 ThreadIgnoreSyncBegin(thr
, pc
, /*save_stack=*/false);
517 void __tsan_mutex_post_unlock(void *m
, unsigned flagz
) {
518 SCOPED_ANNOTATION(__tsan_mutex_post_unlock
);
519 ThreadIgnoreSyncEnd(thr
, pc
);
520 ThreadIgnoreEnd(thr
, pc
);
524 void __tsan_mutex_pre_signal(void *addr
, unsigned flagz
) {
525 SCOPED_ANNOTATION(__tsan_mutex_pre_signal
);
526 ThreadIgnoreBegin(thr
, pc
, /*save_stack=*/false);
527 ThreadIgnoreSyncBegin(thr
, pc
, /*save_stack=*/false);
531 void __tsan_mutex_post_signal(void *addr
, unsigned flagz
) {
532 SCOPED_ANNOTATION(__tsan_mutex_post_signal
);
533 ThreadIgnoreSyncEnd(thr
, pc
);
534 ThreadIgnoreEnd(thr
, pc
);
538 void __tsan_mutex_pre_divert(void *addr
, unsigned flagz
) {
539 SCOPED_ANNOTATION(__tsan_mutex_pre_divert
);
540 // Exit from ignore region started in __tsan_mutex_pre_lock/unlock/signal.
541 ThreadIgnoreSyncEnd(thr
, pc
);
542 ThreadIgnoreEnd(thr
, pc
);
546 void __tsan_mutex_post_divert(void *addr
, unsigned flagz
) {
547 SCOPED_ANNOTATION(__tsan_mutex_post_divert
);
548 ThreadIgnoreBegin(thr
, pc
, /*save_stack=*/false);
549 ThreadIgnoreSyncBegin(thr
, pc
, /*save_stack=*/false);