//===-- tsan_update_shadow_word_inl.h ---------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Body of the hottest inner loop.
// If we wrap this body into a function, compilers (both gcc and clang)
// produce slightly less efficient code.
//===----------------------------------------------------------------------===//
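// Note (added for exposition; not part of the upstream header): this body is
// meant to be textually #included by the runtime (tsan_rtl.cc in upstream
// TSan) once per shadow cell of the accessed word, with the surrounding code
// providing idx, shadow_mem, cur, old and store_word. A rough sketch of that
// including context, inferred from the variables used below, might look like:
//
//   for (int idx = 0; idx < kShadowCnt; idx++) {
//   #include "tsan_update_shadow_word_inl.h"
//   }
//
// The exact surrounding function and loop shape vary between TSan versions;
// the do { ... } while (0) wrapper below exists so that "break" skips to the
// next shadow cell rather than ending the whole scan.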
do {
  StatInc(thr, StatShadowProcessed);
  const unsigned kAccessSize = 1 << kAccessSizeLog;
  unsigned off = cur.ComputeSearchOffset();
  u64 *sp = &shadow_mem[(idx + off) % kShadowCnt];
  old = LoadShadow(sp);
  if (old.IsZero()) {
    StatInc(thr, StatShadowZero);
    if (store_word)
      StoreIfNotYetStored(sp, &store_word);
    // The above StoreIfNotYetStored could be done unconditionally
    // and it even shows 4% gain on synthetic benchmarks (r4307).
    break;
  }
  // is the memory access equal to the previous?
  if (Shadow::Addr0AndSizeAreEqual(cur, old)) {
    StatInc(thr, StatShadowSameSize);
    // same thread?
    if (Shadow::TidsAreEqual(old, cur)) {
      StatInc(thr, StatShadowSameThread);
      if (OldIsInSameSynchEpoch(old, thr)) {
        if (old.IsRWNotWeaker(kAccessIsWrite, kIsAtomic)) {
          // found a slot that holds effectively the same info
          // (that is, same tid, same sync epoch and same size)
          StatInc(thr, StatMopSame);
          return;
        }
        StoreIfNotYetStored(sp, &store_word);
        break;
      }
      if (old.IsRWWeakerOrEqual(kAccessIsWrite, kIsAtomic))
        StoreIfNotYetStored(sp, &store_word);
      break;
    }
    StatInc(thr, StatShadowAnotherThread);
    if (HappensBefore(old, thr)) {
      // the old access is ordered before the current one: overwrite the slot
      StoreIfNotYetStored(sp, &store_word);
      break;
    }
    // two reads or two atomic accesses never race
    if (old.IsBothReadsOrAtomic(kAccessIsWrite, kIsAtomic))
      break;
    goto RACE;
  }
  // Do the memory accesses intersect?
  // In Go all memory accesses are 1 byte, so there can be no intersections.
  if (kCppMode && Shadow::TwoRangesIntersect(old, cur, kAccessSize)) {
    StatInc(thr, StatShadowIntersect);
    if (Shadow::TidsAreEqual(old, cur)) {
      StatInc(thr, StatShadowSameThread);
      break;
    }
    StatInc(thr, StatShadowAnotherThread);
    if (old.IsBothReadsOrAtomic(kAccessIsWrite, kIsAtomic))
      break;
    if (HappensBefore(old, thr))
      break;
    goto RACE;
  }
  // The accesses do not intersect.
  StatInc(thr, StatShadowNotIntersect);
  break;
} while (0);
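// For context (added for exposition; these names are defined by the including
// file, tsan_rtl.cc in upstream TSan, and their exact definitions may differ
// between versions): StoreIfNotYetStored publishes the pending shadow word
// into the chosen cell and then zeroes store_word, so the same access is not
// written into more than one shadow cell. A minimal sketch:
//
//   ALWAYS_INLINE
//   void StoreIfNotYetStored(u64 *sp, u64 *s) {
//     StoreShadow(sp, *s);  // write the new shadow word into this cell
//     *s = 0;               // later cells see store_word == 0 and store nothing new
//   }
//
// The RACE label targeted by the gotos above is likewise provided by the
// including code and leads to the race-reporting path.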