//===-- tsan_sync.cc ------------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
11 #include "sanitizer_common/sanitizer_placement_new.h"
12 #include "tsan_sync.h"
13 #include "tsan_rtl.h"
14 #include "tsan_mman.h"
16 namespace __tsan {
18 void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s);
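// Note: DDMutexInit is the deadlock-detector hook invoked for a freshly
// created SyncVar; it is defined outside this file (in the mutex-handling
// part of the runtime, tsan_rtl_mutex.cc in this tree).
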
SyncVar::SyncVar()
    : mtx(MutexTypeSyncVar, StatMtxSyncVar) {
  Reset(0);
}

void SyncVar::Init(ThreadState *thr, uptr pc, uptr addr, u64 uid) {
  this->addr = addr;
  this->uid = uid;
  this->next = 0;

  creation_stack_id = 0;
  if (kCppMode)  // Go does not use them
    creation_stack_id = CurrentStackId(thr, pc);
  if (common_flags()->detect_deadlocks)
    DDMutexInit(thr, pc, this);
}

void SyncVar::Reset(ThreadState *thr) {
  uid = 0;
  creation_stack_id = 0;
  owner_tid = kInvalidTid;
  last_lock = 0;
  recursion = 0;
  is_rw = 0;
  is_recursive = 0;
  is_broken = 0;
  is_linker_init = 0;

  if (thr == 0) {
    CHECK_EQ(clock.size(), 0);
    CHECK_EQ(read_clock.size(), 0);
  } else {
    clock.Reset(&thr->clock_cache);
    read_clock.Reset(&thr->clock_cache);
  }
}

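// MetaMap associates application memory with meta objects: heap blocks
// (MBlock) and synchronization objects (SyncVar). Each meta shadow cell is a
// u32 that is either 0 (no meta objects) or an allocator index tagged with
// kFlagBlock/kFlagSync; sync objects that map to the same cell are chained
// through SyncVar::next, with the chain optionally terminated by a block
// index.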
MetaMap::MetaMap() {
  atomic_store(&uid_gen_, 0, memory_order_relaxed);
}

void MetaMap::AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz) {
  u32 idx = block_alloc_.Alloc(&thr->block_cache);
  MBlock *b = block_alloc_.Map(idx);
  b->siz = sz;
  b->tid = thr->tid;
  b->stk = CurrentStackId(thr, pc);
  u32 *meta = MemToMeta(p);
  DCHECK_EQ(*meta, 0);
  *meta = idx | kFlagBlock;
}

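// Illustrative sketch (not code from this file): the allocator wrappers in
// tsan_mman.cc are expected to pair AllocBlock/FreeBlock roughly like
//   // on malloc:
//   ctx->metamap.AllocBlock(thr, pc, (uptr)p, sz);
//   // on free:
//   uptr sz = ctx->metamap.FreeBlock(thr, pc, (uptr)p);
// The exact call sites are an assumption here; see tsan_mman.cc.
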
uptr MetaMap::FreeBlock(ThreadState *thr, uptr pc, uptr p) {
  MBlock* b = GetBlock(p);
  if (b == 0)
    return 0;
  uptr sz = RoundUpTo(b->siz, kMetaShadowCell);
  FreeRange(thr, pc, p, sz);
  return sz;
}

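// FreeRange walks every meta cell in [p, p+sz) and releases whatever the cell
// references: a block index (kFlagBlock) terminates the chain, while sync
// indices (kFlagSync) are followed through SyncVar::next; each SyncVar is
// Reset() and returned to the per-thread sync cache.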
bool MetaMap::FreeRange(ThreadState *thr, uptr pc, uptr p, uptr sz) {
  bool has_something = false;
  u32 *meta = MemToMeta(p);
  u32 *end = MemToMeta(p + sz);
  if (end == meta)
    end++;
  for (; meta < end; meta++) {
    u32 idx = *meta;
    if (idx == 0) {
      // Note: don't write to meta in this case -- the block can be huge.
      continue;
    }
    *meta = 0;
    has_something = true;
    while (idx != 0) {
      if (idx & kFlagBlock) {
        block_alloc_.Free(&thr->block_cache, idx & ~kFlagMask);
        break;
      } else if (idx & kFlagSync) {
        DCHECK(idx & kFlagSync);
        SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
        u32 next = s->next;
        s->Reset(thr);
        sync_alloc_.Free(&thr->sync_cache, idx & ~kFlagMask);
        idx = next;
      } else {
        CHECK(0);
      }
    }
  }
  return has_something;
}

// ResetRange removes all meta objects from the range.
// It is called for large mmap-ed regions. The function is best-effort wrt
// freeing of meta objects, because we don't want to page in the whole range,
// which can be huge. The function probes pages one-by-one until it finds a
// page without meta objects; at that point it stops freeing meta objects.
// Because thread stacks grow top-down, we do the same starting from the end
// as well.
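// For example, with the default meta shadow parameters (kMetaShadowCell = 8
// application bytes per cell, kMetaShadowSize = 4 bytes per cell) kMetaRatio
// below is 2, so each kPageSize-sized probe of application memory corresponds
// to exactly one OS page of meta shadow, and the meta region remapped at the
// end is half the size of the application range. (The concrete constants are
// given only as an illustration; tsan_defs.h holds the authoritative values.)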
void MetaMap::ResetRange(ThreadState *thr, uptr pc, uptr p, uptr sz) {
  const uptr kMetaRatio = kMetaShadowCell / kMetaShadowSize;
  const uptr kPageSize = GetPageSizeCached() * kMetaRatio;
  if (sz <= 4 * kPageSize) {
    // If the range is small, just do the normal free procedure.
    FreeRange(thr, pc, p, sz);
    return;
  }
  // First, round both ends of the range to page size.
  uptr diff = RoundUp(p, kPageSize) - p;
  if (diff != 0) {
    FreeRange(thr, pc, p, diff);
    p += diff;
    sz -= diff;
  }
  diff = p + sz - RoundDown(p + sz, kPageSize);
  if (diff != 0) {
    FreeRange(thr, pc, p + sz - diff, diff);
    sz -= diff;
  }
  // Now we must have a non-empty page-aligned range.
  CHECK_GT(sz, 0);
  CHECK_EQ(p, RoundUp(p, kPageSize));
  CHECK_EQ(sz, RoundUp(sz, kPageSize));
  const uptr p0 = p;
  const uptr sz0 = sz;
  // Probe start of the range.
  while (sz > 0) {
    bool has_something = FreeRange(thr, pc, p, kPageSize);
    p += kPageSize;
    sz -= kPageSize;
    if (!has_something)
      break;
  }
  // Probe end of the range.
  while (sz > 0) {
    bool has_something = FreeRange(thr, pc, p + sz - kPageSize, kPageSize);
    sz -= kPageSize;
    if (!has_something)
      break;
  }
  // Finally, page out the whole range (including the parts that we've just
  // freed). Note: we can't simply madvise, because we need to leave a zeroed
  // range (otherwise __tsan_java_move can crash if it encounters left-over
  // meta objects in the java heap).
  uptr metap = (uptr)MemToMeta(p0);
  uptr metasz = sz0 / kMetaRatio;
  UnmapOrDie((void*)metap, metasz);
  MmapFixedNoReserve(metap, metasz);
}

MBlock* MetaMap::GetBlock(uptr p) {
  u32 *meta = MemToMeta(p);
  u32 idx = *meta;
  for (;;) {
    if (idx == 0)
      return 0;
    if (idx & kFlagBlock)
      return block_alloc_.Map(idx & ~kFlagMask);
    DCHECK(idx & kFlagSync);
    SyncVar * s = sync_alloc_.Map(idx & ~kFlagMask);
    idx = s->next;
  }
}

SyncVar* MetaMap::GetOrCreateAndLock(ThreadState *thr, uptr pc,
                                     uptr addr, bool write_lock) {
  return GetAndLock(thr, pc, addr, write_lock, true);
}

SyncVar* MetaMap::GetIfExistsAndLock(uptr addr) {
  return GetAndLock(0, 0, addr, true, false);
}

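// GetAndLock implements both wrappers above. It walks the per-cell chain
// looking for a SyncVar with a matching address; if none is found and create
// is true, it allocates a new SyncVar and publishes it by CAS-ing the meta
// cell, restarting from the re-read cell value if another thread updated the
// cell concurrently. A speculatively allocated SyncVar that loses the race to
// an existing one is Reset() and returned to the cache.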
SyncVar* MetaMap::GetAndLock(ThreadState *thr, uptr pc,
                             uptr addr, bool write_lock, bool create) {
  u32 *meta = MemToMeta(addr);
  u32 idx0 = *meta;
  u32 myidx = 0;
  SyncVar *mys = 0;
  for (;;) {
    u32 idx = idx0;
    for (;;) {
      if (idx == 0)
        break;
      if (idx & kFlagBlock)
        break;
      DCHECK(idx & kFlagSync);
      SyncVar * s = sync_alloc_.Map(idx & ~kFlagMask);
      if (s->addr == addr) {
        if (myidx != 0) {
          mys->Reset(thr);
          sync_alloc_.Free(&thr->sync_cache, myidx);
        }
        if (write_lock)
          s->mtx.Lock();
        else
          s->mtx.ReadLock();
        return s;
      }
      idx = s->next;
    }
    if (!create)
      return 0;
    if (*meta != idx0) {
      idx0 = *meta;
      continue;
    }

    if (myidx == 0) {
      const u64 uid = atomic_fetch_add(&uid_gen_, 1, memory_order_relaxed);
      myidx = sync_alloc_.Alloc(&thr->sync_cache);
      mys = sync_alloc_.Map(myidx);
      mys->Init(thr, pc, addr, uid);
    }
    mys->next = idx0;
    if (atomic_compare_exchange_strong((atomic_uint32_t*)meta, &idx0,
        myidx | kFlagSync, memory_order_release)) {
      if (write_lock)
        mys->mtx.Lock();
      else
        mys->mtx.ReadLock();
      return mys;
    }
  }
}

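// Illustrative sketch (not code from this file): mutex events in the runtime
// are expected to obtain the SyncVar returned above roughly like
//   SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
//   // ... update s->owner_tid, recursion, clocks ...
//   s->mtx.Unlock();
// The exact call sites live in tsan_rtl_mutex.cc; the snippet is an
// assumption-based usage example.
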
void MetaMap::MoveMemory(uptr src, uptr dst, uptr sz) {
  // src and dst can overlap; the caller must ensure that there are no
  // concurrent accesses to the regions (e.g. stop-the-world).
  CHECK_NE(src, dst);
  CHECK_NE(sz, 0);
  uptr diff = dst - src;
  u32 *src_meta = MemToMeta(src);
  u32 *dst_meta = MemToMeta(dst);
  u32 *src_meta_end = MemToMeta(src + sz);
  uptr inc = 1;
  if (dst > src) {
    src_meta = MemToMeta(src + sz) - 1;
    dst_meta = MemToMeta(dst + sz) - 1;
    src_meta_end = MemToMeta(src) - 1;
    inc = -1;
  }
  for (; src_meta != src_meta_end; src_meta += inc, dst_meta += inc) {
    CHECK_EQ(*dst_meta, 0);
    u32 idx = *src_meta;
    *src_meta = 0;
    *dst_meta = idx;
    // Patch the addresses in sync objects.
    while (idx != 0) {
      if (idx & kFlagBlock)
        break;
      CHECK(idx & kFlagSync);
      SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
      s->addr += diff;
      idx = s->next;
    }
  }
}

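// MoveMemory above is intended for moving garbage collectors (the
// __tsan_java_move entry point mentioned earlier); the caller must guarantee
// the stop-the-world condition stated in the comment.
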
void MetaMap::OnThreadIdle(ThreadState *thr) {
  block_alloc_.FlushCache(&thr->block_cache);
  sync_alloc_.FlushCache(&thr->sync_cache);
}

}  // namespace __tsan