//===-- tsan_interface_java.cc --------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include "tsan_interface_java.h"
#include "tsan_rtl.h"
#include "tsan_mutex.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_placement_new.h"

using namespace __tsan;  // NOLINT

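// Hypothetical call sequence from a Java VM (illustrative only; the order and
// call sites are assumptions, only the __tsan_java_* entry points defined in
// this file are real):
//   __tsan_java_init(heap_begin, heap_size);  // once, before any other call
//   __tsan_java_alloc(obj, size);             // when an object is allocated
//   __tsan_java_mutex_lock(obj);              // when a monitor is acquired
//   __tsan_java_mutex_unlock(obj);            // when a monitor is released
//   __tsan_java_move(src, dst, size);         // when the GC moves an object
//   __tsan_java_free(obj, size);              // when heap memory is reclaimed
//   __tsan_java_fini();                       // at VM shutdown, returns status
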
namespace __tsan {

const uptr kHeapShadow = 0x300000000000ull;
const uptr kHeapAlignment = 8;

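// The Java heap [heap_begin, heap_begin + heap_size) is mirrored by an array
// of BlockDesc structures mmap'ed at the fixed address kHeapShadow: one
// BlockDesc per kHeapAlignment-byte granule of the heap. A BlockDesc whose
// 'begin' flag is set marks the first granule of an allocated block and owns
// the list of SyncVars (per-address synchronization metadata) for that block.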
struct BlockDesc {
  bool begin;
  Mutex mtx;
  SyncVar *head;

  BlockDesc()
      : mtx(MutexTypeJavaMBlock, StatMtxJavaMBlock)
      , head() {
    CHECK_EQ(begin, false);
    begin = true;
  }

  ~BlockDesc() {
    CHECK_EQ(begin, true);
    begin = false;
    ThreadState *thr = cur_thread();
    SyncVar *s = head;
    while (s) {
      SyncVar *s1 = s->next;
      StatInc(thr, StatSyncDestroyed);
      s->mtx.Lock();
      s->mtx.Unlock();
      thr->mset.Remove(s->GetId());
      DestroyAndFree(s);
      s = s1;
    }
  }
};

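// Per-process context: the heap bounds supplied by __tsan_java_init() and the
// shadow array of BlockDesc covering the whole Java heap.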
struct JavaContext {
  const uptr heap_begin;
  const uptr heap_size;
  BlockDesc *heap_shadow;

  JavaContext(jptr heap_begin, jptr heap_size)
      : heap_begin(heap_begin)
      , heap_size(heap_size) {
    uptr size = heap_size / kHeapAlignment * sizeof(BlockDesc);
    heap_shadow = (BlockDesc*)MmapFixedNoReserve(kHeapShadow, size);
    if ((uptr)heap_shadow != kHeapShadow) {
      Printf("ThreadSanitizer: failed to mmap Java heap shadow\n");
      Die();
    }
  }
};

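// RAII scope used by all __tsan_java_* entry points: ensures the runtime is
// initialized, reports function entry/exit for stack traces, and marks the
// thread as executing inside the runtime via the in_rtl counter.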
class ScopedJavaFunc {
 public:
  ScopedJavaFunc(ThreadState *thr, uptr pc)
      : thr_(thr) {
    Initialize(thr_);
    FuncEntry(thr, pc);
    CHECK_EQ(thr_->in_rtl, 0);
    thr_->in_rtl++;
  }

  ~ScopedJavaFunc() {
    thr_->in_rtl--;
    CHECK_EQ(thr_->in_rtl, 0);
    FuncExit(thr_);
    // FIXME(dvyukov): process pending signals.
  }

 private:
  ThreadState *thr_;
};

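// Statically allocated storage for the JavaContext; __tsan_java_init()
// constructs the context in this buffer with placement new, so no dynamic
// allocation is required.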
static u64 jctx_buf[sizeof(JavaContext) / sizeof(u64) + 1];
static JavaContext *jctx;

static BlockDesc *getblock(uptr addr) {
  uptr i = (addr - jctx->heap_begin) / kHeapAlignment;
  return &jctx->heap_shadow[i];
}

static uptr USED getmem(BlockDesc *b) {
  uptr i = b - jctx->heap_shadow;
  uptr p = jctx->heap_begin + i * kHeapAlignment;
  CHECK_GE(p, jctx->heap_begin);
  CHECK_LT(p, jctx->heap_begin + jctx->heap_size);
  return p;
}

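// Walks backwards from the granule containing addr to the nearest granule
// with the 'begin' flag set, i.e. to the descriptor of the block that addr
// belongs to.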
static BlockDesc *getblockbegin(uptr addr) {
  for (BlockDesc *b = getblock(addr);; b--) {
    CHECK_GE(b, jctx->heap_shadow);
    if (b->begin)
      return b;
  }
  return 0;
}

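// Returns the SyncVar associated with addr, creating one if requested.
// The returned SyncVar is locked for writing or reading according to
// write_lock; returns 0 for addresses outside the registered Java heap.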
SyncVar* GetJavaSync(ThreadState *thr, uptr pc, uptr addr,
                     bool write_lock, bool create) {
  if (jctx == 0 || addr < jctx->heap_begin
      || addr >= jctx->heap_begin + jctx->heap_size)
    return 0;
  BlockDesc *b = getblockbegin(addr);
  DPrintf("#%d: GetJavaSync %p->%p\n", thr->tid, addr, b);
  Lock l(&b->mtx);
  SyncVar *s = b->head;
  for (; s; s = s->next) {
    if (s->addr == addr) {
      DPrintf("#%d: found existing sync for %p\n", thr->tid, addr);
      break;
    }
  }
  if (s == 0 && create) {
    DPrintf("#%d: creating new sync for %p\n", thr->tid, addr);
    s = CTX()->synctab.Create(thr, pc, addr);
    s->next = b->head;
    b->head = s;
  }
  if (s) {
    if (write_lock)
      s->mtx.Lock();
    else
      s->mtx.ReadLock();
  }
  return s;
}

SyncVar* GetAndRemoveJavaSync(ThreadState *thr, uptr pc, uptr addr) {
  // We do not destroy Java mutexes other than in __tsan_java_free().
  return 0;
}

}  // namespace __tsan

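// Prologue shared by all __tsan_java_* entry points: obtains the current
// ThreadState and the caller PC for reporting, and opens a ScopedJavaFunc
// scope for the duration of the call.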
#define SCOPED_JAVA_FUNC(func) \
  ThreadState *thr = cur_thread(); \
  const uptr caller_pc = GET_CALLER_PC(); \
  const uptr pc = (uptr)&func; \
  (void)pc; \
  ScopedJavaFunc scoped(thr, caller_pc); \
/**/

void __tsan_java_init(jptr heap_begin, jptr heap_size) {
  SCOPED_JAVA_FUNC(__tsan_java_init);
  DPrintf("#%d: java_init(%p, %p)\n", thr->tid, heap_begin, heap_size);
  CHECK_EQ(jctx, 0);
  CHECK_GT(heap_begin, 0);
  CHECK_GT(heap_size, 0);
  CHECK_EQ(heap_begin % kHeapAlignment, 0);
  CHECK_EQ(heap_size % kHeapAlignment, 0);
  CHECK_LT(heap_begin, heap_begin + heap_size);
  jctx = new(jctx_buf) JavaContext(heap_begin, heap_size);
}

int __tsan_java_fini() {
  SCOPED_JAVA_FUNC(__tsan_java_fini);
  DPrintf("#%d: java_fini()\n", thr->tid);
  CHECK_NE(jctx, 0);
  // FIXME(dvyukov): this does not call atexit() callbacks.
  int status = Finalize(thr);
  DPrintf("#%d: java_fini() = %d\n", thr->tid, status);
  return status;
}

void __tsan_java_alloc(jptr ptr, jptr size) {
  SCOPED_JAVA_FUNC(__tsan_java_alloc);
  DPrintf("#%d: java_alloc(%p, %p)\n", thr->tid, ptr, size);
  CHECK_NE(jctx, 0);
  CHECK_NE(size, 0);
  CHECK_EQ(ptr % kHeapAlignment, 0);
  CHECK_EQ(size % kHeapAlignment, 0);
  CHECK_GE(ptr, jctx->heap_begin);
  CHECK_LE(ptr + size, jctx->heap_begin + jctx->heap_size);

  BlockDesc *b = getblock(ptr);
  new(b) BlockDesc();
}

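// Destroys the BlockDesc (and the SyncVars it owns) of every block that
// starts within [ptr, ptr + size).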
void __tsan_java_free(jptr ptr, jptr size) {
  SCOPED_JAVA_FUNC(__tsan_java_free);
  DPrintf("#%d: java_free(%p, %p)\n", thr->tid, ptr, size);
  CHECK_NE(jctx, 0);
  CHECK_NE(size, 0);
  CHECK_EQ(ptr % kHeapAlignment, 0);
  CHECK_EQ(size % kHeapAlignment, 0);
  CHECK_GE(ptr, jctx->heap_begin);
  CHECK_LE(ptr + size, jctx->heap_begin + jctx->heap_size);

  BlockDesc *beg = getblock(ptr);
  BlockDesc *end = getblock(ptr + size);
  for (BlockDesc *b = beg; b != end; b++) {
    if (b->begin)
      b->~BlockDesc();
  }
}

void __tsan_java_move(jptr src, jptr dst, jptr size) {
  SCOPED_JAVA_FUNC(__tsan_java_move);
  DPrintf("#%d: java_move(%p, %p, %p)\n", thr->tid, src, dst, size);
  CHECK_NE(jctx, 0);
  CHECK_NE(size, 0);
  CHECK_EQ(src % kHeapAlignment, 0);
  CHECK_EQ(dst % kHeapAlignment, 0);
  CHECK_EQ(size % kHeapAlignment, 0);
  CHECK_GE(src, jctx->heap_begin);
  CHECK_LE(src + size, jctx->heap_begin + jctx->heap_size);
  CHECK_GE(dst, jctx->heap_begin);
  CHECK_LE(dst + size, jctx->heap_begin + jctx->heap_size);
  CHECK(dst >= src + size || src >= dst + size);

  // Assuming it's not running concurrently with threads that do
  // memory accesses and mutex operations (stop-the-world phase).
  {  // NOLINT
    BlockDesc *s = getblock(src);
    BlockDesc *d = getblock(dst);
    BlockDesc *send = getblock(src + size);
    for (; s != send; s++, d++) {
      CHECK_EQ(d->begin, false);
      if (s->begin) {
        DPrintf("#%d: moving block %p->%p\n", thr->tid, getmem(s), getmem(d));
        new(d) BlockDesc;
        d->head = s->head;
        for (SyncVar *sync = d->head; sync; sync = sync->next) {
          uptr newaddr = sync->addr - src + dst;
          DPrintf("#%d: moving sync %p->%p\n", thr->tid, sync->addr, newaddr);
          sync->addr = newaddr;
        }
        s->head = 0;
        s->~BlockDesc();
      }
    }
  }

  {  // NOLINT
    u64 *s = (u64*)MemToShadow(src);
    u64 *d = (u64*)MemToShadow(dst);
    u64 *send = (u64*)MemToShadow(src + size);
    for (; s != send; s++, d++) {
      *d = *s;
      *s = 0;
    }
  }
}

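// The following entry points appear to model Java monitor operations as
// TSan mutex lock/unlock events keyed by the object address itself.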
void __tsan_java_mutex_lock(jptr addr) {
  SCOPED_JAVA_FUNC(__tsan_java_mutex_lock);
  DPrintf("#%d: java_mutex_lock(%p)\n", thr->tid, addr);
  CHECK_NE(jctx, 0);
  CHECK_GE(addr, jctx->heap_begin);
  CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);

  MutexLock(thr, pc, addr);
}

void __tsan_java_mutex_unlock(jptr addr) {
  SCOPED_JAVA_FUNC(__tsan_java_mutex_unlock);
  DPrintf("#%d: java_mutex_unlock(%p)\n", thr->tid, addr);
  CHECK_NE(jctx, 0);
  CHECK_GE(addr, jctx->heap_begin);
  CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);

  MutexUnlock(thr, pc, addr);
}

void __tsan_java_mutex_read_lock(jptr addr) {
  SCOPED_JAVA_FUNC(__tsan_java_mutex_read_lock);
  DPrintf("#%d: java_mutex_read_lock(%p)\n", thr->tid, addr);
  CHECK_NE(jctx, 0);
  CHECK_GE(addr, jctx->heap_begin);
  CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);

  MutexReadLock(thr, pc, addr);
}

void __tsan_java_mutex_read_unlock(jptr addr) {
  SCOPED_JAVA_FUNC(__tsan_java_mutex_read_unlock);
  DPrintf("#%d: java_mutex_read_unlock(%p)\n", thr->tid, addr);
  CHECK_NE(jctx, 0);
  CHECK_GE(addr, jctx->heap_begin);
  CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);

  MutexReadUnlock(thr, pc, addr);
}