official-gcc.git / libsanitizer/tsan/tsan_fd.cc
//===-- tsan_fd.cc --------------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include "tsan_fd.h"
#include "tsan_rtl.h"
#include <sanitizer_common/sanitizer_atomic.h>

namespace __tsan {
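
// This module keeps a descriptor (FdDesc) for every file descriptor used by
// the program and models happens-before edges between operations on it.
// Descriptors live in a two-level table: a static L1 array of pointers to
// lazily allocated L2 blocks. The L2 blocks reside in user memory so that
// the regular shadow-based machinery also catches races on the descriptors
// themselves. Synchronization between fd operations is expressed through
// refcounted FdSync objects attached to the descriptors.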

const int kTableSizeL1 = 1024;
const int kTableSizeL2 = 1024;
const int kTableSize = kTableSizeL1 * kTableSizeL2;
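
// Refcounted synchronization object attached to descriptors. A reference
// count of (u64)-1 marks the statically allocated objects in FdContext,
// which ref()/unref() treat as immortal.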
struct FdSync {
  atomic_uint64_t rc;
};
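
// Per-fd state: the sync object used for acquire/release plus the creating
// thread and stack, which are shown in reports that involve the fd.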
struct FdDesc {
  FdSync *sync;
  int creation_tid;
  u32 creation_stack;
};
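
// Global state: the L1 table of lazily allocated L2 descriptor blocks and
// the static sync objects shared by whole classes of descriptors.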
struct FdContext {
  atomic_uintptr_t tab[kTableSizeL1];
  // Addresses used for synchronization.
  FdSync globsync;
  FdSync filesync;
  FdSync socksync;
  u64 connectsync;
};

static FdContext fdctx;

static bool bogusfd(int fd) {
  // Apparently a bogus fd value.
  return fd < 0 || fd >= kTableSize;
}
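
// allocsync/ref/unref implement the FdSync lifetime protocol: allocsync()
// creates an object with rc == 1, ref/unref skip the immortal static
// objects, and unref() destroys the associated SyncVar and frees the object
// once the last reference is dropped.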
static FdSync *allocsync() {
  FdSync *s = (FdSync*)internal_alloc(MBlockFD, sizeof(FdSync));
  atomic_store(&s->rc, 1, memory_order_relaxed);
  return s;
}

static FdSync *ref(FdSync *s) {
  if (s && atomic_load(&s->rc, memory_order_relaxed) != (u64)-1)
    atomic_fetch_add(&s->rc, 1, memory_order_relaxed);
  return s;
}

static void unref(ThreadState *thr, uptr pc, FdSync *s) {
  if (s && atomic_load(&s->rc, memory_order_relaxed) != (u64)-1) {
    if (atomic_fetch_sub(&s->rc, 1, memory_order_acq_rel) == 1) {
      CHECK_NE(s, &fdctx.globsync);
      CHECK_NE(s, &fdctx.filesync);
      CHECK_NE(s, &fdctx.socksync);
      SyncVar *v = CTX()->synctab.GetAndRemove(thr, pc, (uptr)s);
      if (v)
        DestroyAndFree(v);
      internal_free(s);
    }
  }
}
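
// Returns the descriptor for fd, allocating the containing L2 block on first
// use. The block is carved out of user memory (user_alloc) so that races on
// the descriptors themselves are visible to the shadow machinery; a CAS
// installs the block and the losing thread frees its copy.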
static FdDesc *fddesc(ThreadState *thr, uptr pc, int fd) {
  CHECK_GE(fd, 0);
  CHECK_LT(fd, kTableSize);
  atomic_uintptr_t *pl1 = &fdctx.tab[fd / kTableSizeL2];
  uptr l1 = atomic_load(pl1, memory_order_consume);
  if (l1 == 0) {
    uptr size = kTableSizeL2 * sizeof(FdDesc);
    // We need this to reside in user memory to properly catch races on it.
    void *p = user_alloc(thr, pc, size);
    internal_memset(p, 0, size);
    MemoryResetRange(thr, (uptr)&fddesc, (uptr)p, size);
    if (atomic_compare_exchange_strong(pl1, &l1, (uptr)p, memory_order_acq_rel))
      l1 = (uptr)p;
    else
      user_free(thr, pc, p);
  }
  return &((FdDesc*)l1)[fd % kTableSizeL2];  // NOLINT
}
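
// Attaches a sync object to fd according to the io_sync flag:
//   io_sync == 0: no synchronization is modeled, s is dropped;
//   io_sync == 1: s itself becomes the fd's sync object;
//   io_sync == 2: every fd shares the single global sync object.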
// s must be already ref'ed.
static void init(ThreadState *thr, uptr pc, int fd, FdSync *s) {
  FdDesc *d = fddesc(thr, pc, fd);
  // As a matter of fact, we don't intercept all close calls.
  // See e.g. libc __res_iclose().
  if (d->sync) {
    unref(thr, pc, d->sync);
    d->sync = 0;
  }
  if (flags()->io_sync == 0) {
    unref(thr, pc, s);
  } else if (flags()->io_sync == 1) {
    d->sync = s;
  } else if (flags()->io_sync == 2) {
    unref(thr, pc, s);
    d->sync = &fdctx.globsync;
  }
  d->creation_tid = thr->tid;
  d->creation_stack = CurrentStackId(thr, pc);
  // To catch races between fd usage and open.
  MemoryRangeImitateWrite(thr, pc, (uptr)d, 8);
}
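
// Marks the static sync objects as immortal so that ref/unref never free them.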
void FdInit() {
  atomic_store(&fdctx.globsync.rc, (u64)-1, memory_order_relaxed);
  atomic_store(&fdctx.filesync.rc, (u64)-1, memory_order_relaxed);
  atomic_store(&fdctx.socksync.rc, (u64)-1, memory_order_relaxed);
}

void FdOnFork(ThreadState *thr, uptr pc) {
  // On fork() we need to reset all fd's, because the child is going to
  // close all of them, and that would cause races between the previous
  // read/write and the close.
  for (int l1 = 0; l1 < kTableSizeL1; l1++) {
    FdDesc *tab = (FdDesc*)atomic_load(&fdctx.tab[l1], memory_order_relaxed);
    if (tab == 0)
      break;
    for (int l2 = 0; l2 < kTableSizeL2; l2++) {
      FdDesc *d = &tab[l2];
      MemoryResetRange(thr, pc, (uptr)d, 8);
    }
  }
}
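
// Maps an address inside the descriptor table back to the fd it belongs to,
// together with the thread and stack that created it; used when a reported
// race location turns out to be a file descriptor.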
bool FdLocation(uptr addr, int *fd, int *tid, u32 *stack) {
  for (int l1 = 0; l1 < kTableSizeL1; l1++) {
    FdDesc *tab = (FdDesc*)atomic_load(&fdctx.tab[l1], memory_order_relaxed);
    if (tab == 0)
      break;
    if (addr >= (uptr)tab && addr < (uptr)(tab + kTableSizeL2)) {
      int l2 = (addr - (uptr)tab) / sizeof(FdDesc);
      FdDesc *d = &tab[l2];
      *fd = l1 * kTableSizeL1 + l2;
      *tid = d->creation_tid;
      *stack = d->creation_stack;
      return true;
    }
  }
  return false;
}
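
// FdAcquire/FdRelease model happens-before edges between fd operations: the
// interceptors call FdAcquire for read-like calls (e.g. read) and FdRelease
// for write-like calls (e.g. write), so with io_sync >= 1 data published
// before a write on the fd is visible after the corresponding read. Both
// also issue a shadow read of the descriptor itself to catch races with
// close().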
void FdAcquire(ThreadState *thr, uptr pc, int fd) {
  if (bogusfd(fd))
    return;
  FdDesc *d = fddesc(thr, pc, fd);
  FdSync *s = d->sync;
  DPrintf("#%d: FdAcquire(%d) -> %p\n", thr->tid, fd, s);
  MemoryRead(thr, pc, (uptr)d, kSizeLog8);
  if (s)
    Acquire(thr, pc, (uptr)s);
}

void FdRelease(ThreadState *thr, uptr pc, int fd) {
  if (bogusfd(fd))
    return;
  FdDesc *d = fddesc(thr, pc, fd);
  FdSync *s = d->sync;
  DPrintf("#%d: FdRelease(%d) -> %p\n", thr->tid, fd, s);
  MemoryRead(thr, pc, (uptr)d, kSizeLog8);
  if (s)
    Release(thr, pc, (uptr)s);
}

void FdAccess(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdAccess(%d)\n", thr->tid, fd);
  if (bogusfd(fd))
    return;
  FdDesc *d = fddesc(thr, pc, fd);
  MemoryRead(thr, pc, (uptr)d, kSizeLog8);
}

void FdClose(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdClose(%d)\n", thr->tid, fd);
  if (bogusfd(fd))
    return;
  FdDesc *d = fddesc(thr, pc, fd);
  // To catch races between fd usage and close.
  MemoryWrite(thr, pc, (uptr)d, kSizeLog8);
  // We need to clear it, because if we do not intercept some call that
  // creates an fd, we will hit false positives.
  MemoryResetRange(thr, pc, (uptr)d, 8);
  unref(thr, pc, d->sync);
  d->sync = 0;
  d->creation_tid = 0;
  d->creation_stack = 0;
}
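
// The Fd*Create hooks below differ only in the sync object they attach:
// plain files share the static filesync and sockets the static socksync,
// FdEventCreate and FdPollCreate allocate a fresh FdSync per descriptor,
// and FdSignalCreate/FdInotifyCreate attach none.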
void FdFileCreate(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdFileCreate(%d)\n", thr->tid, fd);
  if (bogusfd(fd))
    return;
  init(thr, pc, fd, &fdctx.filesync);
}

void FdDup(ThreadState *thr, uptr pc, int oldfd, int newfd) {
  DPrintf("#%d: FdDup(%d, %d)\n", thr->tid, oldfd, newfd);
  if (bogusfd(oldfd) || bogusfd(newfd))
    return;
  // Ignore the case when the user dups a not yet connected socket.
  FdDesc *od = fddesc(thr, pc, oldfd);
  MemoryRead(thr, pc, (uptr)od, kSizeLog8);
  FdClose(thr, pc, newfd);
  init(thr, pc, newfd, ref(od->sync));
}
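
// Both ends of a pipe share one freshly allocated FdSync, so a release on
// the write end synchronizes with an acquire on the read end.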
void FdPipeCreate(ThreadState *thr, uptr pc, int rfd, int wfd) {
  DPrintf("#%d: FdCreatePipe(%d, %d)\n", thr->tid, rfd, wfd);
  FdSync *s = allocsync();
  init(thr, pc, rfd, ref(s));
  init(thr, pc, wfd, ref(s));
  unref(thr, pc, s);
}

void FdEventCreate(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdEventCreate(%d)\n", thr->tid, fd);
  if (bogusfd(fd))
    return;
  init(thr, pc, fd, allocsync());
}

void FdSignalCreate(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdSignalCreate(%d)\n", thr->tid, fd);
  if (bogusfd(fd))
    return;
  init(thr, pc, fd, 0);
}

void FdInotifyCreate(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdInotifyCreate(%d)\n", thr->tid, fd);
  if (bogusfd(fd))
    return;
  init(thr, pc, fd, 0);
}

void FdPollCreate(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdPollCreate(%d)\n", thr->tid, fd);
  if (bogusfd(fd))
    return;
  init(thr, pc, fd, allocsync());
}

void FdSocketCreate(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdSocketCreate(%d)\n", thr->tid, fd);
  if (bogusfd(fd))
    return;
  // It can be a UDP socket.
  init(thr, pc, fd, &fdctx.socksync);
}

void FdSocketAccept(ThreadState *thr, uptr pc, int fd, int newfd) {
  DPrintf("#%d: FdSocketAccept(%d, %d)\n", thr->tid, fd, newfd);
  if (bogusfd(fd))
    return;
  // Synchronize connect->accept.
  Acquire(thr, pc, (uptr)&fdctx.connectsync);
  init(thr, pc, newfd, &fdctx.socksync);
}

void FdSocketConnecting(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdSocketConnecting(%d)\n", thr->tid, fd);
  if (bogusfd(fd))
    return;
  // Synchronize connect->accept.
  Release(thr, pc, (uptr)&fdctx.connectsync);
}

void FdSocketConnect(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdSocketConnect(%d)\n", thr->tid, fd);
  if (bogusfd(fd))
    return;
  init(thr, pc, fd, &fdctx.socksync);
}
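
// File2addr/Dir2addr provide stable addresses intended as synchronization
// points for operations identified by path rather than by fd; currently
// every path collapses to one static variable per function.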
uptr File2addr(char *path) {
  (void)path;
  static u64 addr;
  return (uptr)&addr;
}

uptr Dir2addr(char *path) {
  (void)path;
  static u64 addr;
  return (uptr)&addr;
}

}  // namespace __tsan