2 /*--------------------------------------------------------------------*/
3 /*--- Helgrind: a Valgrind tool for detecting errors ---*/
4 /*--- in threaded programs. hg_main.c ---*/
5 /*--------------------------------------------------------------------*/
7 /*
8 This file is part of Helgrind, a Valgrind tool for detecting errors
9 in threaded programs.
11 Copyright (C) 2007-2017 OpenWorks LLP
12 info@open-works.co.uk
14 Copyright (C) 2007-2017 Apple, Inc.
16 This program is free software; you can redistribute it and/or
17 modify it under the terms of the GNU General Public License as
18 published by the Free Software Foundation; either version 2 of the
19 License, or (at your option) any later version.
21 This program is distributed in the hope that it will be useful, but
22 WITHOUT ANY WARRANTY; without even the implied warranty of
23 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
24 General Public License for more details.
26 You should have received a copy of the GNU General Public License
27 along with this program; if not, see <http://www.gnu.org/licenses/>.
29 The GNU General Public License is contained in the file COPYING.
31 Neither the names of the U.S. Department of Energy nor the
32 University of California nor the names of its contributors may be
33 used to endorse or promote products derived from this software
34 without prior written permission.
37 #include "pub_tool_basics.h"
38 #include "pub_tool_gdbserver.h"
39 #include "pub_tool_libcassert.h"
40 #include "pub_tool_libcbase.h"
41 #include "pub_tool_libcprint.h"
42 #include "pub_tool_threadstate.h"
43 #include "pub_tool_tooliface.h"
44 #include "pub_tool_hashtable.h"
45 #include "pub_tool_replacemalloc.h"
46 #include "pub_tool_machine.h"
47 #include "pub_tool_options.h"
48 #include "pub_tool_xarray.h"
49 #include "pub_tool_stacktrace.h"
50 #include "pub_tool_wordfm.h"
51 #include "pub_tool_debuginfo.h" // VG_(find_seginfo), VG_(seginfo_soname)
52 #include "pub_tool_redir.h" // sonames for the dynamic linkers
53 #include "pub_tool_vki.h" // VKI_PAGE_SIZE
54 #include "pub_tool_libcproc.h"
55 #include "pub_tool_aspacemgr.h" // VG_(am_is_valid_for_client)
56 #include "pub_tool_poolalloc.h"
57 #include "pub_tool_addrinfo.h"
58 #include "pub_tool_xtree.h"
59 #include "pub_tool_xtmemory.h"
61 #include "hg_basics.h"
62 #include "hg_wordset.h"
63 #include "hg_addrdescr.h"
64 #include "hg_lock_n_thread.h"
65 #include "hg_errors.h"
67 #include "libhb.h"
69 #include "helgrind.h"
72 // FIXME: new_mem_w_tid ignores the supplied tid. (wtf?!)
74 // FIXME: when client destroys a lock or a CV, remove these
75 // from our mappings, so that the associated SO can be freed up
77 /*----------------------------------------------------------------*/
78 /*--- ---*/
79 /*----------------------------------------------------------------*/
81 /* Note this needs to be compiled with -fno-strict-aliasing, since it
82 contains a whole bunch of calls to lookupFM etc which cast between
83 Word and pointer types. gcc rightly complains this breaks ANSI C
84 strict aliasing rules, at -O2. No complaints at -O, but -O2 gives
85 worthwhile performance benefits over -O.
88 // FIXME what is supposed to happen to locks in memory which
89 // is relocated as a result of client realloc?
91 // FIXME put referencing ThreadId into Thread and get
92 // rid of the slow reverse mapping function.
94 // FIXME accesses to NoAccess areas: change state to Excl?
96 // FIXME report errors for accesses of NoAccess memory?
98 // FIXME pth_cond_wait/timedwait wrappers. Even if these fail,
99 // the thread still holds the lock.
101 /* ------------ Debug/trace options ------------ */
103 // 0 for silent, 1 for some stuff, 2 for lots of stuff
104 #define SHOW_EVENTS 0
107 static void all__sanity_check ( const HChar* who ); /* fwds */
109 #define HG_CLI__DEFAULT_MALLOC_REDZONE_SZB 16 /* let's say */
111 // 0 for none, 1 for dump at end of run
112 #define SHOW_DATA_STRUCTURES 0
115 /* ------------ Misc comments ------------ */
117 // FIXME: don't hardwire initial entries for root thread.
118 // Instead, let the pre_thread_ll_create handler do this.
121 /*----------------------------------------------------------------*/
122 /*--- Primary data structures ---*/
123 /*----------------------------------------------------------------*/
125 /* Admin linked list of Threads */
126 static Thread* admin_threads = NULL;
127 Thread* get_admin_threads ( void ) { return admin_threads; }
129 /* Admin double linked list of Locks */
130 /* We need a double linked list to properly and efficiently
131 handle del_LockN. */
132 static Lock* admin_locks = NULL;
134 /* Mapping table for core ThreadIds to Thread* */
135 static Thread** map_threads = NULL; /* Array[VG_N_THREADS] of Thread* */
137 /* Mapping table for lock guest addresses to Lock* */
138 static WordFM* map_locks = NULL; /* WordFM LockAddr Lock* */
140 /* The word-set universes for lock sets. */
141 static WordSetU* univ_lsets = NULL; /* sets of Lock* */
142 static WordSetU* univ_laog = NULL; /* sets of Lock*, for LAOG */
143 static Int next_gc_univ_laog = 1;
144 /* univ_laog will be garbage collected when the number of elements in
145    univ_laog is >= next_gc_univ_laog. */
147 /* Allow libhb to get at the universe of locksets stored
148 here. Sigh. */
149 WordSetU* HG_(get_univ_lsets) ( void ) { return univ_lsets; }
151 /* Allow libhb to get at the list of locks stored here. Ditto
152 sigh. */
153 Lock* HG_(get_admin_locks) ( void ) { return admin_locks; }
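/* Editor's note: the sketch below is illustrative only and is not part
   of the original file.  It shows how the primary structures above fit
   together: admin_threads is singly linked via ->admin, admin_locks is
   doubly linked via ->admin_next/->admin_prev, map_threads is indexed
   by core ThreadId, and map_locks maps a lock's guest address to its
   Lock*.  The function name is invented and the block is compiled out,
   so it has no effect on the tool. */
#if 0
static void example__walk_primary_structures ( void )
{
   Thread* t;
   Lock*   lk;
   Int     i;
   UWord   nThreads = 0, nLocks = 0;
   /* Walk the Thread admin list. */
   for (t = admin_threads; t; t = t->admin)
      nThreads++;
   /* Walk the Lock admin list; every Lock must also be findable in
      map_locks under its guest address. */
   for (lk = admin_locks; lk; lk = lk->admin_next) {
      Lock* same = NULL;
      nLocks++;
      VG_(lookupFM)( map_locks, NULL, (UWord*)&same, (UWord)lk->guestaddr );
      tl_assert(same == lk);
   }
   /* map_threads maps live core ThreadIds back to their Thread*. */
   for (i = 1; i < VG_N_THREADS; i++)
      if (map_threads[i])
         tl_assert(map_threads[i]->coretid == (ThreadId)i);
   VG_(printf)("threads=%lu locks=%lu\n", nThreads, nLocks);
}
#endif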
156 /*----------------------------------------------------------------*/
157 /*--- Simple helpers for the data structures ---*/
158 /*----------------------------------------------------------------*/
160 static UWord stats__lockN_acquires = 0;
161 static UWord stats__lockN_releases = 0;
163 #if defined(VGO_solaris)
164 Bool HG_(clo_ignore_thread_creation) = True;
165 #else
166 Bool HG_(clo_ignore_thread_creation) = False;
167 #endif /* VGO_solaris */
169 static
170 ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr ); /*fwds*/
172 /* --------- Constructors --------- */
174 static Thread* mk_Thread ( Thr* hbthr ) {
175 static Int indx = 1;
176 Thread* thread = HG_(zalloc)( "hg.mk_Thread.1", sizeof(Thread) );
177 thread->locksetA = HG_(emptyWS)( univ_lsets );
178 thread->locksetW = HG_(emptyWS)( univ_lsets );
179 thread->magic = Thread_MAGIC;
180 thread->hbthr = hbthr;
181 thread->coretid = VG_INVALID_THREADID;
182 thread->created_at = NULL;
183 thread->announced = False;
184 thread->first_sp_delta = 0;
185 thread->errmsg_index = indx++;
186 thread->admin = admin_threads;
187 thread->synchr_nesting = 0;
188 thread->pthread_create_nesting_level = 0;
189 #if defined(VGO_solaris)
190 thread->bind_guard_flag = 0;
191 #endif /* VGO_solaris */
193 admin_threads = thread;
194 return thread;
197 // Make a new lock which is unlocked (hence ownerless)
198 // and insert the new lock in admin_locks double linked list.
199 static Lock* mk_LockN ( LockKind kind, Addr guestaddr ) {
200 static ULong unique = 0;
201 Lock* lock = HG_(zalloc)( "hg.mk_Lock.1", sizeof(Lock) );
202 /* begin: add to double linked list */
203 if (admin_locks)
204 admin_locks->admin_prev = lock;
205 lock->admin_next = admin_locks;
206 lock->admin_prev = NULL;
207 admin_locks = lock;
208 /* end: add */
209 lock->unique = unique++;
210 lock->magic = LockN_MAGIC;
211 lock->appeared_at = NULL;
212 lock->acquired_at = NULL;
213 lock->hbso = libhb_so_alloc();
214 lock->guestaddr = guestaddr;
215 lock->kind = kind;
216 lock->heldW = False;
217 lock->heldBy = NULL;
218 tl_assert(HG_(is_sane_LockN)(lock));
219 return lock;
222 /* Release storage for a Lock. Also release storage in .heldBy, if
223 any. Removes from admin_locks double linked list. */
224 static void del_LockN ( Lock* lk )
226 tl_assert(HG_(is_sane_LockN)(lk));
227 tl_assert(lk->hbso);
228 libhb_so_dealloc(lk->hbso);
229 if (lk->heldBy)
230 VG_(deleteBag)( lk->heldBy );
231 /* begin: del lock from double linked list */
232 if (lk == admin_locks) {
233 tl_assert(lk->admin_prev == NULL);
234 if (lk->admin_next)
235 lk->admin_next->admin_prev = NULL;
236 admin_locks = lk->admin_next;
238 else {
239 tl_assert(lk->admin_prev != NULL);
240 lk->admin_prev->admin_next = lk->admin_next;
241 if (lk->admin_next)
242 lk->admin_next->admin_prev = lk->admin_prev;
244 /* end: del */
245 VG_(memset)(lk, 0xAA, sizeof(*lk));
246 HG_(free)(lk);
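/* Editor's note: illustrative sketch only, not part of the original
   file.  It spells out the admin_locks list discipline used by
   mk_LockN/del_LockN above: new locks are pushed on the head of the
   doubly linked list, and deletion unlinks from the head or from the
   middle, after which the storage is poisoned with 0xAA and freed.
   The function name is invented and the block is compiled out. */
#if 0
static void example__lock_admin_list_lifecycle ( void )
{
   Lock* a = mk_LockN( LK_nonRec, (Addr)0x1000 );
   Lock* b = mk_LockN( LK_rdwr,   (Addr)0x2000 );
   /* Head insertion: the most recently made lock is the list head. */
   tl_assert(admin_locks == b);
   tl_assert(b->admin_next == a && a->admin_prev == b);
   /* Deleting the head moves admin_locks forward ... */
   del_LockN(b);
   tl_assert(admin_locks == a && a->admin_prev == NULL);
   /* ... and deleting the last remaining lock empties the list. */
   del_LockN(a);
   tl_assert(admin_locks == NULL);
}
#endif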
249 /* Update 'lk' to reflect that 'thr' now has a write-acquisition of
250 it. This is done strictly: only combinations resulting from
251 correct program and libpthread behaviour are allowed. */
252 static void lockN_acquire_writer ( Lock* lk, Thread* thr )
254 tl_assert(HG_(is_sane_LockN)(lk));
255 tl_assert(HG_(is_sane_Thread)(thr));
257 stats__lockN_acquires++;
259 /* EXPOSITION only */
260 /* We need to keep recording snapshots of where the lock was
261 acquired, so as to produce better lock-order error messages. */
262 if (lk->acquired_at == NULL) {
263 ThreadId tid;
264 tl_assert(lk->heldBy == NULL);
265 tid = map_threads_maybe_reverse_lookup_SLOW(thr);
266 lk->acquired_at
267 = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
268 } else {
269 tl_assert(lk->heldBy != NULL);
271 /* end EXPOSITION only */
273 switch (lk->kind) {
274 case LK_nonRec:
275 case_LK_nonRec:
276 tl_assert(lk->heldBy == NULL); /* can't w-lock recursively */
277 tl_assert(!lk->heldW);
278 lk->heldW = True;
279 lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNaw.1", HG_(free) );
280 VG_(addToBag)( lk->heldBy, (UWord)thr );
281 break;
282 case LK_mbRec:
283 if (lk->heldBy == NULL)
284 goto case_LK_nonRec;
285 /* 2nd and subsequent locking of a lock by its owner */
286 tl_assert(lk->heldW);
287 /* assert: lk is only held by one thread .. */
288 tl_assert(VG_(sizeUniqueBag)(lk->heldBy) == 1);
289 /* assert: .. and that thread is 'thr'. */
290 tl_assert(VG_(elemBag)(lk->heldBy, (UWord)thr)
291 == VG_(sizeTotalBag)(lk->heldBy));
292 VG_(addToBag)(lk->heldBy, (UWord)thr);
293 break;
294 case LK_rdwr:
295 tl_assert(lk->heldBy == NULL && !lk->heldW); /* must be unheld */
296 goto case_LK_nonRec;
297 default:
298 tl_assert(0);
300 tl_assert(HG_(is_sane_LockN)(lk));
303 static void lockN_acquire_reader ( Lock* lk, Thread* thr )
305 tl_assert(HG_(is_sane_LockN)(lk));
306 tl_assert(HG_(is_sane_Thread)(thr));
307 /* can only add reader to a reader-writer lock. */
308 tl_assert(lk->kind == LK_rdwr);
309 /* lk must be free or already r-held. */
310 tl_assert(lk->heldBy == NULL
311 || (lk->heldBy != NULL && !lk->heldW));
313 stats__lockN_acquires++;
315 /* EXPOSITION only */
316 /* We need to keep recording snapshots of where the lock was
317 acquired, so as to produce better lock-order error messages. */
318 if (lk->acquired_at == NULL) {
319 ThreadId tid;
320 tl_assert(lk->heldBy == NULL);
321 tid = map_threads_maybe_reverse_lookup_SLOW(thr);
322 lk->acquired_at
323 = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
324 } else {
325 tl_assert(lk->heldBy != NULL);
327 /* end EXPOSITION only */
329 if (lk->heldBy) {
330 VG_(addToBag)(lk->heldBy, (UWord)thr);
331 } else {
332 lk->heldW = False;
333 lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNar.1", HG_(free) );
334 VG_(addToBag)( lk->heldBy, (UWord)thr );
336 tl_assert(!lk->heldW);
337 tl_assert(HG_(is_sane_LockN)(lk));
340 /* Update 'lk' to reflect a release of it by 'thr'. This is done
341 strictly: only combinations resulting from correct program and
342 libpthread behaviour are allowed. */
344 static void lockN_release ( Lock* lk, Thread* thr )
346 Bool b;
347 tl_assert(HG_(is_sane_LockN)(lk));
348 tl_assert(HG_(is_sane_Thread)(thr));
349 /* lock must be held by someone */
350 tl_assert(lk->heldBy);
351 stats__lockN_releases++;
352 /* Remove it from the holder set */
353 b = VG_(delFromBag)(lk->heldBy, (UWord)thr);
354 /* thr must actually have been a holder of lk */
355 tl_assert(b);
356 /* normalise */
357 tl_assert(lk->acquired_at);
358 if (VG_(isEmptyBag)(lk->heldBy)) {
359 VG_(deleteBag)(lk->heldBy);
360 lk->heldBy = NULL;
361 lk->heldW = False;
362 lk->acquired_at = NULL;
364 tl_assert(HG_(is_sane_LockN)(lk));
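/* Editor's note: illustrative sketch only, not part of the original
   file.  It shows the state sequences the three functions above
   accept: an LK_mbRec lock may be re-acquired by its owner (heldBy
   records the multiplicity), an LK_rdwr lock may be r-held several
   times, and lockN_release clears heldBy/heldW/acquired_at once the
   holder bag becomes empty.  'thr' is assumed to be a valid,
   registered Thread*, e.g. obtained via map_threads_lookup; the
   function name is invented and the block is compiled out. */
#if 0
static void example__lock_state_transitions ( Thread* thr )
{
   Lock* m  = mk_LockN( LK_mbRec, (Addr)0x3000 );
   Lock* rw = mk_LockN( LK_rdwr,  (Addr)0x4000 );

   /* Recursive mutex: two acquisitions, two releases. */
   lockN_acquire_writer( m, thr );     /* heldW, multiplicity 1 */
   lockN_acquire_writer( m, thr );     /* still heldW, multiplicity 2 */
   lockN_release( m, thr );            /* back to multiplicity 1 */
   lockN_release( m, thr );            /* unheld again */
   tl_assert(m->heldBy == NULL && !m->heldW);

   /* Reader-writer lock: repeated read holds, never heldW. */
   lockN_acquire_reader( rw, thr );
   lockN_acquire_reader( rw, thr );
   tl_assert(!rw->heldW);
   lockN_release( rw, thr );
   lockN_release( rw, thr );
   tl_assert(rw->heldBy == NULL);

   del_LockN(m);
   del_LockN(rw);
}
#endif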
367 static void remove_Lock_from_locksets_of_all_owning_Threads( Lock* lk )
369 Thread* thr;
370 if (!lk->heldBy) {
371 tl_assert(!lk->heldW);
372 return;
374 /* for each thread that holds this lock do ... */
375 VG_(initIterBag)( lk->heldBy );
376 while (VG_(nextIterBag)( lk->heldBy, (UWord*)&thr, NULL )) {
377 tl_assert(HG_(is_sane_Thread)(thr));
378 tl_assert(HG_(elemWS)( univ_lsets,
379 thr->locksetA, (UWord)lk ));
380 thr->locksetA
381 = HG_(delFromWS)( univ_lsets, thr->locksetA, (UWord)lk );
383 if (lk->heldW) {
384 tl_assert(HG_(elemWS)( univ_lsets,
385 thr->locksetW, (UWord)lk ));
386 thr->locksetW
387 = HG_(delFromWS)( univ_lsets, thr->locksetW, (UWord)lk );
390 VG_(doneIterBag)( lk->heldBy );
394 /*----------------------------------------------------------------*/
395 /*--- Print out the primary data structures ---*/
396 /*----------------------------------------------------------------*/
398 #define PP_THREADS (1<<1)
399 #define PP_LOCKS (1<<2)
400 #define PP_ALL (PP_THREADS | PP_LOCKS)
403 static const Int sHOW_ADMIN = 0;
405 static void space ( Int n )
407 Int i;
408 HChar spaces[128+1];
409 tl_assert(n >= 0 && n < 128);
410 if (n == 0)
411 return;
412 for (i = 0; i < n; i++)
413 spaces[i] = ' ';
414 spaces[i] = 0;
415 tl_assert(i < 128+1);
416 VG_(printf)("%s", spaces);
419 static void pp_Thread ( Int d, Thread* t )
421 space(d+0); VG_(printf)("Thread %p {\n", t);
422 if (sHOW_ADMIN) {
423 space(d+3); VG_(printf)("admin %p\n", t->admin);
424 space(d+3); VG_(printf)("magic 0x%x\n", (UInt)t->magic);
426 space(d+3); VG_(printf)("locksetA %d\n", (Int)t->locksetA);
427 space(d+3); VG_(printf)("locksetW %d\n", (Int)t->locksetW);
428 space(d+0); VG_(printf)("}\n");
431 static void pp_admin_threads ( Int d )
433 Int i, n;
434 Thread* t;
435 for (n = 0, t = admin_threads; t; n++, t = t->admin) {
436 /* nothing */
438 space(d); VG_(printf)("admin_threads (%d records) {\n", n);
439 for (i = 0, t = admin_threads; t; i++, t = t->admin) {
440 if (0) {
441 space(n);
442 VG_(printf)("admin_threads record %d of %d:\n", i, n);
444 pp_Thread(d+3, t);
446 space(d); VG_(printf)("}\n");
449 static void pp_map_threads ( Int d )
451 Int i, n = 0;
452 space(d); VG_(printf)("map_threads ");
453 for (i = 0; i < VG_N_THREADS; i++) {
454 if (map_threads[i] != NULL)
455 n++;
457 VG_(printf)("(%d entries) {\n", n);
458 for (i = 0; i < VG_N_THREADS; i++) {
459 if (map_threads[i] == NULL)
460 continue;
461 space(d+3);
462 VG_(printf)("coretid %d -> Thread %p\n", i, map_threads[i]);
464 space(d); VG_(printf)("}\n");
467 static const HChar* show_LockKind ( LockKind lkk ) {
468 switch (lkk) {
469 case LK_mbRec: return "mbRec";
470 case LK_nonRec: return "nonRec";
471 case LK_rdwr: return "rdwr";
472 default: tl_assert(0);
476 /* Pretty Print lock lk.
477 if show_lock_addrdescr, describes the (guest) lock address.
478 (this description will be more complete with --read-var-info=yes).
479 if show_internal_data, shows also helgrind internal information.
480 d is the level at which output is indented. */
481 static void pp_Lock ( Int d, Lock* lk,
482 Bool show_lock_addrdescr,
483 Bool show_internal_data)
485 // FIXME PW EPOCH should use the epoch of the allocated_at ec.
486 const DiEpoch cur_ep = VG_(current_DiEpoch)();
487 space(d+0);
488 if (show_internal_data)
489 VG_(printf)("Lock %p (ga %#lx) {\n", lk, lk->guestaddr);
490 else
491 VG_(printf)("Lock ga %#lx {\n", lk->guestaddr);
492 if (!show_lock_addrdescr
493 || !HG_(get_and_pp_addrdescr) (cur_ep, (Addr) lk->guestaddr))
494 VG_(printf)("\n");
496 if (sHOW_ADMIN) {
497 space(d+3); VG_(printf)("admin_n %p\n", lk->admin_next);
498 space(d+3); VG_(printf)("admin_p %p\n", lk->admin_prev);
499 space(d+3); VG_(printf)("magic 0x%x\n", (UInt)lk->magic);
501 if (show_internal_data) {
502 space(d+3); VG_(printf)("unique %llu\n", lk->unique);
504 space(d+3); VG_(printf)("kind %s\n", show_LockKind(lk->kind));
505 if (show_internal_data) {
506 space(d+3); VG_(printf)("heldW %s\n", lk->heldW ? "yes" : "no");
508 if (show_internal_data) {
509 space(d+3); VG_(printf)("heldBy %p", lk->heldBy);
511 if (lk->heldBy) {
512 Thread* thr;
513 UWord count;
514 VG_(printf)(" { ");
515 VG_(initIterBag)( lk->heldBy );
516 while (VG_(nextIterBag)( lk->heldBy, (UWord*)&thr, &count )) {
517 if (show_internal_data)
518 VG_(printf)("%lu:%p ", count, thr);
519 else {
520 VG_(printf)("%c%lu:thread #%d ",
521 lk->heldW ? 'W' : 'R',
522 count, thr->errmsg_index);
523 if (thr->coretid == VG_INVALID_THREADID)
524 VG_(printf)("tid (exited) ");
525 else
526 VG_(printf)("tid %u ", thr->coretid);
530 VG_(doneIterBag)( lk->heldBy );
531 VG_(printf)("}\n");
533 space(d+0); VG_(printf)("}\n");
536 static void pp_admin_locks ( Int d )
538 Int i, n;
539 Lock* lk;
540 for (n = 0, lk = admin_locks; lk; n++, lk = lk->admin_next) {
541 /* nothing */
543 space(d); VG_(printf)("admin_locks (%d records) {\n", n);
544 for (i = 0, lk = admin_locks; lk; i++, lk = lk->admin_next) {
545 if (0) {
546 space(n);
547 VG_(printf)("admin_locks record %d of %d:\n", i, n);
549 pp_Lock(d+3, lk,
550 False /* show_lock_addrdescr */,
551 True /* show_internal_data */);
553 space(d); VG_(printf)("}\n");
556 static void pp_map_locks ( Int d)
558 void* gla;
559 Lock* lk;
560 space(d); VG_(printf)("map_locks (%d entries) {\n",
561 (Int)VG_(sizeFM)( map_locks ));
562 VG_(initIterFM)( map_locks );
563 while (VG_(nextIterFM)( map_locks, (UWord*)&gla,
564 (UWord*)&lk )) {
565 space(d+3);
566 VG_(printf)("guest %p -> Lock %p\n", gla, lk);
568 VG_(doneIterFM)( map_locks );
569 space(d); VG_(printf)("}\n");
572 static void pp_everything ( Int flags, const HChar* caller )
574 Int d = 0;
575 VG_(printf)("\n");
576 VG_(printf)("All_Data_Structures (caller = \"%s\") {\n", caller);
577 if (flags & PP_THREADS) {
578 VG_(printf)("\n");
579 pp_admin_threads(d+3);
580 VG_(printf)("\n");
581 pp_map_threads(d+3);
583 if (flags & PP_LOCKS) {
584 VG_(printf)("\n");
585 pp_admin_locks(d+3);
586 VG_(printf)("\n");
587 pp_map_locks(d+3);
590 VG_(printf)("\n");
591 VG_(printf)("}\n");
592 VG_(printf)("\n");
595 #undef SHOW_ADMIN
598 /*----------------------------------------------------------------*/
599 /*--- Initialise the primary data structures ---*/
600 /*----------------------------------------------------------------*/
602 static void initialise_data_structures ( Thr* hbthr_root )
604 Thread* thr;
605 WordSetID wsid;
607 /* Get everything initialised and zeroed. */
608 tl_assert(admin_threads == NULL);
609 tl_assert(admin_locks == NULL);
611 tl_assert(map_threads == NULL);
612 map_threads = HG_(zalloc)( "hg.ids.1", VG_N_THREADS * sizeof(Thread*) );
614 tl_assert(sizeof(Addr) == sizeof(UWord));
615 tl_assert(map_locks == NULL);
616 map_locks = VG_(newFM)( HG_(zalloc), "hg.ids.2", HG_(free),
617 NULL/*unboxed Word cmp*/);
619 tl_assert(univ_lsets == NULL);
620 univ_lsets = HG_(newWordSetU)( HG_(zalloc), "hg.ids.4", HG_(free),
621 8/*cacheSize*/ );
622 tl_assert(univ_lsets != NULL);
623 /* Ensure that univ_lsets is non-empty, with lockset zero being the
624 empty lockset. hg_errors.c relies on the assumption that
625 lockset number zero in univ_lsets is always valid. */
626 wsid = HG_(emptyWS)(univ_lsets);
627 tl_assert(wsid == 0);
629 tl_assert(univ_laog == NULL);
630 if (HG_(clo_track_lockorders)) {
631 univ_laog = HG_(newWordSetU)( HG_(zalloc), "hg.ids.5 (univ_laog)",
632 HG_(free), 24/*cacheSize*/ );
633 tl_assert(univ_laog != NULL);
636 /* Set up entries for the root thread */
637 // FIXME: this assumes that the first real ThreadId is 1
639 /* a Thread for the new thread ... */
640 thr = mk_Thread(hbthr_root);
641 thr->coretid = 1; /* FIXME: hardwires an assumption about the
642 identity of the root thread. */
643 tl_assert( libhb_get_Thr_hgthread(hbthr_root) == NULL );
644 libhb_set_Thr_hgthread(hbthr_root, thr);
646 /* and bind it in the thread-map table. */
647 tl_assert(HG_(is_sane_ThreadId)(thr->coretid));
648 tl_assert(thr->coretid != VG_INVALID_THREADID);
650 map_threads[thr->coretid] = thr;
652 tl_assert(VG_INVALID_THREADID == 0);
654 all__sanity_check("initialise_data_structures");
658 /*----------------------------------------------------------------*/
659 /*--- map_threads :: array[core-ThreadId] of Thread* ---*/
660 /*----------------------------------------------------------------*/
662 /* Doesn't assert if the relevant map_threads entry is NULL. */
663 static Thread* map_threads_maybe_lookup ( ThreadId coretid )
665 Thread* thr;
666 tl_assert( HG_(is_sane_ThreadId)(coretid) );
667 thr = map_threads[coretid];
668 return thr;
671 /* Asserts if the relevant map_threads entry is NULL. */
672 static inline Thread* map_threads_lookup ( ThreadId coretid )
674 Thread* thr;
675 tl_assert( HG_(is_sane_ThreadId)(coretid) );
676 thr = map_threads[coretid];
677 tl_assert(thr);
678 return thr;
681 /* Do a reverse lookup. Does not assert if 'thr' is not found in
682 map_threads. */
683 static ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr )
685 ThreadId tid;
686 tl_assert(HG_(is_sane_Thread)(thr));
687 /* Check nobody used the invalid-threadid slot */
688 tl_assert(VG_INVALID_THREADID < VG_N_THREADS);
689 tl_assert(map_threads[VG_INVALID_THREADID] == NULL);
690 tid = thr->coretid;
691 tl_assert(HG_(is_sane_ThreadId)(tid));
692 return tid;
695 /* Do a reverse lookup. Warning: POTENTIALLY SLOW. Asserts if 'thr'
696 is not found in map_threads. */
697 static ThreadId map_threads_reverse_lookup_SLOW ( Thread* thr )
699 ThreadId tid = map_threads_maybe_reverse_lookup_SLOW( thr );
700 tl_assert(tid != VG_INVALID_THREADID);
701 tl_assert(map_threads[tid]);
702 tl_assert(map_threads[tid]->coretid == tid);
703 return tid;
706 static void map_threads_delete ( ThreadId coretid )
708 Thread* thr;
709 tl_assert(coretid != 0);
710 tl_assert( HG_(is_sane_ThreadId)(coretid) );
711 thr = map_threads[coretid];
712 tl_assert(thr);
713 map_threads[coretid] = NULL;
716 static void HG_(thread_enter_synchr)(Thread *thr) {
717 tl_assert(thr->synchr_nesting >= 0);
718 #if defined(VGO_solaris)
719 thr->synchr_nesting += 1;
720 #endif /* VGO_solaris */
723 static void HG_(thread_leave_synchr)(Thread *thr) {
724 #if defined(VGO_solaris)
725 thr->synchr_nesting -= 1;
726 #endif /* VGO_solaris */
727 tl_assert(thr->synchr_nesting >= 0);
730 static void HG_(thread_enter_pthread_create)(Thread *thr) {
731 tl_assert(thr->pthread_create_nesting_level >= 0);
732 thr->pthread_create_nesting_level += 1;
735 static void HG_(thread_leave_pthread_create)(Thread *thr) {
736 tl_assert(thr->pthread_create_nesting_level > 0);
737 thr->pthread_create_nesting_level -= 1;
740 static Int HG_(get_pthread_create_nesting_level)(ThreadId tid) {
741 Thread *thr = map_threads_maybe_lookup(tid);
742 return thr->pthread_create_nesting_level;
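/* Editor's note: illustrative sketch only, not part of the original
   file.  The helpers above bracket regions of execution:
   thread_enter/leave_synchr around an intercepted synchronisation
   call (the counter only changes on Solaris), and
   thread_enter/leave_pthread_create around thread creation, which the
   evh__new_mem* handlers below consult so that memory allocated
   during creation can be marked Untracked.  It assumes 'thr' starts
   with both counters at zero; the function name is invented and the
   block is compiled out. */
#if 0
static void example__nesting_brackets ( Thread* thr )
{
   HG_(thread_enter_pthread_create)(thr);
   tl_assert(thr->pthread_create_nesting_level == 1);
   /* ... memory allocated here would be made Untracked ... */
   HG_(thread_leave_pthread_create)(thr);

   HG_(thread_enter_synchr)(thr);
   /* ... body of an intercepted synchronisation operation ... */
   HG_(thread_leave_synchr)(thr);
   tl_assert(thr->synchr_nesting == 0);
}
#endif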
745 /*----------------------------------------------------------------*/
746 /*--- map_locks :: WordFM guest-Addr-of-lock Lock* ---*/
747 /*----------------------------------------------------------------*/
749 /* Make sure there is a lock table entry for the given (lock) guest
750 address. If not, create one of the stated 'kind' in unheld state.
751 In any case, return the address of the existing or new Lock. */
752 static
753 Lock* map_locks_lookup_or_create ( LockKind lkk, Addr ga, ThreadId tid )
755 Bool found;
756 Lock* oldlock = NULL;
757 tl_assert(HG_(is_sane_ThreadId)(tid));
758 found = VG_(lookupFM)( map_locks,
759 NULL, (UWord*)&oldlock, (UWord)ga );
760 if (!found) {
761 Lock* lock = mk_LockN(lkk, ga);
762 lock->appeared_at = VG_(record_ExeContext)( tid, 0 );
763 tl_assert(HG_(is_sane_LockN)(lock));
764 VG_(addToFM)( map_locks, (UWord)ga, (UWord)lock );
765 tl_assert(oldlock == NULL);
766 return lock;
767 } else {
768 tl_assert(oldlock != NULL);
769 tl_assert(HG_(is_sane_LockN)(oldlock));
770 tl_assert(oldlock->guestaddr == ga);
771 return oldlock;
775 static Lock* map_locks_maybe_lookup ( Addr ga )
777 Bool found;
778 Lock* lk = NULL;
779 found = VG_(lookupFM)( map_locks, NULL, (UWord*)&lk, (UWord)ga );
780 tl_assert(found ? lk != NULL : lk == NULL);
781 return lk;
784 static void map_locks_delete ( Addr ga )
786 Addr ga2 = 0;
787 Lock* lk = NULL;
788 VG_(delFromFM)( map_locks,
789 (UWord*)&ga2, (UWord*)&lk, (UWord)ga );
790 /* delFromFM produces the val which is being deleted, if it is
791 found. So assert it is non-null; that in effect asserts that we
792 are deleting a (ga, Lock) pair which actually exists. */
793 tl_assert(lk != NULL);
794 tl_assert(ga2 == ga);
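/* Editor's note: illustrative sketch only, not part of the original
   file.  map_locks is keyed by the lock's guest address: the first
   acquisition creates the Lock via lookup_or_create, later operations
   find it with maybe_lookup, and map_locks_delete drops the binding
   (the Lock object itself is freed separately with del_LockN).  'tid'
   is assumed to be a valid core ThreadId and 'guest_lock_addr' a
   client address; the function name is invented and the block is
   compiled out. */
#if 0
static void example__map_locks_usage ( ThreadId tid, Addr guest_lock_addr )
{
   Lock* lk1 = map_locks_lookup_or_create( LK_nonRec, guest_lock_addr, tid );
   Lock* lk2 = map_locks_maybe_lookup( guest_lock_addr );
   tl_assert(lk1 == lk2);           /* same guest address, same Lock */
   map_locks_delete( guest_lock_addr );
   tl_assert(map_locks_maybe_lookup( guest_lock_addr ) == NULL);
   del_LockN(lk1);                  /* release the Lock's own storage */
}
#endif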
799 /*----------------------------------------------------------------*/
800 /*--- Sanity checking the data structures ---*/
801 /*----------------------------------------------------------------*/
803 static UWord stats__sanity_checks = 0;
805 static void laog__sanity_check ( const HChar* who ); /* fwds */
807 /* REQUIRED INVARIANTS:
809 Thread vs Segment/Lock/SecMaps
811 for each t in Threads {
813 // Thread.lockset: each element is really a valid Lock
815 // Thread.lockset: each Lock in set is actually held by that thread
816 for lk in Thread.lockset
817 lk == LockedBy(t)
819 // Thread.csegid is a valid SegmentID
820 // and the associated Segment has .thr == t
824 all thread Locksets are pairwise empty under intersection
825 (that is, no lock is claimed to be held by more than one thread)
826 -- this is guaranteed if all locks in locksets point back to their
827 owner threads
829 Lock vs Thread/Segment/SecMaps
831 for each entry (gla, la) in map_locks
832 gla == la->guest_addr
834 for each lk in Locks {
836 lk->tag is valid
837 lk->guest_addr does not have shadow state NoAccess
838 if lk == LockedBy(t), then t->lockset contains lk
839 if lk == UnlockedBy(segid) then segid is valid SegmentID
840 and can be mapped to a valid Segment(seg)
841 and seg->thr->lockset does not contain lk
842 if lk == UnlockedNew then (no lockset contains lk)
844 secmaps for lk has .mbHasLocks == True
848 Segment vs Thread/Lock/SecMaps
850 the Segment graph is a dag (no cycles)
851 all of the Segment graph must be reachable from the segids
852 mentioned in the Threads
854 for seg in Segments {
856 seg->thr is a sane Thread
860 SecMaps vs Segment/Thread/Lock
862 for sm in SecMaps {
864 sm properly aligned
865 if any shadow word is ShR or ShM then .mbHasShared == True
867 for each Excl(segid) state
868 map_segments_lookup maps to a sane Segment(seg)
869 for each ShM/ShR(tsetid,lsetid) state
870 each lk in lset is a valid Lock
871 each thr in tset is a valid thread, which is non-dead
877 /* Return True iff 'thr' holds 'lk' in some mode. */
878 static Bool thread_is_a_holder_of_Lock ( Thread* thr, Lock* lk )
880 if (lk->heldBy)
881 return VG_(elemBag)( lk->heldBy, (UWord)thr ) > 0;
882 else
883 return False;
886 /* Sanity check Threads, as far as possible */
887 __attribute__((noinline))
888 static void threads__sanity_check ( const HChar* who )
890 #define BAD(_str) do { how = (_str); goto bad; } while (0)
891 const HChar* how = "no error";
892 Thread* thr;
893 WordSetID wsA, wsW;
894 UWord* ls_words;
895 UWord ls_size, i;
896 Lock* lk;
897 for (thr = admin_threads; thr; thr = thr->admin) {
898 if (!HG_(is_sane_Thread)(thr)) BAD("1");
899 wsA = thr->locksetA;
900 wsW = thr->locksetW;
901 // locks held in W mode are a subset of all locks held
902 if (!HG_(isSubsetOf)( univ_lsets, wsW, wsA )) BAD("7");
903 HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, wsA );
904 for (i = 0; i < ls_size; i++) {
905 lk = (Lock*)ls_words[i];
906 // Thread.lockset: each element is really a valid Lock
907 if (!HG_(is_sane_LockN)(lk)) BAD("2");
908 // Thread.lockset: each Lock in set is actually held by that
909 // thread
910 if (!thread_is_a_holder_of_Lock(thr,lk)) BAD("3");
913 return;
914 bad:
915 VG_(printf)("threads__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
916 tl_assert(0);
917 #undef BAD
921 /* Sanity check Locks, as far as possible */
922 __attribute__((noinline))
923 static void locks__sanity_check ( const HChar* who )
925 #define BAD(_str) do { how = (_str); goto bad; } while (0)
926 const HChar* how = "no error";
927 Addr gla;
928 Lock* lk;
929 Int i;
930 // # entries in admin_locks == # entries in map_locks
931 for (i = 0, lk = admin_locks; lk; i++, lk = lk->admin_next)
933 if (i != VG_(sizeFM)(map_locks)) BAD("1");
934 // for each entry (gla, lk) in map_locks
935 // gla == lk->guest_addr
936 VG_(initIterFM)( map_locks );
937 while (VG_(nextIterFM)( map_locks,
938 (UWord*)&gla, (UWord*)&lk )) {
939 if (lk->guestaddr != gla) BAD("2");
941 VG_(doneIterFM)( map_locks );
942 // scan through admin_locks ...
943 for (lk = admin_locks; lk; lk = lk->admin_next) {
944 // lock is sane. Quite comprehensive, also checks that
945 // referenced (holder) threads are sane.
946 if (!HG_(is_sane_LockN)(lk)) BAD("3");
947 // map_locks binds guest address back to this lock
948 if (lk != map_locks_maybe_lookup(lk->guestaddr)) BAD("4");
949 // look at all threads mentioned as holders of this lock. Ensure
950 // this lock is mentioned in their locksets.
951 if (lk->heldBy) {
952 Thread* thr;
953 UWord count;
954 VG_(initIterBag)( lk->heldBy );
955 while (VG_(nextIterBag)( lk->heldBy,
956 (UWord*)&thr, &count )) {
957 // HG_(is_sane_LockN) above ensures these
958 tl_assert(count >= 1);
959 tl_assert(HG_(is_sane_Thread)(thr));
960 if (!HG_(elemWS)(univ_lsets, thr->locksetA, (UWord)lk))
961 BAD("6");
962 // also check the w-only lockset
963 if (lk->heldW
964 && !HG_(elemWS)(univ_lsets, thr->locksetW, (UWord)lk))
965 BAD("7");
966 if ((!lk->heldW)
967 && HG_(elemWS)(univ_lsets, thr->locksetW, (UWord)lk))
968 BAD("8");
970 VG_(doneIterBag)( lk->heldBy );
971 } else {
972 /* lock not held by anybody */
973 if (lk->heldW) BAD("9"); /* should be False if !heldBy */
974 // since lk is unheld, then (no lockset contains lk)
975 // hmm, this is really too expensive to check. Hmm.
979 return;
980 bad:
981 VG_(printf)("locks__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
982 tl_assert(0);
983 #undef BAD
987 static void all_except_Locks__sanity_check ( const HChar* who ) {
988 stats__sanity_checks++;
989 if (0) VG_(printf)("all_except_Locks__sanity_check(%s)\n", who);
990 threads__sanity_check(who);
991 if (HG_(clo_track_lockorders))
992 laog__sanity_check(who);
994 static void all__sanity_check ( const HChar* who ) {
995 all_except_Locks__sanity_check(who);
996 locks__sanity_check(who);
1000 /*----------------------------------------------------------------*/
1001 /*--- Shadow value and address range handlers ---*/
1002 /*----------------------------------------------------------------*/
1004 static void laog__pre_thread_acquires_lock ( Thread*, Lock* ); /* fwds */
1005 //static void laog__handle_lock_deletions ( WordSetID ); /* fwds */
1006 static inline Thread* get_current_Thread ( void ); /* fwds */
1007 __attribute__((noinline))
1008 static void laog__handle_one_lock_deletion ( Lock* lk ); /* fwds */
1011 /* Block-copy states (needed for implementing realloc()). */
1012 /* FIXME this copies shadow memory; it doesn't apply the MSM to it.
1013 Is that a problem? (hence 'scopy' rather than 'ccopy') */
1014 static void shadow_mem_scopy_range ( Thread* thr,
1015 Addr src, Addr dst, SizeT len )
1017 Thr* hbthr = thr->hbthr;
1018 tl_assert(hbthr);
1019 libhb_copy_shadow_state( hbthr, src, dst, len );
1022 static void shadow_mem_cread_range ( Thread* thr, Addr a, SizeT len )
1024 Thr* hbthr = thr->hbthr;
1025 tl_assert(hbthr);
1026 LIBHB_CREAD_N(hbthr, a, len);
1029 static void shadow_mem_cwrite_range ( Thread* thr, Addr a, SizeT len ) {
1030 Thr* hbthr = thr->hbthr;
1031 tl_assert(hbthr);
1032 LIBHB_CWRITE_N(hbthr, a, len);
1035 inline static void shadow_mem_make_New ( Thread* thr, Addr a, SizeT len )
1037 libhb_srange_new( thr->hbthr, a, len );
1040 inline static void shadow_mem_make_NoAccess_NoFX ( Thread* thr, Addr aIN,
1041 SizeT len )
1043 if (0 && len > 500)
1044 VG_(printf)("make NoAccess_NoFX ( %#lx, %lu )\n", aIN, len );
1045 // has no effect (NoFX)
1046 libhb_srange_noaccess_NoFX( thr->hbthr, aIN, len );
1049 inline static void shadow_mem_make_NoAccess_AHAE ( Thread* thr, Addr aIN,
1050 SizeT len)
1052 if (0 && len > 500)
1053 VG_(printf)("make NoAccess_AHAE ( %#lx, %lu )\n", aIN, len );
1054 // Actually Has An Effect (AHAE)
1055 libhb_srange_noaccess_AHAE( thr->hbthr, aIN, len );
1058 inline static void shadow_mem_make_Untracked ( Thread* thr, Addr aIN,
1059 SizeT len )
1061 if (0 && len > 500)
1062 VG_(printf)("make Untracked ( %#lx, %lu )\n", aIN, len );
1063 libhb_srange_untrack( thr->hbthr, aIN, len );
1067 /*----------------------------------------------------------------*/
1068 /*--- Event handlers (evh__* functions) ---*/
1069 /*--- plus helpers (evhH__* functions) ---*/
1070 /*----------------------------------------------------------------*/
1072 /*--------- Event handler helpers (evhH__* functions) ---------*/
1074 /* Create a new segment for 'thr', making it depend (.prev) on its
1075 existing segment, bind together the SegmentID and Segment, and
1076 return both of them. Also update 'thr' so it references the new
1077 Segment. */
1078 //zz static
1079 //zz void evhH__start_new_segment_for_thread ( /*OUT*/SegmentID* new_segidP,
1080 //zz /*OUT*/Segment** new_segP,
1081 //zz Thread* thr )
1082 //zz {
1083 //zz Segment* cur_seg;
1084 //zz tl_assert(new_segP);
1085 //zz tl_assert(new_segidP);
1086 //zz tl_assert(HG_(is_sane_Thread)(thr));
1087 //zz cur_seg = map_segments_lookup( thr->csegid );
1088 //zz tl_assert(cur_seg);
1089 //zz tl_assert(cur_seg->thr == thr); /* all sane segs should point back
1090 //zz at their owner thread. */
1091 //zz *new_segP = mk_Segment( thr, cur_seg, NULL/*other*/ );
1092 //zz *new_segidP = alloc_SegmentID();
1093 //zz map_segments_add( *new_segidP, *new_segP );
1094 //zz thr->csegid = *new_segidP;
1095 //zz }
1098 /* The lock at 'lock_ga' has acquired a writer. Make all necessary
1099 updates, and also do all possible error checks. */
1100 static
1101 void evhH__post_thread_w_acquires_lock ( Thread* thr,
1102 LockKind lkk, Addr lock_ga )
1104 Lock* lk;
1106 /* Basically what we need to do is call lockN_acquire_writer.
1107 However, that will barf if any 'invalid' lock states would
1108 result. Therefore check before calling. Side effect is that
1109 'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
1110 routine.
1112 Because this routine is only called after successful lock
1113 acquisition, we should not be asked to move the lock into any
1114 invalid states. Requests to do so are bugs in libpthread, since
1115 that should have rejected any such requests. */
1117 tl_assert(HG_(is_sane_Thread)(thr));
1118 /* Try to find the lock. If we can't, then create a new one with
1119 kind 'lkk'. */
1120 lk = map_locks_lookup_or_create(
1121 lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
1122 tl_assert( HG_(is_sane_LockN)(lk) );
1124 /* check libhb level entities exist */
1125 tl_assert(thr->hbthr);
1126 tl_assert(lk->hbso);
1128 if (lk->heldBy == NULL) {
1129 /* the lock isn't held. Simple. */
1130 tl_assert(!lk->heldW);
1131 lockN_acquire_writer( lk, thr );
1132 /* acquire a dependency from the lock's VCs */
1133 libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
1134 goto noerror;
1137 /* So the lock is already held. If held as a r-lock then
1138 libpthread must be buggy. */
1139 tl_assert(lk->heldBy);
1140 if (!lk->heldW) {
1141 HG_(record_error_Misc)(
1142 thr, "Bug in libpthread: write lock "
1143 "granted on rwlock which is currently rd-held");
1144 goto error;
1147 /* So the lock is held in w-mode. If it's held by some other
1148 thread, then libpthread must be buggy. */
1149 tl_assert(VG_(sizeUniqueBag)(lk->heldBy) == 1); /* from precondition */
1151 if (thr != (Thread*)VG_(anyElementOfBag)(lk->heldBy)) {
1152 HG_(record_error_Misc)(
1153 thr, "Bug in libpthread: write lock "
1154 "granted on mutex/rwlock which is currently "
1155 "wr-held by a different thread");
1156 goto error;
1159 /* So the lock is already held in w-mode by 'thr'. That means this
1160 is an attempt to lock it recursively, which is only allowable
1161 for LK_mbRec kinded locks. Since this routine is called only
1162 once the lock has been acquired, this must also be a libpthread
1163 bug. */
1164 if (lk->kind != LK_mbRec) {
1165 HG_(record_error_Misc)(
1166 thr, "Bug in libpthread: recursive write lock "
1167 "granted on mutex/wrlock which does not "
1168 "support recursion");
1169 goto error;
1172 /* So we are recursively re-locking a lock we already w-hold. */
1173 lockN_acquire_writer( lk, thr );
1174 /* acquire a dependency from the lock's VC. Probably pointless,
1175 but also harmless. */
1176 libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
1177 goto noerror;
1179 noerror:
1180 if (HG_(clo_track_lockorders)) {
1181 /* check lock order acquisition graph, and update. This has to
1182 happen before the lock is added to the thread's locksetA/W. */
1183 laog__pre_thread_acquires_lock( thr, lk );
1185 /* update the thread's held-locks set */
1186 thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (UWord)lk );
1187 thr->locksetW = HG_(addToWS)( univ_lsets, thr->locksetW, (UWord)lk );
1188 /* fall through */
1190 error:
1191 tl_assert(HG_(is_sane_LockN)(lk));
1195 /* The lock at 'lock_ga' has acquired a reader. Make all necessary
1196 updates, and also do all possible error checks. */
1197 static
1198 void evhH__post_thread_r_acquires_lock ( Thread* thr,
1199 LockKind lkk, Addr lock_ga )
1201 Lock* lk;
1203 /* Basically what we need to do is call lockN_acquire_reader.
1204 However, that will barf if any 'invalid' lock states would
1205 result. Therefore check before calling. Side effect is that
1206 'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
1207 routine.
1209 Because this routine is only called after successful lock
1210 acquisition, we should not be asked to move the lock into any
1211 invalid states. Requests to do so are bugs in libpthread, since
1212 that should have rejected any such requests. */
1214 tl_assert(HG_(is_sane_Thread)(thr));
1215 /* Try to find the lock. If we can't, then create a new one with
1216 kind 'lkk'. Only a reader-writer lock can be read-locked,
1217 hence the first assertion. */
1218 tl_assert(lkk == LK_rdwr);
1219 lk = map_locks_lookup_or_create(
1220 lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
1221 tl_assert( HG_(is_sane_LockN)(lk) );
1223 /* check libhb level entities exist */
1224 tl_assert(thr->hbthr);
1225 tl_assert(lk->hbso);
1227 if (lk->heldBy == NULL) {
1228 /* the lock isn't held. Simple. */
1229 tl_assert(!lk->heldW);
1230 lockN_acquire_reader( lk, thr );
1231 /* acquire a dependency from the lock's VC */
1232 libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
1233 goto noerror;
1236 /* So the lock is already held. If held as a w-lock then
1237 libpthread must be buggy. */
1238 tl_assert(lk->heldBy);
1239 if (lk->heldW) {
1240 HG_(record_error_Misc)( thr, "Bug in libpthread: read lock "
1241 "granted on rwlock which is "
1242 "currently wr-held");
1243 goto error;
1246 /* Easy enough. In short anybody can get a read-lock on a rwlock
1247 provided it is either unlocked or already in rd-held. */
1248 lockN_acquire_reader( lk, thr );
1249 /* acquire a dependency from the lock's VC. Probably pointless,
1250 but also harmless. */
1251 libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
1252 goto noerror;
1254 noerror:
1255 if (HG_(clo_track_lockorders)) {
1256 /* check lock order acquisition graph, and update. This has to
1257 happen before the lock is added to the thread's locksetA/W. */
1258 laog__pre_thread_acquires_lock( thr, lk );
1260 /* update the thread's held-locks set */
1261 thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (UWord)lk );
1262 /* but don't update thr->locksetW, since lk is only rd-held */
1263 /* fall through */
1265 error:
1266 tl_assert(HG_(is_sane_LockN)(lk));
1270 /* The lock at 'lock_ga' is just about to be unlocked. Make all
1271 necessary updates, and also do all possible error checks. */
1272 static
1273 void evhH__pre_thread_releases_lock ( Thread* thr,
1274 Addr lock_ga, Bool isRDWR )
1276 Lock* lock;
1277 Word n;
1278 Bool was_heldW;
1280 /* This routine is called prior to a lock release, before
1281 libpthread has had a chance to validate the call. Hence we need
1282 to detect and reject any attempts to move the lock into an
1283 invalid state. Such attempts are bugs in the client.
1285 isRDWR is True if we know from the wrapper context that lock_ga
1286 should refer to a reader-writer lock, and is False if [ditto]
1287 lock_ga should refer to a standard mutex. */
1289 tl_assert(HG_(is_sane_Thread)(thr));
1290 lock = map_locks_maybe_lookup( lock_ga );
1292 if (!lock) {
1293 /* We know nothing about a lock at 'lock_ga'. Nevertheless
1294 the client is trying to unlock it. So complain, then ignore
1295 the attempt. */
1296 HG_(record_error_UnlockBogus)( thr, lock_ga );
1297 return;
1300 tl_assert(lock->guestaddr == lock_ga);
1301 tl_assert(HG_(is_sane_LockN)(lock));
1303 if (isRDWR && lock->kind != LK_rdwr) {
1304 HG_(record_error_Misc)( thr, "pthread_rwlock_unlock with a "
1305 "pthread_mutex_t* argument " );
1307 if ((!isRDWR) && lock->kind == LK_rdwr) {
1308 HG_(record_error_Misc)( thr, "pthread_mutex_unlock with a "
1309 "pthread_rwlock_t* argument " );
1312 if (!lock->heldBy) {
1313 /* The lock is not held. This indicates a serious bug in the
1314 client. */
1315 tl_assert(!lock->heldW);
1316 HG_(record_error_UnlockUnlocked)( thr, lock );
1317 tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lock ));
1318 tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
1319 goto error;
1322 /* test just above dominates */
1323 tl_assert(lock->heldBy);
1324 was_heldW = lock->heldW;
1326 /* The lock is held. Is this thread one of the holders? If not,
1327 report a bug in the client. */
1328 n = VG_(elemBag)( lock->heldBy, (UWord)thr );
1329 tl_assert(n >= 0);
1330 if (n == 0) {
1331 /* We are not a current holder of the lock. This is a bug in
1332 the guest, and (per POSIX pthread rules) the unlock
1333 attempt will fail. So just complain and do nothing
1334 else. */
1335 Thread* realOwner = (Thread*)VG_(anyElementOfBag)( lock->heldBy );
1336 tl_assert(HG_(is_sane_Thread)(realOwner));
1337 tl_assert(realOwner != thr);
1338 tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lock ));
1339 tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
1340 HG_(record_error_UnlockForeign)( thr, realOwner, lock );
1341 goto error;
1344 /* Ok, we hold the lock 'n' times. */
1345 tl_assert(n >= 1);
1347 lockN_release( lock, thr );
1349 n--;
1350 tl_assert(n >= 0);
1352 if (n > 0) {
1353 tl_assert(lock->heldBy);
1354 tl_assert(n == VG_(elemBag)( lock->heldBy, (UWord)thr ));
1355 /* We still hold the lock. So either it's a recursive lock
1356 or a rwlock which is currently r-held. */
1357 tl_assert(lock->kind == LK_mbRec
1358 || (lock->kind == LK_rdwr && !lock->heldW));
1359 tl_assert(HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lock ));
1360 if (lock->heldW)
1361 tl_assert(HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
1362 else
1363 tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
1364 } else {
1365 /* n is zero. This means we don't hold the lock any more. But
1366 if it's a rwlock held in r-mode, someone else could still
1367 hold it. Just do whatever sanity checks we can. */
1368 if (lock->kind == LK_rdwr && lock->heldBy) {
1369 /* It's a rwlock. We no longer hold it but we used to;
1370 nevertheless it still appears to be held by someone else.
1371 The implication is that, prior to this release, it must
1372 have been shared by us and whoever else is holding it;
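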
1373 which in turn implies it must be r-held, since a lock
1374 can't be w-held by more than one thread. */
1375 /* The lock is now R-held by somebody else: */
1376 tl_assert(lock->heldW == False);
1377 } else {
1378 /* Normal case. It's either not a rwlock, or it's a rwlock
1379 that we used to hold in w-mode (which is pretty much the
1380 same thing as a non-rwlock.) Since this transaction is
1381 atomic (V does not allow multiple threads to run
1382 simultaneously), it must mean the lock is now not held by
1383 anybody. Hence assert for it. */
1384 /* The lock is now not held by anybody: */
1385 tl_assert(!lock->heldBy);
1386 tl_assert(lock->heldW == False);
1388 //if (lock->heldBy) {
1389 // tl_assert(0 == VG_(elemBag)( lock->heldBy, (UWord)thr ));
1391 /* update this thread's lockset accordingly. */
1392 thr->locksetA
1393 = HG_(delFromWS)( univ_lsets, thr->locksetA, (UWord)lock );
1394 thr->locksetW
1395 = HG_(delFromWS)( univ_lsets, thr->locksetW, (UWord)lock );
1396 /* push our VC into the lock */
1397 tl_assert(thr->hbthr);
1398 tl_assert(lock->hbso);
1399 /* If the lock was previously W-held, then we want to do a
1400 strong send, and if previously R-held, then a weak send. */
1401 libhb_so_send( thr->hbthr, lock->hbso, was_heldW );
1403 /* fall through */
1405 error:
1406 tl_assert(HG_(is_sane_LockN)(lock));
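/* Editor's note: illustrative sketch only, not part of the original
   file.  The handlers above encode lock-based ordering with a
   per-lock synchronisation object (SO): releasing a lock does
   libhb_so_send on lock->hbso (a strong send if it was W-held), and a
   later acquisition does libhb_so_recv, so everything the releasing
   thread did before the unlock happens-before what the acquiring
   thread does afterwards.  The same send/recv pairing on a bare SO is
   shown here; hbthr_rel and hbthr_acq are assumed to be valid libhb
   Thr* values, the function name is invented, and the block is
   compiled out. */
#if 0
static void example__so_send_recv_pairing ( Thr* hbthr_rel, Thr* hbthr_acq )
{
   SO* so = libhb_so_alloc();
   /* "unlock" by the releasing thread: publish its vector clock. */
   libhb_so_send( hbthr_rel, so, True/*strong_send*/ );
   /* "lock" by the acquiring thread: acquire that dependency. */
   libhb_so_recv( hbthr_acq, so, True/*strong_recv*/ );
   libhb_so_dealloc( so );
}
#endif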
1410 /* ---------------------------------------------------------- */
1411 /* -------- Event handlers proper (evh__* functions) -------- */
1412 /* ---------------------------------------------------------- */
1414 /* What is the Thread* for the currently running thread? This is
1415 absolutely performance critical. We receive notifications from the
1416 core for client code starts/stops, and cache the looked-up result
1417 in 'current_Thread'. Hence, for the vast majority of requests,
1418 finding the current thread reduces to a read of a global variable,
1419 provided get_current_Thread_in_C_C is inlined.
1421 Outside of client code, current_Thread is NULL, and presumably
1422 any uses of it will cause a segfault. Hence:
1424 - for uses definitely within client code, use
1425 get_current_Thread_in_C_C.
1427 - for all other uses, use get_current_Thread.
1430 static Thread *current_Thread = NULL,
1431 *current_Thread_prev = NULL;
1433 static void evh__start_client_code ( ThreadId tid, ULong nDisp ) {
1434 if (0) VG_(printf)("start %d %llu\n", (Int)tid, nDisp);
1435 tl_assert(current_Thread == NULL);
1436 current_Thread = map_threads_lookup( tid );
1437 tl_assert(current_Thread != NULL);
1438 if (current_Thread != current_Thread_prev) {
1439 libhb_Thr_resumes( current_Thread->hbthr );
1440 current_Thread_prev = current_Thread;
1443 static void evh__stop_client_code ( ThreadId tid, ULong nDisp ) {
1444 if (0) VG_(printf)(" stop %d %llu\n", (Int)tid, nDisp);
1445 tl_assert(current_Thread != NULL);
1446 current_Thread = NULL;
1447 libhb_maybe_GC();
1449 static inline Thread* get_current_Thread_in_C_C ( void ) {
1450 return current_Thread;
1452 static inline Thread* get_current_Thread ( void ) {
1453 ThreadId coretid;
1454 Thread* thr;
1455 thr = get_current_Thread_in_C_C();
1456 if (LIKELY(thr))
1457 return thr;
1458 /* evidently not in client code. Do it the slow way. */
1459 coretid = VG_(get_running_tid)();
1460 /* FIXME: get rid of the following kludge. It exists because
1461 evh__new_mem is called during initialisation (as notification
1462 of initial memory layout) and VG_(get_running_tid)() returns
1463 VG_INVALID_THREADID at that point. */
1464 if (coretid == VG_INVALID_THREADID)
1465 coretid = 1; /* KLUDGE */
1466 thr = map_threads_lookup( coretid );
1467 return thr;
1470 static
1471 void evh__new_mem ( Addr a, SizeT len ) {
1472 Thread *thr = get_current_Thread();
1473 if (SHOW_EVENTS >= 2)
1474 VG_(printf)("evh__new_mem(%p, %lu)\n", (void*)a, len );
1475 shadow_mem_make_New( thr, a, len );
1476 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1477 all__sanity_check("evh__new_mem-post");
1478 if (UNLIKELY(thr->pthread_create_nesting_level > 0))
1479 shadow_mem_make_Untracked( thr, a, len );
1482 static
1483 void evh__new_mem_stack ( Addr a, SizeT len ) {
1484 Thread *thr = get_current_Thread();
1485 if (SHOW_EVENTS >= 2)
1486 VG_(printf)("evh__new_mem_stack(%p, %lu)\n", (void*)a, len );
1487 shadow_mem_make_New( thr, -VG_STACK_REDZONE_SZB + a, len );
1488 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1489 all__sanity_check("evh__new_mem_stack-post");
1490 if (UNLIKELY(thr->pthread_create_nesting_level > 0))
1491 shadow_mem_make_Untracked( thr, a, len );
1494 #define DCL_evh__new_mem_stack(syze) \
1495 static void VG_REGPARM(1) evh__new_mem_stack_##syze(Addr new_SP) \
1497 Thread *thr = get_current_Thread(); \
1498 if (SHOW_EVENTS >= 2) \
1499 VG_(printf)("evh__new_mem_stack_" #syze "(%p, %lu)\n", \
1500 (void*)new_SP, (SizeT)syze ); \
1501 shadow_mem_make_New( thr, -VG_STACK_REDZONE_SZB + new_SP, syze ); \
1502 if (syze >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE)) \
1503 all__sanity_check("evh__new_mem_stack_" #syze "-post"); \
1504 if (UNLIKELY(thr->pthread_create_nesting_level > 0)) \
1505 shadow_mem_make_Untracked( thr, new_SP, syze ); \
1508 DCL_evh__new_mem_stack(4);
1509 DCL_evh__new_mem_stack(8);
1510 DCL_evh__new_mem_stack(12);
1511 DCL_evh__new_mem_stack(16);
1512 DCL_evh__new_mem_stack(32);
1513 DCL_evh__new_mem_stack(112);
1514 DCL_evh__new_mem_stack(128);
1515 DCL_evh__new_mem_stack(144);
1516 DCL_evh__new_mem_stack(160);
1518 static
1519 void evh__new_mem_w_tid ( Addr a, SizeT len, ThreadId tid ) {
1520 Thread *thr = get_current_Thread();
1521 if (SHOW_EVENTS >= 2)
1522 VG_(printf)("evh__new_mem_w_tid(%p, %lu)\n", (void*)a, len );
1523 shadow_mem_make_New( thr, a, len );
1524 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1525 all__sanity_check("evh__new_mem_w_tid-post");
1526 if (UNLIKELY(thr->pthread_create_nesting_level > 0))
1527 shadow_mem_make_Untracked( thr, a, len );
1530 static
1531 void evh__new_mem_w_perms ( Addr a, SizeT len,
1532 Bool rr, Bool ww, Bool xx, ULong di_handle ) {
1533 Thread *thr = get_current_Thread();
1534 if (SHOW_EVENTS >= 1)
1535 VG_(printf)("evh__new_mem_w_perms(%p, %lu, %d,%d,%d)\n",
1536 (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
1537 if (rr || ww || xx) {
1538 shadow_mem_make_New( thr, a, len );
1539 if (UNLIKELY(thr->pthread_create_nesting_level > 0))
1540 shadow_mem_make_Untracked( thr, a, len );
1542 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1543 all__sanity_check("evh__new_mem_w_perms-post");
1546 static
1547 void evh__set_perms ( Addr a, SizeT len,
1548 Bool rr, Bool ww, Bool xx ) {
1549 // This handles mprotect requests. If the memory is being put
1550 // into no-R no-W state, paint it as NoAccess, for the reasons
1551 // documented at evh__die_mem_munmap().
1552 if (SHOW_EVENTS >= 1)
1553 VG_(printf)("evh__set_perms(%p, %lu, r=%d w=%d x=%d)\n",
1554 (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
1555 /* Hmm. What should we do here, that actually makes any sense?
1556 Let's say: if neither readable nor writable, then declare it
1557 NoAccess, else leave it alone. */
1558 if (!(rr || ww))
1559 shadow_mem_make_NoAccess_AHAE( get_current_Thread(), a, len );
1560 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1561 all__sanity_check("evh__set_perms-post");
1564 static
1565 void evh__die_mem ( Addr a, SizeT len ) {
1566 // Urr, libhb ignores this.
1567 if (SHOW_EVENTS >= 2)
1568 VG_(printf)("evh__die_mem(%p, %lu)\n", (void*)a, len );
1569 shadow_mem_make_NoAccess_NoFX( get_current_Thread(), a, len );
1570 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1571 all__sanity_check("evh__die_mem-post");
1574 static
1575 void evh__die_mem_munmap ( Addr a, SizeT len ) {
1576 // It's important that libhb doesn't ignore this. If, as is likely,
1577 // the client is subject to address space layout randomization,
1578 // then unmapped areas may never get remapped over, even in long
1579 // runs. If we just ignore them we wind up with large resource
1580 // (VTS) leaks in libhb. So force them to NoAccess, so that all
1581 // VTS references in the affected area are dropped. Marking memory
1582 // as NoAccess is expensive, but we assume that munmap is sufficiently
1583 // rare that the space gains of doing this are worth the costs.
1584 if (SHOW_EVENTS >= 2)
1585 VG_(printf)("evh__die_mem_munmap(%p, %lu)\n", (void*)a, len );
1586 shadow_mem_make_NoAccess_AHAE( get_current_Thread(), a, len );
1589 static
1590 void evh__untrack_mem ( Addr a, SizeT len ) {
1591 // Libhb doesn't ignore this.
1592 if (SHOW_EVENTS >= 2)
1593 VG_(printf)("evh__untrack_mem(%p, %lu)\n", (void*)a, len );
1594 shadow_mem_make_Untracked( get_current_Thread(), a, len );
1595 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1596 all__sanity_check("evh__untrack_mem-post");
1599 static
1600 void evh__copy_mem ( Addr src, Addr dst, SizeT len ) {
1601 if (SHOW_EVENTS >= 2)
1602 VG_(printf)("evh__copy_mem(%p, %p, %lu)\n", (void*)src, (void*)dst, len );
1603 Thread *thr = get_current_Thread();
1604 if (LIKELY(thr->synchr_nesting == 0))
1605 shadow_mem_scopy_range( thr , src, dst, len );
1606 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1607 all__sanity_check("evh__copy_mem-post");
1610 static
1611 void evh__pre_thread_ll_create ( ThreadId parent, ThreadId child )
1613 if (SHOW_EVENTS >= 1)
1614 VG_(printf)("evh__pre_thread_ll_create(p=%d, c=%d)\n",
1615 (Int)parent, (Int)child );
1617 if (parent != VG_INVALID_THREADID) {
1618 Thread* thr_p;
1619 Thread* thr_c;
1620 Thr* hbthr_p;
1621 Thr* hbthr_c;
1623 tl_assert(HG_(is_sane_ThreadId)(parent));
1624 tl_assert(HG_(is_sane_ThreadId)(child));
1625 tl_assert(parent != child);
1627 thr_p = map_threads_maybe_lookup( parent );
1628 thr_c = map_threads_maybe_lookup( child );
1630 tl_assert(thr_p != NULL);
1631 tl_assert(thr_c == NULL);
1633 hbthr_p = thr_p->hbthr;
1634 tl_assert(hbthr_p != NULL);
1635 tl_assert( libhb_get_Thr_hgthread(hbthr_p) == thr_p );
1637 hbthr_c = libhb_create ( hbthr_p );
1639 /* Create a new thread record for the child. */
1640 /* a Thread for the new thread ... */
1641 thr_c = mk_Thread( hbthr_c );
1642 tl_assert( libhb_get_Thr_hgthread(hbthr_c) == NULL );
1643 libhb_set_Thr_hgthread(hbthr_c, thr_c);
1645 /* and bind it in the thread-map table */
1646 map_threads[child] = thr_c;
1647 tl_assert(thr_c->coretid == VG_INVALID_THREADID);
1648 thr_c->coretid = child;
1650 /* Record where the parent is so we can later refer to this in
1651 error messages.
1653 On x86/amd64-linux, this entails a nasty glibc specific hack.
1654 The stack snapshot is taken immediately after the parent has
1655 returned from its sys_clone call. Unfortunately there is no
1656 unwind info for the insn following "syscall" - reading the
1657 glibc sources confirms this. So we ask for a snapshot to be
1658 taken as if RIP was 3 bytes earlier, in a place where there
1659 is unwind info. Sigh.
1661 { Word first_ip_delta = 0;
1662 # if defined(VGP_amd64_linux) || defined(VGP_x86_linux)
1663 first_ip_delta = -3;
1664 # elif defined(VGP_arm64_linux) || defined(VGP_arm_linux)
1665 first_ip_delta = -1;
1666 # endif
1667 thr_c->created_at = VG_(record_ExeContext)(parent, first_ip_delta);
1670 if (HG_(clo_ignore_thread_creation)) {
1671 HG_(thread_enter_pthread_create)(thr_c);
1672 tl_assert(thr_c->synchr_nesting == 0);
1673 HG_(thread_enter_synchr)(thr_c);
1674 /* Counterpart in _VG_USERREQ__HG_SET_MY_PTHREAD_T. */
1678 if (HG_(clo_sanity_flags) & SCE_THREADS)
1679 all__sanity_check("evh__pre_thread_create-post");
1682 static
1683 void evh__pre_thread_ll_exit ( ThreadId quit_tid )
1685 Int nHeld;
1686 Thread* thr_q;
1687 if (SHOW_EVENTS >= 1)
1688 VG_(printf)("evh__pre_thread_ll_exit(thr=%d)\n",
1689 (Int)quit_tid );
1691 /* quit_tid has disappeared without joining to any other thread.
1692 Therefore there is no synchronisation event associated with its
1693 exit and so we have to pretty much treat it as if it was still
1694 alive but mysteriously making no progress. That is because, if
1695 we don't know when it really exited, then we can never say there
1696 is a point in time when we're sure the thread really has
1697 finished, and so we need to consider the possibility that it
1698 lingers indefinitely and continues to interact with other
1699 threads. */
1700 /* However, it might have rendezvous'd with a thread that called
1701 pthread_join with this one as arg, prior to this point (that's
1702 how NPTL works). In which case there has already been a prior
1703 sync event. So in any case, just let the thread exit. On NPTL,
1704 all thread exits go through here. */
1705 tl_assert(HG_(is_sane_ThreadId)(quit_tid));
1706 thr_q = map_threads_maybe_lookup( quit_tid );
1707 tl_assert(thr_q != NULL);
1709 /* Complain if this thread holds any locks. */
1710 nHeld = HG_(cardinalityWS)( univ_lsets, thr_q->locksetA );
1711 tl_assert(nHeld >= 0);
1712 if (nHeld > 0) {
1713 HChar buf[80];
1714 VG_(sprintf)(buf, "Exiting thread still holds %d lock%s",
1715 nHeld, nHeld > 1 ? "s" : "");
1716 HG_(record_error_Misc)( thr_q, buf );
1719 /* Not much to do here:
1720 - tell libhb the thread is gone
1721 - clear the map_threads entry, in order that the Valgrind core
1722 can re-use it. */
1723 /* Cleanup actions (next 5 lines) copied in evh__atfork_child; keep
1724 in sync. */
1725 tl_assert(thr_q->hbthr);
1726 libhb_async_exit(thr_q->hbthr);
1727 tl_assert(thr_q->coretid == quit_tid);
1728 thr_q->coretid = VG_INVALID_THREADID;
1729 map_threads_delete( quit_tid );
1731 if (HG_(clo_sanity_flags) & SCE_THREADS)
1732 all__sanity_check("evh__pre_thread_ll_exit-post");
1735 /* This is called immediately after fork, for the child only. 'tid'
1736 is the only surviving thread (as per POSIX rules on fork() in
1737 threaded programs), so we have to clean up map_threads to remove
1738 entries for any other threads. */
1739 static
1740 void evh__atfork_child ( ThreadId tid )
1742 UInt i;
1743 Thread* thr;
1744 /* Slot 0 should never be used. */
1745 thr = map_threads_maybe_lookup( 0/*INVALID*/ );
1746 tl_assert(!thr);
1747 /* Clean up all other slots except 'tid'. */
1748 for (i = 1; i < VG_N_THREADS; i++) {
1749 if (i == tid)
1750 continue;
1751 thr = map_threads_maybe_lookup(i);
1752 if (!thr)
1753 continue;
1754 /* Cleanup actions (next 5 lines) copied from end of
1755 evh__pre_thread_ll_exit; keep in sync. */
1756 tl_assert(thr->hbthr);
1757 libhb_async_exit(thr->hbthr);
1758 tl_assert(thr->coretid == i);
1759 thr->coretid = VG_INVALID_THREADID;
1760 map_threads_delete(i);
1764 /* generate a dependence from the hbthr_q quitter to the hbthr_s stayer. */
1765 static
1766 void generate_quitter_stayer_dependence (Thr* hbthr_q, Thr* hbthr_s)
1768 SO* so;
1769 /* Allocate a temporary synchronisation object and use it to send
1770 an imaginary message from the quitter to the stayer, the purpose
1771 being to generate a dependence from the quitter to the
1772 stayer. */
1773 so = libhb_so_alloc();
1774 tl_assert(so);
1775 /* Ideally the last arg of _so_send would be False, since the sending
1776 thread doesn't actually exist any more, so we don't want _so_send to
1777 try taking stack snapshots of it. */
1778 libhb_so_send(hbthr_q, so, True/*strong_send; see comment above*/);
1779 libhb_so_recv(hbthr_s, so, True/*strong_recv*/);
1780 libhb_so_dealloc(so);
1782 /* Tell libhb that the quitter has been reaped. Note that we might
1783 have to be cleverer about this, to exclude 2nd and subsequent
1784 notifications for the same hbthr_q, in the case where the app is
1785 buggy (calls pthread_join twice or more on the same thread) AND
1786 where libpthread is also buggy and doesn't return ESRCH on
1787 subsequent calls. (If libpthread isn't thusly buggy, then the
1788 wrapper for pthread_join in hg_intercepts.c will stop us getting
1789 notified here multiple times for the same joinee.) See also
1790 comments in helgrind/tests/jointwice.c. */
1791 libhb_joinedwith_done(hbthr_q);
1795 static
1796 void evh__HG_PTHREAD_JOIN_POST ( ThreadId stay_tid, Thread* quit_thr )
1798 Thread* thr_s;
1799 Thread* thr_q;
1800 Thr* hbthr_s;
1801 Thr* hbthr_q;
1803 if (SHOW_EVENTS >= 1)
1804 VG_(printf)("evh__post_thread_join(stayer=%d, quitter=%p)\n",
1805 (Int)stay_tid, quit_thr );
1807 tl_assert(HG_(is_sane_ThreadId)(stay_tid));
1809 thr_s = map_threads_maybe_lookup( stay_tid );
1810 thr_q = quit_thr;
1811 tl_assert(thr_s != NULL);
1812 tl_assert(thr_q != NULL);
1813 tl_assert(thr_s != thr_q);
1815 hbthr_s = thr_s->hbthr;
1816 hbthr_q = thr_q->hbthr;
1817 tl_assert(hbthr_s != hbthr_q);
1818 tl_assert( libhb_get_Thr_hgthread(hbthr_s) == thr_s );
1819 tl_assert( libhb_get_Thr_hgthread(hbthr_q) == thr_q );
1821 generate_quitter_stayer_dependence (hbthr_q, hbthr_s);
1823 /* evh__pre_thread_ll_exit issues an error message if the exiting
1824 thread holds any locks. No need to check here. */
1826 /* This holds because, at least when using NPTL as the thread
1827 library, we should be notified of the low-level thread exit before
1828 we hear of any join event on it. The low level exit
1829 notification feeds through into evh__pre_thread_ll_exit,
1830 which should clear the map_threads entry for it. Hence we
1831 expect there to be no map_threads entry at this point. */
1832 tl_assert( map_threads_maybe_reverse_lookup_SLOW(thr_q)
1833 == VG_INVALID_THREADID);
1835 if (HG_(clo_sanity_flags) & SCE_THREADS)
1836 all__sanity_check("evh__post_thread_join-post");
1839 static
1840 void evh__pre_mem_read ( CorePart part, ThreadId tid, const HChar* s,
1841 Addr a, SizeT size) {
1842 if (SHOW_EVENTS >= 2
1843 || (SHOW_EVENTS >= 1 && size != 1))
1844 VG_(printf)("evh__pre_mem_read(ctid=%d, \"%s\", %p, %lu)\n",
1845 (Int)tid, s, (void*)a, size );
1846 Thread *thr = map_threads_lookup(tid);
1847 if (LIKELY(thr->synchr_nesting == 0))
1848 shadow_mem_cread_range(thr, a, size);
1849 if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1850 all__sanity_check("evh__pre_mem_read-post");
1853 static
1854 void evh__pre_mem_read_asciiz ( CorePart part, ThreadId tid,
1855 const HChar* s, Addr a ) {
1856 Int len;
1857 if (SHOW_EVENTS >= 1)
1858 VG_(printf)("evh__pre_mem_asciiz(ctid=%d, \"%s\", %p)\n",
1859 (Int)tid, s, (void*)a );
1860 // Don't segfault if the string starts in an obviously stupid
1861 // place. Actually we should check the whole string, not just
1862 // the start address, but that's too much trouble. At least
1863 // checking the first byte is better than nothing. See #255009.
1864 if (!VG_(am_is_valid_for_client) (a, 1, VKI_PROT_READ))
1865 return;
1866 Thread *thr = map_threads_lookup(tid);
1867 len = VG_(strlen)( (HChar*) a );
1868 if (LIKELY(thr->synchr_nesting == 0))
1869 shadow_mem_cread_range( thr, a, len+1 );
1870 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1871 all__sanity_check("evh__pre_mem_read_asciiz-post");
1874 static
1875 void evh__pre_mem_write ( CorePart part, ThreadId tid, const HChar* s,
1876 Addr a, SizeT size ) {
1877 if (SHOW_EVENTS >= 1)
1878 VG_(printf)("evh__pre_mem_write(ctid=%d, \"%s\", %p, %lu)\n",
1879 (Int)tid, s, (void*)a, size );
1880 Thread *thr = map_threads_lookup(tid);
1881 if (LIKELY(thr->synchr_nesting == 0))
1882 shadow_mem_cwrite_range(thr, a, size);
1883 if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1884 all__sanity_check("evh__pre_mem_write-post");
1887 static
1888 void evh__new_mem_heap ( Addr a, SizeT len, Bool is_inited ) {
1889 if (SHOW_EVENTS >= 1)
1890 VG_(printf)("evh__new_mem_heap(%p, %lu, inited=%d)\n",
1891 (void*)a, len, (Int)is_inited );
1892 // We ignore the initialisation state (is_inited); that's ok.
1893 shadow_mem_make_New(get_current_Thread(), a, len);
1894 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1895 all__sanity_check("evh__new_mem_heap-post");
1898 static
1899 void evh__die_mem_heap ( Addr a, SizeT len ) {
1900 Thread* thr;
1901 if (SHOW_EVENTS >= 1)
1902 VG_(printf)("evh__die_mem_heap(%p, %lu)\n", (void*)a, len );
1903 thr = get_current_Thread();
1904 tl_assert(thr);
1905 if (HG_(clo_free_is_write)) {
1906 /* Treat frees as if the memory was written immediately prior to
1907 the free. This shakes out more races, specifically, cases
1908 where memory is referenced by one thread, and freed by
1909 another, and there's no observable synchronisation event to
1910 guarantee that the reference happens before the free. */
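/* A minimal sketch of the kind of race this exposes (client-side;
   'p' is a hypothetical heap pointer shared by two threads):

     // thread 1:              // thread 2:
     p->field = 1;             free(p);   // counts as a write to *p

   With no synchronisation between the two threads, the injected
   'write' collides with thread 1's access and a race is reported. */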
1911 if (LIKELY(thr->synchr_nesting == 0))
1912 shadow_mem_cwrite_range(thr, a, len);
1914 shadow_mem_make_NoAccess_AHAE( thr, a, len );
1915 /* We used to call instead
1916 shadow_mem_make_NoAccess_NoFX( thr, a, len );
1917 A non-buggy application will not access the freed memory again,
1918 so marking it no-access is in theory useless.
1919 Not marking freed memory would avoid the overhead for applications
1920 doing mostly malloc/free, as freed memory is typically recycled
1921 very soon afterwards.
1922 We nonetheless mark it no-access, for the following reasons:
1923 * the accessibility bits then always correctly represent the memory
1924 status (e.g. for the client request VALGRIND_HG_GET_ABITS).
1925 * the overhead is reasonable (about 5 seconds per GB in 1000-byte
1926 blocks, on a ppc64le, for an unrealistic workload of an application
1927 doing only malloc/free).
1928 * marking no-access allows the SecMap to be GC'd, which might improve
1929 performance and/or memory usage.
1930 * we might detect more application bugs when memory is marked
1931 no-access.
1932 If needed, we could support an option --free-is-noaccess=yes|no
1933 here, for applications that need to avoid the overhead of the
1934 no-access marking. */
1936 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1937 all__sanity_check("evh__die_mem_heap-post");
1940 /* --- Event handlers called from generated code --- */
1942 static VG_REGPARM(1)
1943 void evh__mem_help_cread_1(Addr a) {
1944 Thread* thr = get_current_Thread_in_C_C();
1945 Thr* hbthr = thr->hbthr;
1946 if (LIKELY(thr->synchr_nesting == 0))
1947 LIBHB_CREAD_1(hbthr, a);
1950 static VG_REGPARM(1)
1951 void evh__mem_help_cread_2(Addr a) {
1952 Thread* thr = get_current_Thread_in_C_C();
1953 Thr* hbthr = thr->hbthr;
1954 if (LIKELY(thr->synchr_nesting == 0))
1955 LIBHB_CREAD_2(hbthr, a);
1958 static VG_REGPARM(1)
1959 void evh__mem_help_cread_4(Addr a) {
1960 Thread* thr = get_current_Thread_in_C_C();
1961 Thr* hbthr = thr->hbthr;
1962 if (LIKELY(thr->synchr_nesting == 0))
1963 LIBHB_CREAD_4(hbthr, a);
1966 static VG_REGPARM(1)
1967 void evh__mem_help_cread_8(Addr a) {
1968 Thread* thr = get_current_Thread_in_C_C();
1969 Thr* hbthr = thr->hbthr;
1970 if (LIKELY(thr->synchr_nesting == 0))
1971 LIBHB_CREAD_8(hbthr, a);
1974 static VG_REGPARM(2)
1975 void evh__mem_help_cread_N(Addr a, SizeT size) {
1976 Thread* thr = get_current_Thread_in_C_C();
1977 Thr* hbthr = thr->hbthr;
1978 if (LIKELY(thr->synchr_nesting == 0))
1979 LIBHB_CREAD_N(hbthr, a, size);
1982 static VG_REGPARM(1)
1983 void evh__mem_help_cwrite_1(Addr a) {
1984 Thread* thr = get_current_Thread_in_C_C();
1985 Thr* hbthr = thr->hbthr;
1986 if (LIKELY(thr->synchr_nesting == 0))
1987 LIBHB_CWRITE_1(hbthr, a);
1990 static VG_REGPARM(1)
1991 void evh__mem_help_cwrite_2(Addr a) {
1992 Thread* thr = get_current_Thread_in_C_C();
1993 Thr* hbthr = thr->hbthr;
1994 if (LIKELY(thr->synchr_nesting == 0))
1995 LIBHB_CWRITE_2(hbthr, a);
1998 static VG_REGPARM(1)
1999 void evh__mem_help_cwrite_4(Addr a) {
2000 Thread* thr = get_current_Thread_in_C_C();
2001 Thr* hbthr = thr->hbthr;
2002 if (LIKELY(thr->synchr_nesting == 0))
2003 LIBHB_CWRITE_4(hbthr, a);
2006 /* Same as evh__mem_help_cwrite_4 but unwind will use a first_sp_delta of
2007 one word. */
2008 static VG_REGPARM(1)
2009 void evh__mem_help_cwrite_4_fixupSP(Addr a) {
2010 Thread* thr = get_current_Thread_in_C_C();
2011 Thr* hbthr = thr->hbthr;
2013 thr->first_sp_delta = sizeof(Word);
2014 if (LIKELY(thr->synchr_nesting == 0))
2015 LIBHB_CWRITE_4(hbthr, a);
2016 thr->first_sp_delta = 0;
2019 static VG_REGPARM(1)
2020 void evh__mem_help_cwrite_8(Addr a) {
2021 Thread* thr = get_current_Thread_in_C_C();
2022 Thr* hbthr = thr->hbthr;
2023 if (LIKELY(thr->synchr_nesting == 0))
2024 LIBHB_CWRITE_8(hbthr, a);
2027 /* Same as evh__mem_help_cwrite_8 but unwind will use a first_sp_delta of
2028 one word. */
2029 static VG_REGPARM(1)
2030 void evh__mem_help_cwrite_8_fixupSP(Addr a) {
2031 Thread* thr = get_current_Thread_in_C_C();
2032 Thr* hbthr = thr->hbthr;
2034 thr->first_sp_delta = sizeof(Word);
2035 if (LIKELY(thr->synchr_nesting == 0))
2036 LIBHB_CWRITE_8(hbthr, a);
2037 thr->first_sp_delta = 0;
2040 static VG_REGPARM(2)
2041 void evh__mem_help_cwrite_N(Addr a, SizeT size) {
2042 Thread* thr = get_current_Thread_in_C_C();
2043 Thr* hbthr = thr->hbthr;
2044 if (LIKELY(thr->synchr_nesting == 0))
2045 LIBHB_CWRITE_N(hbthr, a, size);
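/* The helpers above are attached to client memory accesses by the
   instrumentation pass later in this file: roughly, a client load or
   store of size 1/2/4/8 gets a call to the matching cread/cwrite
   helper emitted in front of it, and other sizes fall back to the _N
   variants.  The synchr_nesting check means accesses made while the
   thread is inside certain intercepted operations (see
   HG_(thread_enter_synchr)) are not race-checked. */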
2049 /* ------------------------------------------------------- */
2050 /* -------------- events to do with mutexes -------------- */
2051 /* ------------------------------------------------------- */
2053 /* EXPOSITION only: by intercepting lock init events we can show the
2054 user where the lock was initialised, rather than only being able to
2055 show where it was first locked. Intercepting lock initialisations
2056 is not necessary for the basic operation of the race checker. */
2057 static
2058 void evh__HG_PTHREAD_MUTEX_INIT_POST( ThreadId tid,
2059 void* mutex, Word mbRec )
2061 if (SHOW_EVENTS >= 1)
2062 VG_(printf)("evh__hg_PTHREAD_MUTEX_INIT_POST(ctid=%d, mbRec=%ld, %p)\n",
2063 (Int)tid, mbRec, (void*)mutex );
2064 tl_assert(mbRec == 0 || mbRec == 1);
2065 map_locks_lookup_or_create( mbRec ? LK_mbRec : LK_nonRec,
2066 (Addr)mutex, tid );
2067 if (HG_(clo_sanity_flags) & SCE_LOCKS)
2068 all__sanity_check("evh__hg_PTHREAD_MUTEX_INIT_POST");
2071 static
2072 void evh__HG_PTHREAD_MUTEX_DESTROY_PRE( ThreadId tid, void* mutex,
2073 Bool mutex_is_init )
2075 Thread* thr;
2076 Lock* lk;
2077 if (SHOW_EVENTS >= 1)
2078 VG_(printf)("evh__hg_PTHREAD_MUTEX_DESTROY_PRE"
2079 "(ctid=%d, %p, isInit=%d)\n",
2080 (Int)tid, (void*)mutex, (Int)mutex_is_init );
2082 thr = map_threads_maybe_lookup( tid );
2083 /* cannot fail - Thread* must already exist */
2084 tl_assert( HG_(is_sane_Thread)(thr) );
2086 lk = map_locks_maybe_lookup( (Addr)mutex );
2088 if (lk == NULL && mutex_is_init) {
2089 /* We're destroying a mutex which we don't have any record of,
2090 and which appears to have the value PTHREAD_MUTEX_INITIALIZER.
2091 Assume it never got used, and so we don't need to do anything
2092 more. */
2093 goto out;
2096 if (lk == NULL || (lk->kind != LK_nonRec && lk->kind != LK_mbRec)) {
2097 HG_(record_error_Misc)(
2098 thr, "pthread_mutex_destroy with invalid argument" );
2101 if (lk) {
2102 tl_assert( HG_(is_sane_LockN)(lk) );
2103 tl_assert( lk->guestaddr == (Addr)mutex );
2104 if (lk->heldBy) {
2105 /* Basically act like we unlocked the lock */
2106 HG_(record_error_Misc)(
2107 thr, "pthread_mutex_destroy of a locked mutex" );
2108 /* remove lock from locksets of all owning threads */
2109 remove_Lock_from_locksets_of_all_owning_Threads( lk );
2110 VG_(deleteBag)( lk->heldBy );
2111 lk->heldBy = NULL;
2112 lk->heldW = False;
2113 lk->acquired_at = NULL;
2115 tl_assert( !lk->heldBy );
2116 tl_assert( HG_(is_sane_LockN)(lk) );
2118 if (HG_(clo_track_lockorders))
2119 laog__handle_one_lock_deletion(lk);
2120 map_locks_delete( lk->guestaddr );
2121 del_LockN( lk );
2124 out:
2125 if (HG_(clo_sanity_flags) & SCE_LOCKS)
2126 all__sanity_check("evh__hg_PTHREAD_MUTEX_DESTROY_PRE");
2129 static void evh__HG_PTHREAD_MUTEX_LOCK_PRE ( ThreadId tid,
2130 void* mutex, Word isTryLock )
2132 /* Just check the mutex is sane; nothing else to do. */
2133 // 'mutex' may be invalid - not checked by wrapper
2134 Thread* thr;
2135 Lock* lk;
2136 if (SHOW_EVENTS >= 1)
2137 VG_(printf)("evh__hg_PTHREAD_MUTEX_LOCK_PRE(ctid=%d, mutex=%p)\n",
2138 (Int)tid, (void*)mutex );
2140 tl_assert(isTryLock == 0 || isTryLock == 1);
2141 thr = map_threads_maybe_lookup( tid );
2142 tl_assert(thr); /* cannot fail - Thread* must already exist */
2144 lk = map_locks_maybe_lookup( (Addr)mutex );
2146 if (lk && (lk->kind == LK_rdwr)) {
2147 HG_(record_error_Misc)( thr, "pthread_mutex_lock with a "
2148 "pthread_rwlock_t* argument " );
2151 if ( lk
2152 && isTryLock == 0
2153 && (lk->kind == LK_nonRec || lk->kind == LK_rdwr)
2154 && lk->heldBy
2155 && lk->heldW
2156 && VG_(elemBag)( lk->heldBy, (UWord)thr ) > 0 ) {
2157 /* uh, it's a non-recursive lock and we already w-hold it, and
2158 this is a real lock operation (not a speculative "tryLock"
2159 kind of thing). Duh. Deadlock coming up; but at least
2160 produce an error message. */
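/* e.g. (client-side sketch; 'm' is hypothetical):
     pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;  // non-recursive
     pthread_mutex_lock(&m);
     pthread_mutex_lock(&m);   // reported here, then self-deadlock
*/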
2161 const HChar* errstr = "Attempt to re-lock a "
2162 "non-recursive lock I already hold";
2163 const HChar* auxstr = "Lock was previously acquired";
2164 if (lk->acquired_at) {
2165 HG_(record_error_Misc_w_aux)( thr, errstr, auxstr, lk->acquired_at );
2166 } else {
2167 HG_(record_error_Misc)( thr, errstr );
2172 static void evh__HG_PTHREAD_MUTEX_LOCK_POST ( ThreadId tid, void* mutex )
2174 // only called if the real library call succeeded - so mutex is sane
2175 Thread* thr;
2176 if (SHOW_EVENTS >= 1)
2177 VG_(printf)("evh__HG_PTHREAD_MUTEX_LOCK_POST(ctid=%d, mutex=%p)\n",
2178 (Int)tid, (void*)mutex );
2180 thr = map_threads_maybe_lookup( tid );
2181 tl_assert(thr); /* cannot fail - Thread* must already exist */
2183 evhH__post_thread_w_acquires_lock(
2184 thr,
2185 LK_mbRec, /* if not known, create new lock with this LockKind */
2186 (Addr)mutex
2190 static void evh__HG_PTHREAD_MUTEX_UNLOCK_PRE ( ThreadId tid, void* mutex )
2192 // 'mutex' may be invalid - not checked by wrapper
2193 Thread* thr;
2194 if (SHOW_EVENTS >= 1)
2195 VG_(printf)("evh__HG_PTHREAD_MUTEX_UNLOCK_PRE(ctid=%d, mutex=%p)\n",
2196 (Int)tid, (void*)mutex );
2198 thr = map_threads_maybe_lookup( tid );
2199 tl_assert(thr); /* cannot fail - Thread* must already exist */
2201 evhH__pre_thread_releases_lock( thr, (Addr)mutex, False/*!isRDWR*/ );
2204 static void evh__HG_PTHREAD_MUTEX_UNLOCK_POST ( ThreadId tid, void* mutex )
2206 // only called if the real library call succeeded - so mutex is sane
2207 Thread* thr;
2208 if (SHOW_EVENTS >= 1)
2209 VG_(printf)("evh__hg_PTHREAD_MUTEX_UNLOCK_POST(ctid=%d, mutex=%p)\n",
2210 (Int)tid, (void*)mutex );
2211 thr = map_threads_maybe_lookup( tid );
2212 tl_assert(thr); /* cannot fail - Thread* must already exist */
2214 // anything we should do here?
2218 /* ------------------------------------------------------- */
2219 /* -------------- events to do with spinlocks ------------ */
2220 /* ------------------------------------------------------- */
2222 /* All a bit of a kludge. Pretend we're really dealing with ordinary
2223 pthread_mutex_t's instead, for the most part. */
2225 static void evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE( ThreadId tid,
2226 void* slock )
2228 Thread* thr;
2229 Lock* lk;
2230 /* In glibc's kludgey world, we're either initialising or unlocking
2231 it. Since this is the pre-routine, if it is locked, unlock it
2232 and take a dependence edge. Otherwise, do nothing. */
2234 if (SHOW_EVENTS >= 1)
2235 VG_(printf)("evh__hg_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE"
2236 "(ctid=%d, slock=%p)\n",
2237 (Int)tid, (void*)slock );
2239 thr = map_threads_maybe_lookup( tid );
2240 /* cannot fail - Thread* must already exist */
2241 tl_assert( HG_(is_sane_Thread)(thr) );
2243 lk = map_locks_maybe_lookup( (Addr)slock );
2244 if (lk && lk->heldBy) {
2245 /* it's held. So do the normal pre-unlock actions, as copied
2246 from evh__HG_PTHREAD_MUTEX_UNLOCK_PRE. This stupidly
2247 duplicates the map_locks_maybe_lookup. */
2248 evhH__pre_thread_releases_lock( thr, (Addr)slock,
2249 False/*!isRDWR*/ );
2253 static void evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST( ThreadId tid,
2254 void* slock )
2256 Lock* lk;
2257 /* More kludgery. If the lock has never been seen before, do
2258 actions as per evh__HG_PTHREAD_MUTEX_INIT_POST. Else do
2259 nothing. */
2261 if (SHOW_EVENTS >= 1)
2262 VG_(printf)("evh__hg_PTHREAD_SPIN_INIT_OR_UNLOCK_POST"
2263 "(ctid=%d, slock=%p)\n",
2264 (Int)tid, (void*)slock );
2266 lk = map_locks_maybe_lookup( (Addr)slock );
2267 if (!lk) {
2268 map_locks_lookup_or_create( LK_nonRec, (Addr)slock, tid );
2272 static void evh__HG_PTHREAD_SPIN_LOCK_PRE( ThreadId tid,
2273 void* slock, Word isTryLock )
2275 evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, slock, isTryLock );
2278 static void evh__HG_PTHREAD_SPIN_LOCK_POST( ThreadId tid,
2279 void* slock )
2281 evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, slock );
2284 static void evh__HG_PTHREAD_SPIN_DESTROY_PRE( ThreadId tid,
2285 void* slock )
2287 evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid, slock, 0/*!isInit*/ );
2291 /* ----------------------------------------------------- */
2292 /* --------------- events to do with CVs --------------- */
2293 /* ----------------------------------------------------- */
2295 /* A mapping from CV to (the SO associated with it, plus some
2296 auxiliary data for error checking). When the CV is
2297 signalled/broadcasted upon, we do a 'send' into the SO, and when a
2298 wait on it completes, we do a 'recv' from the SO. This is believed
2299 to give the correct happens-before events arising from CV
2300 signallings/broadcasts.
2303 /* .so is the SO for this CV.
2304 .mx_ga is the associated mutex, when .nWaiters > 0
2306 POSIX says effectively that the first pthread_cond_{timed}wait call
2307 causes a dynamic binding between the CV and the mutex, and that
2308 lasts until such time as the waiter count falls to zero. Hence we
2309 need to keep track of the number of waiters in order to do
2310 consistency tracking. */
2311 typedef
2312 struct {
2313 SO* so; /* libhb-allocated SO */
2314 void* mx_ga; /* addr of associated mutex, if any */
2315 UWord nWaiters; /* # threads waiting on the CV */
2317 CVInfo;
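/* A minimal client-side sketch of the pattern this models ('mx', 'cv'
   and 'flag' are hypothetical):

     // signaller:                   // waiter:
     pthread_mutex_lock(&mx);        pthread_mutex_lock(&mx);
     flag = 1;                       while (!flag)
     pthread_cond_signal(&cv);          pthread_cond_wait(&cv, &mx);
     pthread_mutex_unlock(&mx);      use(flag);
                                     pthread_mutex_unlock(&mx);

   The signal does a 'send' on the CV's SO (evh__HG_PTHREAD_COND_SIGNAL_PRE
   below) and the completed wait does a 'recv' (evh__HG_PTHREAD_COND_WAIT_POST),
   so the waiter acquires a dependency on everything the signaller did
   before signalling. */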
2320 /* pthread_cond_t* -> CVInfo* */
2321 static WordFM* map_cond_to_CVInfo = NULL;
2323 static void map_cond_to_CVInfo_INIT ( void ) {
2324 if (UNLIKELY(map_cond_to_CVInfo == NULL)) {
2325 map_cond_to_CVInfo = VG_(newFM)( HG_(zalloc),
2326 "hg.mctCI.1", HG_(free), NULL );
2330 static CVInfo* map_cond_to_CVInfo_lookup_or_alloc ( void* cond ) {
2331 UWord key, val;
2332 map_cond_to_CVInfo_INIT();
2333 if (VG_(lookupFM)( map_cond_to_CVInfo, &key, &val, (UWord)cond )) {
2334 tl_assert(key == (UWord)cond);
2335 return (CVInfo*)val;
2336 } else {
2337 SO* so = libhb_so_alloc();
2338 CVInfo* cvi = HG_(zalloc)("hg.mctCloa.1", sizeof(CVInfo));
2339 cvi->so = so;
2340 cvi->mx_ga = 0;
2341 VG_(addToFM)( map_cond_to_CVInfo, (UWord)cond, (UWord)cvi );
2342 return cvi;
2346 static CVInfo* map_cond_to_CVInfo_lookup_NO_alloc ( void* cond ) {
2347 UWord key, val;
2348 map_cond_to_CVInfo_INIT();
2349 if (VG_(lookupFM)( map_cond_to_CVInfo, &key, &val, (UWord)cond )) {
2350 tl_assert(key == (UWord)cond);
2351 return (CVInfo*)val;
2352 } else {
2353 return NULL;
2357 static void map_cond_to_CVInfo_delete ( ThreadId tid,
2358 void* cond, Bool cond_is_init ) {
2359 Thread* thr;
2360 UWord keyW, valW;
2362 thr = map_threads_maybe_lookup( tid );
2363 tl_assert(thr); /* cannot fail - Thread* must already exist */
2365 map_cond_to_CVInfo_INIT();
2366 if (VG_(lookupFM)( map_cond_to_CVInfo, &keyW, &valW, (UWord)cond )) {
2367 CVInfo* cvi = (CVInfo*)valW;
2368 tl_assert(keyW == (UWord)cond);
2369 tl_assert(cvi);
2370 tl_assert(cvi->so);
2371 if (cvi->nWaiters > 0) {
2372 HG_(record_error_Misc)(
2373 thr, "pthread_cond_destroy:"
2374 " destruction of condition variable being waited upon");
2375 /* Destroying a cond var being waited upon outcome is EBUSY and
2376 variable is not destroyed. */
2377 return;
2379 if (!VG_(delFromFM)( map_cond_to_CVInfo, &keyW, &valW, (UWord)cond ))
2380 tl_assert(0); // cond var found above, and not here ???
2381 libhb_so_dealloc(cvi->so);
2382 cvi->mx_ga = 0;
2383 HG_(free)(cvi);
2384 } else {
2385 /* We have no record of this CV. So complain about it
2386 .. except, don't bother to complain if it has exactly the
2387 value PTHREAD_COND_INITIALIZER, since it might be that the CV
2388 was initialised like that but never used. */
2389 if (!cond_is_init) {
2390 HG_(record_error_Misc)(
2391 thr, "pthread_cond_destroy: destruction of unknown cond var");
2396 static void evh__HG_PTHREAD_COND_SIGNAL_PRE ( ThreadId tid, void* cond )
2398 /* 'tid' has signalled on 'cond'. As per the comment above, bind
2399 cond to a SO if it is not already so bound, and 'send' on the
2400 SO. This is later used by other thread(s) which successfully
2401 exit from a pthread_cond_wait on the same cv; then they 'recv'
2402 from the SO, thereby acquiring a dependency on this signalling
2403 event. */
2404 Thread* thr;
2405 CVInfo* cvi;
2406 //Lock* lk;
2408 if (SHOW_EVENTS >= 1)
2409 VG_(printf)("evh__HG_PTHREAD_COND_SIGNAL_PRE(ctid=%d, cond=%p)\n",
2410 (Int)tid, (void*)cond );
2412 thr = map_threads_maybe_lookup( tid );
2413 tl_assert(thr); /* cannot fail - Thread* must already exist */
2415 cvi = map_cond_to_CVInfo_lookup_or_alloc( cond );
2416 tl_assert(cvi);
2417 tl_assert(cvi->so);
2419 // error-if: mutex is bogus
2420 // error-if: mutex is not locked
2421 // Hmm. POSIX doesn't actually say that it's an error to call
2422 // pthread_cond_signal with the associated mutex being unlocked.
2423 // Although it does say that it should be "if consistent scheduling
2424 // is desired." For that reason, print "dubious" if the lock isn't
2425 // held by any thread. Skip the "dubious" if it is held by some
2426 // other thread; that sounds straight-out wrong.
2428 // Anybody who writes code that signals on a CV without holding
2429 // the associated MX needs to be shipped off to a lunatic asylum
2430 // ASAP, even though POSIX doesn't actually declare such behaviour
2431 // illegal -- it makes code extremely difficult to understand/
2432 // reason about. In particular it puts the signalling thread in
2433 // a situation where it is racing against the released waiter
2434 // as soon as the signalling is done, and so there needs to be
2435 // some auxiliary synchronisation mechanism in the program that
2436 // makes this safe -- or the race(s) need to be harmless, or
2437 // probably nonexistent.
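// For instance (client-side sketch; 'mx', 'cv', 'flag' hypothetical):
//
//   pthread_mutex_lock(&mx); flag = 1; pthread_mutex_unlock(&mx);
//   pthread_cond_signal(&cv);  // mx not held here -> "dubious" below
//
// whereas signalling while some *other* thread holds mx is reported
// as an outright error.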
2439 if (1) {
2440 Lock* lk = NULL;
2441 if (cvi->mx_ga != 0) {
2442 lk = map_locks_maybe_lookup( (Addr)cvi->mx_ga );
2444 /* note: lk could be NULL. Be careful. */
2445 if (lk) {
2446 if (lk->kind == LK_rdwr) {
2447 HG_(record_error_Misc)(thr,
2448 "pthread_cond_{signal,broadcast}: associated lock is a rwlock");
2450 if (lk->heldBy == NULL) {
2451 HG_(record_error_Dubious)(thr,
2452 "pthread_cond_{signal,broadcast}: dubious: "
2453 "associated lock is not held by any thread");
2455 if (lk->heldBy != NULL && 0 == VG_(elemBag)(lk->heldBy, (UWord)thr)) {
2456 HG_(record_error_Misc)(thr,
2457 "pthread_cond_{signal,broadcast}: "
2458 "associated lock is not held by calling thread");
2460 } else {
2461 /* Couldn't even find the damn thing. */
2462 // But actually .. that's not necessarily an error. We don't
2463 // know the (CV,MX) binding until a pthread_cond_wait or bcast
2464 // shows us what it is, and that may not have happened yet.
2465 // So just keep quiet in this circumstance.
2466 //HG_(record_error_Misc)( thr,
2467 // "pthread_cond_{signal,broadcast}: "
2468 // "no or invalid mutex associated with cond");
2472 libhb_so_send( thr->hbthr, cvi->so, True/*strong_send*/ );
2475 /* returns True if it reckons 'mutex' is valid and held by this
2476 thread, else False */
2477 static Bool evh__HG_PTHREAD_COND_WAIT_PRE ( ThreadId tid,
2478 void* cond, void* mutex )
2480 Thread* thr;
2481 Lock* lk;
2482 Bool lk_valid = True;
2483 CVInfo* cvi;
2485 if (SHOW_EVENTS >= 1)
2486 VG_(printf)("evh__hg_PTHREAD_COND_WAIT_PRE"
2487 "(ctid=%d, cond=%p, mutex=%p)\n",
2488 (Int)tid, (void*)cond, (void*)mutex );
2490 thr = map_threads_maybe_lookup( tid );
2491 tl_assert(thr); /* cannot fail - Thread* must already exist */
2493 lk = map_locks_maybe_lookup( (Addr)mutex );
2495 /* Check for stupid mutex arguments. There are various ways to be
2496 a bozo. Only complain once, though, even if more than one thing
2497 is wrong. */
2498 if (lk == NULL) {
2499 lk_valid = False;
2500 HG_(record_error_Misc)(
2501 thr,
2502 "pthread_cond_{timed}wait called with invalid mutex" );
2503 } else {
2504 tl_assert( HG_(is_sane_LockN)(lk) );
2505 if (lk->kind == LK_rdwr) {
2506 lk_valid = False;
2507 HG_(record_error_Misc)(
2508 thr, "pthread_cond_{timed}wait called with mutex "
2509 "of type pthread_rwlock_t*" );
2510 } else
2511 if (lk->heldBy == NULL) {
2512 lk_valid = False;
2513 HG_(record_error_Misc)(
2514 thr, "pthread_cond_{timed}wait called with un-held mutex");
2515 } else
2516 if (lk->heldBy != NULL
2517 && VG_(elemBag)( lk->heldBy, (UWord)thr ) == 0) {
2518 lk_valid = False;
2519 HG_(record_error_Misc)(
2520 thr, "pthread_cond_{timed}wait called with mutex "
2521 "held by a different thread" );
2525 // error-if: cond is also associated with a different mutex
2526 cvi = map_cond_to_CVInfo_lookup_or_alloc(cond);
2527 tl_assert(cvi);
2528 tl_assert(cvi->so);
2529 if (cvi->nWaiters == 0) {
2530 /* form initial (CV,MX) binding */
2531 cvi->mx_ga = mutex;
2533 else /* check existing (CV,MX) binding */
2534 if (cvi->mx_ga != mutex) {
2535 HG_(record_error_Misc)(
2536 thr, "pthread_cond_{timed}wait: cond is associated "
2537 "with a different mutex");
2539 cvi->nWaiters++;
2541 return lk_valid;
2544 static void evh__HG_PTHREAD_COND_WAIT_POST ( ThreadId tid,
2545 void* cond, void* mutex,
2546 Bool timeout)
2548 /* A pthread_cond_wait(cond, mutex) completed successfully. Find
2549 the SO for this cond, and 'recv' from it so as to acquire a
2550 dependency edge back to the signaller/broadcaster. */
2551 Thread* thr;
2552 CVInfo* cvi;
2554 if (SHOW_EVENTS >= 1)
2555 VG_(printf)("evh__HG_PTHREAD_COND_WAIT_POST"
2556 "(ctid=%d, cond=%p, mutex=%p)\n, timeout=%d",
2557 (Int)tid, (void*)cond, (void*)mutex, (Int)timeout );
2559 thr = map_threads_maybe_lookup( tid );
2560 tl_assert(thr); /* cannot fail - Thread* must already exist */
2562 // error-if: cond is also associated with a different mutex
2564 cvi = map_cond_to_CVInfo_lookup_NO_alloc( cond );
2565 if (!cvi) {
2566 /* This could be either a bug in helgrind or an error in the guest
2567 application (e.g. the cond var was destroyed by another thread).
2568 Let's assume helgrind is perfect ...
2569 Note that this matches drd's behaviour. */
2570 HG_(record_error_Misc)(thr, "condition variable has been destroyed while"
2571 " being waited upon");
2572 return;
2575 tl_assert(cvi);
2576 tl_assert(cvi->so);
2577 tl_assert(cvi->nWaiters > 0);
2579 if (!timeout && !libhb_so_everSent(cvi->so)) {
2580 /* Hmm. How can a wait on 'cond' succeed if nobody signalled
2581 it? If this happened it would surely be a bug in the threads
2582 library. Or one of those fabled "spurious wakeups". */
2583 HG_(record_error_Misc)( thr, "Bug in libpthread: pthread_cond_wait "
2584 "succeeded"
2585 " without prior pthread_cond_post");
2588 /* anyway, acquire a dependency on it. */
2589 libhb_so_recv( thr->hbthr, cvi->so, True/*strong_recv*/ );
2591 cvi->nWaiters--;
2594 static void evh__HG_PTHREAD_COND_INIT_POST ( ThreadId tid,
2595 void* cond, void* cond_attr )
2597 CVInfo* cvi;
2599 if (SHOW_EVENTS >= 1)
2600 VG_(printf)("evh__HG_PTHREAD_COND_INIT_POST"
2601 "(ctid=%d, cond=%p, cond_attr=%p)\n",
2602 (Int)tid, (void*)cond, (void*) cond_attr );
2604 cvi = map_cond_to_CVInfo_lookup_or_alloc( cond );
2605 tl_assert (cvi);
2606 tl_assert (cvi->so);
2610 static void evh__HG_PTHREAD_COND_DESTROY_PRE ( ThreadId tid,
2611 void* cond, Bool cond_is_init )
2613 /* Deal with destroy events. The only purpose is to free storage
2614 associated with the CV, so as to avoid any possible resource
2615 leaks. */
2616 if (SHOW_EVENTS >= 1)
2617 VG_(printf)("evh__HG_PTHREAD_COND_DESTROY_PRE"
2618 "(ctid=%d, cond=%p, cond_is_init=%d)\n",
2619 (Int)tid, (void*)cond, (Int)cond_is_init );
2621 map_cond_to_CVInfo_delete( tid, cond, cond_is_init );
2625 /* ------------------------------------------------------- */
2626 /* -------------- events to do with rwlocks -------------- */
2627 /* ------------------------------------------------------- */
2629 /* EXPOSITION only */
2630 static
2631 void evh__HG_PTHREAD_RWLOCK_INIT_POST( ThreadId tid, void* rwl )
2633 if (SHOW_EVENTS >= 1)
2634 VG_(printf)("evh__hg_PTHREAD_RWLOCK_INIT_POST(ctid=%d, %p)\n",
2635 (Int)tid, (void*)rwl );
2636 map_locks_lookup_or_create( LK_rdwr, (Addr)rwl, tid );
2637 if (HG_(clo_sanity_flags) & SCE_LOCKS)
2638 all__sanity_check("evh__hg_PTHREAD_RWLOCK_INIT_POST");
2641 static
2642 void evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( ThreadId tid, void* rwl )
2644 Thread* thr;
2645 Lock* lk;
2646 if (SHOW_EVENTS >= 1)
2647 VG_(printf)("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE(ctid=%d, %p)\n",
2648 (Int)tid, (void*)rwl );
2650 thr = map_threads_maybe_lookup( tid );
2651 /* cannot fail - Thread* must already exist */
2652 tl_assert( HG_(is_sane_Thread)(thr) );
2654 lk = map_locks_maybe_lookup( (Addr)rwl );
2656 if (lk == NULL || lk->kind != LK_rdwr) {
2657 HG_(record_error_Misc)(
2658 thr, "pthread_rwlock_destroy with invalid argument" );
2661 if (lk) {
2662 tl_assert( HG_(is_sane_LockN)(lk) );
2663 tl_assert( lk->guestaddr == (Addr)rwl );
2664 if (lk->heldBy) {
2665 /* Basically act like we unlocked the lock */
2666 HG_(record_error_Misc)(
2667 thr, "pthread_rwlock_destroy of a locked mutex" );
2668 /* remove lock from locksets of all owning threads */
2669 remove_Lock_from_locksets_of_all_owning_Threads( lk );
2670 VG_(deleteBag)( lk->heldBy );
2671 lk->heldBy = NULL;
2672 lk->heldW = False;
2673 lk->acquired_at = NULL;
2675 tl_assert( !lk->heldBy );
2676 tl_assert( HG_(is_sane_LockN)(lk) );
2678 if (HG_(clo_track_lockorders))
2679 laog__handle_one_lock_deletion(lk);
2680 map_locks_delete( lk->guestaddr );
2681 del_LockN( lk );
2684 if (HG_(clo_sanity_flags) & SCE_LOCKS)
2685 all__sanity_check("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE");
2688 static
2689 void evh__HG_PTHREAD_RWLOCK_LOCK_PRE ( ThreadId tid,
2690 void* rwl,
2691 Word isW, Word isTryLock )
2693 /* Just check the rwl is sane; nothing else to do. */
2694 // 'rwl' may be invalid - not checked by wrapper
2695 Thread* thr;
2696 Lock* lk;
2697 if (SHOW_EVENTS >= 1)
2698 VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_PRE(ctid=%d, isW=%d, %p)\n",
2699 (Int)tid, (Int)isW, (void*)rwl );
2701 tl_assert(isW == 0 || isW == 1); /* assured us by wrapper */
2702 tl_assert(isTryLock == 0 || isTryLock == 1); /* assured us by wrapper */
2703 thr = map_threads_maybe_lookup( tid );
2704 tl_assert(thr); /* cannot fail - Thread* must already exist */
2706 lk = map_locks_maybe_lookup( (Addr)rwl );
2707 if ( lk
2708 && (lk->kind == LK_nonRec || lk->kind == LK_mbRec) ) {
2709 /* Wrong kind of lock. Duh. */
2710 HG_(record_error_Misc)(
2711 thr, "pthread_rwlock_{rd,rw}lock with a "
2712 "pthread_mutex_t* argument " );
2716 static
2717 void evh__HG_PTHREAD_RWLOCK_LOCK_POST ( ThreadId tid, void* rwl, Word isW )
2719 // only called if the real library call succeeded - so mutex is sane
2720 Thread* thr;
2721 if (SHOW_EVENTS >= 1)
2722 VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_POST(ctid=%d, isW=%d, %p)\n",
2723 (Int)tid, (Int)isW, (void*)rwl );
2725 tl_assert(isW == 0 || isW == 1); /* assured us by wrapper */
2726 thr = map_threads_maybe_lookup( tid );
2727 tl_assert(thr); /* cannot fail - Thread* must already exist */
2729 (isW ? evhH__post_thread_w_acquires_lock
2730 : evhH__post_thread_r_acquires_lock)(
2731 thr,
2732 LK_rdwr, /* if not known, create new lock with this LockKind */
2733 (Addr)rwl
2737 static void evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE ( ThreadId tid, void* rwl )
2739 // 'rwl' may be invalid - not checked by wrapper
2740 Thread* thr;
2741 if (SHOW_EVENTS >= 1)
2742 VG_(printf)("evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE(ctid=%d, rwl=%p)\n",
2743 (Int)tid, (void*)rwl );
2745 thr = map_threads_maybe_lookup( tid );
2746 tl_assert(thr); /* cannot fail - Thread* must already exist */
2748 evhH__pre_thread_releases_lock( thr, (Addr)rwl, True/*isRDWR*/ );
2751 static void evh__HG_PTHREAD_RWLOCK_UNLOCK_POST ( ThreadId tid, void* rwl )
2753 // only called if the real library call succeeded - so mutex is sane
2754 Thread* thr;
2755 if (SHOW_EVENTS >= 1)
2756 VG_(printf)("evh__hg_PTHREAD_RWLOCK_UNLOCK_POST(ctid=%d, rwl=%p)\n",
2757 (Int)tid, (void*)rwl );
2758 thr = map_threads_maybe_lookup( tid );
2759 tl_assert(thr); /* cannot fail - Thread* must already exist */
2761 // anything we should do here?
2765 /* ---------------------------------------------------------- */
2766 /* -------------- events to do with semaphores -------------- */
2767 /* ---------------------------------------------------------- */
2769 /* This is similar to but not identical to the handling for condition
2770 variables. */
2772 /* For each semaphore, we maintain a stack of SOs. When a 'post'
2773 operation is done on a semaphore (unlocking, essentially), a new SO
2774 is created for the posting thread, the posting thread does a strong
2775 send to it (which merely installs the posting thread's VC in the
2776 SO), and the SO is pushed on the semaphore's stack.
2778 Later, when a (probably different) thread completes 'wait' on the
2779 semaphore, we pop a SO off the semaphore's stack (which should be
2780 nonempty), and do a strong recv from it. This mechanism creates
2781 dependencies between posters and waiters of the semaphore.
2783 It may not be necessary to use a stack - perhaps a bag of SOs would
2784 do. But we do need to keep track of how many not-yet-consumed posts
2785 have happened for the semaphore.
2787 Imagine T1 and T2 both post once on a semaphore S, and T3 waits
2788 twice on S. T3 cannot complete its waits without both T1 and T2
2789 posting. The above mechanism will ensure that T3 acquires
2790 dependencies on both T1 and T2.
2792 When a semaphore is initialised with value N, we behave as if we'd
2793 posted N times on the semaphore: basically create N SOs and do a
2794 strong send to all of them. This allows up to N waits on the
2795 semaphore to acquire a dependency on the initialisation point,
2796 which AFAICS is the correct behaviour.
2798 We don't emit an error for DESTROY_PRE on a semaphore we don't know
2799 about. We should.
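/* A minimal client-side sketch of the post/wait pattern this models
   ('s' and 'data' are hypothetical):

     sem_t s;                      // initialised with value 0
     // poster:                    // waiter:
     data = 42;                    sem_wait(&s);  // strong recv from popped SO
     sem_post(&s);  // push an SO  use(data);     // ordered after the post

   Each sem_post pushes a freshly 'sent' SO onto the semaphore's stack and
   each completed sem_wait pops one and does a strong recv, so the waiter
   acquires a dependency on one of the posters. */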
2802 /* sem_t* -> XArray* SO* */
2803 static WordFM* map_sem_to_SO_stack = NULL;
2805 static void map_sem_to_SO_stack_INIT ( void ) {
2806 if (map_sem_to_SO_stack == NULL) {
2807 map_sem_to_SO_stack = VG_(newFM)( HG_(zalloc), "hg.mstSs.1",
2808 HG_(free), NULL );
2812 static void push_SO_for_sem ( void* sem, SO* so ) {
2813 UWord keyW;
2814 XArray* xa;
2815 tl_assert(so);
2816 map_sem_to_SO_stack_INIT();
2817 if (VG_(lookupFM)( map_sem_to_SO_stack,
2818 &keyW, (UWord*)&xa, (UWord)sem )) {
2819 tl_assert(keyW == (UWord)sem);
2820 tl_assert(xa);
2821 VG_(addToXA)( xa, &so );
2822 } else {
2823 xa = VG_(newXA)( HG_(zalloc), "hg.pSfs.1", HG_(free), sizeof(SO*) );
2824 VG_(addToXA)( xa, &so );
2825 VG_(addToFM)( map_sem_to_SO_stack, (UWord)sem, (UWord)xa );
2829 static SO* mb_pop_SO_for_sem ( void* sem ) {
2830 UWord keyW;
2831 XArray* xa;
2832 SO* so;
2833 map_sem_to_SO_stack_INIT();
2834 if (VG_(lookupFM)( map_sem_to_SO_stack,
2835 &keyW, (UWord*)&xa, (UWord)sem )) {
2836 /* xa is the stack for this semaphore. */
2837 Word sz;
2838 tl_assert(keyW == (UWord)sem);
2839 sz = VG_(sizeXA)( xa );
2840 tl_assert(sz >= 0);
2841 if (sz == 0)
2842 return NULL; /* odd, the stack is empty */
2843 so = *(SO**)VG_(indexXA)( xa, sz-1 );
2844 tl_assert(so);
2845 VG_(dropTailXA)( xa, 1 );
2846 return so;
2847 } else {
2848 /* hmm, that's odd. No stack for this semaphore. */
2849 return NULL;
2853 static void evh__HG_POSIX_SEM_DESTROY_PRE ( ThreadId tid, void* sem )
2855 UWord keyW, valW;
2856 SO* so;
2858 if (SHOW_EVENTS >= 1)
2859 VG_(printf)("evh__HG_POSIX_SEM_DESTROY_PRE(ctid=%d, sem=%p)\n",
2860 (Int)tid, (void*)sem );
2862 map_sem_to_SO_stack_INIT();
2864 /* Empty out the semaphore's SO stack. This way of doing it is
2865 stupid, but at least it's easy. */
2866 while (1) {
2867 so = mb_pop_SO_for_sem( sem );
2868 if (!so) break;
2869 libhb_so_dealloc(so);
2872 if (VG_(delFromFM)( map_sem_to_SO_stack, &keyW, &valW, (UWord)sem )) {
2873 XArray* xa = (XArray*)valW;
2874 tl_assert(keyW == (UWord)sem);
2875 tl_assert(xa);
2876 tl_assert(VG_(sizeXA)(xa) == 0); /* preceding loop just emptied it */
2877 VG_(deleteXA)(xa);
2881 static
2882 void evh__HG_POSIX_SEM_INIT_POST ( ThreadId tid, void* sem, UWord value )
2884 SO* so;
2885 Thread* thr;
2887 if (SHOW_EVENTS >= 1)
2888 VG_(printf)("evh__HG_POSIX_SEM_INIT_POST(ctid=%d, sem=%p, value=%lu)\n",
2889 (Int)tid, (void*)sem, value );
2891 thr = map_threads_maybe_lookup( tid );
2892 tl_assert(thr); /* cannot fail - Thread* must already exist */
2894 /* Empty out the semaphore's SO stack. This way of doing it is
2895 stupid, but at least it's easy. */
2896 while (1) {
2897 so = mb_pop_SO_for_sem( sem );
2898 if (!so) break;
2899 libhb_so_dealloc(so);
2902 /* If we don't do this check, the following loop runs us out
2903 of memory for stupid initial values of 'value'. */
2904 if (value > 10000) {
2905 HG_(record_error_Misc)(
2906 thr, "sem_init: initial value exceeds 10000; using 10000" );
2907 value = 10000;
2910 /* Now create 'valid' new SOs for the thread, do a strong send to
2911 each of them, and push them all on the stack. */
2912 for (; value > 0; value--) {
2913 Thr* hbthr = thr->hbthr;
2914 tl_assert(hbthr);
2916 so = libhb_so_alloc();
2917 libhb_so_send( hbthr, so, True/*strong send*/ );
2918 push_SO_for_sem( sem, so );
2922 static void evh__HG_POSIX_SEM_POST_PRE ( ThreadId tid, void* sem )
2924 /* 'tid' has posted on 'sem'. Create a new SO, do a strong send to
2925 it (iow, write our VC into it, then tick ours), and push the SO
2926 on on a stack of SOs associated with 'sem'. This is later used
2927 by other thread(s) which successfully exit from a sem_wait on
2928 the same sem; by doing a strong recv from SOs popped of the
2929 stack, they acquire dependencies on the posting thread
2930 segment(s). */
2932 Thread* thr;
2933 SO* so;
2934 Thr* hbthr;
2936 if (SHOW_EVENTS >= 1)
2937 VG_(printf)("evh__HG_POSIX_SEM_POST_PRE(ctid=%d, sem=%p)\n",
2938 (Int)tid, (void*)sem );
2940 thr = map_threads_maybe_lookup( tid );
2941 tl_assert(thr); /* cannot fail - Thread* must already exist */
2943 // error-if: sem is bogus
2945 hbthr = thr->hbthr;
2946 tl_assert(hbthr);
2948 so = libhb_so_alloc();
2949 libhb_so_send( hbthr, so, True/*strong send*/ );
2950 push_SO_for_sem( sem, so );
2953 static void evh__HG_POSIX_SEM_WAIT_POST ( ThreadId tid, void* sem )
2955 /* A sem_wait(sem) completed successfully. Pop the posting-SO for
2956 the 'sem' from this semaphore's SO-stack, and do a strong recv
2957 from it. This creates a dependency back to one of the post-ers
2958 for the semaphore. */
2960 Thread* thr;
2961 SO* so;
2962 Thr* hbthr;
2964 if (SHOW_EVENTS >= 1)
2965 VG_(printf)("evh__HG_POSIX_SEM_WAIT_POST(ctid=%d, sem=%p)\n",
2966 (Int)tid, (void*)sem );
2968 thr = map_threads_maybe_lookup( tid );
2969 tl_assert(thr); /* cannot fail - Thread* must already exist */
2971 // error-if: sem is bogus
2973 so = mb_pop_SO_for_sem( sem );
2975 if (so) {
2976 hbthr = thr->hbthr;
2977 tl_assert(hbthr);
2979 libhb_so_recv( hbthr, so, True/*strong recv*/ );
2980 libhb_so_dealloc(so);
2981 } else {
2982 /* Hmm. How can a wait on 'sem' succeed if nobody posted to it?
2983 If this happened it would surely be a bug in the threads
2984 library. */
2985 HG_(record_error_Misc)(
2986 thr, "Bug in libpthread: sem_wait succeeded on"
2987 " semaphore without prior sem_post");
2992 /* -------------------------------------------------------- */
2993 /* -------------- events to do with barriers -------------- */
2994 /* -------------------------------------------------------- */
2996 typedef
2997 struct {
2998 Bool initted; /* has it yet been initted by guest? */
2999 Bool resizable; /* is resizing allowed? */
3000 UWord size; /* declared size */
3001 XArray* waiting; /* XA of Thread*. # present is 0 .. .size */
3003 Bar;
3005 static Bar* new_Bar ( void ) {
3006 Bar* bar = HG_(zalloc)( "hg.nB.1 (new_Bar)", sizeof(Bar) );
3007 /* all fields are zero */
3008 tl_assert(bar->initted == False);
3009 return bar;
3012 static void delete_Bar ( Bar* bar ) {
3013 tl_assert(bar);
3014 if (bar->waiting)
3015 VG_(deleteXA)(bar->waiting);
3016 HG_(free)(bar);
3019 /* A mapping which stores auxiliary data for barriers. */
3021 /* pthread_barrier_t* -> Bar* */
3022 static WordFM* map_barrier_to_Bar = NULL;
3024 static void map_barrier_to_Bar_INIT ( void ) {
3025 if (UNLIKELY(map_barrier_to_Bar == NULL)) {
3026 map_barrier_to_Bar = VG_(newFM)( HG_(zalloc),
3027 "hg.mbtBI.1", HG_(free), NULL );
3031 static Bar* map_barrier_to_Bar_lookup_or_alloc ( void* barrier ) {
3032 UWord key, val;
3033 map_barrier_to_Bar_INIT();
3034 if (VG_(lookupFM)( map_barrier_to_Bar, &key, &val, (UWord)barrier )) {
3035 tl_assert(key == (UWord)barrier);
3036 return (Bar*)val;
3037 } else {
3038 Bar* bar = new_Bar();
3039 VG_(addToFM)( map_barrier_to_Bar, (UWord)barrier, (UWord)bar );
3040 return bar;
3044 static void map_barrier_to_Bar_delete ( void* barrier ) {
3045 UWord keyW, valW;
3046 map_barrier_to_Bar_INIT();
3047 if (VG_(delFromFM)( map_barrier_to_Bar, &keyW, &valW, (UWord)barrier )) {
3048 Bar* bar = (Bar*)valW;
3049 tl_assert(keyW == (UWord)barrier);
3050 delete_Bar(bar);
3055 static void evh__HG_PTHREAD_BARRIER_INIT_PRE ( ThreadId tid,
3056 void* barrier,
3057 UWord count,
3058 UWord resizable )
3060 Thread* thr;
3061 Bar* bar;
3063 if (SHOW_EVENTS >= 1)
3064 VG_(printf)("evh__HG_PTHREAD_BARRIER_INIT_PRE"
3065 "(tid=%d, barrier=%p, count=%lu, resizable=%lu)\n",
3066 (Int)tid, (void*)barrier, count, resizable );
3068 thr = map_threads_maybe_lookup( tid );
3069 tl_assert(thr); /* cannot fail - Thread* must already exist */
3071 if (count == 0) {
3072 HG_(record_error_Misc)(
3073 thr, "pthread_barrier_init: 'count' argument is zero"
3077 if (resizable != 0 && resizable != 1) {
3078 HG_(record_error_Misc)(
3079 thr, "pthread_barrier_init: invalid 'resizable' argument"
3083 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
3084 tl_assert(bar);
3086 if (bar->initted) {
3087 HG_(record_error_Misc)(
3088 thr, "pthread_barrier_init: barrier is already initialised"
3092 if (bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
3093 tl_assert(bar->initted);
3094 HG_(record_error_Misc)(
3095 thr, "pthread_barrier_init: threads are waiting at barrier"
3097 VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));
3099 if (!bar->waiting) {
3100 bar->waiting = VG_(newXA)( HG_(zalloc), "hg.eHPBIP.1", HG_(free),
3101 sizeof(Thread*) );
3104 tl_assert(VG_(sizeXA)(bar->waiting) == 0);
3105 bar->initted = True;
3106 bar->resizable = resizable == 1 ? True : False;
3107 bar->size = count;
3111 static void evh__HG_PTHREAD_BARRIER_DESTROY_PRE ( ThreadId tid,
3112 void* barrier )
3114 Thread* thr;
3115 Bar* bar;
3117 /* Deal with destroy events. The only purpose is to free storage
3118 associated with the barrier, so as to avoid any possible
3119 resource leaks. */
3120 if (SHOW_EVENTS >= 1)
3121 VG_(printf)("evh__HG_PTHREAD_BARRIER_DESTROY_PRE"
3122 "(tid=%d, barrier=%p)\n",
3123 (Int)tid, (void*)barrier );
3125 thr = map_threads_maybe_lookup( tid );
3126 tl_assert(thr); /* cannot fail - Thread* must already exist */
3128 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
3129 tl_assert(bar);
3131 if (!bar->initted) {
3132 HG_(record_error_Misc)(
3133 thr, "pthread_barrier_destroy: barrier was never initialised"
3137 if (bar->initted && bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
3138 HG_(record_error_Misc)(
3139 thr, "pthread_barrier_destroy: threads are waiting at barrier"
3143 /* Maybe we shouldn't do this; just let it persist, so that when it
3144 is reinitialised we don't need to do any dynamic memory
3145 allocation? The downside is a potentially unlimited space leak,
3146 if the client creates (in turn) a large number of barriers all
3147 at different locations. Note that if we do later move to the
3148 don't-delete-it scheme, we need to mark the barrier as
3149 uninitialised again since otherwise a later _init call will
3150 elicit a duplicate-init error. */
3151 map_barrier_to_Bar_delete( barrier );
3155 /* All the threads have arrived. Now do the Interesting Bit. Get a
3156 new synchronisation object and do a weak send to it from all the
3157 participating threads. This makes its vector clocks be the join of
3158 all the individual threads' vector clocks. Then do a strong
3159 receive from it back to all threads, so that their VCs are a copy
3160 of it (hence are all equal to the join of their original VCs). */
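/* For example (hypothetical clock values): with two waiters whose vector
   clocks are [T1:5, T2:1] and [T1:2, T2:7], the two weak sends leave the
   SO holding the join [T1:5, T2:7], and the strong recvs then leave both
   threads with exactly that joined clock. */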
3161 static void do_barrier_cross_sync_and_empty ( Bar* bar )
3163 /* XXX check bar->waiting has no duplicates */
3164 UWord i;
3165 SO* so = libhb_so_alloc();
3167 tl_assert(bar->waiting);
3168 tl_assert(VG_(sizeXA)(bar->waiting) == bar->size);
3170 /* compute the join ... */
3171 for (i = 0; i < bar->size; i++) {
3172 Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
3173 Thr* hbthr = t->hbthr;
3174 libhb_so_send( hbthr, so, False/*weak send*/ );
3176 /* ... and distribute to all threads */
3177 for (i = 0; i < bar->size; i++) {
3178 Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
3179 Thr* hbthr = t->hbthr;
3180 libhb_so_recv( hbthr, so, True/*strong recv*/ );
3183 /* finally, we must empty out the waiting vector */
3184 VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));
3186 /* and we don't need this any more. Perhaps a stack-allocated
3187 SO would be better? */
3188 libhb_so_dealloc(so);
3192 static void evh__HG_PTHREAD_BARRIER_WAIT_PRE ( ThreadId tid,
3193 void* barrier )
3195 /* This function gets called after a client thread calls
3196 pthread_barrier_wait but before it arrives at the real
3197 pthread_barrier_wait.
3199 Why is the following correct? It's a bit subtle.
3201 If this is not the last thread arriving at the barrier, we simply
3202 note its presence and return. Because valgrind (at least as of
3203 Nov 08) is single threaded, we are guaranteed safe from any race
3204 conditions when in this function -- no other client threads are
3205 running.
3207 If this is the last thread, then we are again the only running
3208 thread. All the other threads will have either arrived at the
3209 real pthread_barrier_wait or are on their way to it, but in any
3210 case are guaranteed not to be able to move past it, because this
3211 thread is currently in this function and so has not yet arrived
3212 at the real pthread_barrier_wait. That means that:
3214 1. While we are in this function, none of the other threads
3215 waiting at the barrier can move past it.
3217 2. When this function returns (and simulated execution resumes),
3218 this thread and all other waiting threads will be able to move
3219 past the real barrier.
3221 Because of this, it is now safe to update the vector clocks of
3222 all threads, to represent the fact that they all arrived at the
3223 barrier and have all moved on. There is no danger of any
3224 complications to do with some threads leaving the barrier and
3225 racing back round to the front, whilst others are still leaving
3226 (which is the primary source of complication in correct handling/
3227 implementation of barriers). That can't happen because we update
3228 here our data structures so as to indicate that the threads have
3229 passed the barrier, even though, as per (2) above, they are
3230 guaranteed not to pass the barrier until we return.
3232 This relies crucially on Valgrind being single threaded. If that
3233 changes, this will need to be reconsidered.
3235 Thread* thr;
3236 Bar* bar;
3237 UWord present;
3239 if (SHOW_EVENTS >= 1)
3240 VG_(printf)("evh__HG_PTHREAD_BARRIER_WAIT_PRE"
3241 "(tid=%d, barrier=%p)\n",
3242 (Int)tid, (void*)barrier );
3244 thr = map_threads_maybe_lookup( tid );
3245 tl_assert(thr); /* cannot fail - Thread* must already exist */
3247 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
3248 tl_assert(bar);
3250 if (!bar->initted) {
3251 HG_(record_error_Misc)(
3252 thr, "pthread_barrier_wait: barrier is uninitialised"
3254 return; /* client is broken .. avoid assertions below */
3257 /* guaranteed by _INIT_PRE above */
3258 tl_assert(bar->size > 0);
3259 tl_assert(bar->waiting);
3261 VG_(addToXA)( bar->waiting, &thr );
3263 /* guaranteed by this function */
3264 present = VG_(sizeXA)(bar->waiting);
3265 tl_assert(present > 0 && present <= bar->size);
3267 if (present < bar->size)
3268 return;
3270 do_barrier_cross_sync_and_empty(bar);
3274 static void evh__HG_PTHREAD_BARRIER_RESIZE_PRE ( ThreadId tid,
3275 void* barrier,
3276 UWord newcount )
3278 Thread* thr;
3279 Bar* bar;
3280 UWord present;
3282 if (SHOW_EVENTS >= 1)
3283 VG_(printf)("evh__HG_PTHREAD_BARRIER_RESIZE_PRE"
3284 "(tid=%d, barrier=%p, newcount=%lu)\n",
3285 (Int)tid, (void*)barrier, newcount );
3287 thr = map_threads_maybe_lookup( tid );
3288 tl_assert(thr); /* cannot fail - Thread* must already exist */
3290 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
3291 tl_assert(bar);
3293 if (!bar->initted) {
3294 HG_(record_error_Misc)(
3295 thr, "pthread_barrier_resize: barrier is uninitialised"
3297 return; /* client is broken .. avoid assertions below */
3300 if (!bar->resizable) {
3301 HG_(record_error_Misc)(
3302 thr, "pthread_barrier_resize: barrier is may not be resized"
3304 return; /* client is broken .. avoid assertions below */
3307 if (newcount == 0) {
3308 HG_(record_error_Misc)(
3309 thr, "pthread_barrier_resize: 'newcount' argument is zero"
3311 return; /* client is broken .. avoid assertions below */
3314 /* guaranteed by _INIT_PRE above */
3315 tl_assert(bar->size > 0);
3316 tl_assert(bar->waiting);
3317 /* Guaranteed by this fn */
3318 tl_assert(newcount > 0);
3320 if (newcount >= bar->size) {
3321 /* Increasing the capacity. There's no possibility of threads
3322 moving on from the barrier in this situation, so just note
3323 the fact and do nothing more. */
3324 bar->size = newcount;
3325 } else {
3326 /* Decreasing the capacity. If we decrease it to be equal or
3327 below the number of waiting threads, they will now move past
3328 the barrier, so need to mess with dep edges in the same way
3329 as if the barrier had filled up normally. */
3330 present = VG_(sizeXA)(bar->waiting);
3331 tl_assert(present <= bar->size);
3332 if (newcount <= present) {
3333 bar->size = present; /* keep the cross_sync call happy */
3334 do_barrier_cross_sync_and_empty(bar);
3336 bar->size = newcount;
3341 /* ----------------------------------------------------- */
3342 /* ----- events to do with user-specified HB edges ----- */
3343 /* ----------------------------------------------------- */
3345 /* A mapping from arbitrary UWord tag to the SO associated with it.
3346 The UWord tags are meaningless to us, interpreted only by the
3347 user. */
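/* A minimal client-side sketch, assuming the ANNOTATE_HAPPENS_BEFORE /
   ANNOTATE_HAPPENS_AFTER macros from helgrind.h (which issue the
   _SEND_PRE / _RECV_POST requests handled below); 'ready' and 'data'
   are hypothetical:

     // producer:                       // consumer:
     data = compute();                  while (!atomic_load(&ready)) ;
     ANNOTATE_HAPPENS_BEFORE(&data);    ANNOTATE_HAPPENS_AFTER(&data);
     atomic_store(&ready, 1);           use(data);

   The address &data is the arbitrary UWord tag; the 'weak send' joins the
   producer's clock into the tag's SO and the 'strong recv' gives the
   consumer a dependency on it. */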
3351 /* UWord -> SO* */
3352 static WordFM* map_usertag_to_SO = NULL;
3354 static void map_usertag_to_SO_INIT ( void ) {
3355 if (UNLIKELY(map_usertag_to_SO == NULL)) {
3356 map_usertag_to_SO = VG_(newFM)( HG_(zalloc),
3357 "hg.mutS.1", HG_(free), NULL );
3361 static SO* map_usertag_to_SO_lookup_or_alloc ( UWord usertag ) {
3362 UWord key, val;
3363 map_usertag_to_SO_INIT();
3364 if (VG_(lookupFM)( map_usertag_to_SO, &key, &val, usertag )) {
3365 tl_assert(key == (UWord)usertag);
3366 return (SO*)val;
3367 } else {
3368 SO* so = libhb_so_alloc();
3369 VG_(addToFM)( map_usertag_to_SO, usertag, (UWord)so );
3370 return so;
3374 static void map_usertag_to_SO_delete ( UWord usertag ) {
3375 UWord keyW, valW;
3376 map_usertag_to_SO_INIT();
3377 if (VG_(delFromFM)( map_usertag_to_SO, &keyW, &valW, usertag )) {
3378 SO* so = (SO*)valW;
3379 tl_assert(keyW == usertag);
3380 tl_assert(so);
3381 libhb_so_dealloc(so);
3386 static
3387 void evh__HG_USERSO_SEND_PRE ( ThreadId tid, UWord usertag )
3389 /* TID is just about to notionally send a message on a notional
3390 abstract synchronisation object whose identity is given by
3391 USERTAG. Bind USERTAG to a real SO if it is not already so
3392 bound, and do a 'weak send' on the SO. This joins the vector
3393 clocks from this thread into any vector clocks already present
3394 in the SO. The resulting SO vector clocks are later used by
3395 other thread(s) which successfully 'receive' from the SO,
3396 thereby acquiring a dependency on all the events that have
3397 previously signalled on this SO. */
3398 Thread* thr;
3399 SO* so;
3401 if (SHOW_EVENTS >= 1)
3402 VG_(printf)("evh__HG_USERSO_SEND_PRE(ctid=%d, usertag=%#lx)\n",
3403 (Int)tid, usertag );
3405 thr = map_threads_maybe_lookup( tid );
3406 tl_assert(thr); /* cannot fail - Thread* must already exist */
3408 so = map_usertag_to_SO_lookup_or_alloc( usertag );
3409 tl_assert(so);
3411 libhb_so_send( thr->hbthr, so, False/*!strong_send*/ );
3414 static
3415 void evh__HG_USERSO_RECV_POST ( ThreadId tid, UWord usertag )
3417 /* TID has just notionally received a message from a notional
3418 abstract synchronisation object whose identity is given by
3419 USERTAG. Bind USERTAG to a real SO if it is not already so
3420 bound. If the SO has at some point in the past been 'sent' on,
3421 do a 'strong receive' on it, thereby acquiring a dependency on
3422 the sender. */
3423 Thread* thr;
3424 SO* so;
3426 if (SHOW_EVENTS >= 1)
3427 VG_(printf)("evh__HG_USERSO_RECV_POST(ctid=%d, usertag=%#lx)\n",
3428 (Int)tid, usertag );
3430 thr = map_threads_maybe_lookup( tid );
3431 tl_assert(thr); /* cannot fail - Thread* must already exist */
3433 so = map_usertag_to_SO_lookup_or_alloc( usertag );
3434 tl_assert(so);
3436 /* Acquire a dependency on it. If the SO has never so far been
3437 sent on, then libhb_so_recv will do nothing. So we're safe
3438 regardless of SO's history. */
3439 libhb_so_recv( thr->hbthr, so, True/*strong_recv*/ );
3442 static
3443 void evh__HG_USERSO_FORGET_ALL ( ThreadId tid, UWord usertag )
3445 /* TID declares that any happens-before edges notionally stored in
3446 USERTAG can be deleted. If (as would normally be the case) a
3447 SO is associated with USERTAG, then the association is removed
3448 and all resources associated with SO are freed. Importantly,
3449 that frees up any VTSs stored in SO. */
3450 if (SHOW_EVENTS >= 1)
3451 VG_(printf)("evh__HG_USERSO_FORGET_ALL(ctid=%d, usertag=%#lx)\n",
3452 (Int)tid, usertag );
3454 map_usertag_to_SO_delete( usertag );
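/* Editorial sketch (not part of this file, excluded from compilation):
   how client code typically reaches the three handlers above.  The
   ANNOTATE_HAPPENS_BEFORE / ANNOTATE_HAPPENS_AFTER /
   ANNOTATE_HAPPENS_BEFORE_FORGET_ALL macros from helgrind.h expand to
   the corresponding _VG_USERREQ__HG_USERSO_* client requests.  The
   flag-passing protocol below is an invented example; the flag itself
   would normally also need to be an atomic (or be marked as a benign
   race) for Helgrind to be completely quiet about it. */
#if 0
#include <pthread.h>
#include <stdio.h>
#include "helgrind.h"

static int payload;                /* handed from worker to main        */
static volatile int ready = 0;     /* lock-free "message sent" flag     */

static void* worker ( void* arg )
{
   payload = 42;
   ANNOTATE_HAPPENS_BEFORE(&ready);   /* -> evh__HG_USERSO_SEND_PRE     */
   ready = 1;
   return NULL;
}

int main ( void )
{
   pthread_t th;
   pthread_create(&th, NULL, worker, NULL);
   while (!ready) ;                   /* spin until the flag is set     */
   ANNOTATE_HAPPENS_AFTER(&ready);    /* -> evh__HG_USERSO_RECV_POST    */
   printf("payload = %d\n", payload);
   pthread_join(th, NULL);
   ANNOTATE_HAPPENS_BEFORE_FORGET_ALL(&ready); /* -> ..._FORGET_ALL     */
   return 0;
}
#endif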
3458 #if defined(VGO_solaris)
3459 /* ----------------------------------------------------- */
3460 /* --- events to do with bind guard/clear intercepts --- */
3461 /* ----------------------------------------------------- */
3463 static
3464 void evh__HG_RTLD_BIND_GUARD(ThreadId tid, Int flags)
3466 if (SHOW_EVENTS >= 1)
3467 VG_(printf)("evh__HG_RTLD_BIND_GUARD"
3468 "(tid=%d, flags=%d)\n",
3469 (Int)tid, flags);
3471 Thread *thr = map_threads_maybe_lookup(tid);
3472 tl_assert(thr != NULL);
3474 Int bindflag = (flags & VKI_THR_FLG_RTLD);
3475 if ((bindflag & thr->bind_guard_flag) == 0) {
3476 thr->bind_guard_flag |= bindflag;
3477 HG_(thread_enter_synchr)(thr);
3478 /* Misuse pthread_create_nesting_level for ignoring mutex activity. */
3479 HG_(thread_enter_pthread_create)(thr);
3483 static
3484 void evh__HG_RTLD_BIND_CLEAR(ThreadId tid, Int flags)
3486 if (SHOW_EVENTS >= 1)
3487 VG_(printf)("evh__HG_RTLD_BIND_CLEAR"
3488 "(tid=%d, flags=%d)\n",
3489 (Int)tid, flags);
3491 Thread *thr = map_threads_maybe_lookup(tid);
3492 tl_assert(thr != NULL);
3494 Int bindflag = (flags & VKI_THR_FLG_RTLD);
3495 if ((thr->bind_guard_flag & bindflag) != 0) {
3496 thr->bind_guard_flag &= ~bindflag;
3497 HG_(thread_leave_synchr)(thr);
3498 HG_(thread_leave_pthread_create)(thr);
3501 #endif /* VGO_solaris */
3504 /*--------------------------------------------------------------*/
3505 /*--- Lock acquisition order monitoring ---*/
3506 /*--------------------------------------------------------------*/
3508 /* FIXME: here are some optimisations still to do in
3509 laog__pre_thread_acquires_lock.
3511 The graph is structured so that if L1 --*--> L2 then L1 must be
3512 acquired before L2.
3514 The common case is that some thread T holds (eg) L1 L2 and L3 and
3515 is repeatedly acquiring and releasing Ln, and there is no ordering
3516 error in what it is doing. Hence it repeatedly:
3518 (1) searches laog to see if Ln --*--> {L1,L2,L3}, which always
3519 produces the answer No (because there is no error).
3521 (2) adds edges {L1,L2,L3} --> Ln to laog, which are already present
3522 (because they already got added the first time T acquired Ln).
3524 Hence cache these two events:
3526 (1) Cache result of the query from last time. Invalidate the cache
3527 any time any edges are added to or deleted from laog.
3529 (2) Cache these add-edge requests and ignore them if said edges
3530 have already been added to laog. Invalidate the cache any time
3531 any edges are deleted from laog.
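/* Editorial sketch (not part of this file, excluded from compilation):
   one possible shape for cache (1) described in the FIXME above.  The
   names laog_dfs_cache, laog_dfs_cache_invalidate and
   laog__do_dfs_from_to_cached are invented for illustration;
   laog__do_dfs_from_to itself is defined further down in this file.
   WordSetIDs are canonical within a universe, so comparing them with
   '==' compares the sets themselves. */
#if 0
static struct {
   Bool      valid;
   Lock*     src;      /* the lock about to be acquired            */
   WordSetID dsts;     /* the lockset it was checked against       */
   Lock*     answer;   /* cached result of laog__do_dfs_from_to    */
} laog_dfs_cache;

/* To be called whenever an edge is added to or deleted from laog. */
static void laog_dfs_cache_invalidate ( void ) {
   laog_dfs_cache.valid = False;
}

static Lock* laog__do_dfs_from_to_cached ( Lock* src, WordSetID dsts ) {
   if (laog_dfs_cache.valid
       && laog_dfs_cache.src == src
       && laog_dfs_cache.dsts == dsts)
      return laog_dfs_cache.answer;
   laog_dfs_cache.answer = laog__do_dfs_from_to( src, dsts );
   laog_dfs_cache.src    = src;
   laog_dfs_cache.dsts   = dsts;
   laog_dfs_cache.valid  = True;
   return laog_dfs_cache.answer;
}
#endif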
3534 typedef
3535 struct {
3536 WordSetID inns; /* in univ_laog */
3537 WordSetID outs; /* in univ_laog */
3539 LAOGLinks;
3541 /* lock order acquisition graph */
3542 static WordFM* laog = NULL; /* WordFM Lock* LAOGLinks* */
3544 /* EXPOSITION ONLY: for each edge in 'laog', record the two places
3545 where that edge was created, so that we can show the user later if
3546 we need to. */
3547 typedef
3548 struct {
3549 Addr src_ga; /* Lock guest addresses for */
3550 Addr dst_ga; /* src/dst of the edge */
3551 ExeContext* src_ec; /* And corresponding places where that */
3552 ExeContext* dst_ec; /* ordering was established */
3554 LAOGLinkExposition;
3556 static Word cmp_LAOGLinkExposition ( UWord llx1W, UWord llx2W ) {
3557 /* Compare LAOGLinkExposition*s by (src_ga,dst_ga) field pair. */
3558 LAOGLinkExposition* llx1 = (LAOGLinkExposition*)llx1W;
3559 LAOGLinkExposition* llx2 = (LAOGLinkExposition*)llx2W;
3560 if (llx1->src_ga < llx2->src_ga) return -1;
3561 if (llx1->src_ga > llx2->src_ga) return 1;
3562 if (llx1->dst_ga < llx2->dst_ga) return -1;
3563 if (llx1->dst_ga > llx2->dst_ga) return 1;
3564 return 0;
3567 static WordFM* laog_exposition = NULL; /* WordFM LAOGLinkExposition* NULL */
3568 /* end EXPOSITION ONLY */
3571 __attribute__((noinline))
3572 static void laog__init ( void )
3574 tl_assert(!laog);
3575 tl_assert(!laog_exposition);
3576 tl_assert(HG_(clo_track_lockorders));
3578 laog = VG_(newFM)( HG_(zalloc), "hg.laog__init.1",
3579 HG_(free), NULL/*unboxedcmp*/ );
3581 laog_exposition = VG_(newFM)( HG_(zalloc), "hg.laog__init.2", HG_(free),
3582 cmp_LAOGLinkExposition );
3585 static void laog__show ( const HChar* who ) {
3586 UWord i, ws_size;
3587 UWord* ws_words;
3588 Lock* me;
3589 LAOGLinks* links;
3590 VG_(printf)("laog (requested by %s) {\n", who);
3591 VG_(initIterFM)( laog );
3592 me = NULL;
3593 links = NULL;
3594 while (VG_(nextIterFM)( laog, (UWord*)&me,
3595 (UWord*)&links )) {
3596 tl_assert(me);
3597 tl_assert(links);
3598 VG_(printf)(" node %p:\n", me);
3599 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
3600 for (i = 0; i < ws_size; i++)
3601 VG_(printf)(" inn %#lx\n", ws_words[i] );
3602 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
3603 for (i = 0; i < ws_size; i++)
3604 VG_(printf)(" out %#lx\n", ws_words[i] );
3605 me = NULL;
3606 links = NULL;
3608 VG_(doneIterFM)( laog );
3609 VG_(printf)("}\n");
3612 static void univ_laog_do_GC ( void ) {
3613 Word i;
3614 LAOGLinks* links;
3615 Word seen = 0;
3616 Int prev_next_gc_univ_laog = next_gc_univ_laog;
3617 const UWord univ_laog_cardinality = HG_(cardinalityWSU)( univ_laog);
3619 Bool *univ_laog_seen = HG_(zalloc) ( "hg.gc_univ_laog.1",
3620 (Int) univ_laog_cardinality
3621 * sizeof(Bool) );
3622 // univ_laog_seen[*] set to 0 (False) by zalloc.
3624 VG_(initIterFM)( laog );
3625 links = NULL;
3626 while (VG_(nextIterFM)( laog, NULL, (UWord*)&links )) {
3627 tl_assert(links);
3628 tl_assert(links->inns < univ_laog_cardinality);
3629 univ_laog_seen[links->inns] = True;
3630 tl_assert(links->outs < univ_laog_cardinality);
3631 univ_laog_seen[links->outs] = True;
3632 links = NULL;
3634 VG_(doneIterFM)( laog );
3636 for (i = 0; i < (Int)univ_laog_cardinality; i++) {
3637 if (univ_laog_seen[i])
3638 seen++;
3639 else
3640 HG_(dieWS) ( univ_laog, (WordSet)i );
3643 HG_(free) (univ_laog_seen);
3645 // We need to decide the value of the next_gc.
3646 // 3 solutions were looked at:
3647 // Sol 1: garbage collect at seen * 2
3648 // This solution was a lot slower, probably because we both do a lot of
3649 // garbage collection and do not keep laog WVs long enough, even though
3650 // they will become useful again very soon.
3651 // Sol 2: garbage collect at a percentage increase of the current cardinality
3652 // (with a min increase of 1)
3653 // Trials on a small test program with 1%, 5% and 10% increase were done.
3654 // 1% is slightly faster than 5%, which is slightly slower than 10%.
3655 // However, on a big application, this caused the memory to be exhausted,
3656 // as even a 1% increase of size at each gc becomes a lot, when many gc
3657 // are done.
3658 // Sol 3: always garbage collect at current cardinality + 1.
3659 // This solution was the fastest of the 3 solutions, and caused no memory
3660 // exhaustion in the big application.
3662 // With regard to the cost introduced by gc: on the t2t perf test (doing only
3663 // lock/unlock operations), t2t 50 10 2 was about 25% faster than the
3664 // version with garbage collection. With t2t 50 20 2, my machine started
3665 // to page out, and so the garbage collected version was much faster.
3666 // On smaller lock sets (e.g. t2t 20 5 2, giving about 100 locks), the
3667 // performance difference is insignificant (~ 0.1 s).
3668 // Of course, it might be that real life programs are not well represented
3669 // by t2t.
3671 // If ever we want to have a more sophisticated control
3672 // (e.g. clo options to control the percentage increase or a fixed increase),
3673 // we should do it here, eg.
3674 // next_gc_univ_laog = prev_next_gc_univ_laog + VG_(clo_laog_gc_fixed);
3675 // Currently, we just hard-code solution 3 above.
3676 next_gc_univ_laog = prev_next_gc_univ_laog + 1;
3678 if (VG_(clo_stats))
3679 VG_(message)
3680 (Vg_DebugMsg,
3681 "univ_laog_do_GC cardinality entered %d exit %d next gc at %d\n",
3682 (Int)univ_laog_cardinality, (Int)seen, next_gc_univ_laog);
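/* Editorial sketch (not part of this file, excluded from compilation):
   the three candidate policies discussed in the comment above, made
   concrete.  'next_gc_after' and 'policy' are invented for
   illustration; the code above hard-codes policy 3. */
#if 0
static Int next_gc_after ( Int policy, UWord seen, Int prev_next_gc )
{
   switch (policy) {
      case 1:  /* Sol 1: gc again once the universe reaches seen * 2  */
               return (Int)seen * 2;
      case 2:  /* Sol 2: gc after ~1% growth, minimum increase of 1   */
               return (Int)seen + ((Int)seen / 100) + 1;
      default: /* Sol 3 (current): gc at current cardinality + 1      */
               return prev_next_gc + 1;
   }
}
#endif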
3686 __attribute__((noinline))
3687 static void laog__add_edge ( Lock* src, Lock* dst ) {
3688 UWord keyW;
3689 LAOGLinks* links;
3690 Bool presentF, presentR;
3691 if (0) VG_(printf)("laog__add_edge %p %p\n", src, dst);
3693 /* Take the opportunity to sanity check the graph. Record in
3694 presentF if there is already a src->dst mapping in this node's
3695 forwards links, and presentR if there is already a src->dst
3696 mapping in this node's backwards links. They should agree!
3697 Also, we need to know whether the edge was already present so as
3698 to decide whether or not to update the link details mapping. We
3699 can compute presentF and presentR essentially for free, so may
3700 as well do this always. */
3701 presentF = presentR = False;
3703 /* Update the out edges for src */
3704 keyW = 0;
3705 links = NULL;
3706 if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)src )) {
3707 WordSetID outs_new;
3708 tl_assert(links);
3709 tl_assert(keyW == (UWord)src);
3710 outs_new = HG_(addToWS)( univ_laog, links->outs, (UWord)dst );
3711 presentF = outs_new == links->outs;
3712 links->outs = outs_new;
3713 } else {
3714 links = HG_(zalloc)("hg.lae.1", sizeof(LAOGLinks));
3715 links->inns = HG_(emptyWS)( univ_laog );
3716 links->outs = HG_(singletonWS)( univ_laog, (UWord)dst );
3717 VG_(addToFM)( laog, (UWord)src, (UWord)links );
3719 /* Update the in edges for dst */
3720 keyW = 0;
3721 links = NULL;
3722 if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)dst )) {
3723 WordSetID inns_new;
3724 tl_assert(links);
3725 tl_assert(keyW == (UWord)dst);
3726 inns_new = HG_(addToWS)( univ_laog, links->inns, (UWord)src );
3727 presentR = inns_new == links->inns;
3728 links->inns = inns_new;
3729 } else {
3730 links = HG_(zalloc)("hg.lae.2", sizeof(LAOGLinks));
3731 links->inns = HG_(singletonWS)( univ_laog, (UWord)src );
3732 links->outs = HG_(emptyWS)( univ_laog );
3733 VG_(addToFM)( laog, (UWord)dst, (UWord)links );
3736 tl_assert( (presentF && presentR) || (!presentF && !presentR) );
3738 if (!presentF && src->acquired_at && dst->acquired_at) {
3739 LAOGLinkExposition expo;
3740 /* If this edge is entering the graph, and we have acquired_at
3741 information for both src and dst, record those acquisition
3742 points. Hence, if there is later a violation of this
3743 ordering, we can show the user the two places in which the
3744 required src-dst ordering was previously established. */
3745 if (0) VG_(printf)("acquire edge %#lx %#lx\n",
3746 src->guestaddr, dst->guestaddr);
3747 expo.src_ga = src->guestaddr;
3748 expo.dst_ga = dst->guestaddr;
3749 expo.src_ec = NULL;
3750 expo.dst_ec = NULL;
3751 tl_assert(laog_exposition);
3752 if (VG_(lookupFM)( laog_exposition, NULL, NULL, (UWord)&expo )) {
3753 /* we already have it; do nothing */
3754 } else {
3755 LAOGLinkExposition* expo2 = HG_(zalloc)("hg.lae.3",
3756 sizeof(LAOGLinkExposition));
3757 expo2->src_ga = src->guestaddr;
3758 expo2->dst_ga = dst->guestaddr;
3759 expo2->src_ec = src->acquired_at;
3760 expo2->dst_ec = dst->acquired_at;
3761 VG_(addToFM)( laog_exposition, (UWord)expo2, (UWord)NULL );
3765 if (HG_(cardinalityWSU) (univ_laog) >= next_gc_univ_laog)
3766 univ_laog_do_GC();
3769 __attribute__((noinline))
3770 static void laog__del_edge ( Lock* src, Lock* dst ) {
3771 UWord keyW;
3772 LAOGLinks* links;
3773 if (0) VG_(printf)("laog__del_edge enter %p %p\n", src, dst);
3774 /* Update the out edges for src */
3775 keyW = 0;
3776 links = NULL;
3777 if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)src )) {
3778 tl_assert(links);
3779 tl_assert(keyW == (UWord)src);
3780 links->outs = HG_(delFromWS)( univ_laog, links->outs, (UWord)dst );
3782 /* Update the in edges for dst */
3783 keyW = 0;
3784 links = NULL;
3785 if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)dst )) {
3786 tl_assert(links);
3787 tl_assert(keyW == (UWord)dst);
3788 links->inns = HG_(delFromWS)( univ_laog, links->inns, (UWord)src );
3791 /* Remove the exposition of src,dst (if present) */
3793 LAOGLinkExposition *fm_expo;
3795 LAOGLinkExposition expo;
3796 expo.src_ga = src->guestaddr;
3797 expo.dst_ga = dst->guestaddr;
3798 expo.src_ec = NULL;
3799 expo.dst_ec = NULL;
3801 if (VG_(delFromFM) (laog_exposition,
3802 (UWord*)&fm_expo, NULL, (UWord)&expo )) {
3803 HG_(free) (fm_expo);
3807 /* deleting edges can increase the nr of WS so check for gc. */
3808 if (HG_(cardinalityWSU) (univ_laog) >= next_gc_univ_laog)
3809 univ_laog_do_GC();
3810 if (0) VG_(printf)("laog__del_edge exit\n");
3813 __attribute__((noinline))
3814 static WordSetID /* in univ_laog */ laog__succs ( Lock* lk ) {
3815 UWord keyW;
3816 LAOGLinks* links;
3817 keyW = 0;
3818 links = NULL;
3819 if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)lk )) {
3820 tl_assert(links);
3821 tl_assert(keyW == (UWord)lk);
3822 return links->outs;
3823 } else {
3824 return HG_(emptyWS)( univ_laog );
3828 __attribute__((noinline))
3829 static WordSetID /* in univ_laog */ laog__preds ( Lock* lk ) {
3830 UWord keyW;
3831 LAOGLinks* links;
3832 keyW = 0;
3833 links = NULL;
3834 if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)lk )) {
3835 tl_assert(links);
3836 tl_assert(keyW == (UWord)lk);
3837 return links->inns;
3838 } else {
3839 return HG_(emptyWS)( univ_laog );
3843 __attribute__((noinline))
3844 static void laog__sanity_check ( const HChar* who ) {
3845 UWord i, ws_size;
3846 UWord* ws_words;
3847 Lock* me;
3848 LAOGLinks* links;
3849 VG_(initIterFM)( laog );
3850 me = NULL;
3851 links = NULL;
3852 if (0) VG_(printf)("laog sanity check\n");
3853 while (VG_(nextIterFM)( laog, (UWord*)&me,
3854 (UWord*)&links )) {
3855 tl_assert(me);
3856 tl_assert(links);
3857 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
3858 for (i = 0; i < ws_size; i++) {
3859 if ( ! HG_(elemWS)( univ_laog,
3860 laog__succs( (Lock*)ws_words[i] ),
3861 (UWord)me ))
3862 goto bad;
3864 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
3865 for (i = 0; i < ws_size; i++) {
3866 if ( ! HG_(elemWS)( univ_laog,
3867 laog__preds( (Lock*)ws_words[i] ),
3868 (UWord)me ))
3869 goto bad;
3871 me = NULL;
3872 links = NULL;
3874 VG_(doneIterFM)( laog );
3875 return;
3877 bad:
3878 VG_(printf)("laog__sanity_check(%s) FAILED\n", who);
3879 laog__show(who);
3880 tl_assert(0);
3883 /* If there is a path in laog from 'src' to any of the elements in
3884 'dst', return an arbitrarily chosen element of 'dst' reachable from
3885 'src'. If no path exists from 'src' to any element in 'dst', return
3886 NULL. */
3887 __attribute__((noinline))
3888 static
3889 Lock* laog__do_dfs_from_to ( Lock* src, WordSetID dsts /* univ_lsets */ )
3891 Lock* ret;
3892 Word ssz;
3893 XArray* stack; /* of Lock* */
3894 WordFM* visited; /* Lock* -> void, iow, Set(Lock*) */
3895 Lock* here;
3896 WordSetID succs;
3897 UWord succs_size, i;
3898 UWord* succs_words;
3899 //laog__sanity_check();
3901 /* If the destination set is empty, we can never get there from
3902 'src' :-), so don't bother to try */
3903 if (HG_(isEmptyWS)( univ_lsets, dsts ))
3904 return NULL;
3906 ret = NULL;
3907 stack = VG_(newXA)( HG_(zalloc), "hg.lddft.1", HG_(free), sizeof(Lock*) );
3908 visited = VG_(newFM)( HG_(zalloc), "hg.lddft.2", HG_(free), NULL/*unboxedcmp*/ );
3910 (void) VG_(addToXA)( stack, &src );
3912 while (True) {
3914 ssz = VG_(sizeXA)( stack );
3916 if (ssz == 0) { ret = NULL; break; }
3918 here = *(Lock**) VG_(indexXA)( stack, ssz-1 );
3919 VG_(dropTailXA)( stack, 1 );
3921 if (HG_(elemWS)( univ_lsets, dsts, (UWord)here )) { ret = here; break; }
3923 if (VG_(lookupFM)( visited, NULL, NULL, (UWord)here ))
3924 continue;
3926 VG_(addToFM)( visited, (UWord)here, 0 );
3928 succs = laog__succs( here );
3929 HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
3930 for (i = 0; i < succs_size; i++)
3931 (void) VG_(addToXA)( stack, &succs_words[i] );
3934 VG_(deleteFM)( visited, NULL, NULL );
3935 VG_(deleteXA)( stack );
3936 return ret;
3940 /* Thread 'thr' is acquiring 'lk'. Check for inconsistent ordering
3941 between 'lk' and the locks already held by 'thr' and issue a
3942 complaint if so. Also, update the ordering graph appropriately.
3944 __attribute__((noinline))
3945 static void laog__pre_thread_acquires_lock (
3946 Thread* thr, /* NB: BEFORE lock is added */
3947 Lock* lk
3950 UWord* ls_words;
3951 UWord ls_size, i;
3952 Lock* other;
3954 /* It may be that 'thr' already holds 'lk' and is recursively
3955 relocking it. In this case we just ignore the call. */
3956 /* NB: univ_lsets really is correct here */
3957 if (HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lk ))
3958 return;
3960 /* First, the check. Complain if there is any path in laog from lk
3961 to any of the locks already held by thr, since if any such path
3962 existed, it would mean that previously lk was acquired before
3963 (rather than after, as we are doing here) at least one of those
3964 locks.
3966 other = laog__do_dfs_from_to(lk, thr->locksetA);
3967 if (other) {
3968 LAOGLinkExposition key, *found;
3969 /* So we managed to find a path lk --*--> other in the graph,
3970 which implies that 'lk' should have been acquired before
3971 'other' but is in fact being acquired afterwards. We present
3972 the lk/other arguments to record_error_LockOrder in the order
3973 in which they should have been acquired. */
3974 /* Go look in the laog_exposition mapping, to find the allocation
3975 points for this edge, so we can show the user. */
3976 key.src_ga = lk->guestaddr;
3977 key.dst_ga = other->guestaddr;
3978 key.src_ec = NULL;
3979 key.dst_ec = NULL;
3980 found = NULL;
3981 if (VG_(lookupFM)( laog_exposition,
3982 (UWord*)&found, NULL, (UWord)&key )) {
3983 tl_assert(found != &key);
3984 tl_assert(found->src_ga == key.src_ga);
3985 tl_assert(found->dst_ga == key.dst_ga);
3986 tl_assert(found->src_ec);
3987 tl_assert(found->dst_ec);
3988 HG_(record_error_LockOrder)(
3989 thr, lk, other,
3990 found->src_ec, found->dst_ec, other->acquired_at );
3991 } else {
3992 /* Hmm. This can't happen (can it?) */
3993 /* Yes, it can happen: see tests/tc14_laog_dinphils.
3994 Imagine we have 3 philosophers A B C, and the forks
3995 between them:
3997                       C
3999                   fCA   fBC
4001                 A    fAB    B
4003 Let's have the following actions:
4004 A takes fCA,fAB
4005 A releases fCA,fAB
4006 B takes fAB,fBC
4007 B releases fAB,fBC
4008 C takes fBC,fCA
4009 C releases fBC,fCA
4011 Helgrind will report a lock order error when C takes fCA.
4012 Effectively, we have a deadlock if the following
4013 sequence is done:
4014 A takes fCA
4015 B takes fAB
4016 C takes fBC
4018 The error reported is:
4019 Observed (incorrect) order fBC followed by fCA
4020 but the stack traces that have established the required order
4021 are not given.
4023 This is because there is no pair (fCA, fBC) in laog_exposition:
4024 the laog_exposition records all pairs of locks between a new lock
4025 taken by a thread and all the already taken locks.
4026 So, there is no laog_exposition (fCA, fBC) as no thread ever
4027 first locked fCA followed by fBC.
4029 In other words, when the deadlock cycle involves more than
4030 two locks, then helgrind does not report the sequence of
4031 operations that created the cycle.
4033 However, we can report the current stack trace (where
4034 lk is being taken), and the stack trace where other was acquired:
4035 Effectively, the variable 'other' contains a lock currently
4036 held by this thread, with its 'acquired_at'. */
4038 HG_(record_error_LockOrder)(
4039 thr, lk, other,
4040 NULL, NULL, other->acquired_at );
4044 /* Second, add to laog the pairs
4045 (old, lk) | old <- locks already held by thr
4046 Since both old and lk are currently held by thr, their acquired_at
4047 fields must be non-NULL.
4049 tl_assert(lk->acquired_at);
4050 HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, thr->locksetA );
4051 for (i = 0; i < ls_size; i++) {
4052 Lock* old = (Lock*)ls_words[i];
4053 tl_assert(old->acquired_at);
4054 laog__add_edge( old, lk );
4057 /* Why "except_Locks" ? We're here because a lock is being
4058 acquired by a thread, and we're in an inconsistent state here.
4059 See the call points in evhH__post_thread_{r,w}_acquires_lock.
4060 When called in this inconsistent state, locks__sanity_check duly
4061 barfs. */
4062 if (HG_(clo_sanity_flags) & SCE_LAOG)
4063 all_except_Locks__sanity_check("laog__pre_thread_acquires_lock-post");
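/* Editorial sketch (not part of this file, excluded from compilation):
   a minimal client program showing the inconsistent acquisition order
   that the check above reports.  The two threads are run one after the
   other so the program cannot actually deadlock, but laog still ends
   up containing the edge L1 --> L2 when t2 acquires L1 while holding
   L2, so the DFS from L1 reaches L2 and a LockOrder error is filed.
   Names L1, L2, t1, t2 are invented for illustration. */
#if 0
#include <pthread.h>

static pthread_mutex_t L1 = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t L2 = PTHREAD_MUTEX_INITIALIZER;

static void* t1 ( void* v )      /* establishes the edge L1 --> L2     */
{
   pthread_mutex_lock(&L1);
   pthread_mutex_lock(&L2);
   pthread_mutex_unlock(&L2);
   pthread_mutex_unlock(&L1);
   return NULL;
}

static void* t2 ( void* v )      /* acquires L1 while holding L2       */
{
   pthread_mutex_lock(&L2);
   pthread_mutex_lock(&L1);      /* reported: L1 should come first     */
   pthread_mutex_unlock(&L1);
   pthread_mutex_unlock(&L2);
   return NULL;
}

int main ( void )
{
   pthread_t a, b;
   pthread_create(&a, NULL, t1, NULL);
   pthread_join(a, NULL);
   pthread_create(&b, NULL, t2, NULL);
   pthread_join(b, NULL);
   return 0;
}
#endif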
4066 /* Allocates a duplicate of words. Caller must HG_(free) the result. */
4067 static UWord* UWordV_dup(UWord* words, Word words_size)
4069 UInt i;
4071 if (words_size == 0)
4072 return NULL;
4074 UWord *dup = HG_(zalloc) ("hg.dup.1", (SizeT) words_size * sizeof(UWord));
4076 for (i = 0; i < words_size; i++)
4077 dup[i] = words[i];
4079 return dup;
4082 /* Delete from 'laog' any pair mentioning a lock in locksToDelete */
4084 __attribute__((noinline))
4085 static void laog__handle_one_lock_deletion ( Lock* lk )
4087 WordSetID preds, succs;
4088 UWord preds_size, succs_size, i, j;
4089 UWord *preds_words, *succs_words;
4091 preds = laog__preds( lk );
4092 succs = laog__succs( lk );
4094 // We need to duplicate the payload, as these can be garbage collected
4095 // during the del/add operations below.
4096 HG_(getPayloadWS)( &preds_words, &preds_size, univ_laog, preds );
4097 preds_words = UWordV_dup(preds_words, preds_size);
4099 HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
4100 succs_words = UWordV_dup(succs_words, succs_size);
4102 for (i = 0; i < preds_size; i++)
4103 laog__del_edge( (Lock*)preds_words[i], lk );
4105 for (j = 0; j < succs_size; j++)
4106 laog__del_edge( lk, (Lock*)succs_words[j] );
4108 for (i = 0; i < preds_size; i++) {
4109 for (j = 0; j < succs_size; j++) {
4110 if (preds_words[i] != succs_words[j]) {
4111 /* This can pass unlocked locks to laog__add_edge, since
4112 we're deleting stuff. So their acquired_at fields may
4113 be NULL. */
4114 laog__add_edge( (Lock*)preds_words[i], (Lock*)succs_words[j] );
4119 if (preds_words)
4120 HG_(free) (preds_words);
4121 if (succs_words)
4122 HG_(free) (succs_words);
4124 // Remove lk information from laog links FM
4126 LAOGLinks *links;
4127 Lock* linked_lk;
4129 if (VG_(delFromFM) (laog,
4130 (UWord*)&linked_lk, (UWord*)&links, (UWord)lk)) {
4131 tl_assert (linked_lk == lk);
4132 HG_(free) (links);
4135 /* FIXME ??? What about removing lock lk data from EXPOSITION ??? */
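/* Editorial sketch (not part of this file, excluded from compilation):
   a toy, standalone illustration of why the preds x succs double loop
   above is needed.  If A was ordered before lk and lk before B, then A
   is ordered before B, and deleting lk must preserve that fact by
   adding the edge A --> B directly.  The adjacency-matrix example
   below is invented for illustration. */
#if 0
#include <stdio.h>
#include <stdbool.h>

#define N 4   /* locks 0..3; we delete lock 2 */

static bool edge[N][N];

static void del_node ( int k )
{
   /* Reconnect every predecessor of k to every successor of k, then
      drop k's own edges -- the same shape as the loops above. */
   for (int p = 0; p < N; p++)
      for (int s = 0; s < N; s++)
         if (p != s && edge[p][k] && edge[k][s])
            edge[p][s] = true;
   for (int i = 0; i < N; i++)
      edge[i][k] = edge[k][i] = false;
}

int main ( void )
{
   edge[0][2] = true;   /* lock 0 acquired before lock 2 */
   edge[2][3] = true;   /* lock 2 acquired before lock 3 */
   del_node(2);
   printf("0 before 3 still known: %d\n", (int)edge[0][3]);  /* prints 1 */
   return 0;
}
#endif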
4138 //__attribute__((noinline))
4139 //static void laog__handle_lock_deletions (
4140 // WordSetID /* in univ_laog */ locksToDelete
4141 // )
4143 // Word i, ws_size;
4144 // UWord* ws_words;
4147 // HG_(getPayloadWS)( &ws_words, &ws_size, univ_lsets, locksToDelete );
4148 // UWordV_dup call needed here ...
4149 // for (i = 0; i < ws_size; i++)
4150 // laog__handle_one_lock_deletion( (Lock*)ws_words[i] );
4152 // if (HG_(clo_sanity_flags) & SCE_LAOG)
4153 // all__sanity_check("laog__handle_lock_deletions-post");
4157 /*--------------------------------------------------------------*/
4158 /*--- Malloc/free replacements ---*/
4159 /*--------------------------------------------------------------*/
4161 typedef
4162 struct {
4163 void* next; /* required by m_hashtable */
4164 Addr payload; /* ptr to actual block */
4165 SizeT szB; /* size requested */
4166 ExeContext* where; /* where it was allocated */
4167 Thread* thr; /* allocating thread */
4169 MallocMeta;
4171 /* A hash table of MallocMetas, used to track malloc'd blocks
4172 (obviously). */
4173 static VgHashTable *hg_mallocmeta_table = NULL;
4175 /* MallocMetas are small elements. We use a pool to avoid
4176 the overhead of malloc for each MallocMeta. */
4177 static PoolAlloc *MallocMeta_poolalloc = NULL;
4179 static MallocMeta* new_MallocMeta ( void ) {
4180 MallocMeta* md = VG_(allocEltPA) (MallocMeta_poolalloc);
4181 VG_(memset)(md, 0, sizeof(MallocMeta));
4182 return md;
4184 static void delete_MallocMeta ( MallocMeta* md ) {
4185 VG_(freeEltPA)(MallocMeta_poolalloc, md);
4189 /* Allocate a client block and set up the metadata for it. */
4191 static
4192 void* handle_alloc ( ThreadId tid,
4193 SizeT szB, SizeT alignB, Bool is_zeroed )
4195 Addr p;
4196 MallocMeta* md;
4198 tl_assert( ((SSizeT)szB) >= 0 );
4199 p = (Addr)VG_(cli_malloc)(alignB, szB);
4200 if (!p) {
4201 return NULL;
4203 if (is_zeroed)
4204 VG_(memset)((void*)p, 0, szB);
4206 /* Note that map_threads_lookup must succeed (its assert cannot fire), since
4207 memory can only be allocated by currently alive threads, hence
4208 they must have an entry in map_threads. */
4209 md = new_MallocMeta();
4210 md->payload = p;
4211 md->szB = szB;
4212 md->where = VG_(record_ExeContext)( tid, 0 );
4213 md->thr = map_threads_lookup( tid );
4215 VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md );
4216 if (UNLIKELY(VG_(clo_xtree_memory) == Vg_XTMemory_Full))
4217 VG_(XTMemory_Full_alloc)(md->szB, md->where);
4219 /* Tell the lower level memory wranglers. */
4220 evh__new_mem_heap( p, szB, is_zeroed );
4222 return (void*)p;
4225 /* Re the checks for less-than-zero (also in hg_cli__realloc below):
4226 Cast to a signed type to catch any unexpectedly negative args.
4227 We're assuming here that the size asked for is not greater than
4228 2^31 bytes (for 32-bit platforms) or 2^63 bytes (for 64-bit
4229 platforms). */
4230 static void* hg_cli__malloc ( ThreadId tid, SizeT n ) {
4231 if (((SSizeT)n) < 0) return NULL;
4232 return handle_alloc ( tid, n, VG_(clo_alignment),
4233 /*is_zeroed*/False );
4235 static void* hg_cli____builtin_new ( ThreadId tid, SizeT n ) {
4236 if (((SSizeT)n) < 0) return NULL;
4237 return handle_alloc ( tid, n, VG_(clo_alignment),
4238 /*is_zeroed*/False );
4240 static void* hg_cli____builtin_new_aligned ( ThreadId tid, SizeT n, SizeT align, SizeT orig_align ) {
4241 if (((SSizeT)n) < 0) return NULL;
4242 return handle_alloc ( tid, n, align,
4243 /*is_zeroed*/False );
4245 static void* hg_cli____builtin_vec_new ( ThreadId tid, SizeT n ) {
4246 if (((SSizeT)n) < 0) return NULL;
4247 return handle_alloc ( tid, n, VG_(clo_alignment),
4248 /*is_zeroed*/False );
4250 static void* hg_cli____builtin_vec_new_aligned ( ThreadId tid, SizeT n, SizeT align, SizeT orig_align ) {
4251 if (((SSizeT)n) < 0) return NULL;
4252 return handle_alloc ( tid, n, align,
4253 /*is_zeroed*/False );
4255 static void* hg_cli__memalign ( ThreadId tid, SizeT align, SizeT orig_alignT, SizeT n ) {
4256 if (((SSizeT)n) < 0) return NULL;
4257 return handle_alloc ( tid, n, align,
4258 /*is_zeroed*/False );
4260 static void* hg_cli__calloc ( ThreadId tid, SizeT nmemb, SizeT size1 ) {
4261 if ( ((SSizeT)nmemb) < 0 || ((SSizeT)size1) < 0 ) return NULL;
4262 return handle_alloc ( tid, nmemb*size1, VG_(clo_alignment),
4263 /*is_zeroed*/True );
4267 /* Free a client block, including getting rid of the relevant
4268 metadata. */
4270 static void handle_free ( ThreadId tid, void* p )
4272 MallocMeta *md, *old_md;
4273 SizeT szB;
4275 /* First see if we can find the metadata for 'p'. */
4276 md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );
4277 if (!md)
4278 return; /* apparently freeing a bogus address. Oh well. */
4280 tl_assert(md->payload == (Addr)p);
4281 szB = md->szB;
4282 if (UNLIKELY(VG_(clo_xtree_memory) == Vg_XTMemory_Full)) {
4283 ExeContext* ec_free = VG_(record_ExeContext)( tid, 0 );
4284 VG_(XTMemory_Full_free)(md->szB, md->where, ec_free);
4287 /* Nuke the metadata block */
4288 old_md = (MallocMeta*)
4289 VG_(HT_remove)( hg_mallocmeta_table, (UWord)p );
4290 tl_assert(old_md); /* it must be present - we just found it */
4291 tl_assert(old_md == md);
4292 tl_assert(old_md->payload == (Addr)p);
4294 VG_(cli_free)((void*)old_md->payload);
4295 delete_MallocMeta(old_md);
4297 /* Tell the lower level memory wranglers. */
4298 evh__die_mem_heap( (Addr)p, szB );
4301 static void hg_cli__free ( ThreadId tid, void* p ) {
4302 handle_free(tid, p);
4304 static void hg_cli____builtin_delete ( ThreadId tid, void* p ) {
4305 handle_free(tid, p);
4307 static void hg_cli____builtin_delete_aligned ( ThreadId tid, void* p, SizeT align ) {
4308 handle_free(tid, p);
4310 static void hg_cli____builtin_vec_delete ( ThreadId tid, void* p ) {
4311 handle_free(tid, p);
4313 static void hg_cli____builtin_vec_delete_aligned ( ThreadId tid, void* p, SizeT align ) {
4314 handle_free(tid, p);
4317 static void* hg_cli__realloc ( ThreadId tid, void* payloadV, SizeT new_size )
4319 MallocMeta *md, *md_new, *md_tmp;
4320 SizeT i;
4322 Addr payload = (Addr)payloadV;
4324 if (((SSizeT)new_size) < 0) return NULL;
4326 if (payloadV == NULL) {
4327 return handle_alloc ( tid, new_size, VG_(clo_alignment),
4328 /*is_zeroed*/False );
4331 md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)payload );
4332 if (!md)
4333 return NULL; /* apparently realloc-ing a bogus address. Oh well. */
4335 tl_assert(md->payload == payload);
4337 if (new_size == 0U ) {
4338 if (VG_(clo_realloc_zero_bytes_frees) == True) {
4339 md_tmp = VG_(HT_remove)( hg_mallocmeta_table, payload );
4340 tl_assert(md_tmp);
4341 tl_assert(md_tmp == md);
4343 VG_(cli_free)((void*)md->payload);
4344 delete_MallocMeta(md);
4346 return NULL;
4348 new_size = 1U;
4351 if (md->szB == new_size) {
4352 /* size unchanged */
4353 md->where = VG_(record_ExeContext)(tid, 0);
4354 return payloadV;
4357 if (md->szB > new_size) {
4358 /* new size is smaller */
4359 md->szB = new_size;
4360 md->where = VG_(record_ExeContext)(tid, 0);
4361 evh__die_mem_heap( md->payload + new_size, md->szB - new_size );
4362 return payloadV;
4365 /* else */ {
4366 /* new size is bigger */
4367 Addr p_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_size);
4368 if (!p_new) {
4369 // Nb: if realloc fails, NULL is returned but the old block is not
4370 // touched. What an awful function.
4371 return NULL;
4374 /* First half kept and copied, second half new */
4375 // FIXME: shouldn't we use a copier which implements the
4376 // memory state machine?
4377 evh__copy_mem( payload, p_new, md->szB );
4378 evh__new_mem_heap ( p_new + md->szB, new_size - md->szB,
4379 /*inited*/False );
4380 /* FIXME: can anything funny happen here? specifically, if the
4381 old range contained a lock, then die_mem_heap will complain.
4382 Is that the correct behaviour? Not sure. */
4383 evh__die_mem_heap( payload, md->szB );
4385 /* Copy from old to new */
4386 for (i = 0; i < md->szB; i++)
4387 ((UChar*)p_new)[i] = ((UChar*)payload)[i];
4389 /* Because the metadata hash table is indexed by payload address,
4390 we have to get rid of the old hash table entry and make a new
4391 one. We can't just modify the existing metadata in place,
4392 because then it would (almost certainly) be in the wrong hash
4393 chain. */
4394 md_new = new_MallocMeta();
4395 *md_new = *md;
4397 md_tmp = VG_(HT_remove)( hg_mallocmeta_table, payload );
4398 tl_assert(md_tmp);
4399 tl_assert(md_tmp == md);
4401 VG_(cli_free)((void*)md->payload);
4402 delete_MallocMeta(md);
4404 /* Update fields */
4405 md_new->where = VG_(record_ExeContext)( tid, 0 );
4406 md_new->szB = new_size;
4407 md_new->payload = p_new;
4408 md_new->thr = map_threads_lookup( tid );
4410 /* and add */
4411 VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md_new );
4413 return (void*)p_new;
4417 static SizeT hg_cli_malloc_usable_size ( ThreadId tid, void* p )
4419 MallocMeta *md = VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );
4421 // There may be slop, but pretend there isn't because only the asked-for
4422 // area will have been shadowed properly.
4423 return ( md ? md->szB : 0 );
4427 /* For error creation: map 'data_addr' to a malloc'd chunk, if any.
4428 Slow linear search. With a bit of hash table help if 'data_addr'
4429 is either the start of a block or up to 15 word-sized steps along
4430 from the start of a block. */
4432 static inline Bool addr_is_in_MM_Chunk( MallocMeta* mm, Addr a )
4434 /* Accept 'a' as within 'mm' if 'mm's size is zero and 'a' points
4435 right at it. */
4436 if (UNLIKELY(mm->szB == 0 && a == mm->payload))
4437 return True;
4438 /* else normal interval rules apply */
4439 if (LIKELY(a < mm->payload)) return False;
4440 if (LIKELY(a >= mm->payload + mm->szB)) return False;
4441 return True;
4444 Bool HG_(mm_find_containing_block)( /*OUT*/ExeContext** where,
4445 /*OUT*/UInt* tnr,
4446 /*OUT*/Addr* payload,
4447 /*OUT*/SizeT* szB,
4448 Addr data_addr )
4450 MallocMeta* mm;
4451 Int i;
4452 const Int n_fast_check_words = 16;
4454 /* Before searching the list of allocated blocks in hg_mallocmeta_table,
4455 first verify that data_addr is in a heap client segment. */
4456 const NSegment *s = VG_(am_find_nsegment) (data_addr);
4457 if (s == NULL || !s->isCH)
4458 return False;
4460 /* First, do a few fast searches on the basis that data_addr might
4461 be exactly the start of a block or up to 15 words inside. This
4462 can happen commonly via the creq
4463 _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK. */
4464 for (i = 0; i < n_fast_check_words; i++) {
4465 mm = VG_(HT_lookup)( hg_mallocmeta_table,
4466 data_addr - (UWord)(UInt)i * sizeof(UWord) );
4467 if (UNLIKELY(mm && addr_is_in_MM_Chunk(mm, data_addr)))
4468 goto found;
4471 /* Well, this totally sucks. But without using an interval tree or
4472 some such, it's hard to see how to do better. We have to check
4473 every block in the entire table. */
4474 VG_(HT_ResetIter)(hg_mallocmeta_table);
4475 while ( (mm = VG_(HT_Next)(hg_mallocmeta_table)) ) {
4476 if (UNLIKELY(addr_is_in_MM_Chunk(mm, data_addr)))
4477 goto found;
4480 /* Not found. Bah. */
4481 return False;
4482 /*NOTREACHED*/
4484 found:
4485 tl_assert(mm);
4486 tl_assert(addr_is_in_MM_Chunk(mm, data_addr));
4487 if (where) *where = mm->where;
4488 if (tnr) *tnr = mm->thr->errmsg_index;
4489 if (payload) *payload = mm->payload;
4490 if (szB) *szB = mm->szB;
4491 return True;
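/* Editorial sketch (not part of this file, excluded from compilation):
   a client-side use of VALGRIND_HG_CLEAN_MEMORY from helgrind.h, which
   is serviced by the VG_USERREQ__HG_CLEAN_MEMORY case in
   hg_handle_client_request further down (the HEAPBLOCK variant of that
   request relies on HG_(mm_find_containing_block) above).  The
   recycling free-list is an invented example; it assumes blocks of at
   least sizeof(FreeCell) bytes and external synchronisation around the
   pool itself. */
#if 0
#include <stdlib.h>
#include "helgrind.h"

typedef struct FreeCell { struct FreeCell* next; } FreeCell;
static FreeCell* freelist = NULL;

void* pool_get ( size_t szB )
{
   if (freelist) {
      void* p = freelist;
      freelist = freelist->next;
      /* The recycled block may still carry access history from its
         previous owner; tell Helgrind to forget it. */
      VALGRIND_HG_CLEAN_MEMORY(p, szB);
      return p;
   }
   return malloc(szB);
}

void pool_put ( void* p )
{
   FreeCell* c = p;
   c->next = freelist;
   freelist = c;
}
#endif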
4495 /*--------------------------------------------------------------*/
4496 /*--- Instrumentation ---*/
4497 /*--------------------------------------------------------------*/
4499 #define unop(_op, _arg1) IRExpr_Unop((_op),(_arg1))
4500 #define binop(_op, _arg1, _arg2) IRExpr_Binop((_op),(_arg1),(_arg2))
4501 #define mkexpr(_tmp) IRExpr_RdTmp((_tmp))
4502 #define mkU32(_n) IRExpr_Const(IRConst_U32(_n))
4503 #define mkU64(_n) IRExpr_Const(IRConst_U64(_n))
4504 #define assign(_t, _e) IRStmt_WrTmp((_t), (_e))
4506 /* This takes and returns atoms, of course. Not full IRExprs. */
4507 static IRExpr* mk_And1 ( IRSB* sbOut, IRExpr* arg1, IRExpr* arg2 )
4509 tl_assert(arg1 && arg2);
4510 tl_assert(isIRAtom(arg1));
4511 tl_assert(isIRAtom(arg2));
4512 /* Generate 32to1(And32(1Uto32(arg1), 1Uto32(arg2))). Appalling
4513 code, I know. */
4514 IRTemp wide1 = newIRTemp(sbOut->tyenv, Ity_I32);
4515 IRTemp wide2 = newIRTemp(sbOut->tyenv, Ity_I32);
4516 IRTemp anded = newIRTemp(sbOut->tyenv, Ity_I32);
4517 IRTemp res = newIRTemp(sbOut->tyenv, Ity_I1);
4518 addStmtToIRSB(sbOut, assign(wide1, unop(Iop_1Uto32, arg1)));
4519 addStmtToIRSB(sbOut, assign(wide2, unop(Iop_1Uto32, arg2)));
4520 addStmtToIRSB(sbOut, assign(anded, binop(Iop_And32, mkexpr(wide1),
4521 mkexpr(wide2))));
4522 addStmtToIRSB(sbOut, assign(res, unop(Iop_32to1, mkexpr(anded))));
4523 return mkexpr(res);
4526 static void instrument_mem_access ( IRSB* sbOut,
4527 IRExpr* addr,
4528 Int szB,
4529 Bool isStore,
4530 Bool fixupSP_needed,
4531 Int hWordTy_szB,
4532 Int goff_sp,
4533 Int goff_sp_s1,
4534 /* goff_sp_s1 is the offset in guest
4535 state where the cached stack validity
4536 is stored. */
4537 IRExpr* guard ) /* NULL => True */
4539 IRType tyAddr = Ity_INVALID;
4540 const HChar* hName = NULL;
4541 void* hAddr = NULL;
4542 Int regparms = 0;
4543 IRExpr** argv = NULL;
4544 IRDirty* di = NULL;
4546 // THRESH is the size of the window above SP (well,
4547 // mostly above) that we assume implies a stack reference.
4548 const Int THRESH = 4096 * 4; // somewhat arbitrary
4549 const Int rz_szB = VG_STACK_REDZONE_SZB;
4551 tl_assert(isIRAtom(addr));
4552 tl_assert(hWordTy_szB == 4 || hWordTy_szB == 8);
4554 tyAddr = typeOfIRExpr( sbOut->tyenv, addr );
4555 tl_assert(tyAddr == Ity_I32 || tyAddr == Ity_I64);
4557 /* So the effective address is in 'addr' now. */
4558 regparms = 1; // unless stated otherwise
4559 if (isStore) {
4560 switch (szB) {
4561 case 1:
4562 hName = "evh__mem_help_cwrite_1";
4563 hAddr = &evh__mem_help_cwrite_1;
4564 argv = mkIRExprVec_1( addr );
4565 break;
4566 case 2:
4567 hName = "evh__mem_help_cwrite_2";
4568 hAddr = &evh__mem_help_cwrite_2;
4569 argv = mkIRExprVec_1( addr );
4570 break;
4571 case 4:
4572 if (fixupSP_needed) {
4573 /* Unwind has to be done with a SP fixed up with one word.
4574 See Ist_Put heuristic in hg_instrument. */
4575 hName = "evh__mem_help_cwrite_4_fixupSP";
4576 hAddr = &evh__mem_help_cwrite_4_fixupSP;
4577 } else {
4578 hName = "evh__mem_help_cwrite_4";
4579 hAddr = &evh__mem_help_cwrite_4;
4581 argv = mkIRExprVec_1( addr );
4582 break;
4583 case 8:
4584 if (fixupSP_needed) {
4585 /* Unwind has to be done with a SP fixed up with one word.
4586 See Ist_Put heuristic in hg_instrument. */
4587 hName = "evh__mem_help_cwrite_8_fixupSP";
4588 hAddr = &evh__mem_help_cwrite_8_fixupSP;
4589 } else {
4590 hName = "evh__mem_help_cwrite_8";
4591 hAddr = &evh__mem_help_cwrite_8;
4593 argv = mkIRExprVec_1( addr );
4594 break;
4595 default:
4596 tl_assert(szB > 8 && szB <= 512); /* stay sane */
4597 regparms = 2;
4598 hName = "evh__mem_help_cwrite_N";
4599 hAddr = &evh__mem_help_cwrite_N;
4600 argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
4601 break;
4603 } else {
4604 switch (szB) {
4605 case 1:
4606 hName = "evh__mem_help_cread_1";
4607 hAddr = &evh__mem_help_cread_1;
4608 argv = mkIRExprVec_1( addr );
4609 break;
4610 case 2:
4611 hName = "evh__mem_help_cread_2";
4612 hAddr = &evh__mem_help_cread_2;
4613 argv = mkIRExprVec_1( addr );
4614 break;
4615 case 4:
4616 hName = "evh__mem_help_cread_4";
4617 hAddr = &evh__mem_help_cread_4;
4618 argv = mkIRExprVec_1( addr );
4619 break;
4620 case 8:
4621 hName = "evh__mem_help_cread_8";
4622 hAddr = &evh__mem_help_cread_8;
4623 argv = mkIRExprVec_1( addr );
4624 break;
4625 default:
4626 tl_assert(szB > 8 && szB <= 512); /* stay sane */
4627 regparms = 2;
4628 hName = "evh__mem_help_cread_N";
4629 hAddr = &evh__mem_help_cread_N;
4630 argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
4631 break;
4635 /* Create the helper. */
4636 tl_assert(hName);
4637 tl_assert(hAddr);
4638 tl_assert(argv);
4639 di = unsafeIRDirty_0_N( regparms,
4640 hName, VG_(fnptr_to_fnentry)( hAddr ),
4641 argv );
4643 if (HG_(clo_delta_stacktrace)) {
4644 /* memory access helper might read the shadow1 SP offset, which
4645 indicates if the cached stacktrace is valid. */
4646 di->fxState[0].fx = Ifx_Read;
4647 di->fxState[0].offset = goff_sp_s1;
4648 di->fxState[0].size = hWordTy_szB;
4649 di->fxState[0].nRepeats = 0;
4650 di->fxState[0].repeatLen = 0;
4651 di->nFxState = 1;
4654 if (! HG_(clo_check_stack_refs)) {
4655 /* We're ignoring memory references which are (obviously) to the
4656 stack. In fact just skip stack refs that are within 4 pages
4657 of SP (SP - the redzone, really), as that's simple, easy, and
4658 filters out most stack references. */
4659 /* Generate the guard condition: "(addr - (SP - RZ)) >u N", for
4660 some arbitrary N. If that is true then addr is outside the
4661 range (SP - RZ .. SP + N - RZ). If N is smallish (a few
4662 pages) then we can say addr is within a few pages of SP and
4663 so can't possibly be a heap access, and so can be skipped.
4665 Note that the condition simplifies to
4666 (addr - SP + RZ) >u N
4667 which generates better code in x86/amd64 backends, but it does
4668 not unfortunately simplify to
4669 (addr - SP) >u (N - RZ)
4670 (would be beneficial because N - RZ is a constant) because
4671 wraparound arithmetic messes up the comparison. eg.
4672 20 >u 10 == True,
4673 but (20 - 15) >u (10 - 15) == 5 >u (MAXINT-5) == False.
4675 IRTemp sp = newIRTemp(sbOut->tyenv, tyAddr);
4676 addStmtToIRSB( sbOut, assign(sp, IRExpr_Get(goff_sp, tyAddr)));
4678 /* "addr - SP" */
4679 IRTemp addr_minus_sp = newIRTemp(sbOut->tyenv, tyAddr);
4680 addStmtToIRSB(
4681 sbOut,
4682 assign(addr_minus_sp,
4683 tyAddr == Ity_I32
4684 ? binop(Iop_Sub32, addr, mkexpr(sp))
4685 : binop(Iop_Sub64, addr, mkexpr(sp)))
4688 /* "addr - SP + RZ" */
4689 IRTemp diff = newIRTemp(sbOut->tyenv, tyAddr);
4690 addStmtToIRSB(
4691 sbOut,
4692 assign(diff,
4693 tyAddr == Ity_I32
4694 ? binop(Iop_Add32, mkexpr(addr_minus_sp), mkU32(rz_szB))
4695 : binop(Iop_Add64, mkexpr(addr_minus_sp), mkU64(rz_szB)))
4698 /* guardA == "guard on the address" */
4699 IRTemp guardA = newIRTemp(sbOut->tyenv, Ity_I1);
4700 addStmtToIRSB(
4701 sbOut,
4702 assign(guardA,
4703 tyAddr == Ity_I32
4704 ? binop(Iop_CmpLT32U, mkU32(THRESH), mkexpr(diff))
4705 : binop(Iop_CmpLT64U, mkU64(THRESH), mkexpr(diff)))
4707 di->guard = mkexpr(guardA);
4710 /* If there's a guard on the access itself (as supplied by the
4711 caller of this routine), we need to AND that in to any guard we
4712 might already have. */
4713 if (guard) {
4714 di->guard = mk_And1(sbOut, di->guard, guard);
4717 /* Add the helper. */
4718 addStmtToIRSB( sbOut, IRStmt_Dirty(di) );
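/* Editorial sketch (not part of this file, excluded from compilation):
   the guard generated above is plain unsigned range-check arithmetic.
   A C rendering of the same "(addr - SP + RZ) >u THRESH" test may make
   the wraparound caveat in the comment easier to see.  The function
   name and the constants in main are invented; a nonzero result
   corresponds to di->guard being true, i.e. the access is still
   instrumented. */
#if 0
#include <stdio.h>

/* Returns 1 when 'addr' is OUTSIDE the window (SP-RZ .. SP-RZ+THRESH),
   i.e. when the access cannot be dismissed as a stack reference. */
static int outside_sp_window ( unsigned long addr, unsigned long sp,
                               unsigned long rz, unsigned long thresh )
{
   return (addr - sp + rz) > thresh;     /* unsigned comparison */
}

int main ( void )
{
   unsigned long sp = 0x7fff0000UL, rz = 128, thresh = 4096UL * 4;
   printf("%d\n", outside_sp_window(sp + 64,      sp, rz, thresh)); /* 0: stack   */
   printf("%d\n", outside_sp_window(sp - 64,      sp, rz, thresh)); /* 0: redzone */
   printf("%d\n", outside_sp_window(0x10000000UL, sp, rz, thresh)); /* 1: heap    */
   return 0;
}
#endif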
4722 /* Figure out if GA is a guest code address in the dynamic linker, and
4723 if so return True. Otherwise (and in case of any doubt) return
4724 False. (Fail-safe: False is the safe value.) */
4725 static Bool is_in_dynamic_linker_shared_object( Addr ga )
4727 DebugInfo* dinfo;
4728 const HChar* soname;
4730 dinfo = VG_(find_DebugInfo)( VG_(current_DiEpoch)(), ga );
4731 if (!dinfo) return False;
4733 soname = VG_(DebugInfo_get_soname)(dinfo);
4734 tl_assert(soname);
4735 if (0) VG_(printf)("%s\n", soname);
4737 return VG_(is_soname_ld_so)(soname);
4740 static
4741 void addInvalidateCachedStack (IRSB* bbOut,
4742 Int goff_sp_s1,
4743 Int hWordTy_szB)
4745 /* Invalidate cached stack: Write 0 in the shadow1 offset 0 */
4746 addStmtToIRSB( bbOut,
4747 IRStmt_Put(goff_sp_s1,
4748 hWordTy_szB == 4 ?
4749 mkU32(0) : mkU64(0)));
4750 /// ???? anything more efficient than assigning a Word ???
4753 static
4754 IRSB* hg_instrument ( VgCallbackClosure* closure,
4755 IRSB* bbIn,
4756 const VexGuestLayout* layout,
4757 const VexGuestExtents* vge,
4758 const VexArchInfo* archinfo_host,
4759 IRType gWordTy, IRType hWordTy )
4761 Int i;
4762 IRSB* bbOut;
4763 Addr cia; /* address of current insn */
4764 IRStmt* st;
4765 Bool inLDSO = False;
4766 Addr inLDSOmask4K = 1; /* mismatches on first check */
4768 // Set to True when SP must be fixed up when taking a stack trace for the
4769 // mem accesses in the rest of the instruction
4770 Bool fixupSP_needed = False;
4772 const Int goff_SP = layout->offset_SP;
4773 /* SP in shadow1 indicates if cached stack is valid.
4774 We have to invalidate the cached stack e.g. when seeing call or ret. */
4775 const Int goff_SP_s1 = layout->total_sizeB + layout->offset_SP;
4776 const Int hWordTy_szB = sizeofIRType(hWordTy);
4778 if (gWordTy != hWordTy) {
4779 /* We don't currently support this case. */
4780 VG_(tool_panic)("host/guest word size mismatch");
4783 if (VKI_PAGE_SIZE < 4096 || VG_(log2)(VKI_PAGE_SIZE) == -1) {
4784 VG_(tool_panic)("implausible or too-small VKI_PAGE_SIZE");
4787 /* Set up BB */
4788 bbOut = emptyIRSB();
4789 bbOut->tyenv = deepCopyIRTypeEnv(bbIn->tyenv);
4790 bbOut->next = deepCopyIRExpr(bbIn->next);
4791 bbOut->jumpkind = bbIn->jumpkind;
4792 bbOut->offsIP = bbIn->offsIP;
4794 // Copy verbatim any IR preamble preceding the first IMark
4795 i = 0;
4796 while (i < bbIn->stmts_used && bbIn->stmts[i]->tag != Ist_IMark) {
4797 addStmtToIRSB( bbOut, bbIn->stmts[i] );
4798 i++;
4801 // Get the first statement, and initial cia from it
4802 tl_assert(bbIn->stmts_used > 0);
4803 tl_assert(i < bbIn->stmts_used);
4804 st = bbIn->stmts[i];
4805 tl_assert(Ist_IMark == st->tag);
4806 cia = st->Ist.IMark.addr;
4807 st = NULL;
4809 for (/*use current i*/; i < bbIn->stmts_used; i++) {
4810 st = bbIn->stmts[i];
4811 tl_assert(st);
4812 tl_assert(isFlatIRStmt(st));
4813 switch (st->tag) {
4814 case Ist_Exit:
4815 /* No memory reference, but if we do anything other than
4816 Ijk_Boring, indicate to helgrind that the previously
4817 recorded stack is invalid.
4818 For Ijk_Boring, also invalidate the stack if the exit
4819 instruction has no CF info. This heuristic avoids cached
4820 stack trace mismatch in some cases such as longjmp
4821 implementation. Similar logic below for the bb exit. */
4822 if (HG_(clo_delta_stacktrace)
4823 && (st->Ist.Exit.jk != Ijk_Boring || ! VG_(has_CF_info)(cia)))
4824 addInvalidateCachedStack(bbOut, goff_SP_s1, hWordTy_szB);
4825 break;
4826 case Ist_NoOp:
4827 case Ist_AbiHint:
4828 /* None of these can contain any memory references. */
4829 break;
4830 case Ist_Put:
4831 /* This cannot contain any memory references. */
4832 /* If we see a put to SP, from now on in this instruction,
4833 the SP needed to unwind has to be fixed up by one word.
4834 This very simple heuristic ensures correct unwinding in the
4835 typical case of a push instruction. If we need to cover more
4836 cases, then we need to better track how the SP is modified by
4837 the instruction (and calculate a precise sp delta), rather than
4838 assuming that the SP is decremented by a Word size. */
4839 if (HG_(clo_delta_stacktrace) && st->Ist.Put.offset == goff_SP) {
4840 fixupSP_needed = True;
4842 break;
4843 case Ist_PutI:
4844 /* This cannot contain any memory references. */
4845 break;
4847 case Ist_IMark:
4848 fixupSP_needed = False;
4850 /* no mem refs, but note the insn address. */
4851 cia = st->Ist.IMark.addr;
4853 /* Don't instrument the dynamic linker. It generates a
4854 lot of races which we just expensively suppress, so
4855 it's pointless.
4857 Avoid flooding is_in_dynamic_linker_shared_object with
4858 requests by only checking at transitions between 4K
4859 pages. */
4860 if ((cia & ~(Addr)0xFFF) != inLDSOmask4K) {
4861 if (0) VG_(printf)("NEW %#lx\n", cia);
4862 inLDSOmask4K = cia & ~(Addr)0xFFF;
4863 inLDSO = is_in_dynamic_linker_shared_object(cia);
4864 } else {
4865 if (0) VG_(printf)("old %#lx\n", cia);
4867 break;
4869 case Ist_MBE:
4870 switch (st->Ist.MBE.event) {
4871 case Imbe_Fence:
4872 case Imbe_CancelReservation:
4873 break; /* not interesting */
4874 default:
4875 goto unhandled;
4877 break;
4879 case Ist_CAS: {
4880 /* Atomic read-modify-write cycle. Just pretend it's a
4881 read. */
4882 IRCAS* cas = st->Ist.CAS.details;
4883 Bool isDCAS = cas->oldHi != IRTemp_INVALID;
4884 if (isDCAS) {
4885 tl_assert(cas->expdHi);
4886 tl_assert(cas->dataHi);
4887 } else {
4888 tl_assert(!cas->expdHi);
4889 tl_assert(!cas->dataHi);
4891 /* Just be boring about it. */
4892 if (!inLDSO) {
4893 instrument_mem_access(
4894 bbOut,
4895 cas->addr,
4896 (isDCAS ? 2 : 1)
4897 * sizeofIRType(typeOfIRExpr(bbIn->tyenv, cas->dataLo)),
4898 False/*!isStore*/, fixupSP_needed,
4899 hWordTy_szB, goff_SP, goff_SP_s1,
4900 NULL/*no-guard*/
4903 break;
4906 case Ist_LLSC: {
4907 /* We pretend store-conditionals don't exist, viz, ignore
4908 them, whereas load-linked's are treated the same as
4909 normal loads. */
4910 IRType dataTy;
4911 if (st->Ist.LLSC.storedata == NULL) {
4912 /* LL */
4913 dataTy = typeOfIRTemp(bbIn->tyenv, st->Ist.LLSC.result);
4914 if (!inLDSO) {
4915 instrument_mem_access(
4916 bbOut,
4917 st->Ist.LLSC.addr,
4918 sizeofIRType(dataTy),
4919 False/*!isStore*/, fixupSP_needed,
4920 hWordTy_szB, goff_SP, goff_SP_s1,
4921 NULL/*no-guard*/
4924 } else {
4925 /* SC */
4926 /*ignore */
4928 break;
4931 case Ist_Store:
4932 if (!inLDSO) {
4933 instrument_mem_access(
4934 bbOut,
4935 st->Ist.Store.addr,
4936 sizeofIRType(typeOfIRExpr(bbIn->tyenv, st->Ist.Store.data)),
4937 True/*isStore*/, fixupSP_needed,
4938 hWordTy_szB, goff_SP, goff_SP_s1,
4939 NULL/*no-guard*/
4942 break;
4944 case Ist_StoreG: {
4945 IRStoreG* sg = st->Ist.StoreG.details;
4946 IRExpr* data = sg->data;
4947 IRExpr* addr = sg->addr;
4948 IRType type = typeOfIRExpr(bbIn->tyenv, data);
4949 tl_assert(type != Ity_INVALID);
4950 instrument_mem_access( bbOut, addr, sizeofIRType(type),
4951 True/*isStore*/, fixupSP_needed,
4952 hWordTy_szB,
4953 goff_SP, goff_SP_s1, sg->guard );
4954 break;
4957 case Ist_LoadG: {
4958 IRLoadG* lg = st->Ist.LoadG.details;
4959 IRType type = Ity_INVALID; /* loaded type */
4960 IRType typeWide = Ity_INVALID; /* after implicit widening */
4961 IRExpr* addr = lg->addr;
4962 typeOfIRLoadGOp(lg->cvt, &typeWide, &type);
4963 tl_assert(type != Ity_INVALID);
4964 instrument_mem_access( bbOut, addr, sizeofIRType(type),
4965 False/*!isStore*/, fixupSP_needed,
4966 hWordTy_szB,
4967 goff_SP, goff_SP_s1, lg->guard );
4968 break;
4971 case Ist_WrTmp: {
4972 IRExpr* data = st->Ist.WrTmp.data;
4973 if (data->tag == Iex_Load) {
4974 if (!inLDSO) {
4975 instrument_mem_access(
4976 bbOut,
4977 data->Iex.Load.addr,
4978 sizeofIRType(data->Iex.Load.ty),
4979 False/*!isStore*/, fixupSP_needed,
4980 hWordTy_szB, goff_SP, goff_SP_s1,
4981 NULL/*no-guard*/
4985 break;
4988 case Ist_Dirty: {
4989 Int dataSize;
4990 IRDirty* d = st->Ist.Dirty.details;
4991 if (d->mFx != Ifx_None) {
4992 /* This dirty helper accesses memory. Collect the
4993 details. */
4994 tl_assert(d->mAddr != NULL);
4995 tl_assert(d->mSize != 0);
4996 dataSize = d->mSize;
4997 if (d->mFx == Ifx_Read || d->mFx == Ifx_Modify) {
4998 if (!inLDSO) {
4999 instrument_mem_access(
5000 bbOut, d->mAddr, dataSize,
5001 False/*!isStore*/, fixupSP_needed,
5002 hWordTy_szB, goff_SP, goff_SP_s1,
5003 NULL/*no-guard*/
5007 if (d->mFx == Ifx_Write || d->mFx == Ifx_Modify) {
5008 if (!inLDSO) {
5009 instrument_mem_access(
5010 bbOut, d->mAddr, dataSize,
5011 True/*isStore*/, fixupSP_needed,
5012 hWordTy_szB, goff_SP, goff_SP_s1,
5013 NULL/*no-guard*/
5017 } else {
5018 tl_assert(d->mAddr == NULL);
5019 tl_assert(d->mSize == 0);
5021 break;
5024 default:
5025 unhandled:
5026 ppIRStmt(st);
5027 tl_assert(0);
5029 } /* switch (st->tag) */
5031 addStmtToIRSB( bbOut, st );
5032 } /* iterate over bbIn->stmts */
5034 // See above the case Ist_Exit:
5035 if (HG_(clo_delta_stacktrace)
5036 && (bbOut->jumpkind != Ijk_Boring || ! VG_(has_CF_info)(cia)))
5037 addInvalidateCachedStack(bbOut, goff_SP_s1, hWordTy_szB);
5039 return bbOut;
5042 #undef binop
5043 #undef mkexpr
5044 #undef mkU32
5045 #undef mkU64
5046 #undef assign
5049 /*----------------------------------------------------------------*/
5050 /*--- Client requests ---*/
5051 /*----------------------------------------------------------------*/
5053 /* Sheesh. Yet another goddam finite map. */
5054 static WordFM* map_pthread_t_to_Thread = NULL; /* pthread_t -> Thread* */
5056 static void map_pthread_t_to_Thread_INIT ( void ) {
5057 if (UNLIKELY(map_pthread_t_to_Thread == NULL)) {
5058 map_pthread_t_to_Thread = VG_(newFM)( HG_(zalloc), "hg.mpttT.1",
5059 HG_(free), NULL );
5063 /* A list of Ada dependent tasks and their masters. Used to implement
5064 the Ada task termination semantics provided by the
5065 gcc gnat Ada runtime. */
5066 typedef
5067 struct {
5068 void* dependent; // Ada Task Control Block of the Dependent
5069 void* master; // ATCB of the master
5070 Word master_level; // level of dependency between master and dependent
5071 Thread* hg_dependent; // helgrind Thread* for dependent task.
5073 GNAT_dmml; // (d)ependent (m)aster (m)aster_(l)evel.
5074 static XArray* gnat_dmmls; /* of GNAT_dmml */
5075 static void gnat_dmmls_INIT (void)
5077 if (UNLIKELY(gnat_dmmls == NULL)) {
5078 gnat_dmmls = VG_(newXA) (HG_(zalloc), "hg.gnat_md.1",
5079 HG_(free),
5080 sizeof(GNAT_dmml) );
5084 static void xtmemory_report_next_block(XT_Allocs* xta, ExeContext** ec_alloc)
5086 const MallocMeta* md = VG_(HT_Next)(hg_mallocmeta_table);
5087 if (md) {
5088 xta->nbytes = md->szB;
5089 xta->nblocks = 1;
5090 *ec_alloc = md->where;
5091 } else
5092 xta->nblocks = 0;
5094 static void HG_(xtmemory_report) ( const HChar* filename, Bool fini )
5096 // Make xtmemory_report_next_block ready to be called.
5097 VG_(HT_ResetIter)(hg_mallocmeta_table);
5098 VG_(XTMemory_report)(filename, fini, xtmemory_report_next_block,
5099 VG_(XT_filter_1top_and_maybe_below_main));
5102 static void print_monitor_help ( void )
5104 VG_(gdb_printf)
5106 "\n"
5107 "helgrind monitor commands:\n"
5108 " info locks [lock_addr] : show status of lock at addr lock_addr\n"
5109 " with no lock_addr, show status of all locks\n"
5110 " accesshistory <addr> [<len>] : show access history recorded\n"
5111 " for <len> (or 1) bytes at <addr>\n"
5112 " xtmemory [<filename>]\n"
5113 " dump xtree memory profile in <filename> (default xtmemory.kcg.%%p.%%n)\n"
5114 "\n");
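/* Editorial note (not part of this file): an illustrative vgdb/gdb
   session exercising the monitor commands listed above.  The target
   program, the address and the output file name are made up;
   accesshistory additionally requires the tool to have been started
   with --history-level=full, as checked below.

      $ valgrind --tool=helgrind --vgdb-error=0 ./a.out
      (in another terminal)
      $ gdb ./a.out
      (gdb) target remote | vgdb
      (gdb) monitor info locks
      (gdb) monitor accesshistory 0x4a2b040 8
      (gdb) monitor xtmemory xtmemory.kcg
*/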
5117 /* return True if request recognised, False otherwise */
5118 static Bool handle_gdb_monitor_command (ThreadId tid, HChar *req)
5120 HChar* wcmd;
5121 HChar s[VG_(strlen)(req)]; /* copy for strtok_r */
5122 HChar *ssaveptr;
5123 Int kwdid;
5125 VG_(strcpy) (s, req);
5127 wcmd = VG_(strtok_r) (s, " ", &ssaveptr);
5128 /* NB: if possible, avoid introducing a new command below which
5129 starts with the same first letter(s) as an already existing
5130 command. This ensures a shorter abbreviation for the user. */
5131 switch (VG_(keyword_id)
5132 ("help info accesshistory xtmemory",
5133 wcmd, kwd_report_duplicated_matches)) {
5134 case -2: /* multiple matches */
5135 return True;
5136 case -1: /* not found */
5137 return False;
5138 case 0: /* help */
5139 print_monitor_help();
5140 return True;
5141 case 1: /* info */
5142 wcmd = VG_(strtok_r) (NULL, " ", &ssaveptr);
5143 switch (kwdid = VG_(keyword_id)
5144 ("locks",
5145 wcmd, kwd_report_all)) {
5146 case -2:
5147 case -1:
5148 break;
5149 case 0: // locks
5151 const HChar* wa;
5152 Addr lk_addr = 0;
5153 Bool lk_shown = False;
5154 Bool all_locks = True;
5155 Int i;
5156 Lock* lk;
5158 wa = VG_(strtok_r) (NULL, " ", &ssaveptr);
5159 if (wa != NULL) {
5160 if (VG_(parse_Addr) (&wa, &lk_addr) )
5161 all_locks = False;
5162 else {
5163 VG_(gdb_printf) ("missing or malformed address\n");
5166 for (i = 0, lk = admin_locks; lk; i++, lk = lk->admin_next) {
5167 if (all_locks || lk_addr == lk->guestaddr) {
5168 pp_Lock(0, lk,
5169 True /* show_lock_addrdescr */,
5170 False /* show_internal_data */);
5171 lk_shown = True;
5174 if (i == 0)
5175 VG_(gdb_printf) ("no locks\n");
5176 if (!all_locks && !lk_shown)
5177 VG_(gdb_printf) ("lock with address %p not found\n",
5178 (void*)lk_addr);
5180 break;
5181 default:
5182 tl_assert(0);
5184 return True;
5186 case 2: /* accesshistory */
5188 Addr address;
5189 SizeT szB = 1;
5190 if (HG_(clo_history_level) < 2) {
5191 VG_(gdb_printf)
5192 ("helgrind must be started with --history-level=full"
5193 " to use accesshistory\n");
5194 return True;
5196 if (VG_(strtok_get_address_and_size) (&address, &szB, &ssaveptr)) {
5197 if (szB >= 1)
5198 libhb_event_map_access_history (address, szB, HG_(print_access));
5199 else
5200 VG_(gdb_printf) ("len must be >=1\n");
5202 return True;
5205 case 3: { /* xtmemory */
5206 HChar* filename;
5207 filename = VG_(strtok_r) (NULL, " ", &ssaveptr);
5208 HG_(xtmemory_report)(filename, False);
5209 return True;
5212 default:
5213 tl_assert(0);
5214 return False;
5218 static
5219 Bool hg_handle_client_request ( ThreadId tid, UWord* args, UWord* ret)
5221 if (!VG_IS_TOOL_USERREQ('H','G',args[0])
5222 && VG_USERREQ__GDB_MONITOR_COMMAND != args[0])
5223 return False;
5225 /* Anything that gets past the above check is one of ours, so we
5226 should be able to handle it. */
5228 /* default, meaningless return value, unless otherwise set */
5229 *ret = 0;
5231 switch (args[0]) {
5233 /* --- --- User-visible client requests --- --- */
5235 case VG_USERREQ__HG_CLEAN_MEMORY:
5236 if (0) VG_(printf)("VG_USERREQ__HG_CLEAN_MEMORY(%#lx,%lu)\n",
5237 args[1], args[2]);
5238 /* Call die_mem to (expensively) tidy up properly, if there
5239 are any held locks etc in the area. Calling evh__die_mem
5240 and then evh__new_mem is a bit inefficient; probably just
5241 the latter would do. */
5242 if (args[2] > 0) { /* length */
5243 evh__die_mem(args[1], args[2]);
5244 /* and then set it to New */
5245 evh__new_mem(args[1], args[2]);
5247 break;
5249 case _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK: {
5250 Addr payload = 0;
5251 SizeT pszB = 0;
5252 if (0) VG_(printf)("VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK(%#lx)\n",
5253 args[1]);
5254 if (HG_(mm_find_containing_block)(NULL, NULL,
5255 &payload, &pszB, args[1])) {
5256 if (pszB > 0) {
5257 evh__die_mem(payload, pszB);
5258 evh__new_mem(payload, pszB);
5260 *ret = pszB;
5261 } else {
5262 *ret = (UWord)-1;
5264 break;
5267 case _VG_USERREQ__HG_ARANGE_MAKE_UNTRACKED:
5268 if (0) VG_(printf)("HG_ARANGE_MAKE_UNTRACKED(%#lx,%lu)\n",
5269 args[1], args[2]);
5270 if (args[2] > 0) { /* length */
5271 evh__untrack_mem(args[1], args[2]);
5273 break;
5275 case _VG_USERREQ__HG_ARANGE_MAKE_TRACKED:
5276 if (0) VG_(printf)("HG_ARANGE_MAKE_TRACKED(%#lx,%lu)\n",
5277 args[1], args[2]);
5278 if (args[2] > 0) { /* length */
5279 evh__new_mem(args[1], args[2]);
5281 break;
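/* Client-side sketch, not part of this file: these two requests are what
   helgrind.h's VALGRIND_HG_DISABLE_CHECKING(start, len) and
   VALGRIND_HG_ENABLE_CHECKING(start, len) macros are expected to send,
   e.g.

      VALGRIND_HG_DISABLE_CHECKING(&stats_counter, sizeof stats_counter);
      // ... deliberately unsynchronised updates to stats_counter ...
      VALGRIND_HG_ENABLE_CHECKING(&stats_counter, sizeof stats_counter);

   to suppress race reports on a range that the program intentionally
   accesses without locking (stats_counter is a made-up variable). */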
5283 case _VG_USERREQ__HG_GET_ABITS:
5284 if (0) VG_(printf)("HG_GET_ABITS(%#lx,%#lx,%lu)\n",
5285 args[1], args[2], args[3]);
5286 UChar *zzabit = (UChar *) args[2];
5287 if (zzabit == NULL
5288 || VG_(am_is_valid_for_client)((Addr)zzabit, (SizeT)args[3],
5289 VKI_PROT_READ|VKI_PROT_WRITE))
5290 *ret = (UWord) libhb_srange_get_abits ((Addr) args[1],
5291 (UChar*) args[2],
5292 (SizeT) args[3]);
5293 else
5294 *ret = -1;
5295 break;
5297 /* This thread (tid) (a master) is informing us that it has
5298 seen the termination of a dependent task, and that this should
5299 be considered as a join between master and dependent. */
5300 case _VG_USERREQ__HG_GNAT_DEPENDENT_MASTER_JOIN: {
5301 Word n;
5302 const Thread *stayer = map_threads_maybe_lookup( tid );
5303 const void *dependent = (void*)args[1];
5304 const void *master = (void*)args[2];
5306 if (0)
5307 VG_(printf)("HG_GNAT_DEPENDENT_MASTER_JOIN (tid %d): "
5308 "self_id = %p Thread* = %p dependent %p\n",
5309 (Int)tid, master, stayer, dependent);
5311 gnat_dmmls_INIT();
5312 /* Similar loop to the one in the master-completed hook below, but it
5313 stops at the first matching occurrence, comparing only master and
5314 dependent. */
5315 for (n = VG_(sizeXA) (gnat_dmmls) - 1; n >= 0; n--) {
5316 GNAT_dmml *dmml = (GNAT_dmml*) VG_(indexXA)(gnat_dmmls, n);
5317 if (dmml->master == master
5318 && dmml->dependent == dependent) {
5319 if (0)
5320 VG_(printf)("quitter %p dependency to stayer %p (join)\n",
5321 dmml->hg_dependent->hbthr, stayer->hbthr);
5322 tl_assert(dmml->hg_dependent->hbthr != stayer->hbthr);
5323 generate_quitter_stayer_dependence (dmml->hg_dependent->hbthr,
5324 stayer->hbthr);
5325 VG_(removeIndexXA) (gnat_dmmls, n);
5326 break;
5329 break;
5332 /* --- --- Client requests for Helgrind's use only --- --- */
5334 /* Some thread is telling us its pthread_t value. Record the
5335 binding between that and the associated Thread*, so we can
5336 later find the Thread* again when notified of a join by the
5337 thread. */
5338 case _VG_USERREQ__HG_SET_MY_PTHREAD_T: {
5339 Thread* my_thr = NULL;
5340 if (0)
5341 VG_(printf)("SET_MY_PTHREAD_T (tid %d): pthread_t = %p\n", (Int)tid,
5342 (void*)args[1]);
5343 map_pthread_t_to_Thread_INIT();
5344 my_thr = map_threads_maybe_lookup( tid );
5345 /* This assertion should hold because the map_threads (tid to
5346 Thread*) binding should have been made at the point of
5347 low-level creation of this thread, which should have
5348 happened prior to us getting this client request for it.
5349 That's because this client request is sent from
5350 client-world by the 'thread_wrapper' function, which
5351 only runs once the thread has been low-level created. */
5352 tl_assert(my_thr != NULL);
5353 /* So now we know that (pthread_t)args[1] is associated with
5354 (Thread*)my_thr. Note that down. */
5355 if (0)
5356 VG_(printf)("XXXX: bind pthread_t %p to Thread* %p\n",
5357 (void*)args[1], (void*)my_thr );
5358 VG_(addToFM)( map_pthread_t_to_Thread, (UWord)args[1], (UWord)my_thr );
5360 if (my_thr->coretid != 1) {
5361 /* FIXME: hardwires assumption about identity of the root thread. */
5362 if (HG_(clo_ignore_thread_creation)) {
5363 HG_(thread_leave_pthread_create)(my_thr);
5364 HG_(thread_leave_synchr)(my_thr);
5365 tl_assert(my_thr->synchr_nesting == 0);
5368 break;
5371 case _VG_USERREQ__HG_PTH_API_ERROR: {
5372 Thread* my_thr = NULL;
5373 map_pthread_t_to_Thread_INIT();
5374 my_thr = map_threads_maybe_lookup( tid );
5375 tl_assert(my_thr); /* See justification above in SET_MY_PTHREAD_T */
5376 HG_(record_error_PthAPIerror)(
5377 my_thr, (HChar*)args[1], (UWord)args[2], (HChar*)args[3] );
5378 break;
5381 /* This thread (tid) has completed a join with the quitting
5382 thread whose pthread_t is in args[1]. */
5383 case _VG_USERREQ__HG_PTHREAD_JOIN_POST: {
5384 Thread* thr_q = NULL; /* quitter Thread* */
5385 Bool found = False;
5386 if (0)
5387 VG_(printf)("NOTIFY_JOIN_COMPLETE (tid %d): quitter = %p\n", (Int)tid,
5388 (void*)args[1]);
5389 map_pthread_t_to_Thread_INIT();
5390 found = VG_(lookupFM)( map_pthread_t_to_Thread,
5391 NULL, (UWord*)&thr_q, (UWord)args[1] );
5392 /* Can this fail? It would mean that our pthread_join
5393 wrapper observed a successful join on args[1] yet that
5394 thread never existed (or at least, it never lodged an
5395 entry in the mapping (via SET_MY_PTHREAD_T)), which
5396 would suggest a bug in the threads library. */
5397 // FIXME: get rid of this assertion; handle properly
5398 tl_assert(found);
5399 if (found) {
5400 if (0)
5401 VG_(printf)(".................... quitter Thread* = %p\n",
5402 thr_q);
5403 evh__HG_PTHREAD_JOIN_POST( tid, thr_q );
5405 break;
5408 /* This thread (tid) is informing us of its master. */
5409 case _VG_USERREQ__HG_GNAT_MASTER_HOOK: {
5410 GNAT_dmml dmml;
5411 dmml.dependent = (void*)args[1];
5412 dmml.master = (void*)args[2];
5413 dmml.master_level = (Word)args[3];
5414 dmml.hg_dependent = map_threads_maybe_lookup( tid );
5415 tl_assert(dmml.hg_dependent);
5417 if (0)
5418 VG_(printf)("HG_GNAT_MASTER_HOOK (tid %d): "
5419 "dependent = %p master = %p master_level = %ld"
5420 " dependent Thread* = %p\n",
5421 (Int)tid, dmml.dependent, dmml.master, dmml.master_level,
5422 dmml.hg_dependent);
5423 gnat_dmmls_INIT();
5424 VG_(addToXA) (gnat_dmmls, &dmml);
5425 break;
5428 /* This thread (tid) is informing us that it has completed a
5429 master. */
5430 case _VG_USERREQ__HG_GNAT_MASTER_COMPLETED_HOOK: {
5431 Word n;
5432 const Thread *stayer = map_threads_maybe_lookup( tid );
5433 const void *master = (void*)args[1];
5434 const Word master_level = (Word) args[2];
5435 tl_assert(stayer);
5437 if (0)
5438 VG_(printf)("HG_GNAT_MASTER_COMPLETED_HOOK (tid %d): "
5439 "self_id = %p master_level = %ld Thread* = %p\n",
5440 (Int)tid, master, master_level, stayer);
5442 gnat_dmmls_INIT();
5443 /* Reverse loop on the array, simulating a pthread_join for
5444 the Dependent tasks of the completed master, and removing
5445 them from the array. */
5446 for (n = VG_(sizeXA) (gnat_dmmls) - 1; n >= 0; n--) {
5447 GNAT_dmml *dmml = (GNAT_dmml*) VG_(indexXA)(gnat_dmmls, n);
5448 if (dmml->master == master
5449 && dmml->master_level == master_level) {
5450 if (0)
5451 VG_(printf)("quitter %p dependency to stayer %p\n",
5452 dmml->hg_dependent->hbthr, stayer->hbthr);
5453 tl_assert(dmml->hg_dependent->hbthr != stayer->hbthr);
5454 generate_quitter_stayer_dependence (dmml->hg_dependent->hbthr,
5455 stayer->hbthr);
5456 VG_(removeIndexXA) (gnat_dmmls, n);
5459 break;
5462 /* EXPOSITION only: by intercepting lock init events we can show
5463 the user where the lock was initialised, rather than only
5464 being able to show where it was first locked. Intercepting
5465 lock initialisations is not necessary for the basic operation
5466 of the race checker. */
5467 case _VG_USERREQ__HG_PTHREAD_MUTEX_INIT_POST:
5468 evh__HG_PTHREAD_MUTEX_INIT_POST( tid, (void*)args[1], args[2] );
5469 break;
5471 /* mutex=arg[1], mutex_is_init=arg[2] */
5472 case _VG_USERREQ__HG_PTHREAD_MUTEX_DESTROY_PRE:
5473 evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid, (void*)args[1], args[2] != 0 );
5474 break;
5476 case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_PRE: // pth_mx_t*
5477 HG_(thread_enter_synchr)(map_threads_maybe_lookup(tid));
5478 if (HG_(get_pthread_create_nesting_level)(tid) == 0)
5479 evh__HG_PTHREAD_MUTEX_UNLOCK_PRE( tid, (void*)args[1] );
5480 break;
5482 case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_POST: // pth_mx_t*
5483 if (HG_(get_pthread_create_nesting_level)(tid) == 0)
5484 evh__HG_PTHREAD_MUTEX_UNLOCK_POST( tid, (void*)args[1] );
5485 HG_(thread_leave_synchr)(map_threads_maybe_lookup(tid));
5486 break;
5488 case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_PRE: // pth_mx_t*
5489 HG_(thread_enter_synchr)(map_threads_maybe_lookup(tid));
5490 if (HG_(get_pthread_create_nesting_level)(tid) == 0)
5491 evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, (void*)args[1], args[2] );
5492 break;
5494 case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_POST: // pth_mx_t*, long
5495 if ((args[2] == True) // lock actually taken
5496 && (HG_(get_pthread_create_nesting_level)(tid) == 0))
5497 evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, (void*)args[1] );
5498 HG_(thread_leave_synchr)(map_threads_maybe_lookup(tid));
5499 break;
5501 /* This thread is about to do pthread_cond_signal on the
5502 pthread_cond_t* in arg[1]. Ditto pthread_cond_broadcast. */
5503 case _VG_USERREQ__HG_PTHREAD_COND_SIGNAL_PRE:
5504 case _VG_USERREQ__HG_PTHREAD_COND_BROADCAST_PRE:
5505 HG_(thread_enter_synchr)(map_threads_maybe_lookup(tid));
5506 evh__HG_PTHREAD_COND_SIGNAL_PRE( tid, (void*)args[1] );
5507 break;
5509 case _VG_USERREQ__HG_PTHREAD_COND_SIGNAL_POST:
5510 case _VG_USERREQ__HG_PTHREAD_COND_BROADCAST_POST:
5511 HG_(thread_leave_synchr)(map_threads_maybe_lookup(tid));
5512 break;
5514 /* Entry into pthread_cond_wait, cond=arg[1], mutex=arg[2].
5515 Returns a flag indicating whether or not the mutex is believed to be
5516 valid for this operation. */
5517 case _VG_USERREQ__HG_PTHREAD_COND_WAIT_PRE: {
5518 HG_(thread_enter_synchr)(map_threads_maybe_lookup(tid));
5519 Bool mutex_is_valid
5520 = evh__HG_PTHREAD_COND_WAIT_PRE( tid, (void*)args[1],
5521 (void*)args[2] );
5522 *ret = mutex_is_valid ? 1 : 0;
5523 break;
5526 /* Thread successfully completed pthread_cond_init:
5527 cond=arg[1], cond_attr=arg[2] */
5528 case _VG_USERREQ__HG_PTHREAD_COND_INIT_POST:
5529 evh__HG_PTHREAD_COND_INIT_POST( tid,
5530 (void*)args[1], (void*)args[2] );
5531 break;
5533 /* cond=arg[1], cond_is_init=arg[2] */
5534 case _VG_USERREQ__HG_PTHREAD_COND_DESTROY_PRE:
5535 evh__HG_PTHREAD_COND_DESTROY_PRE( tid, (void*)args[1], args[2] != 0 );
5536 break;
5538 /* Thread completed pthread_cond_wait, cond=arg[1],
5539 mutex=arg[2], timeout=arg[3], successful=arg[4] */
5540 case _VG_USERREQ__HG_PTHREAD_COND_WAIT_POST:
5541 if (args[4] == True)
5542 evh__HG_PTHREAD_COND_WAIT_POST( tid,
5543 (void*)args[1], (void*)args[2],
5544 (Bool)args[3] );
5545 HG_(thread_leave_synchr)(map_threads_maybe_lookup(tid));
5546 break;
5548 case _VG_USERREQ__HG_PTHREAD_RWLOCK_INIT_POST:
5549 evh__HG_PTHREAD_RWLOCK_INIT_POST( tid, (void*)args[1] );
5550 break;
5552 case _VG_USERREQ__HG_PTHREAD_RWLOCK_DESTROY_PRE:
5553 evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( tid, (void*)args[1] );
5554 break;
5556 /* rwlock=arg[1], isW=arg[2], isTryLock=arg[3] */
5557 case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_PRE:
5558 HG_(thread_enter_synchr)(map_threads_maybe_lookup(tid));
5559 if (HG_(get_pthread_create_nesting_level)(tid) == 0)
5560 evh__HG_PTHREAD_RWLOCK_LOCK_PRE( tid, (void*)args[1],
5561 args[2], args[3] );
5562 break;
5564 /* rwlock=arg[1], isW=arg[2], tookLock=arg[3] */
5565 case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_POST:
5566 if ((args[3] == True)
5567 && (HG_(get_pthread_create_nesting_level)(tid) == 0))
5568 evh__HG_PTHREAD_RWLOCK_LOCK_POST( tid, (void*)args[1], args[2] );
5569 HG_(thread_leave_synchr)(map_threads_maybe_lookup(tid));
5570 break;
5572 case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_PRE:
5573 HG_(thread_enter_synchr)(map_threads_maybe_lookup(tid));
5574 if (HG_(get_pthread_create_nesting_level)(tid) == 0)
5575 evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE( tid, (void*)args[1] );
5576 break;
5578 case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_POST:
5579 if (HG_(get_pthread_create_nesting_level)(tid) == 0)
5580 evh__HG_PTHREAD_RWLOCK_UNLOCK_POST( tid, (void*)args[1] );
5581 HG_(thread_leave_synchr)(map_threads_maybe_lookup(tid));
5582 break;
5584 case _VG_USERREQ__HG_POSIX_SEM_INIT_POST: /* sem_t*, unsigned long */
5585 evh__HG_POSIX_SEM_INIT_POST( tid, (void*)args[1], args[2] );
5586 break;
5588 case _VG_USERREQ__HG_POSIX_SEM_DESTROY_PRE: /* sem_t* */
5589 evh__HG_POSIX_SEM_DESTROY_PRE( tid, (void*)args[1] );
5590 break;
5592 case _VG_USERREQ__HG_POSIX_SEM_POST_PRE: /* sem_t* */
5593 HG_(thread_enter_synchr)(map_threads_maybe_lookup(tid));
5594 evh__HG_POSIX_SEM_POST_PRE( tid, (void*)args[1] );
5595 break;
5597 case _VG_USERREQ__HG_POSIX_SEM_POST_POST: /* sem_t* */
5598 HG_(thread_leave_synchr)(map_threads_maybe_lookup(tid));
5599 break;
5601 case _VG_USERREQ__HG_POSIX_SEM_WAIT_PRE: /* sem_t* */
5602 HG_(thread_enter_synchr)(map_threads_maybe_lookup(tid));
5603 break;
5605 case _VG_USERREQ__HG_POSIX_SEM_WAIT_POST: /* sem_t*, long tookLock */
5606 if (args[2] == True)
5607 evh__HG_POSIX_SEM_WAIT_POST( tid, (void*)args[1] );
5608 HG_(thread_leave_synchr)(map_threads_maybe_lookup(tid));
5609 break;
5611 case _VG_USERREQ__HG_PTHREAD_BARRIER_INIT_PRE:
5612 /* pth_bar_t*, ulong count, ulong resizable */
5613 evh__HG_PTHREAD_BARRIER_INIT_PRE( tid, (void*)args[1],
5614 args[2], args[3] );
5615 break;
5617 case _VG_USERREQ__HG_PTHREAD_BARRIER_RESIZE_PRE:
5618 /* pth_bar_t*, ulong newcount */
5619 evh__HG_PTHREAD_BARRIER_RESIZE_PRE ( tid, (void*)args[1],
5620 args[2] );
5621 break;
5623 case _VG_USERREQ__HG_PTHREAD_BARRIER_WAIT_PRE:
5624 /* pth_bar_t* */
5625 evh__HG_PTHREAD_BARRIER_WAIT_PRE( tid, (void*)args[1] );
5626 break;
5628 case _VG_USERREQ__HG_PTHREAD_BARRIER_DESTROY_PRE:
5629 /* pth_bar_t* */
5630 evh__HG_PTHREAD_BARRIER_DESTROY_PRE( tid, (void*)args[1] );
5631 break;
5633 case _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE:
5634 /* pth_spinlock_t* */
5635 evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE( tid, (void*)args[1] );
5636 break;
5638 case _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST:
5639 /* pth_spinlock_t* */
5640 evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST( tid, (void*)args[1] );
5641 break;
5643 case _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_PRE:
5644 /* pth_spinlock_t*, Word */
5645 evh__HG_PTHREAD_SPIN_LOCK_PRE( tid, (void*)args[1], args[2] );
5646 break;
5648 case _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_POST:
5649 /* pth_spinlock_t* */
5650 evh__HG_PTHREAD_SPIN_LOCK_POST( tid, (void*)args[1] );
5651 break;
5653 case _VG_USERREQ__HG_PTHREAD_SPIN_DESTROY_PRE:
5654 /* pth_spinlock_t* */
5655 evh__HG_PTHREAD_SPIN_DESTROY_PRE( tid, (void*)args[1] );
5656 break;
5658 case _VG_USERREQ__HG_CLIENTREQ_UNIMP: {
5659 /* HChar* who */
5660 HChar* who = (HChar*)args[1];
5661 HChar buf[50 + 50];
5662 Thread* thr = map_threads_maybe_lookup( tid );
5663 tl_assert( thr ); /* I must be mapped */
5664 tl_assert( who );
5665 tl_assert( VG_(strlen)(who) <= 50 );
5666 VG_(sprintf)(buf, "Unimplemented client request macro \"%s\"", who );
5667 /* record_error_Misc strdup's buf, so this is safe: */
5668 HG_(record_error_Misc)( thr, buf );
5669 break;
5672 case _VG_USERREQ__HG_USERSO_SEND_PRE:
5673 /* UWord arbitrary-SO-tag */
5674 evh__HG_USERSO_SEND_PRE( tid, args[1] );
5675 break;
5677 case _VG_USERREQ__HG_USERSO_RECV_POST:
5678 /* UWord arbitrary-SO-tag */
5679 evh__HG_USERSO_RECV_POST( tid, args[1] );
5680 break;
5682 case _VG_USERREQ__HG_USERSO_FORGET_ALL:
5683 /* UWord arbitrary-SO-tag */
5684 evh__HG_USERSO_FORGET_ALL( tid, args[1] );
5685 break;
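/* Client-side sketch, not part of this file: the three USERSO requests
   above back helgrind.h's ANNOTATE_HAPPENS_BEFORE(obj),
   ANNOTATE_HAPPENS_AFTER(obj) and ANNOTATE_HAPPENS_BEFORE_FORGET_ALL(obj)
   macros.  A hand-rolled queue might annotate its ordering roughly as
   follows (fill, enqueue, dequeue and use are hypothetical functions):

      // producer
      fill(msg);
      ANNOTATE_HAPPENS_BEFORE(msg);
      enqueue(q, msg);

      // consumer
      msg = dequeue(q);
      ANNOTATE_HAPPENS_AFTER(msg);
      use(msg);

   telling Helgrind to treat the enqueue as happening-before the dequeue
   even though it cannot see the queue's internal synchronisation. */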
5687 case VG_USERREQ__GDB_MONITOR_COMMAND: {
5688 Bool handled = handle_gdb_monitor_command (tid, (HChar*)args[1]);
5689 if (handled)
5690 *ret = 1;
5691 else
5692 *ret = 0;
5693 return handled;
5696 case _VG_USERREQ__HG_PTHREAD_CREATE_BEGIN: {
5697 Thread *thr = map_threads_maybe_lookup(tid);
5698 if (HG_(clo_ignore_thread_creation)) {
5699 HG_(thread_enter_pthread_create)(thr);
5700 HG_(thread_enter_synchr)(thr);
5702 break;
5705 case _VG_USERREQ__HG_PTHREAD_CREATE_END: {
5706 Thread *thr = map_threads_maybe_lookup(tid);
5707 if (HG_(clo_ignore_thread_creation)) {
5708 HG_(thread_leave_pthread_create)(thr);
5709 HG_(thread_leave_synchr)(thr);
5711 break;
5714 case _VG_USERREQ__HG_PTHREAD_MUTEX_ACQUIRE_PRE: // pth_mx_t*, long tryLock
5715 evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, (void*)args[1], args[2] );
5716 break;
5718 case _VG_USERREQ__HG_PTHREAD_MUTEX_ACQUIRE_POST: // pth_mx_t*
5719 evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, (void*)args[1] );
5720 break;
5722 case _VG_USERREQ__HG_PTHREAD_RWLOCK_ACQUIRED: // void*, long isW
5723 evh__HG_PTHREAD_RWLOCK_LOCK_POST( tid, (void*)args[1], args[2] );
5724 break;
5726 case _VG_USERREQ__HG_PTHREAD_RWLOCK_RELEASED: // void*
5727 evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE( tid, (void*)args[1] );
5728 break;
5730 case _VG_USERREQ__HG_POSIX_SEM_RELEASED: /* sem_t* */
5731 evh__HG_POSIX_SEM_POST_PRE( tid, (void*)args[1] );
5732 break;
5734 case _VG_USERREQ__HG_POSIX_SEM_ACQUIRED: /* sem_t* */
5735 evh__HG_POSIX_SEM_WAIT_POST( tid, (void*)args[1] );
5736 break;
5738 #if defined(VGO_solaris)
5739 case _VG_USERREQ__HG_RTLD_BIND_GUARD:
5740 evh__HG_RTLD_BIND_GUARD(tid, args[1]);
5741 break;
5743 case _VG_USERREQ__HG_RTLD_BIND_CLEAR:
5744 evh__HG_RTLD_BIND_CLEAR(tid, args[1]);
5745 break;
5746 #endif /* VGO_solaris */
5748 default:
5749 /* Unhandled Helgrind client request! */
5750 VG_(message)(Vg_UserMsg,
5751 "Warning: unknown Helgrind client request code %llx\n",
5752 (ULong)args[0]);
5753 return False;
5756 return True;
5760 /*----------------------------------------------------------------*/
5761 /*--- Setup ---*/
5762 /*----------------------------------------------------------------*/
5764 static Bool hg_process_cmd_line_option ( const HChar* arg )
5766 const HChar* tmp_str;
5768 if VG_BOOL_CLO(arg, "--track-lockorders",
5769 HG_(clo_track_lockorders)) {}
5770 else if VG_BOOL_CLO(arg, "--cmp-race-err-addrs",
5771 HG_(clo_cmp_race_err_addrs)) {}
5773 else if VG_XACT_CLO(arg, "--history-level=none",
5774 HG_(clo_history_level), 0);
5775 else if VG_XACT_CLO(arg, "--history-level=approx",
5776 HG_(clo_history_level), 1);
5777 else if VG_XACT_CLO(arg, "--history-level=full",
5778 HG_(clo_history_level), 2);
5780 else if VG_BINT_CLO(arg, "--history-backtrace-size",
5781 HG_(clo_history_backtrace_size), 2, 500) {}
5782 // The upper limit of 500 is just in case someone with a lot of CPU and
5783 // memory would like to use the same value for --num-callers and this.
5785 else if VG_BOOL_CLO(arg, "--delta-stacktrace",
5786 HG_(clo_delta_stacktrace)) {}
5788 else if VG_BINT_CLO(arg, "--conflict-cache-size",
5789 HG_(clo_conflict_cache_size), 10*1000, 150*1000*1000) {}
5791 /* "stuvwx" --> stuvwx (binary) */
5792 else if VG_STR_CLO(arg, "--hg-sanity-flags", tmp_str) {
5793 Int j;
5795 if (6 != VG_(strlen)(tmp_str)) {
5796 VG_(message)(Vg_UserMsg,
5797 "--hg-sanity-flags argument must have 6 digits\n");
5798 return False;
5800 for (j = 0; j < 6; j++) {
5801 if ('0' == tmp_str[j]) { /* do nothing */ }
5802 else if ('1' == tmp_str[j]) HG_(clo_sanity_flags) |= (1 << (6-1-j));
5803 else {
5804 VG_(message)(Vg_UserMsg, "--hg-sanity-flags argument can "
5805 "only contain 0s and 1s\n");
5806 return False;
5809 if (0) VG_(printf)("XXX sanity flags: 0x%lx\n", HG_(clo_sanity_flags));
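/* Worked example, not part of the original source: "--hg-sanity-flags=000010"
   sets only bit (1 << 1), i.e. HG_(clo_sanity_flags) == 0x2, which per the
   table in hg_print_debug_usage below enables sanity checking at
   lock/unlock events.  The leftmost digit maps to bit 5 and the rightmost
   to bit 0. */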
5812 else if VG_BOOL_CLO(arg, "--free-is-write",
5813 HG_(clo_free_is_write)) {}
5815 else if VG_XACT_CLO(arg, "--vts-pruning=never",
5816 HG_(clo_vts_pruning), 0);
5817 else if VG_XACT_CLO(arg, "--vts-pruning=auto",
5818 HG_(clo_vts_pruning), 1);
5819 else if VG_XACT_CLO(arg, "--vts-pruning=always",
5820 HG_(clo_vts_pruning), 2);
5822 else if VG_BOOL_CLO(arg, "--check-stack-refs",
5823 HG_(clo_check_stack_refs)) {}
5824 else if VG_BOOL_CLO(arg, "--ignore-thread-creation",
5825 HG_(clo_ignore_thread_creation)) {}
5827 else
5828 return VG_(replacement_malloc_process_cmd_line_option)(arg);
5830 return True;
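/* For illustration, not part of the original source: the options parsed
   above go on the usual Valgrind command line, e.g.

      valgrind --tool=helgrind --history-level=approx \
               --conflict-cache-size=5000000 ./myprog

   where ./myprog stands for any client program. */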
5833 static void hg_print_usage ( void )
5835 VG_(printf)(
5836 " --free-is-write=no|yes treat heap frees as writes [no]\n"
5837 " --track-lockorders=no|yes show lock ordering errors? [yes]\n"
5838 " --history-level=none|approx|full [full]\n"
5839 " full: show both stack traces for a data race (can be very slow)\n"
5840 " approx: full trace for one thread, approx for the other (faster)\n"
5841 " none: only show trace for one thread in a race (fastest)\n"
5842 " --history-backtrace-size=<number> record <number> callers for full\n"
5843 " history level [8]\n"
5844 " --delta-stacktrace=no|yes [yes on linux amd64/x86]\n"
5845 " no : always compute a full history stacktrace from unwind info\n"
5846 " yes : derive a stacktrace from the previous stacktrace\n"
5847 " if there was no call/return or similar instruction\n"
5848 " --conflict-cache-size=N size of 'full' history cache [2000000]\n"
5849 " --check-stack-refs=no|yes race-check reads and writes on the\n"
5850 " main stack and thread stacks? [yes]\n"
5851 " --ignore-thread-creation=yes|no Ignore activities during thread\n"
5852 " creation [%s]\n",
5853 HG_(clo_ignore_thread_creation) ? "yes" : "no"
5857 static void hg_print_debug_usage ( void )
5859 VG_(printf)(" --cmp-race-err-addrs=no|yes are data addresses in "
5860 "race errors significant? [no]\n");
5861 VG_(printf)(" --hg-sanity-flags=<XXXXXX> sanity check "
5862 " at events (X = 0|1) [000000]\n");
5863 VG_(printf)(" --hg-sanity-flags values:\n");
5864 VG_(printf)(" 010000 after changes to "
5865 "lock-order-acquisition-graph\n");
5866 VG_(printf)(" 001000 at memory accesses\n");
5867 VG_(printf)(" 000100 at mem permission setting for "
5868 "ranges >= %d bytes\n", SCE_BIGRANGE_T);
5869 VG_(printf)(" 000010 at lock/unlock events\n");
5870 VG_(printf)(" 000001 at thread create/join events\n");
5871 VG_(printf)(
5872 " --vts-pruning=never|auto|always [auto]\n"
5873 " never: is never done (may cause big space leaks in Helgrind)\n"
5874 " auto: done just often enough to keep space usage under control\n"
5875 " always: done after every VTS GC (mostly just a big time waster)\n"
5879 static void hg_print_stats (void)
5882 if (1) {
5883 VG_(printf)("\n");
5884 HG_(ppWSUstats)( univ_lsets, "univ_lsets" );
5885 if (HG_(clo_track_lockorders)) {
5886 VG_(printf)("\n");
5887 HG_(ppWSUstats)( univ_laog, "univ_laog" );
5891 //zz VG_(printf)("\n");
5892 //zz VG_(printf)(" hbefore: %'10lu queries\n", stats__hbefore_queries);
5893 //zz VG_(printf)(" hbefore: %'10lu cache 0 hits\n", stats__hbefore_cache0s);
5894 //zz VG_(printf)(" hbefore: %'10lu cache > 0 hits\n", stats__hbefore_cacheNs);
5895 //zz VG_(printf)(" hbefore: %'10lu graph searches\n", stats__hbefore_gsearches);
5896 //zz VG_(printf)(" hbefore: %'10lu of which slow\n",
5897 //zz stats__hbefore_gsearches - stats__hbefore_gsearchFs);
5898 //zz VG_(printf)(" hbefore: %'10lu stack high water mark\n",
5899 //zz stats__hbefore_stk_hwm);
5900 //zz VG_(printf)(" hbefore: %'10lu cache invals\n", stats__hbefore_invals);
5901 //zz VG_(printf)(" hbefore: %'10lu probes\n", stats__hbefore_probes);
5903 VG_(printf)("\n");
5904 VG_(printf)(" locksets: %'8d unique lock sets\n",
5905 (Int)HG_(cardinalityWSU)( univ_lsets ));
5906 if (HG_(clo_track_lockorders)) {
5907 VG_(printf)(" univ_laog: %'8d unique lock sets\n",
5908 (Int)HG_(cardinalityWSU)( univ_laog ));
5911 //VG_(printf)("L(ast)L(ock) map: %'8lu inserts (%d map size)\n",
5912 // stats__ga_LL_adds,
5913 // (Int)(ga_to_lastlock ? VG_(sizeFM)( ga_to_lastlock ) : 0) );
5915 VG_(printf)(" LockN-to-P map: %'8llu queries (%llu map size)\n",
5916 HG_(stats__LockN_to_P_queries),
5917 HG_(stats__LockN_to_P_get_map_size)() );
5919 VG_(printf)("client malloc-ed blocks: %'8u\n",
5920 VG_(HT_count_nodes)(hg_mallocmeta_table));
5922 VG_(printf)("string table map: %'8llu queries (%llu map size)\n",
5923 HG_(stats__string_table_queries),
5924 HG_(stats__string_table_get_map_size)() );
5925 if (HG_(clo_track_lockorders)) {
5926 VG_(printf)(" LAOG: %'8d map size\n",
5927 (Int)(laog ? VG_(sizeFM)( laog ) : 0));
5928 VG_(printf)(" LAOG exposition: %'8d map size\n",
5929 (Int)(laog_exposition ? VG_(sizeFM)( laog_exposition ) : 0));
5932 VG_(printf)(" locks: %'8lu acquires, "
5933 "%'lu releases\n",
5934 stats__lockN_acquires,
5935 stats__lockN_releases
5937 VG_(printf)(" sanity checks: %'8lu\n", stats__sanity_checks);
5939 VG_(printf)("\n");
5940 libhb_shutdown(True); // This in fact only prints stats.
5943 static void hg_fini ( Int exitcode )
5945 HG_(xtmemory_report) (VG_(clo_xtree_memory_file), True);
5947 if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)
5948 && HG_(clo_history_level) >= 2) {
5949 VG_(umsg)(
5950 "Use --history-level=approx or =none to gain increased speed, at\n" );
5951 VG_(umsg)(
5952 "the cost of reduced accuracy of conflicting-access information\n");
5955 if (SHOW_DATA_STRUCTURES)
5956 pp_everything( PP_ALL, "SK_(fini)" );
5957 if (HG_(clo_sanity_flags))
5958 all__sanity_check("SK_(fini)");
5960 if (VG_(clo_stats))
5961 hg_print_stats();
5964 /* FIXME: move these somewhere sane */
5966 static
5967 void for_libhb__get_stacktrace ( Thr* hbt, Addr* frames, UWord nRequest )
5969 Thread* thr;
5970 ThreadId tid;
5971 UWord nActual;
5972 tl_assert(hbt);
5973 thr = libhb_get_Thr_hgthread( hbt );
5974 tl_assert(thr);
5975 tid = map_threads_maybe_reverse_lookup_SLOW(thr);
5976 nActual = (UWord)VG_(get_StackTrace_with_deltas)
5977 ( tid, frames, (UInt)nRequest,
5978 NULL, NULL, 0,
5979 thr->first_sp_delta);
5980 tl_assert(nActual <= nRequest);
5981 for (; nActual < nRequest; nActual++)
5982 frames[nActual] = 0;
5985 static
5986 ExeContext* for_libhb__get_EC ( Thr* hbt )
5988 Thread* thr;
5989 ThreadId tid;
5990 ExeContext* ec;
5991 tl_assert(hbt);
5992 thr = libhb_get_Thr_hgthread( hbt );
5993 tl_assert(thr);
5994 tid = map_threads_maybe_reverse_lookup_SLOW(thr);
5995 /* this will assert if tid is invalid */
5996 ec = VG_(record_ExeContext)( tid, 0 );
5997 return ec;
6001 static void hg_post_clo_init ( void )
6003 Thr* hbthr_root;
6005 if (HG_(clo_delta_stacktrace) && VG_(clo_vex_control).guest_chase) {
6006 if (VG_(clo_verbosity) >= 2)
6007 VG_(message)(Vg_UserMsg,
6008 "helgrind --delta-stacktrace=yes only works with "
6009 "--vex-guest-chase=no\n"
6010 "=> (re-setting it to 'no')\n");
6011 VG_(clo_vex_control).guest_chase = False;
6015 /////////////////////////////////////////////
6016 hbthr_root = libhb_init( for_libhb__get_stacktrace,
6017 for_libhb__get_EC );
6018 /////////////////////////////////////////////
6021 if (HG_(clo_track_lockorders))
6022 laog__init();
6024 initialise_data_structures(hbthr_root);
6025 if (VG_(clo_xtree_memory) == Vg_XTMemory_Full)
6026 // Activate full xtree memory profiling.
6027 VG_(XTMemory_Full_init)(VG_(XT_filter_1top_and_maybe_below_main));
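/* Usage note, not part of the original source: full xtree memory
   profiling is typically enabled by running with --xtree-memory=full;
   the resulting report is written at exit to the file named by
   --xtree-memory-file (see VG_(clo_xtree_memory_file) in hg_fini above)
   and can also be produced on demand with the 'xtmemory' monitor command
   handled earlier in this file. */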
6030 static void hg_info_location (DiEpoch ep, Addr a)
6032 (void) HG_(get_and_pp_addrdescr) (ep, a);
6035 static void hg_pre_clo_init ( void )
6037 VG_(details_name) ("Helgrind");
6038 VG_(details_version) (NULL);
6039 VG_(details_description) ("a thread error detector");
6040 VG_(details_copyright_author)(
6041 "Copyright (C) 2007-2017, and GNU GPL'd, by OpenWorks LLP et al.");
6042 VG_(details_bug_reports_to) (VG_BUGS_TO);
6043 VG_(details_avg_translation_sizeB) ( 320 );
6045 VG_(basic_tool_funcs) (hg_post_clo_init,
6046 hg_instrument,
6047 hg_fini);
6049 VG_(needs_core_errors) ();
6050 VG_(needs_tool_errors) (HG_(eq_Error),
6051 HG_(before_pp_Error),
6052 HG_(pp_Error),
6053 False,/*show TIDs for errors*/
6054 HG_(update_extra),
6055 HG_(recognised_suppression),
6056 HG_(read_extra_suppression_info),
6057 HG_(error_matches_suppression),
6058 HG_(get_error_name),
6059 HG_(get_extra_suppression_info),
6060 HG_(print_extra_suppression_use),
6061 HG_(update_extra_suppression_use));
6063 VG_(needs_xml_output) ();
6065 VG_(needs_command_line_options)(hg_process_cmd_line_option,
6066 hg_print_usage,
6067 hg_print_debug_usage);
6068 VG_(needs_client_requests) (hg_handle_client_request);
6070 // FIXME?
6071 //VG_(needs_sanity_checks) (hg_cheap_sanity_check,
6072 // hg_expensive_sanity_check);
6074 VG_(needs_print_stats) (hg_print_stats);
6075 VG_(needs_info_location) (hg_info_location);
6077 VG_(needs_malloc_replacement) (hg_cli__malloc,
6078 hg_cli____builtin_new,
6079 hg_cli____builtin_new_aligned,
6080 hg_cli____builtin_vec_new,
6081 hg_cli____builtin_vec_new_aligned,
6082 hg_cli__memalign,
6083 hg_cli__calloc,
6084 hg_cli__free,
6085 hg_cli____builtin_delete,
6086 hg_cli____builtin_delete_aligned,
6087 hg_cli____builtin_vec_delete,
6088 hg_cli____builtin_vec_delete_aligned,
6089 hg_cli__realloc,
6090 hg_cli_malloc_usable_size,
6091 HG_CLI__DEFAULT_MALLOC_REDZONE_SZB );
6093 /* 21 Dec 08: disabled this; it mostly causes Helgrind to start more
6094 slowly and use significantly more memory, without very often
6095 providing useful results. The user can request to load this
6096 information manually with --read-var-info=yes. */
6097 if (0) VG_(needs_var_info)(); /* optional */
6099 VG_(track_new_mem_startup) ( evh__new_mem_w_perms );
6100 VG_(track_new_mem_stack_signal)( evh__new_mem_w_tid );
6101 VG_(track_new_mem_brk) ( evh__new_mem_w_tid );
6102 VG_(track_new_mem_mmap) ( evh__new_mem_w_perms );
6103 VG_(track_new_mem_stack) ( evh__new_mem_stack );
6104 VG_(track_new_mem_stack_4) ( evh__new_mem_stack_4 );
6105 VG_(track_new_mem_stack_8) ( evh__new_mem_stack_8 );
6106 VG_(track_new_mem_stack_12) ( evh__new_mem_stack_12 );
6107 VG_(track_new_mem_stack_16) ( evh__new_mem_stack_16 );
6108 VG_(track_new_mem_stack_32) ( evh__new_mem_stack_32 );
6109 VG_(track_new_mem_stack_112) ( evh__new_mem_stack_112 );
6110 VG_(track_new_mem_stack_128) ( evh__new_mem_stack_128 );
6111 VG_(track_new_mem_stack_144) ( evh__new_mem_stack_144 );
6112 VG_(track_new_mem_stack_160) ( evh__new_mem_stack_160 );
6114 // FIXME: surely this isn't thread-aware
6115 VG_(track_copy_mem_remap) ( evh__copy_mem );
6117 VG_(track_change_mem_mprotect) ( evh__set_perms );
6119 VG_(track_die_mem_stack_signal)( evh__die_mem );
6120 VG_(track_die_mem_brk) ( evh__die_mem_munmap );
6121 VG_(track_die_mem_munmap) ( evh__die_mem_munmap );
6123 /* evh__die_mem ends by calling libhb_srange_noaccess_NoFX,
6124 which has no effect. We therefore do not use VG_(track_die_mem_stack),
6125 as this would be an expensive way to do nothing. */
6126 // VG_(track_die_mem_stack) ( evh__die_mem );
6128 // FIXME: what is this for?
6129 VG_(track_ban_mem_stack) (NULL);
6131 VG_(track_pre_mem_read) ( evh__pre_mem_read );
6132 VG_(track_pre_mem_read_asciiz) ( evh__pre_mem_read_asciiz );
6133 VG_(track_pre_mem_write) ( evh__pre_mem_write );
6134 VG_(track_post_mem_write) (NULL);
6136 /////////////////
6138 VG_(track_pre_thread_ll_create)( evh__pre_thread_ll_create );
6139 VG_(track_pre_thread_ll_exit) ( evh__pre_thread_ll_exit );
6141 VG_(track_start_client_code)( evh__start_client_code );
6142 VG_(track_stop_client_code)( evh__stop_client_code );
6144 /* Ensure that requirements for "dodgy C-as-C++ style inheritance",
6145 as described in comments at the top of pub_tool_hashtable.h, are
6146 met. Blargh. */
6147 tl_assert( sizeof(void*) == sizeof(struct _MallocMeta*) );
6148 tl_assert( sizeof(UWord) == sizeof(Addr) );
6149 hg_mallocmeta_table
6150 = VG_(HT_construct)( "hg_malloc_metadata_table" );
6152 MallocMeta_poolalloc = VG_(newPA) ( sizeof(MallocMeta),
6153 1000,
6154 HG_(zalloc),
6155 "hg_malloc_metadata_pool",
6156 HG_(free));
6158 // add a callback to clean up on (threaded) fork.
6159 VG_(atfork)(NULL/*pre*/, NULL/*parent*/, evh__atfork_child/*child*/);
6162 VG_DETERMINE_INTERFACE_VERSION(hg_pre_clo_init)
6164 /*--------------------------------------------------------------------*/
6165 /*--- end hg_main.c ---*/
6166 /*--------------------------------------------------------------------*/