/*--------------------------------------------------------------------*/
/*--- Error management for Helgrind.                               ---*/
/*---                                                  hg_errors.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Helgrind, a Valgrind tool for detecting errors
   in threaded programs.

   Copyright (C) 2007-2017 OpenWorks Ltd
      info@open-works.co.uk

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, see <http://www.gnu.org/licenses/>.

   The GNU General Public License is contained in the file COPYING.
*/
#include "pub_tool_basics.h"
#include "pub_tool_libcbase.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_stacktrace.h"
#include "pub_tool_execontext.h"
#include "pub_tool_errormgr.h"
#include "pub_tool_wordfm.h"
#include "pub_tool_xarray.h"
#include "pub_tool_debuginfo.h"
#include "pub_tool_threadstate.h"
#include "pub_tool_options.h"     // VG_(clo_xml)
#include "pub_tool_aspacemgr.h"
#include "pub_tool_addrinfo.h"

#include "hg_basics.h"
#include "hg_addrdescr.h"
#include "hg_wordset.h"
#include "hg_lock_n_thread.h"
#include "libhb.h"
#include "hg_errors.h"            /* self */
/*----------------------------------------------------------------*/
/*--- Error management -- storage                              ---*/
/*----------------------------------------------------------------*/
/* maps (by value) strings to a copy of them in ARENA_TOOL */

static WordFM* string_table = NULL;

ULong HG_(stats__string_table_queries) = 0;

ULong HG_(stats__string_table_get_map_size) ( void ) {
   return string_table ? (ULong)VG_(sizeFM)(string_table) : 0;
}

static Word string_table_cmp ( UWord s1, UWord s2 ) {
   return (Word)VG_(strcmp)( (HChar*)s1, (HChar*)s2 );
}

static HChar* string_table_strdup ( const HChar* str ) {
   HChar* copy = NULL;
   HG_(stats__string_table_queries)++;
   if (!str)
      str = "(null)";
   if (!string_table) {
      string_table = VG_(newFM)( HG_(zalloc), "hg.sts.1",
                                 HG_(free), string_table_cmp );
   }
   if (VG_(lookupFM)( string_table,
                      NULL, (UWord*)&copy, (UWord)str )) {
      tl_assert(copy);
      if (0) VG_(printf)("string_table_strdup: %p -> %p\n", str, copy );
      return copy;
   } else {
      copy = HG_(strdup)("hg.sts.2", str);
      VG_(addToFM)( string_table, (UWord)copy, (UWord)copy );
      return copy;
   }
}
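
/* Illustrative sketch (not part of the upstream file): because the
   table maps strings by value, repeated interning of equal strings
   yields the same persistent copy, e.g.

      HChar* a = string_table_strdup("pthread_mutex_lock");
      HChar* b = string_table_strdup("pthread_mutex_lock");
      // a == b: both point at the single copy held in ARENA_TOOL,
      // so error records can share persistent strings rather than
      // each holding its own duplicate.
*/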

/* maps from Lock .unique fields to LockP*s */

static WordFM* map_LockN_to_P = NULL;

ULong HG_(stats__LockN_to_P_queries) = 0;

ULong HG_(stats__LockN_to_P_get_map_size) ( void ) {
   return map_LockN_to_P ? (ULong)VG_(sizeFM)(map_LockN_to_P) : 0;
}

static Word lock_unique_cmp ( UWord lk1W, UWord lk2W )
{
   Lock* lk1 = (Lock*)lk1W;
   Lock* lk2 = (Lock*)lk2W;
   tl_assert( HG_(is_sane_LockNorP)(lk1) );
   tl_assert( HG_(is_sane_LockNorP)(lk2) );
   if (lk1->unique < lk2->unique) return -1;
   if (lk1->unique > lk2->unique) return 1;
   return 0;
}

/* Given a normal Lock (LockN), convert it to a persistent Lock
   (LockP).  In some cases the LockN could be invalid (if it's been
   freed), so we enquire, in hg_main.c's admin_locks list, whether it
   is in fact valid.  If allowed_to_be_invalid is True, then it's OK
   for the LockN to be invalid, in which case Lock_INVALID is
   returned.  In all other cases, we insist that the LockN is a valid
   lock, and return its corresponding LockP.

   Why can LockNs sometimes be invalid?  Because they are harvested
   from locksets that are attached to the OldRef info for conflicting
   threads.  By the time we detect a race, some of the elements of
   the lockset may have been destroyed by the client, in which case
   the corresponding Lock structures we maintain will have been freed.

   So we check that each LockN is a member of the admin_locks doubly
   linked list of all Lock structures.  That stops us prodding around
   in potentially freed-up Lock structures.  However, it's not quite a
   proper check: if a new Lock has been reallocated at the same
   address as one which was previously freed, we'll wind up copying
   the new one as the basis for the LockP, which is completely bogus
   because it is unrelated to the previous Lock that lived there.
   Let's hope that doesn't happen too often.
*/
static Lock* mk_LockP_from_LockN ( Lock* lkn,
                                   Bool allowed_to_be_invalid )
{
   Lock* lkp = NULL;
   HG_(stats__LockN_to_P_queries)++;

   /* First off, let's do some sanity checks.  If
      allowed_to_be_invalid is False, we _must_ be able to find 'lkn'
      in admin_locks; else we must assert.  If it is True, it's OK for
      it not to be findable, but in that case we must return
      Lock_INVALID right away. */
   Lock* lock_list = HG_(get_admin_locks)();
   while (lock_list) {
      if (lock_list == lkn)
         break;
      lock_list = lock_list->admin_next;
   }
   if (lock_list == NULL) {
      /* We didn't find it.  That possibility has to be OK'd by the
         caller. */
      tl_assert(allowed_to_be_invalid);
      return Lock_INVALID;
   }

   /* So we must be looking at a valid LockN. */
   tl_assert( HG_(is_sane_LockN)(lkn) );

   if (!map_LockN_to_P) {
      map_LockN_to_P = VG_(newFM)( HG_(zalloc), "hg.mLPfLN.1",
                                   HG_(free), lock_unique_cmp );
   }
   if (!VG_(lookupFM)( map_LockN_to_P, NULL, (UWord*)&lkp, (UWord)lkn)) {
      lkp = HG_(zalloc)( "hg.mLPfLN.2", sizeof(Lock) );
      *lkp = *lkn;
      lkp->admin_next = NULL;
      lkp->admin_prev = NULL;
      lkp->magic = LockP_MAGIC;
      /* Forget about the bag of lock holders - don't copy that.
         Also, acquired_at should be NULL whenever heldBy is, and vice
         versa.  Also forget about the associated libhb synch object. */
      lkp->heldW  = False;
      lkp->heldBy = NULL;
      lkp->acquired_at = NULL;
      lkp->hbso = NULL;
      VG_(addToFM)( map_LockN_to_P, (UWord)lkp, (UWord)lkp );
   }
   tl_assert( HG_(is_sane_LockP)(lkp) );
   return lkp;
}
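
/* Illustrative sketch (not part of the upstream file): the two calling
   modes.  For a lock the calling thread demonstrably still holds,
   conversion must succeed:

      Lock* lkp = mk_LockP_from_LockN(lkn, False);
      // asserts if 'lkn' is no longer in admin_locks

   whereas LockNs harvested from historical (OldRef) locksets may
   already have been freed by the client, so those callers pass True
   and must be prepared for a Lock_INVALID result:

      Lock* maybe = mk_LockP_from_LockN(old_lkn, True);
      if (maybe == Lock_INVALID) {
         // the lock no longer exists; show a placeholder instead
      }
*/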

static Int sort_by_guestaddr(const void* n1, const void* n2)
{
   const Lock* l1 = *(const Lock *const *)n1;
   const Lock* l2 = *(const Lock *const *)n2;

   Addr a1 = l1 == Lock_INVALID ? 0 : l1->guestaddr;
   Addr a2 = l2 == Lock_INVALID ? 0 : l2->guestaddr;
   if (a1 < a2) return -1;
   if (a1 > a2) return 1;
   return 0;
}

/* Expand a WordSet of LockN*'s into a NULL-terminated vector of
   LockP*'s.  Any LockN's that can't be converted into a LockP
   (because they have been freed, see comment on mk_LockP_from_LockN)
   are converted instead into the value Lock_INVALID.  Hence the
   returned vector is a sequence: zero or more (valid LockP* or
   Lock_INVALID), terminated by a NULL. */
static
Lock** enumerate_WordSet_into_LockP_vector( WordSetU* univ_lsets,
                                            WordSetID lockset,
                                            Bool allowed_to_be_invalid )
{
   tl_assert(univ_lsets);
   tl_assert( HG_(plausibleWS)(univ_lsets, lockset) );
   UWord  nLocks = HG_(cardinalityWS)(univ_lsets, lockset);
   Lock** lockPs = HG_(zalloc)( "hg.eWSiLPa",
                                (nLocks+1) * sizeof(Lock*) );
   tl_assert(lockPs[nLocks] == NULL); /* pre-NULL terminated */
   UWord* lockNs  = NULL;
   UWord  nLockNs = 0;
   if (nLocks > 0) {
      /* HG_(getPayloadWS) doesn't assign non-NULL to &lockNs if the
         lockset is empty; hence the guarding "if".  Sigh. */
      HG_(getPayloadWS)( &lockNs, &nLockNs, univ_lsets, lockset );
      tl_assert(lockNs);
   }
   UWord i;
   /* Convert to LockPs. */
   for (i = 0; i < nLockNs; i++) {
      lockPs[i] = mk_LockP_from_LockN( (Lock*)lockNs[i],
                                       allowed_to_be_invalid );
   }
   /* Sort the locks by increasing Lock::guestaddr to avoid jitters
      in the output. */
   VG_(ssort)(lockPs, nLockNs, sizeof lockPs[0], sort_by_guestaddr);

   return lockPs;
}
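
/* Illustrative sketch (not part of the upstream file): the vector is
   NULL-terminated, so callers can walk or count it without carrying a
   separate length:

      Lock** vec = enumerate_WordSet_into_LockP_vector(
                      HG_(get_univ_lsets)(), lockset,
                      True/-allowed_to_be_invalid-/ );
      UWord nLocks, nLocksValid;
      count_LockP_vector(&nLocks, &nLocksValid, vec);  // defined below
      // nLocks counts entries before the NULL; nLocksValid excludes
      // Lock_INVALID placeholders.
      HG_(free)(vec);
*/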

/* Get the number of useful elements in a vector created by
   enumerate_WordSet_into_LockP_vector.  Returns both the total number
   of elements (not including the terminating NULL) and the number of
   non-Lock_INVALID elements. */
static void count_LockP_vector ( /*OUT*/UWord* nLocks,
                                 /*OUT*/UWord* nLocksValid,
                                 Lock** vec )
{
   tl_assert(vec);
   *nLocks = *nLocksValid = 0;
   UWord n = 0;
   while (vec[n]) {
      (*nLocks)++;
      if (vec[n] != Lock_INVALID)
         (*nLocksValid)++;
      n++;
   }
}

/* Find out whether 'lk' is in 'vec'. */
static Bool elem_LockP_vector ( Lock** vec, Lock* lk )
{
   tl_assert(vec);
   tl_assert(lk);
   UWord n = 0;
   while (vec[n]) {
      if (vec[n] == lk)
         return True;
      n++;
   }
   return False;
}

/* Errors:

      race: program counter
            read or write
            data size
            previous state
            current state

   FIXME: how does state printing interact with lockset gc?
   Are the locksets in prev/curr state always valid?
   Ditto question for the threadsets
      ThreadSets - probably are always valid if Threads
      are never thrown away.
      LockSets - could at least print the lockset elements that
      correspond to actual locks at the time of printing.  Hmm.
*/

/* Error kinds */
typedef
   enum {
      XE_Race=1101,      // race
      XE_UnlockUnlocked, // unlocking a not-locked lock
      XE_UnlockForeign,  // unlocking a lock held by some other thread
      XE_UnlockBogus,    // unlocking an address not known to be a lock
      XE_PthAPIerror,    // error from the POSIX pthreads API
      XE_LockOrder,      // lock order error
      XE_Misc,           // misc other error (w/ string to describe it)
      XE_Dubious         // a bit like misc for cases where the POSIX
                         // spec is unclear on error conditions
   }
   XErrorTag;

/* Extra contexts for kinds */
typedef
   struct  {
      XErrorTag tag;
      union {
         struct {
            Addr        data_addr;
            Int         szB;
            AddrInfo    data_addrinfo;
            Bool        isWrite;
            Thread*     thr;
            Lock**      locksHeldW;
            /* h1_* and h2_* provide some description of a previously
               observed access with which we are conflicting. */
            Thread*     h1_ct; /* non-NULL means h1 info present */
            ExeContext* h1_ct_mbsegstartEC;
            ExeContext* h1_ct_mbsegendEC;
            Thread*     h2_ct; /* non-NULL means h2 info present */
            ExeContext* h2_ct_accEC;
            Int         h2_ct_accSzB;
            Bool        h2_ct_accIsW;
            Lock**      h2_ct_locksHeldW;
         } Race;
         struct {
            Thread* thr;  /* doing the unlocking */
            Lock*   lock; /* lock (that is already unlocked) */
         } UnlockUnlocked;
         struct {
            Thread* thr;   /* doing the unlocking */
            Thread* owner; /* thread that actually holds the lock */
            Lock*   lock;  /* lock (that is held by 'owner') */
         } UnlockForeign;
         struct {
            Thread* thr;     /* doing the unlocking */
            Addr    lock_ga; /* purported address of the lock */
         } UnlockBogus;
         struct {
            Thread* thr;
            HChar*  fnname; /* persistent, in tool-arena */
            Word    err;    /* pth error code */
            HChar*  errstr; /* persistent, in tool-arena */
         } PthAPIerror;
         struct {
            Thread*     thr;
            /* The first 4 fields describe the previously observed
               (should-be) ordering. */
            Lock*       shouldbe_earlier_lk;
            Lock*       shouldbe_later_lk;
            ExeContext* shouldbe_earlier_ec;
            ExeContext* shouldbe_later_ec;
            /* In principle we need to record two more stacks, from
               this thread, when acquiring the locks in the "wrong"
               order.  In fact the wallclock-later acquisition by this
               thread is recorded in the main stack for this error.
               So we only need a stack for the earlier acquisition by
               this thread. */
            ExeContext* actual_earlier_ec;
         } LockOrder;
         struct {
            Thread*     thr;
            HChar*      errstr; /* persistent, in tool-arena */
            HChar*      auxstr; /* optional, persistent, in tool-arena */
            ExeContext* auxctx; /* optional */
         } Misc;
      } XE;
   }
   XError;

static void init_XError ( XError* xe ) {
   VG_(memset)(xe, 0, sizeof(*xe) );
   xe->tag = XE_Race-1; /* bogus */
}

/* Extensions of suppressions */
typedef
   enum {
      XS_Race=1201, /* race */
      XS_FreeMemLock,
      XS_UnlockUnlocked,
      XS_UnlockForeign,
      XS_UnlockBogus,
      XS_PthAPIerror,
      XS_LockOrder,
      XS_Misc,
      XS_Dubious
   }
   XSuppTag;

/* Updates the copy with address info if necessary. */
UInt HG_(update_extra) ( const Error* err )
{
   XError* xe = (XError*)VG_(get_error_extra)(err);
   tl_assert(xe);
   //if (extra != NULL && Undescribed == extra->addrinfo.akind) {
   //   describe_addr ( VG_(get_error_address)(err), &(extra->addrinfo) );
   //}

   if (xe->tag == XE_Race) {

      /* Note the set of locks that the thread is (w-)holding.
         Convert the WordSetID of LockN*'s into a NULL-terminated
         vector of LockP*'s.  We don't expect to encounter any invalid
         LockNs in this conversion. */
      tl_assert(xe->XE.Race.thr);
      xe->XE.Race.locksHeldW
         = enumerate_WordSet_into_LockP_vector(
              HG_(get_univ_lsets)(),
              xe->XE.Race.thr->locksetW,
              False/*!allowed_to_be_invalid*/
           );

      /* See if we can come up with a source level description of the
         raced-upon address.  This is potentially expensive, which is
         why it's only done at the update_extra point, not when the
         error is initially created. */
      static Int xxx = 0;
      xxx++;
      if (0)
         VG_(printf)("HG_(update_extra): "
                     "%d conflicting-event queries\n", xxx);

      HG_(describe_addr) (VG_(get_ExeContext_epoch)(VG_(get_error_where)(err)),
                          xe->XE.Race.data_addr, &xe->XE.Race.data_addrinfo);

      /* And poke around in the conflicting-event map, to see if we
         can rustle up a plausible-looking conflicting memory access
         to show. */
      if (HG_(clo_history_level) >= 2) {
         Thr*        thrp            = NULL;
         ExeContext* wherep          = NULL;
         Addr        acc_addr        = xe->XE.Race.data_addr;
         Int         acc_szB         = xe->XE.Race.szB;
         Thr*        acc_thr         = xe->XE.Race.thr->hbthr;
         Bool        acc_isW         = xe->XE.Race.isWrite;
         SizeT       conf_szB        = 0;
         Bool        conf_isW        = False;
         WordSetID   conf_locksHeldW = 0;
         tl_assert(!xe->XE.Race.h2_ct_accEC);
         tl_assert(!xe->XE.Race.h2_ct);
         if (libhb_event_map_lookup(
                &wherep, &thrp, &conf_szB, &conf_isW, &conf_locksHeldW,
                acc_thr, acc_addr, acc_szB, acc_isW )) {
            Thread* threadp;
            tl_assert(wherep);
            tl_assert(thrp);
            threadp = libhb_get_Thr_hgthread( thrp );
            tl_assert(threadp);
            xe->XE.Race.h2_ct_accEC  = wherep;
            xe->XE.Race.h2_ct        = threadp;
            xe->XE.Race.h2_ct_accSzB = (Int)conf_szB;
            xe->XE.Race.h2_ct_accIsW = conf_isW;
            xe->XE.Race.h2_ct_locksHeldW
               = enumerate_WordSet_into_LockP_vector(
                    HG_(get_univ_lsets)(),
                    conf_locksHeldW,
                    True/*allowed_to_be_invalid*/
                 );
         }

         // both NULL or both non-NULL
         tl_assert( (!!xe->XE.Race.h2_ct) == (!!xe->XE.Race.h2_ct_accEC) );
      }
   }

   return sizeof(XError);
}
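
/* Illustrative note (not part of the upstream file): this follows the
   Valgrind core's two-phase error protocol.  Recording is cheap:

      HG_(record_error_Race)(thr, ga, szB, isW, NULL, NULL, NULL);
      // -> VG_(maybe_record_error), which may discard the report as
      //    a duplicate, using HG_(eq_Error) for comparison

   and only if the error survives deduplication does the core invoke
   HG_(update_extra), so the expensive address description and
   conflicting-access lookup above run once per reported error, not
   once per detected race. */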

void HG_(record_error_Race) ( Thread* thr,
                              Addr data_addr, Int szB, Bool isWrite,
                              Thread* h1_ct,
                              ExeContext* h1_ct_segstart,
                              ExeContext* h1_ct_mbsegendEC )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );

#  if defined(VGO_linux) || defined(VGO_freebsd)
   /* Skip any races on locations apparently in GOTPLT sections.  This
      is said to be caused by ld.so poking PLT table entries (or
      whatever) when it writes the resolved address of a dynamically
      linked routine, into the table (or whatever) when it is called
      for the first time. */

   VgSectKind sect = VG_(DebugInfo_sect_kind)( NULL, data_addr );
   if (0) VG_(printf)("XXXXXXXXX RACE on %#lx %s\n",
                      data_addr, VG_(pp_SectKind)(sect));
   /* SectPLT is required on ???-linux */
   if (sect == Vg_SectGOTPLT) return;
   /* SectPLT is required on ppc32/64-linux */
   if (sect == Vg_SectPLT) return;
   /* SectGOT is required on arm-linux */
   if (sect == Vg_SectGOT) return;

#  endif

   init_XError(&xe);
   xe.tag = XE_Race;
   xe.XE.Race.data_addr = data_addr;
   xe.XE.Race.szB       = szB;
   xe.XE.Race.isWrite   = isWrite;
   xe.XE.Race.thr       = thr;
   tl_assert(isWrite == False || isWrite == True);
   tl_assert(szB == 8 || szB == 4 || szB == 2 || szB == 1);
   /* Skip on the detailed description of the raced-on address at this
      point; it's expensive.  Leave it for the update_extra function
      if we ever make it that far. */
   xe.XE.Race.data_addrinfo.tag = Addr_Undescribed;
   // FIXME: tid vs thr
   // Skip on any of the conflicting-access info at this point.
   // It's expensive to obtain, and this error is more likely than
   // not to be discarded.  We'll fill these fields in HG_(update_extra)
   // just above, assuming the error ever makes it that far (unlikely).
   xe.XE.Race.h2_ct_accSzB = 0;
   xe.XE.Race.h2_ct_accIsW = False;
   xe.XE.Race.h2_ct_accEC  = NULL;
   xe.XE.Race.h2_ct        = NULL;
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );

   xe.XE.Race.h1_ct              = h1_ct;
   xe.XE.Race.h1_ct_mbsegstartEC = h1_ct_segstart;
   xe.XE.Race.h1_ct_mbsegendEC   = h1_ct_mbsegendEC;

   VG_(maybe_record_error)( thr->coretid,
                            XE_Race, data_addr, NULL, &xe );
}

void HG_(record_error_UnlockUnlocked) ( Thread* thr, Lock* lk )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );
   init_XError(&xe);
   xe.tag = XE_UnlockUnlocked;
   xe.XE.UnlockUnlocked.thr
      = thr;
   xe.XE.UnlockUnlocked.lock
      = mk_LockP_from_LockN(lk, False/*!allowed_to_be_invalid*/);
   // FIXME: tid vs thr
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid,
                            XE_UnlockUnlocked, 0, NULL, &xe );
}

void HG_(record_error_UnlockForeign) ( Thread* thr,
                                       Thread* owner, Lock* lk )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );
   tl_assert( HG_(is_sane_Thread)(owner) );
   tl_assert( HG_(is_sane_LockN)(lk) );
   init_XError(&xe);
   xe.tag = XE_UnlockForeign;
   xe.XE.UnlockForeign.thr   = thr;
   xe.XE.UnlockForeign.owner = owner;
   xe.XE.UnlockForeign.lock
      = mk_LockP_from_LockN(lk, False/*!allowed_to_be_invalid*/);
   // FIXME: tid vs thr
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid,
                            XE_UnlockForeign, 0, NULL, &xe );
}

void HG_(record_error_UnlockBogus) ( Thread* thr, Addr lock_ga )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );
   init_XError(&xe);
   xe.tag = XE_UnlockBogus;
   xe.XE.UnlockBogus.thr     = thr;
   xe.XE.UnlockBogus.lock_ga = lock_ga;
   // FIXME: tid vs thr
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid,
                            XE_UnlockBogus, 0, NULL, &xe );
}

void HG_(record_error_LockOrder)(
        Thread*     thr,
        Lock*       shouldbe_earlier_lk,
        Lock*       shouldbe_later_lk,
        ExeContext* shouldbe_earlier_ec,
        ExeContext* shouldbe_later_ec,
        ExeContext* actual_earlier_ec
     )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );
   tl_assert(HG_(clo_track_lockorders));
   init_XError(&xe);
   xe.tag = XE_LockOrder;
   xe.XE.LockOrder.thr = thr;
   xe.XE.LockOrder.shouldbe_earlier_lk
      = mk_LockP_from_LockN(shouldbe_earlier_lk,
                            False/*!allowed_to_be_invalid*/);
   xe.XE.LockOrder.shouldbe_earlier_ec = shouldbe_earlier_ec;
   xe.XE.LockOrder.shouldbe_later_lk
      = mk_LockP_from_LockN(shouldbe_later_lk,
                            False/*!allowed_to_be_invalid*/);
   xe.XE.LockOrder.shouldbe_later_ec   = shouldbe_later_ec;
   xe.XE.LockOrder.actual_earlier_ec   = actual_earlier_ec;
   // FIXME: tid vs thr
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid,
                            XE_LockOrder, 0, NULL, &xe );
}

void HG_(record_error_PthAPIerror) ( Thread* thr, const HChar* fnname,
                                     Word err, const HChar* errstr )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );
   tl_assert(fnname);
   tl_assert(errstr);
   init_XError(&xe);
   xe.tag = XE_PthAPIerror;
   xe.XE.PthAPIerror.thr    = thr;
   xe.XE.PthAPIerror.fnname = string_table_strdup(fnname);
   xe.XE.PthAPIerror.err    = err;
   xe.XE.PthAPIerror.errstr = string_table_strdup(errstr);
   // FIXME: tid vs thr
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid,
                            XE_PthAPIerror, 0, NULL, &xe );
}

void HG_(record_error_Misc_w_aux) ( Thread* thr, const HChar* errstr,
                                    const HChar* auxstr, ExeContext* auxctx )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );
   tl_assert(errstr);
   init_XError(&xe);
   xe.tag = XE_Misc;
   xe.XE.Misc.thr    = thr;
   xe.XE.Misc.errstr = string_table_strdup(errstr);
   xe.XE.Misc.auxstr = auxstr ? string_table_strdup(auxstr) : NULL;
   xe.XE.Misc.auxctx = auxctx;
   // FIXME: tid vs thr
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid,
                            XE_Misc, 0, NULL, &xe );
}

void HG_(record_error_Misc) ( Thread* thr, const HChar* errstr )
{
   HG_(record_error_Misc_w_aux)(thr, errstr, NULL, NULL);
}

void HG_(record_error_Dubious_w_aux) ( Thread* thr, const HChar* errstr,
                                       const HChar* auxstr, ExeContext* auxctx )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );
   tl_assert(errstr);
   init_XError(&xe);
   xe.tag = XE_Dubious;
   xe.XE.Misc.thr    = thr;
   xe.XE.Misc.errstr = string_table_strdup(errstr);
   xe.XE.Misc.auxstr = auxstr ? string_table_strdup(auxstr) : NULL;
   xe.XE.Misc.auxctx = auxctx;
   // FIXME: tid vs thr
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid,
                            XE_Dubious, 0, NULL, &xe );
}

void HG_(record_error_Dubious) ( Thread* thr, const HChar* errstr )
{
   HG_(record_error_Dubious_w_aux)(thr, errstr, NULL, NULL);
}

Bool HG_(eq_Error) ( VgRes not_used, const Error* e1, const Error* e2 )
{
   XError *xe1, *xe2;

   tl_assert(VG_(get_error_kind)(e1) == VG_(get_error_kind)(e2));

   xe1 = (XError*)VG_(get_error_extra)(e1);
   xe2 = (XError*)VG_(get_error_extra)(e2);
   tl_assert(xe1);
   tl_assert(xe2);

   switch (VG_(get_error_kind)(e1)) {
      case XE_Race:
         return xe1->XE.Race.szB == xe2->XE.Race.szB
                && xe1->XE.Race.isWrite == xe2->XE.Race.isWrite
                && (HG_(clo_cmp_race_err_addrs)
                       ? xe1->XE.Race.data_addr == xe2->XE.Race.data_addr
                       : True);
      case XE_UnlockUnlocked:
         return xe1->XE.UnlockUnlocked.thr == xe2->XE.UnlockUnlocked.thr
                && xe1->XE.UnlockUnlocked.lock == xe2->XE.UnlockUnlocked.lock;
      case XE_UnlockForeign:
         return xe1->XE.UnlockForeign.thr == xe2->XE.UnlockForeign.thr
                && xe1->XE.UnlockForeign.owner == xe2->XE.UnlockForeign.owner
                && xe1->XE.UnlockForeign.lock == xe2->XE.UnlockForeign.lock;
      case XE_UnlockBogus:
         return xe1->XE.UnlockBogus.thr == xe2->XE.UnlockBogus.thr
                && xe1->XE.UnlockBogus.lock_ga == xe2->XE.UnlockBogus.lock_ga;
      case XE_PthAPIerror:
         return xe1->XE.PthAPIerror.thr == xe2->XE.PthAPIerror.thr
                && 0==VG_(strcmp)(xe1->XE.PthAPIerror.fnname,
                                  xe2->XE.PthAPIerror.fnname)
                && xe1->XE.PthAPIerror.err == xe2->XE.PthAPIerror.err;
      case XE_LockOrder:
         return xe1->XE.LockOrder.thr == xe2->XE.LockOrder.thr;
      case XE_Misc:
         return xe1->XE.Misc.thr == xe2->XE.Misc.thr
                && 0==VG_(strcmp)(xe1->XE.Misc.errstr, xe2->XE.Misc.errstr);
      case XE_Dubious:
         return xe1->XE.Misc.thr == xe2->XE.Misc.thr
                && 0==VG_(strcmp)(xe1->XE.Misc.errstr, xe2->XE.Misc.errstr);
      default:
         tl_assert(0);
   }

   /*NOTREACHED*/
   tl_assert(0);
}
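
/* Illustrative note (not part of the upstream file): for XE_Race the
   comparison deliberately ignores the data address unless
   HG_(clo_cmp_race_err_addrs) is set (--cmp-race-err-addrs=yes), so
   under the default setting, races of the same size and direction at
   different addresses but with matching stacks are folded into a
   single report by the core's duplicate detection. */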

/*----------------------------------------------------------------*/
/*--- Error management -- printing                             ---*/
/*----------------------------------------------------------------*/

/* Do a printf-style operation on either the XML or normal output
   channel, depending on the setting of VG_(clo_xml).
*/
static void emit_WRK ( const HChar* format, va_list vargs )
{
   if (VG_(clo_xml)) {
      VG_(vprintf_xml)(format, vargs);
   } else {
      VG_(vmessage)(Vg_UserMsg, format, vargs);
   }
}
static void emit ( const HChar* format, ... ) PRINTF_CHECK(1, 2);
static void emit ( const HChar* format, ... )
{
   va_list vargs;
   va_start(vargs, format);
   emit_WRK(format, vargs);
   va_end(vargs);
}
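
/* Illustrative sketch (not part of the upstream file): emit() keeps
   the printing code below channel-agnostic, e.g.

      emit( "Thread #%d: %s\n", 2, "some message" );

   goes to the XML channel under --xml=yes and to the normal user
   message stream otherwise; PRINTF_CHECK(1, 2) requests printf-style
   format-string checking from the compiler. */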

/* Announce (that is, print the point-of-creation) of 'thr'.  Only do
   this once, as we only want to see these announcements once per
   thread.  Returned Bool indicates whether or not an announcement was
   made.
*/
static Bool announce_one_thread ( Thread* thr )
{
   tl_assert(HG_(is_sane_Thread)(thr));
   tl_assert(thr->errmsg_index >= 1);
   if (thr->announced)
      return False;

   if (VG_(clo_xml)) {

      VG_(printf_xml)("<announcethread>\n");
      VG_(printf_xml)("  <hthreadid>%d</hthreadid>\n", thr->errmsg_index);
      if (thr->errmsg_index == 1) {
         tl_assert(thr->created_at == NULL);
         VG_(printf_xml)("  <isrootthread></isrootthread>\n");
      } else {
         tl_assert(thr->created_at != NULL);
         VG_(pp_ExeContext)( thr->created_at );
      }
      VG_(printf_xml)("</announcethread>\n\n");

   } else {

      VG_(umsg)("---Thread-Announcement----------"
                "--------------------------------" "\n");
      VG_(umsg)("\n");

      if (thr->errmsg_index == 1) {
         tl_assert(thr->created_at == NULL);
         VG_(message)(Vg_UserMsg,
                      "Thread #%d is the program's root thread\n",
                      thr->errmsg_index);
      } else {
         tl_assert(thr->created_at != NULL);
         VG_(message)(Vg_UserMsg, "Thread #%d was created\n",
                                  thr->errmsg_index);
         VG_(pp_ExeContext)( thr->created_at );
      }
      VG_(message)(Vg_UserMsg, "\n");

   }

   thr->announced = True;
   return True;
}

/* Announce 'lk'. */
static void announce_LockP ( Lock* lk )
{
   tl_assert(lk);
   if (lk == Lock_INVALID)
      return; /* Can't be announced -- we know nothing about it. */
   tl_assert(lk->magic == LockP_MAGIC);

   if (VG_(clo_xml)) {
      if (lk->appeared_at) {
         emit( "  <auxwhat>Lock at %p was first observed</auxwhat>\n",
               (void*)lk );
         VG_(pp_ExeContext)( lk->appeared_at );
      }
   } else {
      if (lk->appeared_at) {
         VG_(umsg)( " Lock at %p was first observed\n",
                    (void*)lk->guestaddr );
         VG_(pp_ExeContext)( lk->appeared_at );
      } else {
         VG_(umsg)( " Lock at %p : no stacktrace for first observation\n",
                    (void*)lk->guestaddr );
      }
      HG_(get_and_pp_addrdescr)
         (lk->appeared_at
          ? VG_(get_ExeContext_epoch)(lk->appeared_at)
          : VG_(current_DiEpoch)(),
          lk->guestaddr);
      VG_(umsg)("\n");
   }
}

/* Announce (that is, print point-of-first-observation) for the
   locks in 'lockvec' and, if non-NULL, 'lockvec2'. */
static void announce_combined_LockP_vecs ( Lock** lockvec,
                                           Lock** lockvec2 )
{
   UWord i;
   tl_assert(lockvec);
   for (i = 0; lockvec[i]; i++) {
      announce_LockP(lockvec[i]);
   }
   if (lockvec2) {
      for (i = 0; lockvec2[i]; i++) {
         Lock* lk = lockvec2[i];
         if (!elem_LockP_vector(lockvec, lk))
            announce_LockP(lk);
      }
   }
}

static void show_LockP_summary_textmode ( Lock** locks, const HChar* pre )
{
   tl_assert(locks);
   UWord i;
   UWord nLocks = 0, nLocksValid = 0;
   count_LockP_vector(&nLocks, &nLocksValid, locks);
   tl_assert(nLocksValid <= nLocks);

   if (nLocks == 0) {
      VG_(umsg)( "%sLocks held: none", pre );
   } else {
      VG_(umsg)( "%sLocks held: %lu, at address%s ",
                 pre, nLocks, nLocksValid == 1 ? "" : "es" );
   }

   if (nLocks > 0) {
      for (i = 0; i < nLocks; i++) {
         if (locks[i] == Lock_INVALID)
            continue;
         VG_(umsg)( "%p", (void*)locks[i]->guestaddr);
         if (locks[i+1] != NULL)
            VG_(umsg)(" ");
      }
      if (nLocksValid < nLocks)
         VG_(umsg)(" (and %lu that can't be shown)", nLocks - nLocksValid);
   }
   VG_(umsg)("\n");
}
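
/* Illustrative sketch (not part of the upstream file): sample
   text-mode output for a vector holding two valid locks plus one
   Lock_INVALID placeholder:

      Locks held: 3, at addresses 0x5B0C040 0x5B0C090 (and 1 that can't be shown)

   and simply "Locks held: none" for an empty vector. */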

/* This is the "this error is due to be printed shortly; so have a
   look at it and print any preamble you want" function.  We use it to
   announce any previously un-announced threads in the upcoming error
   message.
*/
void HG_(before_pp_Error) ( const Error* err )
{
   XError* xe;
   tl_assert(err);
   xe = (XError*)VG_(get_error_extra)(err);
   tl_assert(xe);

   switch (VG_(get_error_kind)(err)) {
      case XE_Dubious:
         announce_one_thread( xe->XE.Misc.thr );
         break;
      case XE_Misc:
         announce_one_thread( xe->XE.Misc.thr );
         break;
      case XE_LockOrder:
         announce_one_thread( xe->XE.LockOrder.thr );
         break;
      case XE_PthAPIerror:
         announce_one_thread( xe->XE.PthAPIerror.thr );
         break;
      case XE_UnlockBogus:
         announce_one_thread( xe->XE.UnlockBogus.thr );
         break;
      case XE_UnlockForeign:
         announce_one_thread( xe->XE.UnlockForeign.thr );
         announce_one_thread( xe->XE.UnlockForeign.owner );
         break;
      case XE_UnlockUnlocked:
         announce_one_thread( xe->XE.UnlockUnlocked.thr );
         break;
      case XE_Race:
         announce_one_thread( xe->XE.Race.thr );
         if (xe->XE.Race.h2_ct)
            announce_one_thread( xe->XE.Race.h2_ct );
         if (xe->XE.Race.h1_ct)
            announce_one_thread( xe->XE.Race.h1_ct );
         if (xe->XE.Race.data_addrinfo.Addr.Block.alloc_tinfo.tnr) {
            Thread* thr = get_admin_threads();
            while (thr) {
               if (thr->errmsg_index
                   == xe->XE.Race.data_addrinfo.Addr.Block.alloc_tinfo.tnr) {
                  announce_one_thread (thr);
                  break;
               }
               thr = thr->admin;
            }
         }
         break;
      default:
         tl_assert(0);
   }
}

void HG_(pp_Error) ( const Error* err )
{
   const Bool xml = VG_(clo_xml); /* a shorthand, that's all */

   if (!xml) {
      VG_(umsg)("--------------------------------"
                "--------------------------------" "\n");
      VG_(umsg)("\n");
   }

   XError *xe = (XError*)VG_(get_error_extra)(err);
   tl_assert(xe);

   if (xml)
      emit( "  <kind>%s</kind>\n", HG_(get_error_name)(err));

   switch (VG_(get_error_kind)(err)) {

      case XE_Dubious: {
         tl_assert( HG_(is_sane_Thread)( xe->XE.Misc.thr ) );

         if (xml) {

            emit( "  <xwhat>\n" );
            emit( "    <text>Thread #%d: %s</text>\n",
                  (Int)xe->XE.Misc.thr->errmsg_index,
                  xe->XE.Misc.errstr );
            emit( "    <hthreadid>%d</hthreadid>\n",
                  (Int)xe->XE.Misc.thr->errmsg_index );
            emit( "  </xwhat>\n" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            if (xe->XE.Misc.auxstr) {
               emit("  <auxwhat>%s</auxwhat>\n", xe->XE.Misc.auxstr);
               if (xe->XE.Misc.auxctx)
                  VG_(pp_ExeContext)( xe->XE.Misc.auxctx );
            }

         } else {

            emit( "Thread #%d: %s\n",
                  (Int)xe->XE.Misc.thr->errmsg_index,
                  xe->XE.Misc.errstr );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            if (xe->XE.Misc.auxstr) {
               emit(" %s\n", xe->XE.Misc.auxstr);
               if (xe->XE.Misc.auxctx)
                  VG_(pp_ExeContext)( xe->XE.Misc.auxctx );
            }

         }
         break;
      }

      case XE_Misc: {
         tl_assert( HG_(is_sane_Thread)( xe->XE.Misc.thr ) );

         if (xml) {

            emit( "  <xwhat>\n" );
            emit( "    <text>Thread #%d: %s</text>\n",
                  (Int)xe->XE.Misc.thr->errmsg_index,
                  xe->XE.Misc.errstr );
            emit( "    <hthreadid>%d</hthreadid>\n",
                  (Int)xe->XE.Misc.thr->errmsg_index );
            emit( "  </xwhat>\n" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            if (xe->XE.Misc.auxstr) {
               emit("  <auxwhat>%s</auxwhat>\n", xe->XE.Misc.auxstr);
               if (xe->XE.Misc.auxctx)
                  VG_(pp_ExeContext)( xe->XE.Misc.auxctx );
            }

         } else {

            emit( "Thread #%d: %s\n",
                  (Int)xe->XE.Misc.thr->errmsg_index,
                  xe->XE.Misc.errstr );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            if (xe->XE.Misc.auxstr) {
               emit(" %s\n", xe->XE.Misc.auxstr);
               if (xe->XE.Misc.auxctx)
                  VG_(pp_ExeContext)( xe->XE.Misc.auxctx );
            }

         }
         break;
      }

      case XE_LockOrder: {
         tl_assert( HG_(is_sane_Thread)( xe->XE.LockOrder.thr ) );

         if (xml) {

            emit( "  <xwhat>\n" );
            emit( "    <text>Thread #%d: lock order \"%p before %p\" "
                  "violated</text>\n",
                  (Int)xe->XE.LockOrder.thr->errmsg_index,
                  (void*)xe->XE.LockOrder.shouldbe_earlier_lk->guestaddr,
                  (void*)xe->XE.LockOrder.shouldbe_later_lk->guestaddr );
            emit( "    <hthreadid>%d</hthreadid>\n",
                  (Int)xe->XE.LockOrder.thr->errmsg_index );
            emit( "  </xwhat>\n" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            if (xe->XE.LockOrder.shouldbe_earlier_ec
                && xe->XE.LockOrder.shouldbe_later_ec) {
               emit( "  <auxwhat>Required order was established by "
                     "acquisition of lock at %p</auxwhat>\n",
                     (void*)xe->XE.LockOrder.shouldbe_earlier_lk->guestaddr );
               VG_(pp_ExeContext)( xe->XE.LockOrder.shouldbe_earlier_ec );
               emit( "  <auxwhat>followed by a later acquisition "
                     "of lock at %p</auxwhat>\n",
                     (void*)xe->XE.LockOrder.shouldbe_later_lk->guestaddr );
               VG_(pp_ExeContext)( xe->XE.LockOrder.shouldbe_later_ec );
            }
            announce_LockP ( xe->XE.LockOrder.shouldbe_earlier_lk );
            announce_LockP ( xe->XE.LockOrder.shouldbe_later_lk );

         } else {

            emit( "Thread #%d: lock order \"%p before %p\" violated\n",
                  (Int)xe->XE.LockOrder.thr->errmsg_index,
                  (void*)xe->XE.LockOrder.shouldbe_earlier_lk->guestaddr,
                  (void*)xe->XE.LockOrder.shouldbe_later_lk->guestaddr );
            emit( "\n" );
            emit( "Observed (incorrect) order is: "
                  "acquisition of lock at %p\n",
                  (void*)xe->XE.LockOrder.shouldbe_later_lk->guestaddr);
            if (xe->XE.LockOrder.actual_earlier_ec) {
               VG_(pp_ExeContext)(xe->XE.LockOrder.actual_earlier_ec);
            } else {
               emit("   (stack unavailable)\n");
            }
            emit( "\n" );
            emit(" followed by a later acquisition of lock at %p\n",
                 (void*)xe->XE.LockOrder.shouldbe_earlier_lk->guestaddr);
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            if (xe->XE.LockOrder.shouldbe_earlier_ec
                && xe->XE.LockOrder.shouldbe_later_ec) {
               emit("\n");
               emit( "Required order was established by "
                     "acquisition of lock at %p\n",
                     (void*)xe->XE.LockOrder.shouldbe_earlier_lk->guestaddr );
               VG_(pp_ExeContext)( xe->XE.LockOrder.shouldbe_earlier_ec );
               emit( "\n" );
               emit( " followed by a later acquisition of lock at %p\n",
                     (void*)xe->XE.LockOrder.shouldbe_later_lk->guestaddr );
               VG_(pp_ExeContext)( xe->XE.LockOrder.shouldbe_later_ec );
            }
            emit("\n");
            announce_LockP ( xe->XE.LockOrder.shouldbe_earlier_lk );
            announce_LockP ( xe->XE.LockOrder.shouldbe_later_lk );

         }

         break;
      }

      case XE_PthAPIerror: {
         tl_assert( HG_(is_sane_Thread)( xe->XE.PthAPIerror.thr ) );

         if (xml) {

            emit( "  <xwhat>\n" );
            emit(
               "    <text>Thread #%d's call to %pS failed</text>\n",
               (Int)xe->XE.PthAPIerror.thr->errmsg_index,
               xe->XE.PthAPIerror.fnname );
            emit( "    <hthreadid>%d</hthreadid>\n",
                  (Int)xe->XE.PthAPIerror.thr->errmsg_index );
            emit( "  </xwhat>\n" );
            emit( "  <what>with error code %ld (%s)</what>\n",
                  xe->XE.PthAPIerror.err, xe->XE.PthAPIerror.errstr );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );

         } else {

            emit( "Thread #%d's call to %pS failed\n",
                  (Int)xe->XE.PthAPIerror.thr->errmsg_index,
                  xe->XE.PthAPIerror.fnname );
            emit( "   with error code %ld (%s)\n",
                  xe->XE.PthAPIerror.err, xe->XE.PthAPIerror.errstr );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );

         }

         break;
      }

      case XE_UnlockBogus: {
         tl_assert( HG_(is_sane_Thread)( xe->XE.UnlockBogus.thr ) );

         if (xml) {

            emit( "  <xwhat>\n" );
            emit( "    <text>Thread #%d unlocked an invalid "
                  "lock at %p</text>\n",
                  (Int)xe->XE.UnlockBogus.thr->errmsg_index,
                  (void*)xe->XE.UnlockBogus.lock_ga );
            emit( "    <hthreadid>%d</hthreadid>\n",
                  (Int)xe->XE.UnlockBogus.thr->errmsg_index );
            emit( "  </xwhat>\n" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );

         } else {

            emit( "Thread #%d unlocked an invalid lock at %p\n",
                  (Int)xe->XE.UnlockBogus.thr->errmsg_index,
                  (void*)xe->XE.UnlockBogus.lock_ga );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );

         }

         break;
      }

      case XE_UnlockForeign: {
         tl_assert( HG_(is_sane_LockP)( xe->XE.UnlockForeign.lock ) );
         tl_assert( HG_(is_sane_Thread)( xe->XE.UnlockForeign.owner ) );
         tl_assert( HG_(is_sane_Thread)( xe->XE.UnlockForeign.thr ) );

         if (xml) {

            emit( "  <xwhat>\n" );
            emit( "    <text>Thread #%d unlocked lock at %p "
                  "currently held by thread #%d</text>\n",
                  (Int)xe->XE.UnlockForeign.thr->errmsg_index,
                  (void*)xe->XE.UnlockForeign.lock->guestaddr,
                  (Int)xe->XE.UnlockForeign.owner->errmsg_index );
            emit( "    <hthreadid>%d</hthreadid>\n",
                  (Int)xe->XE.UnlockForeign.thr->errmsg_index );
            emit( "    <hthreadid>%d</hthreadid>\n",
                  (Int)xe->XE.UnlockForeign.owner->errmsg_index );
            emit( "  </xwhat>\n" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            announce_LockP ( xe->XE.UnlockForeign.lock );

         } else {

            emit( "Thread #%d unlocked lock at %p "
                  "currently held by thread #%d\n",
                  (Int)xe->XE.UnlockForeign.thr->errmsg_index,
                  (void*)xe->XE.UnlockForeign.lock->guestaddr,
                  (Int)xe->XE.UnlockForeign.owner->errmsg_index );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            announce_LockP ( xe->XE.UnlockForeign.lock );

         }

         break;
      }

      case XE_UnlockUnlocked: {
         tl_assert( HG_(is_sane_LockP)( xe->XE.UnlockUnlocked.lock ) );
         tl_assert( HG_(is_sane_Thread)( xe->XE.UnlockUnlocked.thr ) );

         if (xml) {

            emit( "  <xwhat>\n" );
            emit( "    <text>Thread #%d unlocked a "
                  "not-locked lock at %p</text>\n",
                  (Int)xe->XE.UnlockUnlocked.thr->errmsg_index,
                  (void*)xe->XE.UnlockUnlocked.lock->guestaddr );
            emit( "    <hthreadid>%d</hthreadid>\n",
                  (Int)xe->XE.UnlockUnlocked.thr->errmsg_index );
            emit( "  </xwhat>\n" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            announce_LockP ( xe->XE.UnlockUnlocked.lock);

         } else {

            emit( "Thread #%d unlocked a not-locked lock at %p\n",
                  (Int)xe->XE.UnlockUnlocked.thr->errmsg_index,
                  (void*)xe->XE.UnlockUnlocked.lock->guestaddr );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            announce_LockP ( xe->XE.UnlockUnlocked.lock);

         }

         break;
      }

      case XE_Race: {
         Addr         err_ga;
         const HChar* what;
         Int          szB;
         what   = xe->XE.Race.isWrite ? "write" : "read";
         szB    = xe->XE.Race.szB;
         err_ga = VG_(get_error_address)(err);

         tl_assert( HG_(is_sane_Thread)( xe->XE.Race.thr ));
         if (xe->XE.Race.h2_ct)
            tl_assert( HG_(is_sane_Thread)( xe->XE.Race.h2_ct ));

         if (xml) {

            /* ------ XML ------ */
            emit( "  <xwhat>\n" );
            emit( "    <text>Possible data race during %s of size %d "
                  "at %p by thread #%d</text>\n",
                  what, szB, (void*)err_ga, (Int)xe->XE.Race.thr->errmsg_index );
            emit( "    <hthreadid>%d</hthreadid>\n",
                  (Int)xe->XE.Race.thr->errmsg_index );
            emit( "  </xwhat>\n" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );

            if (xe->XE.Race.h2_ct) {
               tl_assert(xe->XE.Race.h2_ct_accEC); // assured by update_extra
               emit( "  <xauxwhat>\n");
               emit( "    <text>This conflicts with a previous %s of size %d "
                     "by thread #%d</text>\n",
                     xe->XE.Race.h2_ct_accIsW ? "write" : "read",
                     xe->XE.Race.h2_ct_accSzB,
                     xe->XE.Race.h2_ct->errmsg_index );
               emit( "    <hthreadid>%d</hthreadid>\n",
                     xe->XE.Race.h2_ct->errmsg_index);
               emit("  </xauxwhat>\n");
               VG_(pp_ExeContext)( xe->XE.Race.h2_ct_accEC );
            }

            if (xe->XE.Race.h1_ct) {
               emit( "  <xauxwhat>\n");
               emit( "    <text>This conflicts with a previous access "
                     "by thread #%d, after</text>\n",
                     xe->XE.Race.h1_ct->errmsg_index );
               emit( "    <hthreadid>%d</hthreadid>\n",
                     xe->XE.Race.h1_ct->errmsg_index );
               emit("  </xauxwhat>\n");
               if (xe->XE.Race.h1_ct_mbsegstartEC) {
                  VG_(pp_ExeContext)( xe->XE.Race.h1_ct_mbsegstartEC );
               } else {
                  emit( "  <auxwhat>(the start of the thread)</auxwhat>\n" );
               }
               emit( "  <auxwhat>but before</auxwhat>\n" );
               if (xe->XE.Race.h1_ct_mbsegendEC) {
                  VG_(pp_ExeContext)( xe->XE.Race.h1_ct_mbsegendEC );
               } else {
                  emit( "  <auxwhat>(the end of the thread)</auxwhat>\n" );
               }
            }

         } else {

            /* ------ Text ------ */
            announce_combined_LockP_vecs( xe->XE.Race.locksHeldW,
                                          xe->XE.Race.h2_ct_locksHeldW );

            emit( "Possible data race during %s of size %d "
                  "at %p by thread #%d\n",
                  what, szB, (void*)err_ga, (Int)xe->XE.Race.thr->errmsg_index );

            tl_assert(xe->XE.Race.locksHeldW);
            show_LockP_summary_textmode( xe->XE.Race.locksHeldW, "" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );

            if (xe->XE.Race.h2_ct) {
               tl_assert(xe->XE.Race.h2_ct_accEC); // assured by update_extra
               tl_assert(xe->XE.Race.h2_ct_locksHeldW);
               emit( "\n" );
               emit( "This conflicts with a previous %s of size %d "
                     "by thread #%d\n",
                     xe->XE.Race.h2_ct_accIsW ? "write" : "read",
                     xe->XE.Race.h2_ct_accSzB,
                     xe->XE.Race.h2_ct->errmsg_index );
               show_LockP_summary_textmode( xe->XE.Race.h2_ct_locksHeldW, "" );
               VG_(pp_ExeContext)( xe->XE.Race.h2_ct_accEC );
            }

            if (xe->XE.Race.h1_ct) {
               emit( " This conflicts with a previous access by thread #%d, "
                     "after\n",
                     xe->XE.Race.h1_ct->errmsg_index );
               if (xe->XE.Race.h1_ct_mbsegstartEC) {
                  VG_(pp_ExeContext)( xe->XE.Race.h1_ct_mbsegstartEC );
               } else {
                  emit( "   (the start of the thread)\n" );
               }
               emit( " but before\n" );
               if (xe->XE.Race.h1_ct_mbsegendEC) {
                  VG_(pp_ExeContext)( xe->XE.Race.h1_ct_mbsegendEC );
               } else {
                  emit( "   (the end of the thread)\n" );
               }
            }

         }
         VG_(pp_addrinfo) (err_ga, &xe->XE.Race.data_addrinfo);
         break; /* case XE_Race */
      } /* case XE_Race */

      default:
         tl_assert(0);

   } /* switch (VG_(get_error_kind)(err)) */
}
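
/* Illustrative sketch (not part of the upstream file): the text-mode
   shape produced by the XE_Race case above, with addresses and stacks
   elided:

      Possible data race during write of size 4 at 0x60B0D0 by thread #2
      Locks held: none
         at 0x...: worker (example.c:10)

      This conflicts with a previous read of size 4 by thread #3
      Locks held: none
         at 0x...: worker (example.c:12)

   followed by VG_(pp_addrinfo)'s description of the raced-on
   location. */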

void HG_(print_access) (StackTrace ips, UInt n_ips,
                        Thr* thr_a,
                        Addr  ga,
                        SizeT SzB,
                        Bool  isW,
                        WordSetID locksHeldW )
{
   Thread* threadp;

   threadp = libhb_get_Thr_hgthread( thr_a );
   tl_assert(threadp);
   if (!threadp->announced) {
      /* This is for interactive use. We announce the thread if needed,
         but reset it to not announced afterwards, because we want
         the thread to be announced on the error output/log if needed. */
      announce_one_thread (threadp);
      threadp->announced = False;
   }

   announce_one_thread (threadp);
   VG_(printf) ("%s of size %d at %p by thread #%d",
                isW ? "write" : "read",
                (int)SzB, (void*)ga, threadp->errmsg_index);
   if (threadp->coretid == VG_INVALID_THREADID)
      VG_(printf)(" tid (exited)\n");
   else
      VG_(printf)(" tid %u\n", threadp->coretid);

   Lock** locksHeldW_P;
   locksHeldW_P = enumerate_WordSet_into_LockP_vector(
                     HG_(get_univ_lsets)(),
                     locksHeldW,
                     True/*allowed_to_be_invalid*/
                  );
   show_LockP_summary_textmode( locksHeldW_P, "" );
   HG_(free) (locksHeldW_P);

   // FIXME PW EPOCH : need the real ips epoch.
   VG_(pp_StackTrace)( VG_(current_DiEpoch)(), ips, n_ips );
   VG_(printf) ("\n");
}

const HChar* HG_(get_error_name) ( const Error* err )
{
   switch (VG_(get_error_kind)(err)) {
      case XE_Race:           return "Race";
      case XE_UnlockUnlocked: return "UnlockUnlocked";
      case XE_UnlockForeign:  return "UnlockForeign";
      case XE_UnlockBogus:    return "UnlockBogus";
      case XE_PthAPIerror:    return "PthAPIerror";
      case XE_LockOrder:      return "LockOrder";
      case XE_Misc:           return "Misc";
      case XE_Dubious:        return "Dubious";
      default: tl_assert(0); /* fill in missing case */
   }
}

Bool HG_(recognised_suppression) ( const HChar* name, Supp *su )
{
#  define TRY(_name,_xskind)                   \
      if (0 == VG_(strcmp)(name, (_name))) {   \
         VG_(set_supp_kind)(su, (_xskind));    \
         return True;                          \
      }
   TRY("Race",           XS_Race);
   TRY("FreeMemLock",    XS_FreeMemLock);
   TRY("UnlockUnlocked", XS_UnlockUnlocked);
   TRY("UnlockForeign",  XS_UnlockForeign);
   TRY("UnlockBogus",    XS_UnlockBogus);
   TRY("PthAPIerror",    XS_PthAPIerror);
   TRY("LockOrder",      XS_LockOrder);
   TRY("Misc",           XS_Misc);
   TRY("Dubious",        XS_Dubious);
   return False;
#  undef TRY
}
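
/* Illustrative sketch (not part of the upstream file): these names
   are what users write on the second line of a suppressions-file
   entry, e.g.

      {
         ignore-known-benign-race
         Helgrind:Race
         fun:worker_thread
      }

   "Helgrind:Race" is matched here and mapped to XS_Race, which
   HG_(error_matches_suppression) below then compares against
   XE_Race. */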

Bool HG_(read_extra_suppression_info) ( Int fd, HChar** bufpp, SizeT* nBufp,
                                        Int* lineno, Supp* su )
{
   /* do nothing -- no extra suppression info present.  Return True to
      indicate nothing bad happened. */
   return True;
}

Bool HG_(error_matches_suppression) ( const Error* err, const Supp* su )
{
   switch (VG_(get_supp_kind)(su)) {
      case XS_Race:           return VG_(get_error_kind)(err) == XE_Race;
      case XS_UnlockUnlocked: return VG_(get_error_kind)(err) == XE_UnlockUnlocked;
      case XS_UnlockForeign:  return VG_(get_error_kind)(err) == XE_UnlockForeign;
      case XS_UnlockBogus:    return VG_(get_error_kind)(err) == XE_UnlockBogus;
      case XS_PthAPIerror:    return VG_(get_error_kind)(err) == XE_PthAPIerror;
      case XS_LockOrder:      return VG_(get_error_kind)(err) == XE_LockOrder;
      case XS_Misc:           return VG_(get_error_kind)(err) == XE_Misc;
      case XS_Dubious:        return VG_(get_error_kind)(err) == XE_Dubious;
      //case XS_: return VG_(get_error_kind)(err) == XE_;
      default: tl_assert(0); /* fill in missing cases */
   }
}

SizeT HG_(get_extra_suppression_info) ( const Error* err,
                                        /*OUT*/HChar* buf, Int nBuf )
{
   tl_assert(nBuf >= 1);
   /* Do nothing */
   buf[0] = '\0';
   return 0;
}

SizeT HG_(print_extra_suppression_use) ( const Supp* su,
                                         /*OUT*/HChar* buf, Int nBuf )
{
   tl_assert(nBuf >= 1);
   /* Do nothing */
   buf[0] = '\0';
   return 0;
}

void HG_(update_extra_suppression_use) ( const Error* err, const Supp* su )
{
   /* Do nothing */
   return;
}

/*--------------------------------------------------------------------*/
/*--- end                                              hg_errors.c ---*/
/*--------------------------------------------------------------------*/