/*
  This file is part of drd, a thread error detector.

  Copyright (C) 2006-2020 Bart Van Assche <bvanassche@acm.org>.

  This program is free software; you can redistribute it and/or
  modify it under the terms of the GNU General Public License as
  published by the Free Software Foundation; either version 2 of the
  License, or (at your option) any later version.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, see <http://www.gnu.org/licenses/>.

  The GNU General Public License is contained in the file COPYING.
*/
#include "drd_barrier.h"
#include "drd_clientobj.h"
#include "drd_clientreq.h"
#include "drd_cond.h"
#include "drd_error.h"
#include "drd_hb.h"
#include "drd_load_store.h"
#include "drd_malloc_wrappers.h"
#include "drd_mutex.h"
#include "drd_rwlock.h"
#include "drd_segment.h"
#include "drd_semaphore.h"
#include "drd_suppression.h"
#include "drd_thread.h"
#include "libvex_guest_offsets.h"
#include "pub_drd_bitmap.h"
#include "pub_tool_vki.h"         // Must be included before pub_tool_libcproc
#include "pub_tool_basics.h"
#include "pub_tool_libcassert.h"  // tl_assert()
#include "pub_tool_libcbase.h"    // VG_(strcmp)
#include "pub_tool_libcprint.h"   // VG_(printf)
#include "pub_tool_libcproc.h"    // VG_(getenv)()
#include "pub_tool_machine.h"
#include "pub_tool_mallocfree.h"  // VG_(malloc)(), VG_(free)()
#include "pub_tool_options.h"     // command line options
#include "pub_tool_replacemalloc.h"
#include "pub_tool_threadstate.h" // VG_(get_running_tid)()
#include "pub_tool_tooliface.h"
#include "pub_tool_aspacemgr.h"   // VG_(am_is_valid_for_client)

/* Local variables. */

static Bool s_print_stats;
static Bool s_var_info;
static Bool s_show_stack_usage;
static Bool s_trace_alloc;
static Bool trace_sectsuppr;

/**
 * Implement the needs_command_line_options callback for drd.
 */
static Bool DRD_(process_cmd_line_option)(const HChar* arg)
{
   Bool check_stack_accesses = False;
   int join_list_vol = -1;
   int exclusive_threshold_ms = -1;
   Bool first_race_only = False;
   Bool report_signal_unlocked = False;
   Bool segment_merging = False;
   int segment_merge_interval = -1;
   int shared_threshold_ms = -1;
   Bool show_confl_seg = False;
   Bool trace_barrier = False;
   Bool trace_clientobj = False;
   Bool trace_cond = False;
   Bool trace_csw = False;
   Bool trace_fork_join = False;
   Bool trace_hb = False;
   Bool trace_conflict_set = False;
   Bool trace_conflict_set_bm = False;
   Bool trace_mutex = False;
   Bool trace_rwlock = False;
   Bool trace_segment = False;
   Bool trace_semaphore = False;
   Bool trace_suppression = False;
   const HChar* trace_address = 0;
   const HChar* ptrace_address = 0;

   if VG_BOOL_CLO(arg, "--check-stack-var", check_stack_accesses) {
      DRD_(set_check_stack_accesses)(check_stack_accesses);
   }
   else if VG_INT_CLO (arg, "--join-list-vol", join_list_vol) {}
   else if VG_BOOL_CLO(arg, "--drd-stats", s_print_stats) {}
   else if VG_BOOL_CLO(arg, "--first-race-only", first_race_only) {
      DRD_(set_first_race_only)(first_race_only);
   }
   else if VG_BOOL_CLO(arg, "--free-is-write", DRD_(g_free_is_write)) {}
   else if VG_BOOL_CLO(arg,"--report-signal-unlocked",report_signal_unlocked) {
      DRD_(cond_set_report_signal_unlocked)(report_signal_unlocked);
   }
   else if VG_BOOL_CLO(arg, "--segment-merging", segment_merging) {
      DRD_(thread_set_segment_merging)(segment_merging);
   }
   else if VG_INT_CLO (arg, "--segment-merging-interval", segment_merge_interval)
   {}
   else if VG_BOOL_CLO(arg, "--show-confl-seg", show_confl_seg) {
      DRD_(set_show_conflicting_segments)(show_confl_seg);
   }
   else if VG_BOOL_CLO(arg, "--show-stack-usage", s_show_stack_usage) {}
   else if VG_BOOL_CLO(arg, "--ignore-thread-creation",
                       DRD_(ignore_thread_creation)) {}
   else if VG_BOOL_CLO(arg, "--trace-alloc", s_trace_alloc) {}
   else if VG_BOOL_CLO(arg, "--trace-barrier", trace_barrier) {
      DRD_(barrier_set_trace)(trace_barrier);
   }
   else if VG_BOOL_CLO(arg, "--trace-clientobj", trace_clientobj) {
      DRD_(clientobj_set_trace)(trace_clientobj);
   }
   else if VG_BOOL_CLO(arg, "--trace-cond", trace_cond) {
      DRD_(cond_set_trace)(trace_cond);
   }
   else if VG_BOOL_CLO(arg, "--trace-conflict-set", trace_conflict_set) {
      DRD_(thread_trace_conflict_set)(trace_conflict_set);
   }
   else if VG_BOOL_CLO(arg, "--trace-conflict-set-bm", trace_conflict_set_bm) {
      DRD_(thread_trace_conflict_set_bm)(trace_conflict_set_bm);
   }
   else if VG_BOOL_CLO(arg, "--trace-csw", trace_csw) {
      DRD_(thread_trace_context_switches)(trace_csw);
   }
   else if VG_BOOL_CLO(arg, "--trace-fork-join", trace_fork_join) {
      DRD_(thread_set_trace_fork_join)(trace_fork_join);
   }
   else if VG_BOOL_CLO(arg, "--trace-hb", trace_hb) {
      DRD_(hb_set_trace)(trace_hb);
   }
   else if VG_BOOL_CLO(arg, "--trace-mutex", trace_mutex) {
      DRD_(mutex_set_trace)(trace_mutex);
   }
   else if VG_BOOL_CLO(arg, "--trace-rwlock", trace_rwlock) {
      DRD_(rwlock_set_trace)(trace_rwlock);
   }
   else if VG_BOOL_CLO(arg, "--trace-sectsuppr", trace_sectsuppr) {}
   else if VG_BOOL_CLO(arg, "--trace-segment", trace_segment) {
      DRD_(sg_set_trace)(trace_segment);
   }
   else if VG_BOOL_CLO(arg, "--trace-semaphore", trace_semaphore) {
      DRD_(semaphore_set_trace)(trace_semaphore);
   }
   else if VG_BOOL_CLO(arg, "--trace-suppr", trace_suppression) {
      DRD_(suppression_set_trace)(trace_suppression);
   }
   else if VG_BOOL_CLO(arg, "--var-info", s_var_info) {}
   else if VG_BOOL_CLO(arg, "--verify-conflict-set", DRD_(verify_conflict_set))
   {}
   else if VG_INT_CLO (arg, "--exclusive-threshold", exclusive_threshold_ms) {}
   else if VG_STR_CLO (arg, "--ptrace-addr", ptrace_address) {}
   else if VG_INT_CLO (arg, "--shared-threshold", shared_threshold_ms) {}
   else if VG_STR_CLO (arg, "--trace-addr", trace_address) {}
   else
      return VG_(replacement_malloc_process_cmd_line_option)(arg);

   if (exclusive_threshold_ms != -1)
   {
      DRD_(mutex_set_lock_threshold)(exclusive_threshold_ms);
      DRD_(rwlock_set_exclusive_threshold)(exclusive_threshold_ms);
   }
   if (join_list_vol != -1)
      DRD_(thread_set_join_list_vol)(join_list_vol);
   if (shared_threshold_ms != -1)
   {
      DRD_(rwlock_set_shared_threshold)(shared_threshold_ms);
   }
   if (segment_merge_interval != -1)
      DRD_(thread_set_segment_merge_interval)(segment_merge_interval);
   if (trace_address) {
      const Addr addr = VG_(strtoll16)(trace_address, 0);
      DRD_(start_tracing_address_range)(addr, addr + 1, False);
   }
   if (ptrace_address) {
      HChar *plus = VG_(strchr)(ptrace_address, '+');
      Addr addr, length;
      if (plus)
         *plus = '\0';
      addr = VG_(strtoll16)(ptrace_address, 0);
      length = plus ? VG_(strtoll16)(plus + 1, 0) : 1;
      DRD_(start_tracing_address_range)(addr, addr + length, True);
   }

   return True;
}
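
/*
 * Illustration (hypothetical command line):
 *
 *    valgrind --tool=drd --check-stack-var=yes --first-race-only=yes \
 *             --trace-addr=0x601040 ./myapp
 *
 * Each "--option=value" argument is handed to the function above in turn;
 * anything it does not recognize is forwarded to
 * VG_(replacement_malloc_process_cmd_line_option)().
 */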

static void DRD_(print_usage)(void)
{
   VG_(printf)(
"    --check-stack-var=yes|no  Whether or not to report data races on\n"
"                              stack variables [no].\n"
"    --exclusive-threshold=<n> Print an error message if any mutex or\n"
"                              writer lock is held longer than the specified\n"
"                              time (in milliseconds) [off].\n"
"    --first-race-only=yes|no  Only report the first data race that occurs on\n"
"                              a memory location instead of all races [no].\n"
"    --free-is-write=yes|no    Whether to report races between freeing memory\n"
"                              and subsequent accesses of that memory [no].\n"
"    --join-list-vol=<n>       Number of threads to delay cleanup for [10].\n"
"    --report-signal-unlocked=yes|no Whether to report calls to\n"
"                              pthread_cond_signal() where the mutex associated\n"
"                              with the signal via pthread_cond_wait() is not\n"
"                              locked at the time the signal is sent [yes].\n"
"    --segment-merging=yes|no  Controls segment merging [yes].\n"
"        Segment merging is an algorithm to limit memory usage of the\n"
"        data race detection algorithm. Disabling segment merging may\n"
"        improve the accuracy of the so-called 'other segments' displayed\n"
"        in race reports but can also trigger an out of memory error.\n"
"    --segment-merging-interval=<n> Perform segment merging every time n new\n"
"        segments have been created. Default: %d.\n"
"    --shared-threshold=<n>    Print an error message if a reader lock\n"
"                              is held longer than the specified time (in\n"
"                              milliseconds) [off].\n"
"    --show-confl-seg=yes|no   Show conflicting segments in race reports [yes].\n"
"    --show-stack-usage=yes|no Print stack usage at thread exit time [no].\n"
"    --ignore-thread-creation=yes|no Ignore activities during thread\n"
"                              creation [%s].\n"
"\n"
"  drd options for monitoring process behavior:\n"
"    --ptrace-addr=<address>[+<length>] Trace all load and store activity for\n"
"                              the specified address range and keep doing that\n"
"                              even after the memory at that address has been\n"
"                              freed and reallocated [off].\n"
"    --trace-addr=<address>    Trace all load and store activity for the\n"
"                              specified address [off].\n"
"    --trace-alloc=yes|no      Trace all memory allocations and deallocations\n"
"                              [no].\n"
"    --trace-barrier=yes|no    Trace all barrier activity [no].\n"
"    --trace-cond=yes|no       Trace all condition variable activity [no].\n"
"    --trace-fork-join=yes|no  Trace all thread fork/join activity [no].\n"
"    --trace-hb=yes|no         Trace ANNOTATE_HAPPENS_BEFORE() etc. [no].\n"
"    --trace-mutex=yes|no      Trace all mutex activity [no].\n"
"    --trace-rwlock=yes|no     Trace all reader-writer lock activity [no].\n"
"    --trace-semaphore=yes|no  Trace all semaphore activity [no].\n",
               DRD_(thread_get_segment_merge_interval)(),
               DRD_(ignore_thread_creation) ? "yes" : "no"
               );
}

static void DRD_(print_debug_usage)(void)
{
   VG_(printf)(
"    --drd-stats=yes|no        Print statistics about DRD activity [no].\n"
"    --trace-clientobj=yes|no  Trace all client object activity [no].\n"
"    --trace-csw=yes|no        Trace all scheduler context switches [no].\n"
"    --trace-conflict-set=yes|no Trace all conflict set updates [no].\n"
"    --trace-conflict-set-bm=yes|no Trace all conflict set bitmap\n"
"                              updates [no]. Note: enabling this option\n"
"                              will generate a lot of output!\n"
"    --trace-sectsuppr=yes|no  Trace the dynamic library sections on which\n"
"                              data race detection is suppressed.\n"
"    --trace-segment=yes|no    Trace segment actions [no].\n"
"    --trace-suppr=yes|no      Trace all address suppression actions [no].\n"
"    --verify-conflict-set=yes|no Verify conflict set consistency [no].\n"
               );
}

// Implements the thread-related core callbacks.

static void drd_pre_mem_read(const CorePart part,
                             const ThreadId tid,
                             const HChar* const s,
                             const Addr a,
                             const SizeT size)
{
   DRD_(thread_set_vg_running_tid)(VG_(get_running_tid)());
   if (size > 0)
   {
      DRD_(trace_load)(a, size);
   }
}

static void drd_pre_mem_read_asciiz(const CorePart part,
                                    const ThreadId tid,
                                    const HChar* const s,
                                    const Addr a)
{
   const HChar* p = (void*)a;
   SizeT size = 0;

   // Don't segfault if the string starts in an obviously stupid
   // place. Actually we should check the whole string, not just
   // the start address, but that's too much trouble. At least
   // checking the first byte is better than nothing. See #255009.
   if (!VG_(am_is_valid_for_client) (a, 1, VKI_PROT_READ))
      return;

   /* Note: the expression '*p' reads client memory and may crash if the */
   /* client provided an invalid pointer!                                */
   while (*p)
   {
      p++;
      size++;
   }
   if (size > 0)
   {
      DRD_(trace_load)(a, size);
   }
}

static void drd_post_mem_write(const CorePart part,
                               const ThreadId tid,
                               const Addr a,
                               const SizeT size)
{
   DRD_(thread_set_vg_running_tid)(VG_(get_running_tid)());
   if (size > 0)
   {
      DRD_(trace_store)(a, size);
   }
}
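
/*
 * The three callbacks above are invoked when the Valgrind core itself reads
 * or writes client memory, e.g. while handling system call arguments and
 * results: loads are traced before the core reads the memory and stores are
 * traced after the core has written it.
 */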

static __inline__
void drd_start_using_mem(const Addr a1, const SizeT len,
                         const Bool is_stack_mem)
{
   const Addr a2 = a1 + len;

   tl_assert(a1 <= a2);

   if (!is_stack_mem && s_trace_alloc)
      DRD_(trace_msg)("Started using memory range 0x%lx + %lu%s",
                      a1, len, DRD_(running_thread_inside_pthread_create)()
                      ? " (inside pthread_create())" : "");

   if (!is_stack_mem && DRD_(g_free_is_write))
      DRD_(thread_stop_using_mem)(a1, a2);

   if (UNLIKELY(DRD_(any_address_is_traced)()))
   {
      DRD_(trace_mem_access)(a1, len, eStart, 0, 0);
   }

   if (UNLIKELY(DRD_(running_thread_inside_pthread_create)()))
   {
      DRD_(start_suppression)(a1, a2, "pthread_create()");
   }
}
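
/*
 * The "pthread_create()" suppression started above covers memory that a
 * thread allocates while it is inside pthread_create(); it is removed again
 * by DRD_(suppression_stop_using_mem)() once the range is released (see
 * drd_stop_using_mem() below).
 */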

static void drd_start_using_mem_w_ecu(const Addr a1,
                                      const SizeT len,
                                      UInt ec_uniq)
{
   drd_start_using_mem(a1, len, False);
}

static void drd_start_using_mem_w_tid(const Addr a1,
                                      const SizeT len,
                                      ThreadId tid)
{
   drd_start_using_mem(a1, len, False);
}
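
/*
 * When --free-is-write=yes is in effect, releasing non-stack memory is
 * modeled as a store to that range (see the DRD_(trace_store)() call below),
 * so races between freeing memory and still-pending accesses from other
 * threads get reported; otherwise the access information for the range is
 * simply discarded.
 */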

static __inline__
void drd_stop_using_mem(const Addr a1, const SizeT len,
                        const Bool is_stack_mem)
{
   const Addr a2 = a1 + len;

   tl_assert(a1 <= a2);

   if (UNLIKELY(DRD_(any_address_is_traced)()))
      DRD_(trace_mem_access)(a1, len, eEnd, 0, 0);

   if (!is_stack_mem && s_trace_alloc)
      DRD_(trace_msg)("Stopped using memory range 0x%lx + %lu",
                      a1, len);

   if (!is_stack_mem || DRD_(get_check_stack_accesses)())
   {
      if (is_stack_mem || !DRD_(g_free_is_write))
         DRD_(thread_stop_using_mem)(a1, a2);
      else if (DRD_(g_free_is_write))
         DRD_(trace_store)(a1, len);
      DRD_(clientobj_stop_using_mem)(a1, a2);
      DRD_(suppression_stop_using_mem)(a1, a2);
   }
}

static __inline__
void drd_stop_using_nonstack_mem(const Addr a1, const SizeT len)
{
   drd_stop_using_mem(a1, len, False);
}

/**
 * Discard all information DRD has about memory accesses and client objects
 * in the specified address range.
 */
void DRD_(clean_memory)(const Addr a1, const SizeT len)
{
   const Bool is_stack_memory = DRD_(thread_address_on_any_stack)(a1);
   drd_stop_using_mem(a1, len, is_stack_memory);
   drd_start_using_mem(a1, len, is_stack_memory);
}
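
/*
 * Illustration (hypothetical client code, assuming the ANNOTATE_NEW_MEMORY()
 * macro from <valgrind/drd.h> reaches this function via a client request):
 * a program that recycles buffers from a custom allocator can ask DRD to
 * forget the access history of a range, e.g.
 *
 *    #include <valgrind/drd.h>
 *    ANNOTATE_NEW_MEMORY(buf, buf_size);
 *
 * after which earlier accesses to 'buf' no longer take part in race
 * detection.
 */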

/**
 * Suppress data race reports on all addresses contained in .plt, .got and
 * .got.plt sections inside the address range [ a, a + len [. The data in
 * these sections is modified by _dl_relocate_object() every time a function
 * in a shared library is called for the first time. Since the first call
 * to a function in a shared library can happen from a multithreaded context,
 * such calls can cause conflicting accesses. See also Ulrich Drepper's
 * paper "How to Write Shared Libraries" for more information about relocation
 * (http://people.redhat.com/drepper/dsohowto.pdf).
 * Note: the contents of the .got section are only modified by the MIPS resolver.
 */
static void DRD_(suppress_relocation_conflicts)(const Addr a, const SizeT len)
{
   const DebugInfo* di;

   if (trace_sectsuppr)
      VG_(dmsg)("Evaluating range @ 0x%lx size %lu\n", a, len);

   for (di = VG_(next_DebugInfo)(0); di; di = VG_(next_DebugInfo)(di)) {
      Addr avma;
      SizeT size;

      if (trace_sectsuppr)
         VG_(dmsg)("Examining %s / %s\n", VG_(DebugInfo_get_filename)(di),
                   VG_(DebugInfo_get_soname)(di));

      /*
       * Suppress the race report on the libpthread global variable
       * __pthread_multiple_threads. See also
       * http://bugs.kde.org/show_bug.cgi?id=323905.
       */
      avma = VG_(DebugInfo_get_bss_avma)(di);
      size = VG_(DebugInfo_get_bss_size)(di);
      tl_assert((avma && size) || (avma == 0 && size == 0));
      if (size > 0 &&
          VG_(strcmp)(VG_(DebugInfo_get_soname)(di), "libpthread.so.0") == 0) {
         if (trace_sectsuppr)
            VG_(dmsg)("Suppressing .bss @ 0x%lx size %lu\n", avma, size);
         tl_assert(VG_(DebugInfo_sect_kind)(NULL, avma) == Vg_SectBSS);
         DRD_(start_suppression)(avma, avma + size, ".bss");
      }

      avma = VG_(DebugInfo_get_plt_avma)(di);
      size = VG_(DebugInfo_get_plt_size)(di);
      tl_assert((avma && size) || (avma == 0 && size == 0));
      if (size > 0) {
         if (trace_sectsuppr)
            VG_(dmsg)("Suppressing .plt @ 0x%lx size %lu\n", avma, size);
         tl_assert(VG_(DebugInfo_sect_kind)(NULL, avma) == Vg_SectPLT);
         DRD_(start_suppression)(avma, avma + size, ".plt");
      }

      avma = VG_(DebugInfo_get_gotplt_avma)(di);
      size = VG_(DebugInfo_get_gotplt_size)(di);
      tl_assert((avma && size) || (avma == 0 && size == 0));
      if (size > 0) {
         if (trace_sectsuppr)
            VG_(dmsg)("Suppressing .got.plt @ 0x%lx size %lu\n", avma, size);
         tl_assert(VG_(DebugInfo_sect_kind)(NULL, avma) == Vg_SectGOTPLT);
         DRD_(start_suppression)(avma, avma + size, ".gotplt");
      }

      avma = VG_(DebugInfo_get_got_avma)(di);
      size = VG_(DebugInfo_get_got_size)(di);
      tl_assert((avma && size) || (avma == 0 && size == 0));
      if (size > 0) {
         if (trace_sectsuppr)
            VG_(dmsg)("Suppressing .got @ 0x%lx size %lu\n", avma, size);
         tl_assert(VG_(DebugInfo_sect_kind)(NULL, avma) == Vg_SectGOT);
         DRD_(start_suppression)(avma, avma + size, ".got");
      }
   }
}

static
void drd_start_using_mem_w_perms(const Addr a, const SizeT len,
                                 const Bool rr, const Bool ww, const Bool xx,
                                 ULong di_handle)
{
   DRD_(thread_set_vg_running_tid)(VG_(get_running_tid)());

   drd_start_using_mem(a, len, False);

   DRD_(suppress_relocation_conflicts)(a, len);
}

/**
 * Called by the core when the stack of a thread grows, to indicate that
 * the addresses in range [ a, a + len [ may now be used by the client.
 * Assumption: stacks grow downward.
 */
static __inline__
void drd_start_using_mem_stack2(const DrdThreadId tid, const Addr a,
                                const SizeT len)
{
   DRD_(thread_set_stack_min)(tid, a - VG_STACK_REDZONE_SZB);
   drd_start_using_mem(a - VG_STACK_REDZONE_SZB, len + VG_STACK_REDZONE_SZB,
                       True);
}

static __inline__
void drd_start_using_mem_stack(const Addr a, const SizeT len)
{
   drd_start_using_mem_stack2(DRD_(thread_get_running_tid)(), a, len);
}

/**
 * Called by the core when the stack of a thread shrinks, to indicate that
 * the addresses [ a, a + len [ are no longer accessible for the client.
 * Assumption: stacks grow downward.
 */
static __inline__
void drd_stop_using_mem_stack2(const DrdThreadId tid, const Addr a,
                               const SizeT len)
{
   DRD_(thread_set_stack_min)(tid, a + len - VG_STACK_REDZONE_SZB);
   drd_stop_using_mem(a - VG_STACK_REDZONE_SZB, len + VG_STACK_REDZONE_SZB,
                      True);
}

static __inline__
void drd_stop_using_mem_stack(const Addr a, const SizeT len)
{
   drd_stop_using_mem_stack2(DRD_(thread_get_running_tid)(), a, len);
}
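
/*
 * Returns True if and only if address 'a' lies within the running thread's
 * alternate signal stack. The single unsigned comparison below is
 * equivalent to (alt_min <= a && a < alt_min + alt_size).
 */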
static
Bool on_alt_stack(const Addr a)
{
   ThreadId vg_tid;
   Addr alt_min;
   SizeT alt_size;

   vg_tid = VG_(get_running_tid)();
   alt_min = VG_(thread_get_altstack_min)(vg_tid);
   alt_size = VG_(thread_get_altstack_size)(vg_tid);
   return (SizeT)(a - alt_min) < alt_size;
}

static
void drd_start_using_mem_alt_stack(const Addr a, const SizeT len)
{
   if (!on_alt_stack(a))
      drd_start_using_mem_stack(a, len);
}

static
void drd_stop_using_mem_alt_stack(const Addr a, const SizeT len)
{
   if (!on_alt_stack(a))
      drd_stop_using_mem_stack(a, len);
}

/**
 * Callback function invoked by the Valgrind core before a signal is delivered.
 */
static
void drd_pre_deliver_signal(const ThreadId vg_tid, const Int sigNo,
                            const Bool alt_stack)
{
   DrdThreadId drd_tid;

   drd_tid = DRD_(VgThreadIdToDrdThreadId)(vg_tid);
   DRD_(thread_set_on_alt_stack)(drd_tid, alt_stack);
   if (alt_stack)
   {
      /*
       * As soon as a signal handler has been invoked on the alternate stack,
       * switch to stack memory handling functions that can handle the
       * alternate stack.
       */
      VG_(track_new_mem_stack)(drd_start_using_mem_alt_stack);
      VG_(track_die_mem_stack)(drd_stop_using_mem_alt_stack);
   }
}

/**
 * Callback function invoked by the Valgrind core after a signal is delivered,
 * at least if the signal handler did not longjmp().
 */
static
void drd_post_deliver_signal(const ThreadId vg_tid, const Int sigNo)
{
   DrdThreadId drd_tid;

   drd_tid = DRD_(VgThreadIdToDrdThreadId)(vg_tid);
   DRD_(thread_set_on_alt_stack)(drd_tid, False);
   if (DRD_(thread_get_threads_on_alt_stack)() == 0)
   {
      VG_(track_new_mem_stack)(drd_start_using_mem_stack);
      VG_(track_die_mem_stack)(drd_stop_using_mem_stack);
   }
}
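
/*
 * Together, drd_pre_deliver_signal() and drd_post_deliver_signal() keep the
 * alternate-stack-aware stack tracking functions installed for as long as at
 * least one thread is running a signal handler on an alternate stack, and
 * restore the default handlers afterwards.
 */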

/**
 * Callback function called by the Valgrind core before a stack area is
 * being used by a signal handler.
 *
 * @param[in] a   Start of address range - VG_STACK_REDZONE_SZB.
 * @param[in] len Address range length + VG_STACK_REDZONE_SZB.
 * @param[in] tid Valgrind thread ID for whom the signal frame is being
 *                constructed.
 */
static void drd_start_using_mem_stack_signal(const Addr a, const SizeT len,
                                             ThreadId tid)
{
   DRD_(thread_set_vg_running_tid)(VG_(get_running_tid)());
   drd_start_using_mem(a + VG_STACK_REDZONE_SZB, len - VG_STACK_REDZONE_SZB,
                       True);
}

static void drd_stop_using_mem_stack_signal(Addr a, SizeT len)
{
   drd_stop_using_mem(a + VG_STACK_REDZONE_SZB, len - VG_STACK_REDZONE_SZB,
                      True);
}

static void drd_register_stack(Addr start, Addr end)
{
   DrdThreadId drd_tid = DRD_(thread_get_running_tid)();

   DRD_(thread_register_stack)(drd_tid, start, end);
}

static
void drd_pre_thread_create(const ThreadId creator, const ThreadId created)
{
   const DrdThreadId drd_creator = DRD_(VgThreadIdToDrdThreadId)(creator);
   tl_assert(created != VG_INVALID_THREADID);
   DRD_(thread_pre_create)(drd_creator, created);
   if (DRD_(IsValidDrdThreadId)(drd_creator))
   {
      DRD_(thread_new_segment)(drd_creator);
   }
   if (DRD_(thread_get_trace_fork_join)())
   {
      DRD_(trace_msg)("drd_pre_thread_create creator = %u, created = %u",
                      drd_creator, created);
   }
}

/**
 * Called by Valgrind's core before any loads or stores are performed on
 * the context of thread "created".
 */
static
void drd_post_thread_create(const ThreadId vg_created)
{
   DrdThreadId drd_created;
   Addr stack_max;

   tl_assert(vg_created != VG_INVALID_THREADID);

   drd_created = DRD_(thread_post_create)(vg_created);

   /* Set up red zone before the code in glibc's clone.S is run. */
   stack_max = DRD_(thread_get_stack_max)(drd_created);
   drd_start_using_mem_stack2(drd_created, stack_max, 0);

   if (DRD_(thread_get_trace_fork_join)())
   {
      DRD_(trace_msg)("drd_post_thread_create created = %u", drd_created);
   }
   if (! DRD_(get_check_stack_accesses)())
   {
      DRD_(start_suppression)(DRD_(thread_get_stack_max)(drd_created)
                              - DRD_(thread_get_stack_size)(drd_created),
                              DRD_(thread_get_stack_max)(drd_created),
                              "stack");
   }
}

/* Called after a thread has performed its last memory access. */
static void drd_thread_finished(ThreadId vg_tid)
{
   DrdThreadId drd_tid;

   /*
    * Ignore if invoked because thread creation failed. See e.g.
    * coregrind/m_syswrap/syswrap-amd64-linux.c
    */
   if (VG_(get_running_tid)() != vg_tid)
      return;

   drd_tid = DRD_(VgThreadIdToDrdThreadId)(vg_tid);
   tl_assert(drd_tid != DRD_INVALID_THREADID);
   if (DRD_(thread_get_trace_fork_join)())
   {
      DRD_(trace_msg)("drd_thread_finished tid = %u%s", drd_tid,
                      DRD_(thread_get_joinable)(drd_tid)
                      ? "" : " (which is a detached thread)");
   }
   if (s_show_stack_usage && !VG_(clo_xml)) {
      const SizeT stack_size = DRD_(thread_get_stack_size)(drd_tid);
      const SizeT used_stack
         = (DRD_(thread_get_stack_max)(drd_tid)
            - DRD_(thread_get_stack_min_min)(drd_tid));
      VG_(message)(Vg_UserMsg,
                   "thread %u%s finished and used %lu bytes out of %lu on its stack. Margin: %ld bytes.\n",
                   drd_tid,
                   DRD_(thread_get_joinable)(drd_tid)
                   ? "" : " (which is a detached thread)",
                   used_stack, stack_size, (long)(stack_size - used_stack));
   }

   drd_stop_using_mem(DRD_(thread_get_stack_min)(drd_tid),
                      DRD_(thread_get_stack_max)(drd_tid)
                      - DRD_(thread_get_stack_min)(drd_tid),
                      True);
   DRD_(thread_set_record_loads)(drd_tid, False);
   DRD_(thread_set_record_stores)(drd_tid, False);
   DRD_(thread_finished)(drd_tid);
}

/**
 * Called immediately after fork for the child process only. 'tid' is the
 * only surviving thread in the child process. Cleans up thread state.
 * See also
 * http://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_atfork.html
 * for a detailed discussion of using fork() in combination with mutexes.
 */
static
void drd__atfork_child(ThreadId tid)
{
   DRD_(drd_thread_atfork_child)(tid);
}

// Implementation of the tool interface.

static void DRD_(post_clo_init)(void)
{
#if defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_solaris) || defined(VGO_freebsd)
   /* fine */
#else
   VG_(printf)("\nWARNING: DRD has not yet been tested on this operating system.\n\n");
#endif

   if (s_var_info)
   {
      VG_(needs_var_info)();
   }
}

static void drd_start_client_code(const ThreadId tid, const ULong bbs_done)
{
   tl_assert(tid == VG_(get_running_tid)());
   DRD_(thread_set_vg_running_tid)(tid);
}

static void DRD_(fini)(Int exitcode)
{
   // thread_print_all();

   if ((VG_(clo_stats) || s_print_stats) && !VG_(clo_xml))
   {
      ULong pu = DRD_(thread_get_update_conflict_set_count)();
      ULong pu_seg_cr = DRD_(thread_get_update_conflict_set_new_sg_count)();
      ULong pu_mtx_cv = DRD_(thread_get_update_conflict_set_sync_count)();
      ULong pu_join = DRD_(thread_get_update_conflict_set_join_count)();

      VG_(message)(Vg_UserMsg,
                   "   thread: %llu context switches.\n",
                   DRD_(thread_get_context_switch_count)());
      VG_(message)(Vg_UserMsg,
                   "confl set: %llu full updates and %llu partial updates;\n",
                   DRD_(thread_get_compute_conflict_set_count)(),
                   pu);
      VG_(message)(Vg_UserMsg,
                   "           %llu partial updates during segment creation,\n",
                   pu_seg_cr);
      VG_(message)(Vg_UserMsg,
                   "           %llu because of mutex/sema/cond.var. operations,\n",
                   pu_mtx_cv);
      VG_(message)(Vg_UserMsg,
                   "           %llu because of barrier/rwlock operations and\n",
                   pu - pu_seg_cr - pu_mtx_cv - pu_join);
      VG_(message)(Vg_UserMsg,
                   "           %llu partial updates because of thread join"
                   " operations.\n",
                   pu_join);
      VG_(message)(Vg_UserMsg,
                   " segments: created %llu segments, max %llu alive,\n",
                   DRD_(sg_get_segments_created_count)(),
                   DRD_(sg_get_max_segments_alive_count)());
      VG_(message)(Vg_UserMsg,
                   "           %llu discard points and %llu merges.\n",
                   DRD_(thread_get_discard_ordered_segments_count)(),
                   DRD_(sg_get_segment_merge_count)());
      VG_(message)(Vg_UserMsg,
                   "segmnt cr: %llu mutex, %llu rwlock, %llu semaphore and"
                   " %llu barrier.\n",
                   DRD_(get_mutex_segment_creation_count)(),
                   DRD_(get_rwlock_segment_creation_count)(),
                   DRD_(get_semaphore_segment_creation_count)(),
                   DRD_(get_barrier_segment_creation_count)());
      VG_(message)(Vg_UserMsg,
                   "  bitmaps: %llu level one"
                   " and %llu level two bitmaps were allocated.\n",
                   DRD_(bm_get_bitmap_creation_count)(),
                   DRD_(bm_get_bitmap2_creation_count)());
      VG_(message)(Vg_UserMsg,
                   "    mutex: %llu non-recursive lock/unlock events.\n",
                   DRD_(get_mutex_lock_count)());
      DRD_(print_malloc_stats)();
   }

   DRD_(bm_module_cleanup)();
}

static
void drd_pre_clo_init(void)
{
   // Basic tool stuff.
   VG_(details_name)            ("drd");
   VG_(details_version)         (NULL);
   VG_(details_description)     ("a thread error detector");
   VG_(details_copyright_author)("Copyright (C) 2006-2020, and GNU GPL'd,"
                                 " by Bart Van Assche.");
   VG_(details_bug_reports_to)  (VG_BUGS_TO);

   VG_(basic_tool_funcs)        (DRD_(post_clo_init),
                                 DRD_(instrument),
                                 DRD_(fini));

   // Command line stuff.
   VG_(needs_command_line_options)(DRD_(process_cmd_line_option),
                                   DRD_(print_usage),
                                   DRD_(print_debug_usage));
   VG_(needs_xml_output)();

   // Error handling.
   DRD_(register_error_handlers)();

   // Core event tracking.
   VG_(track_pre_mem_read)         (drd_pre_mem_read);
   VG_(track_pre_mem_read_asciiz)  (drd_pre_mem_read_asciiz);
   VG_(track_post_mem_write)       (drd_post_mem_write);
   VG_(track_new_mem_brk)          (drd_start_using_mem_w_tid);
   VG_(track_new_mem_mmap)         (drd_start_using_mem_w_perms);
   VG_(track_new_mem_stack)        (drd_start_using_mem_stack);
   VG_(track_new_mem_stack_signal) (drd_start_using_mem_stack_signal);
   VG_(track_new_mem_startup)      (drd_start_using_mem_w_perms);
   VG_(track_die_mem_brk)          (drd_stop_using_nonstack_mem);
   VG_(track_die_mem_munmap)       (drd_stop_using_nonstack_mem);
   VG_(track_die_mem_stack)        (drd_stop_using_mem_stack);
   VG_(track_die_mem_stack_signal) (drd_stop_using_mem_stack_signal);
   VG_(track_register_stack)       (drd_register_stack);
   VG_(track_pre_deliver_signal)   (drd_pre_deliver_signal);
   VG_(track_post_deliver_signal)  (drd_post_deliver_signal);
   VG_(track_start_client_code)    (drd_start_client_code);
   VG_(track_pre_thread_ll_create) (drd_pre_thread_create);
   VG_(track_pre_thread_first_insn)(drd_post_thread_create);
   VG_(track_pre_thread_ll_exit)   (drd_thread_finished);
   VG_(atfork)                     (NULL/*pre*/, NULL/*parent*/,
                                    drd__atfork_child/*child*/);
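
   /*
    * Note: drd_post_thread_create() is registered for the "pre first
    * instruction" hook above, so it runs before a newly created thread
    * executes its first instruction (see the comment above that function).
    */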

   // Other stuff.
   DRD_(register_malloc_wrappers)(drd_start_using_mem_w_ecu,
                                  drd_stop_using_nonstack_mem);

   DRD_(bm_module_init)();

   DRD_(clientreq_init)();

   DRD_(suppression_init)();

   DRD_(clientobj_init)();

   DRD_(thread_init)();

   {
      HChar* const smi = VG_(getenv)("DRD_SEGMENT_MERGING_INTERVAL");
      if (smi)
         DRD_(thread_set_segment_merge_interval)(VG_(strtoll10)(smi, NULL));
   }

   if (VG_(getenv)("DRD_VERIFY_CONFLICT_SET"))
      DRD_(verify_conflict_set) = True;
}
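
/*
 * Illustration (environment overrides read above, values hypothetical):
 *
 *    DRD_SEGMENT_MERGING_INTERVAL=64 DRD_VERIFY_CONFLICT_SET=1 \
 *       valgrind --tool=drd ./myapp
 *
 * sets the segment merge interval and enables conflict set verification
 * without passing any command line options.
 */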

VG_DETERMINE_INTERFACE_VERSION(drd_pre_clo_init)