/*--------------------------------------------------------------------*/
/*---                                                 ct_threads.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Callgrind, a Valgrind tool for call tracing.

   Copyright (C) 2002-2017, Josef Weidendorfer (Josef.Weidendorfer@gmx.de)

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "global.h"

#include "pub_tool_threadstate.h"

static exec_state* exec_state_save(void);
static exec_state* exec_state_restore(void);
static exec_state* push_exec_state(int);
static exec_state* top_exec_state(void);

static exec_stack current_states;

/*------------------------------------------------------------*/
/*--- Support for multi-threading                          ---*/
/*------------------------------------------------------------*/

/*
 * For Valgrind, MT is cooperative (no preempting in our code),
 * so we don't need locks...
 *
 * Per-thread data:
 *  - event counters: last, current
 *
 * Even when ignoring MT, we need these functions to set up some
 * data structures for the process (= Thread 1).
 */

/* current running thread */
ThreadId CLG_(current_tid);

static thread_info** thread;

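/* Accessors for the thread info array (indexed by ThreadId). */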
thread_info** CLG_(get_threads)()
{
  return thread;
}

thread_info* CLG_(get_current_thread)()
{
  return thread[CLG_(current_tid)];
}

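/* Allocate and clear the thread info array; no thread is current yet. */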
void CLG_(init_threads)()
{
  UInt i;

  thread = CLG_MALLOC("cl.threads.it.1", VG_N_THREADS * sizeof thread[0]);

  for(i=0;i<VG_N_THREADS;i++)
    thread[i] = 0;

  CLG_(current_tid) = VG_INVALID_THREADID;
}

/* switches through all threads and calls func */
void CLG_(forall_threads)(void (*func)(thread_info*))
{
  Int t, orig_tid = CLG_(current_tid);

  for(t=1;t<VG_N_THREADS;t++) {
    if (!thread[t]) continue;
    CLG_(switch_thread)(t);
    (*func)(thread[t]);
  }
  CLG_(switch_thread)(orig_tid);
}

static thread_info* new_thread(void)
{
  thread_info* t;

  t = (thread_info*) CLG_MALLOC("cl.threads.nt.1",
                                sizeof(thread_info));

  /* init state */
  CLG_(init_exec_stack)( &(t->states) );
  CLG_(init_call_stack)( &(t->calls) );
  CLG_(init_fn_stack)  ( &(t->fns) );
  /* t->states.entry[0]->cxt = CLG_(get_cxt)(t->fns.bottom); */

  /* event counters */
  t->lastdump_cost   = CLG_(get_eventset_cost)( CLG_(sets).full );
  t->sighandler_cost = CLG_(get_eventset_cost)( CLG_(sets).full );
  CLG_(init_cost)( CLG_(sets).full, t->lastdump_cost );
  CLG_(init_cost)( CLG_(sets).full, t->sighandler_cost );

  /* init data containers */
  CLG_(init_fn_array)( &(t->fn_active) );
  CLG_(init_bbcc_hash)( &(t->bbccs) );
  CLG_(init_jcc_hash)( &(t->jccs) );

  return t;
}

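/* Make tid the current thread: save the state of the thread that was
 * running (if any) and load the state of tid, creating its thread_info
 * on first use. */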
void CLG_(switch_thread)(ThreadId tid)
{
  if (tid == CLG_(current_tid)) return;

  CLG_DEBUG(0, ">> thread %u (was %u)\n", tid, CLG_(current_tid));

  if (CLG_(current_tid) != VG_INVALID_THREADID) {
    /* save thread state */
    thread_info* t = thread[CLG_(current_tid)];

    /* current context (including signal handler contexts) */
    exec_state_save();
    CLG_(copy_current_exec_stack)( &(t->states) );
    CLG_(copy_current_call_stack)( &(t->calls) );
    CLG_(copy_current_fn_stack) ( &(t->fns) );

    CLG_(copy_current_fn_array) ( &(t->fn_active) );
    /* If we accumulate costs of all threads, use TID 1 for all jccs/bccs */
    if (!CLG_(clo).separate_threads) t = thread[1];
    CLG_(copy_current_bbcc_hash)( &(t->bbccs) );
    CLG_(copy_current_jcc_hash) ( &(t->jccs) );
  }

  CLG_(current_tid) = tid;
  CLG_ASSERT(tid < VG_N_THREADS);

  if (tid != VG_INVALID_THREADID) {
    thread_info* t;

    /* load thread state */
    if (thread[tid] == 0) thread[tid] = new_thread();
    t = thread[tid];

    /* current context (including signal handler contexts) */
    CLG_(set_current_exec_stack)( &(t->states) );
    exec_state_restore();
    CLG_(set_current_call_stack)( &(t->calls) );
    CLG_(set_current_fn_stack) ( &(t->fns) );

    CLG_(set_current_fn_array) ( &(t->fn_active) );
    /* If we accumulate costs of all threads, use TID 1 for all jccs/bccs */
    if (!CLG_(clo).separate_threads) t = thread[1];
    CLG_(set_current_bbcc_hash) ( &(t->bbccs) );
    CLG_(set_current_jcc_hash) ( &(t->jccs) );
  }
}

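/* Called when thread tid is about to run: trigger a profile dump if
 * --dump-every-bb is active and due, then switch to tid's state. */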
void CLG_(run_thread)(ThreadId tid)
{
  /* check for dumps needed */
  static ULong bbs_done = 0;
  HChar buf[50];   // large enough

  if (CLG_(clo).dump_every_bb > 0) {
    if (CLG_(stat).bb_executions - bbs_done > CLG_(clo).dump_every_bb) {
      VG_(sprintf)(buf, "--dump-every-bb=%llu", CLG_(clo).dump_every_bb);
      CLG_(dump_profile)(buf, False);
      bbs_done = CLG_(stat).bb_executions;
    }
  }

  /* now check for thread switch */
  CLG_(switch_thread)(tid);
}

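/* Called before a signal handler runs on thread tid: push a new exec
 * state for the handler so its costs are collected separately. */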
void CLG_(pre_signal)(ThreadId tid, Int sigNum, Bool alt_stack)
{
  exec_state *es;

  CLG_DEBUG(0, ">> pre_signal(TID %u, sig %d, alt_st %s)\n",
            tid, sigNum, alt_stack ? "yes":"no");

  /* switch to the thread the handler runs in */
  CLG_(switch_thread)(tid);

  /* save current execution state */
  exec_state_save();

  /* setup new cxtinfo struct for this signal handler */
  es = push_exec_state(sigNum);
  CLG_(zero_cost)( CLG_(sets).full, es->cost );
  CLG_(current_state).cost = es->cost;
  es->call_stack_bottom = CLG_(current_call_stack).sp;

  /* setup current state for a spontaneous call */
  CLG_(init_exec_state)( &CLG_(current_state) );
  CLG_(current_state).sig = sigNum;
}

/* Run post-signal handling if the stack pointer of the call stack is at
 * the bottom recorded in the current exec state (i.e. the outermost
 * function of a signal handler returned).
 *
 * Called from CLG_(pop_call_stack).
 */
void CLG_(run_post_signal_on_call_stack_bottom)()
{
  exec_state* es = top_exec_state();

  CLG_ASSERT(CLG_(current_state).sig > 0);

  if (CLG_(current_call_stack).sp == es->call_stack_bottom)
    CLG_(post_signal)( CLG_(current_tid), CLG_(current_state).sig );
}

void CLG_(post_signal)(ThreadId tid, Int sigNum)
{
  exec_state* es;
  UInt fn_number, *pactive;

  CLG_DEBUG(0, ">> post_signal(TID %u, sig %d)\n",
            tid, sigNum);

  /* thread switching potentially needed, e.g. with instrumentation off */
  CLG_(switch_thread)(tid);
  CLG_ASSERT(sigNum == CLG_(current_state).sig);

  /* Unwind call stack of this signal handler.
   * This should only be needed at finalisation time.
   */
  es = top_exec_state();
  while(CLG_(current_call_stack).sp > es->call_stack_bottom)
    CLG_(pop_call_stack)();

  if (CLG_(current_state).cxt) {
    /* correct active counts */
    fn_number = CLG_(current_state).cxt->fn[0]->number;
    pactive = CLG_(get_fn_entry)(fn_number);
    (*pactive)--;
    CLG_DEBUG(0, " set active count of %s back to %u\n",
              CLG_(current_state).cxt->fn[0]->name, *pactive);
  }

  if (CLG_(current_fn_stack).top > CLG_(current_fn_stack).bottom) {
    /* set fn_stack_top back.
     * top can point to 0 if nothing was executed in the signal handler;
     * this is possible at the end when unwinding handlers.
     */
    if (*(CLG_(current_fn_stack).top) != 0) {
      CLG_(current_fn_stack).top--;
      CLG_ASSERT(*(CLG_(current_fn_stack).top) == 0);
    }
    if (CLG_(current_fn_stack).top > CLG_(current_fn_stack).bottom)
      CLG_(current_fn_stack).top--;
  }

  /* move the handler's costs into the thread's sighandler cost counter */
  CLG_ASSERT(CLG_(current_state).cost == es->cost);
  CLG_(add_and_zero_cost)( CLG_(sets).full,
                           thread[CLG_(current_tid)]->sighandler_cost,
                           CLG_(current_state).cost );

  /* restore previous context */
  current_states.sp--;    /* pop the exec state of this handler */
  es = top_exec_state();
  CLG_(current_state).sig = es->sig;
  exec_state_restore();

  /* There is no way to reliably get the thread ID we are switching to
   * after this handler returns. So we sync with the actual TID at the
   * start of CLG_(setup_bb)(), which should be the next thing Callgrind
   * sees.
   */
}

/*------------------------------------------------------------*/
/*--- Execution states in a thread & signal handlers       ---*/
/*------------------------------------------------------------*/

/* Each thread can be interrupted by a signal handler, and signal
 * handlers themselves can be interrupted again. But as there is no
 * scheduling among handlers of the same thread, we don't need
 * additional stacks.
 * So storing execution contexts and adding separators in the call
 * stack (needed to not intermix normal/handler functions in contexts)
 * should be enough.
 */

/* not initialized: call_stack_bottom, sig */
void CLG_(init_exec_state)(exec_state* es)
{
  es->collect = CLG_(clo).collect_atstart;
  es->cxt  = 0;
  es->jmps_passed = 0;
  es->bbcc = 0;
  es->nonskipped = 0;
}

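/* Allocate a new exec_state with its own (zeroed) cost counters. */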
static exec_state* new_exec_state(Int sigNum)
{
  exec_state* es;

  es = (exec_state*) CLG_MALLOC("cl.threads.nes.1",
                                sizeof(exec_state));

  /* allocate real cost space: needed as it is incremented by
   * simulation functions */
  es->cost = CLG_(get_eventset_cost)(CLG_(sets).full);
  CLG_(zero_cost)( CLG_(sets).full, es->cost );
  CLG_(init_exec_state)(es);
  es->sig = sigNum;
  es->call_stack_bottom = 0;

  return es;
}

void CLG_(init_exec_stack)(exec_stack* es)
{
  Int i;

  /* The first element is for the main thread */
  es->entry[0] = new_exec_state(0);
  for(i=1;i<MAX_SIGHANDLERS;i++)
    es->entry[i] = 0;
  es->sp = 0;
}

void CLG_(copy_current_exec_stack)(exec_stack* dst)
{
  Int i;

  dst->sp = current_states.sp;
  for(i=0;i<MAX_SIGHANDLERS;i++)
    dst->entry[i] = current_states.entry[i];
}

void CLG_(set_current_exec_stack)(exec_stack* dst)
{
  Int i;

  current_states.sp = dst->sp;
  for(i=0;i<MAX_SIGHANDLERS;i++)
    current_states.entry[i] = dst->entry[i];
}

/* Get top context info struct of current thread */
static exec_state* top_exec_state(void)
{
  Int sp = current_states.sp;
  exec_state* es;

  CLG_ASSERT((sp >= 0) && (sp < MAX_SIGHANDLERS));
  es = current_states.entry[sp];
  CLG_ASSERT(es != 0);
  return es;
}

/* Allocates a free context info structure for a newly entered
 * signal handler, putting it on the context stack.
 * Returns a pointer to the structure.
 */
static exec_state* push_exec_state(int sigNum)
{
  Int sp;
  exec_state* es;

  current_states.sp++;
  sp = current_states.sp;

  CLG_ASSERT((sigNum > 0) && (sigNum <= _VKI_NSIG));
  CLG_ASSERT((sp > 0) && (sp < MAX_SIGHANDLERS));

  es = current_states.entry[sp];
  if (!es) {
    /* no exec state was allocated at this nesting depth yet */
    es = new_exec_state(sigNum);
    current_states.entry[sp] = es;
  }
  else
    es->sig = sigNum;

  return es;
}

/* Save current context to top cxtinfo struct */
static exec_state* exec_state_save(void)
{
  exec_state* es = top_exec_state();

  es->cxt         = CLG_(current_state).cxt;
  es->collect     = CLG_(current_state).collect;
  es->jmps_passed = CLG_(current_state).jmps_passed;
  es->bbcc        = CLG_(current_state).bbcc;
  es->nonskipped  = CLG_(current_state).nonskipped;
  CLG_ASSERT(es->cost == CLG_(current_state).cost);

  CLG_DEBUG(1, " cxtinfo_save(sig %d): collect %s, jmps_passed %d\n",
            es->sig, es->collect ? "Yes": "No", es->jmps_passed);
  CLG_(print_bbcc)(-9, es->bbcc);
  CLG_(print_cost)(-9, CLG_(sets).full, es->cost);

  /* signal number does not need to be saved */
  CLG_ASSERT(CLG_(current_state).sig == es->sig);

  return es;
}

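/* Restore CLG_(current_state) from the top exec state of the current
 * thread; counterpart to exec_state_save(). */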
static exec_state* exec_state_restore(void)
{
  exec_state* es = top_exec_state();

  CLG_(current_state).cxt         = es->cxt;
  CLG_(current_state).collect     = es->collect;
  CLG_(current_state).jmps_passed = es->jmps_passed;
  CLG_(current_state).bbcc        = es->bbcc;
  CLG_(current_state).nonskipped  = es->nonskipped;
  CLG_(current_state).cost        = es->cost;
  CLG_(current_state).sig         = es->sig;

  CLG_DEBUG(1, " exec_state_restore(sig %d): collect %s, jmps_passed %d\n",
            es->sig, es->collect ? "Yes": "No", es->jmps_passed);
  CLG_(print_bbcc)(-9, es->bbcc);
  CLG_(print_cxt)(-9, es->cxt, 0);
  CLG_(print_cost)(-9, CLG_(sets).full, es->cost);

  return es;
}