/* Branch trace support for GDB, the GNU debugger.

   Copyright (C) 2013-2024 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "btrace.h"
#include "gdbthread.h"
#include "inferior.h"
#include "target.h"
#include "record.h"
#include "symtab.h"
#include "disasm.h"
#include "source.h"
#include "filenames.h"
#include "regcache.h"
#include "gdbsupport/rsp-low.h"
#include "cli/cli-utils.h"
/* For maintenance commands.  */
#include "record-btrace.h"
/* Command lists for btrace maintenance commands.  */
static struct cmd_list_element *maint_btrace_cmdlist;
static struct cmd_list_element *maint_btrace_set_cmdlist;
static struct cmd_list_element *maint_btrace_show_cmdlist;
static struct cmd_list_element *maint_btrace_pt_set_cmdlist;
static struct cmd_list_element *maint_btrace_pt_show_cmdlist;
/* Control whether to skip PAD packets when computing the packet history.  */
static bool maint_btrace_pt_skip_pad = true;
static void btrace_add_pc (struct thread_info *tp);
/* Print a record debug message.  Use do ... while (0) to avoid ambiguities
   when used in if statements.  */

#define DEBUG(msg, args...) \
  do \
    { \
      if (record_debug != 0) \
        gdb_printf (gdb_stdlog, \
                    "[btrace] " msg "\n", ##args); \
    } \
  while (0)

#define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)
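
/* For example, DEBUG ("enable thread %s", ...) prints "[btrace] enable
   thread ..." to gdb_stdlog whenever record debugging ("set debug record")
   is enabled, and DEBUG_FTRACE additionally prefixes "[ftrace] ".  */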
/* Return the function name of a recorded function segment for printing.
   This function never returns NULL.  */

static const char *
ftrace_print_function_name (const struct btrace_function *bfun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  msym = bfun->msym;
  sym = bfun->sym;

  if (sym != NULL)
    return sym->print_name ();

  if (msym != NULL)
    return msym->print_name ();

  return "<unknown>";
}
/* Return the file name of a recorded function segment for printing.
   This function never returns NULL.  */

static const char *
ftrace_print_filename (const struct btrace_function *bfun)
{
  struct symbol *sym;
  const char *filename;

  sym = bfun->sym;

  if (sym != NULL)
    filename = symtab_to_filename_for_display (sym->symtab ());
  else
    filename = "<unknown>";

  return filename;
}
/* Return a string representation of the address of an instruction.
   This function never returns NULL.  */

static const char *
ftrace_print_insn_addr (const struct btrace_insn *insn)
{
  if (insn == NULL)
    return "<nil>";

  return core_addr_to_string_nz (insn->pc);
}
/* Print an ftrace debug status message.  */

static void
ftrace_debug (const struct btrace_function *bfun, const char *prefix)
{
  const char *fun, *file;
  unsigned int ibegin, iend;
  int level;

  fun = ftrace_print_function_name (bfun);
  file = ftrace_print_filename (bfun);
  level = bfun->level;

  ibegin = bfun->insn_offset;
  iend = ibegin + bfun->insn.size ();

  DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, insn = [%u; %u)",
                prefix, fun, file, level, ibegin, iend);
}
/* Return the number of instructions in a given function call segment.  */

static unsigned int
ftrace_call_num_insn (const struct btrace_function *bfun)
{
  if (bfun == NULL)
    return 0;

  /* A gap is always counted as one instruction.  */
  if (bfun->errcode != 0)
    return 1;

  return bfun->insn.size ();
}
/* Return the function segment with the given NUMBER or NULL if no such segment
   exists.  BTINFO is the branch trace information for the current thread.  */

static struct btrace_function *
ftrace_find_call_by_number (struct btrace_thread_info *btinfo,
                            unsigned int number)
{
  if (number == 0 || number > btinfo->functions.size ())
    return NULL;

  return &btinfo->functions[number - 1];
}
/* A const version of the function above.  */

static const struct btrace_function *
ftrace_find_call_by_number (const struct btrace_thread_info *btinfo,
                            unsigned int number)
{
  if (number == 0 || number > btinfo->functions.size ())
    return NULL;

  return &btinfo->functions[number - 1];
}
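
/* Note for both variants above: function segments are numbered starting at
   one, so segment NUMBER lives at index NUMBER - 1 in BTINFO->FUNCTIONS and
   zero doubles as the "no segment" value in up/prev/next links.  */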
/* Return non-zero if BFUN does not match MFUN and FUN,
   return zero otherwise.  */

static int
ftrace_function_switched (const struct btrace_function *bfun,
                          const struct minimal_symbol *mfun,
                          const struct symbol *fun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  msym = bfun->msym;
  sym = bfun->sym;

  /* If the minimal symbol changed, we certainly switched functions.  */
  if (mfun != NULL && msym != NULL
      && strcmp (mfun->linkage_name (), msym->linkage_name ()) != 0)
    return 1;

  /* If the symbol changed, we certainly switched functions.  */
  if (fun != NULL && sym != NULL)
    {
      const char *bfname, *fname;

      /* Check the function name.  */
      if (strcmp (fun->linkage_name (), sym->linkage_name ()) != 0)
        return 1;

      /* Check the location of those functions, as well.  */
      bfname = symtab_to_fullname (sym->symtab ());
      fname = symtab_to_fullname (fun->symtab ());
      if (filename_cmp (fname, bfname) != 0)
        return 1;
    }

  /* If we lost symbol information, we switched functions.  */
  if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL)
    return 1;

  /* If we gained symbol information, we switched functions.  */
  if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL))
    return 1;

  return 0;
}
/* Allocate and initialize a new branch trace function segment at the end of
   the trace.
   BTINFO is the branch trace information for the current thread.
   MFUN and FUN are the symbol information we have for this function.
   This invalidates all struct btrace_function pointers currently held.  */

static struct btrace_function *
ftrace_new_function (struct btrace_thread_info *btinfo,
                     struct minimal_symbol *mfun,
                     struct symbol *fun)
{
  int level;
  unsigned int number, insn_offset;

  if (btinfo->functions.empty ())
    {
      /* Start counting NUMBER and INSN_OFFSET at one.  */
      level = 0;
      number = 1;
      insn_offset = 1;
    }
  else
    {
      const struct btrace_function *prev = &btinfo->functions.back ();
      level = prev->level;
      number = prev->number + 1;
      insn_offset = prev->insn_offset + ftrace_call_num_insn (prev);
    }

  return &btinfo->functions.emplace_back (mfun, fun, number, insn_offset,
                                          level);
}
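
/* Note that the emplace_back above may reallocate BTINFO->FUNCTIONS, which
   is why callers must not hold on to btrace_function pointers across a call
   to ftrace_new_function.  */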
/* Update the UP field of a function segment.  */

static void
ftrace_update_caller (struct btrace_function *bfun,
                      struct btrace_function *caller,
                      btrace_function_flags flags)
{
  if (bfun->up != 0)
    ftrace_debug (bfun, "updating caller");

  bfun->up = caller->number;
  bfun->flags = flags;

  ftrace_debug (bfun, "set caller");
  ftrace_debug (caller, "..to");
}
/* Fix up the caller for all segments of a function.  */

static void
ftrace_fixup_caller (struct btrace_thread_info *btinfo,
                     struct btrace_function *bfun,
                     struct btrace_function *caller,
                     btrace_function_flags flags)
{
  unsigned int prev, next;

  prev = bfun->prev;
  next = bfun->next;
  ftrace_update_caller (bfun, caller, flags);

  /* Update all function segments belonging to the same function.  */
  for (; prev != 0; prev = bfun->prev)
    {
      bfun = ftrace_find_call_by_number (btinfo, prev);
      ftrace_update_caller (bfun, caller, flags);
    }

  for (; next != 0; next = bfun->next)
    {
      bfun = ftrace_find_call_by_number (btinfo, next);
      ftrace_update_caller (bfun, caller, flags);
    }
}
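
/* The PREV/NEXT links walked above chain together the segments that belong
   to the same function instance, e.g. a function that was split by a call
   and resumed by the matching return.  */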
/* Add a new function segment for a call at the end of the trace.
   BTINFO is the branch trace information for the current thread.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_call (struct btrace_thread_info *btinfo,
                 struct minimal_symbol *mfun,
                 struct symbol *fun)
{
  const unsigned int length = btinfo->functions.size ();
  struct btrace_function *bfun = ftrace_new_function (btinfo, mfun, fun);

  bfun->up = length;
  bfun->level += 1;

  ftrace_debug (bfun, "new call");

  return bfun;
}
/* Add a new function segment for a tail call at the end of the trace.
   BTINFO is the branch trace information for the current thread.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_tailcall (struct btrace_thread_info *btinfo,
                     struct minimal_symbol *mfun,
                     struct symbol *fun)
{
  const unsigned int length = btinfo->functions.size ();
  struct btrace_function *bfun = ftrace_new_function (btinfo, mfun, fun);

  bfun->up = length;
  bfun->level += 1;
  bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;

  ftrace_debug (bfun, "new tail call");

  return bfun;
}
/* Return the caller of BFUN or NULL if there is none.  This function skips
   tail calls in the call chain.  BTINFO is the branch trace information for
   the current thread.  */

static struct btrace_function *
ftrace_get_caller (struct btrace_thread_info *btinfo,
                   struct btrace_function *bfun)
{
  for (; bfun != NULL; bfun = ftrace_find_call_by_number (btinfo, bfun->up))
    if ((bfun->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
      return ftrace_find_call_by_number (btinfo, bfun->up);

  return NULL;
}
/* Find the innermost caller in the back trace of BFUN with MFUN/FUN
   symbol information.  BTINFO is the branch trace information for the current
   thread.  */

static struct btrace_function *
ftrace_find_caller (struct btrace_thread_info *btinfo,
                    struct btrace_function *bfun,
                    struct minimal_symbol *mfun,
                    struct symbol *fun)
{
  for (; bfun != NULL; bfun = ftrace_find_call_by_number (btinfo, bfun->up))
    {
      /* Skip functions with incompatible symbol information.  */
      if (ftrace_function_switched (bfun, mfun, fun))
        continue;

      /* This is the function segment we're looking for.  */
      break;
    }

  return bfun;
}
/* Find the innermost caller in the back trace of BFUN, skipping all
   function segments that do not end with a call instruction (e.g.
   tail calls ending with a jump).  BTINFO is the branch trace information for
   the current thread.  */

static struct btrace_function *
ftrace_find_call (struct btrace_thread_info *btinfo,
                  struct btrace_function *bfun)
{
  for (; bfun != NULL; bfun = ftrace_find_call_by_number (btinfo, bfun->up))
    {
      /* Skip gaps.  */
      if (bfun->errcode != 0)
        continue;

      btrace_insn &last = bfun->insn.back ();

      if (last.iclass == BTRACE_INSN_CALL)
        break;
    }

  return bfun;
}
/* Add a continuation segment for a function into which we return at the end of
   the trace.
   BTINFO is the branch trace information for the current thread.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_return (struct btrace_thread_info *btinfo,
                   struct minimal_symbol *mfun,
                   struct symbol *fun)
{
  struct btrace_function *prev, *bfun, *caller;

  bfun = ftrace_new_function (btinfo, mfun, fun);
  prev = ftrace_find_call_by_number (btinfo, bfun->number - 1);

  /* It is important to start at PREV's caller.  Otherwise, we might find
     PREV itself, if PREV is a recursive function.  */
  caller = ftrace_find_call_by_number (btinfo, prev->up);
  caller = ftrace_find_caller (btinfo, caller, mfun, fun);
  if (caller != NULL)
    {
      /* The caller of PREV is the preceding btrace function segment in this
         function instance.  */
      gdb_assert (caller->next == 0);

      caller->next = bfun->number;
      bfun->prev = caller->number;

      /* Maintain the function level.  */
      bfun->level = caller->level;

      /* Maintain the call stack.  */
      bfun->up = caller->up;
      bfun->flags = caller->flags;

      ftrace_debug (bfun, "new return");
    }
  else
    {
      /* We did not find a caller.  This could mean that something went
         wrong or that the call is simply not included in the trace.  */

      /* Let's search for some actual call.  */
      caller = ftrace_find_call_by_number (btinfo, prev->up);
      caller = ftrace_find_call (btinfo, caller);
      if (caller == NULL)
        {
          /* There is no call in PREV's back trace.  We assume that the
             branch trace did not include it.  */

          /* Let's find the topmost function and add a new caller for it.
             This should handle a series of initial tail calls.  */
          while (prev->up != 0)
            prev = ftrace_find_call_by_number (btinfo, prev->up);

          bfun->level = prev->level - 1;

          /* Fix up the call stack for PREV.  */
          ftrace_fixup_caller (btinfo, prev, bfun, BFUN_UP_LINKS_TO_RET);

          ftrace_debug (bfun, "new return - no caller");
        }
      else
        {
          /* There is a call in PREV's back trace to which we should have
             returned but didn't.  Let's start a new, separate back trace
             from PREV's level.  */
          bfun->level = prev->level - 1;

          /* We fix up the back trace for PREV but leave other function segments
             on the same level as they are.
             This should handle things like schedule () correctly where we're
             switching contexts.  */
          prev->up = bfun->number;
          prev->flags = BFUN_UP_LINKS_TO_RET;

          ftrace_debug (bfun, "new return - unknown caller");
        }
    }

  return bfun;
}
/* Add a new function segment for a function switch at the end of the trace.
   BTINFO is the branch trace information for the current thread.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_switch (struct btrace_thread_info *btinfo,
                   struct minimal_symbol *mfun,
                   struct symbol *fun)
{
  struct btrace_function *prev, *bfun;

  /* This is an unexplained function switch.  We can't really be sure about the
     call stack, yet the best I can think of right now is to preserve it.  */
  bfun = ftrace_new_function (btinfo, mfun, fun);
  prev = ftrace_find_call_by_number (btinfo, bfun->number - 1);
  bfun->up = prev->up;
  bfun->flags = prev->flags;

  ftrace_debug (bfun, "new switch");

  return bfun;
}
/* Add a new function segment for a gap in the trace due to a decode error at
   the end of the trace.
   BTINFO is the branch trace information for the current thread.
   ERRCODE is the format-specific error code.  */

static struct btrace_function *
ftrace_new_gap (struct btrace_thread_info *btinfo, int errcode,
                std::vector<unsigned int> &gaps)
{
  struct btrace_function *bfun;

  if (btinfo->functions.empty ())
    bfun = ftrace_new_function (btinfo, NULL, NULL);
  else
    {
      /* We hijack the previous function segment if it was empty.  */
      bfun = &btinfo->functions.back ();
      if (bfun->errcode != 0 || !bfun->insn.empty ())
        bfun = ftrace_new_function (btinfo, NULL, NULL);
    }

  bfun->errcode = errcode;
  gaps.push_back (bfun->number);

  ftrace_debug (bfun, "new gap");

  return bfun;
}
/* Update the current function segment at the end of the trace in BTINFO with
   respect to the instruction at PC.  This may create new function segments.
   Return the chronologically latest function segment, never NULL.  */

static struct btrace_function *
ftrace_update_function (struct btrace_thread_info *btinfo, CORE_ADDR pc)
{
  struct bound_minimal_symbol bmfun;
  struct minimal_symbol *mfun;
  struct symbol *fun;
  struct btrace_function *bfun;

  /* Try to determine the function we're in.  We use both types of symbols
     to avoid surprises when we sometimes get a full symbol and sometimes
     only a minimal symbol.  */
  fun = find_pc_function (pc);
  bmfun = lookup_minimal_symbol_by_pc (pc);
  mfun = bmfun.minsym;

  if (fun == NULL && mfun == NULL)
    DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));

  /* If we didn't have a function, we create one.  */
  if (btinfo->functions.empty ())
    return ftrace_new_function (btinfo, mfun, fun);

  /* If we had a gap before, we create a function.  */
  bfun = &btinfo->functions.back ();
  if (bfun->errcode != 0)
    return ftrace_new_function (btinfo, mfun, fun);

  /* Check the last instruction, if we have one.
     We do this check first, since it allows us to fill in the call stack
     links in addition to the normal flow links.  */
  btrace_insn *last = NULL;
  if (!bfun->insn.empty ())
    last = &bfun->insn.back ();

  if (last != NULL)
    {
      switch (last->iclass)
        {
        case BTRACE_INSN_RETURN:
          {
            const char *fname;

            /* On some systems, _dl_runtime_resolve returns to the resolved
               function instead of jumping to it.  From our perspective,
               however, this is a tailcall.
               If we treated it as return, we wouldn't be able to find the
               resolved function in our stack back trace.  Hence, we would
               lose the current stack back trace and start anew with an empty
               back trace.  When the resolved function returns, we would then
               create a stack back trace with the same function names but
               different frame id's.  This will confuse stepping.  */
            fname = ftrace_print_function_name (bfun);
            if (strcmp (fname, "_dl_runtime_resolve") == 0)
              return ftrace_new_tailcall (btinfo, mfun, fun);

            return ftrace_new_return (btinfo, mfun, fun);
          }

        case BTRACE_INSN_CALL:
          /* Ignore calls to the next instruction.  They are used for PIC.  */
          if (last->pc + last->size == pc)
            break;

          return ftrace_new_call (btinfo, mfun, fun);

        case BTRACE_INSN_JUMP:
          {
            CORE_ADDR start;

            start = get_pc_function_start (pc);

            /* A jump to the start of a function is (typically) a tail call.  */
            if (start == pc)
              return ftrace_new_tailcall (btinfo, mfun, fun);

            /* Some versions of _Unwind_RaiseException use an indirect
               jump to 'return' to the exception handler of the caller
               handling the exception instead of a return.  Let's restrict
               this heuristic to that and related functions.  */
            const char *fname = ftrace_print_function_name (bfun);
            if (strncmp (fname, "_Unwind_", strlen ("_Unwind_")) == 0)
              {
                struct btrace_function *caller
                  = ftrace_find_call_by_number (btinfo, bfun->up);
                caller = ftrace_find_caller (btinfo, caller, mfun, fun);
                if (caller != NULL)
                  return ftrace_new_return (btinfo, mfun, fun);
              }

            /* If we can't determine the function for PC, we treat a jump at
               the end of the block as tail call if we're switching functions
               and as an intra-function branch if we don't.  */
            if (start == 0 && ftrace_function_switched (bfun, mfun, fun))
              return ftrace_new_tailcall (btinfo, mfun, fun);

            break;
          }
        }
    }

  /* Check if we're switching functions for some other reason.  */
  if (ftrace_function_switched (bfun, mfun, fun))
    {
      DEBUG_FTRACE ("switching from %s in %s at %s",
                    ftrace_print_insn_addr (last),
                    ftrace_print_function_name (bfun),
                    ftrace_print_filename (bfun));

      return ftrace_new_switch (btinfo, mfun, fun);
    }

  return bfun;
}
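
/* To summarize the heuristics above: returns continue the caller's segment
   chain, calls and tail calls open a new segment one level deeper, and an
   otherwise unexplained symbol change yields a switch segment that preserves
   the call stack.  */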
/* Add INSN to BFUN's instructions.  */

static void
ftrace_update_insns (struct btrace_function *bfun, const btrace_insn &insn)
{
  bfun->insn.push_back (insn);

  if (record_debug > 1)
    ftrace_debug (bfun, "update insn");
}
/* Classify the instruction at PC.  */

static enum btrace_insn_class
ftrace_classify_insn (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  enum btrace_insn_class iclass;

  iclass = BTRACE_INSN_OTHER;
  try
    {
      if (gdbarch_insn_is_call (gdbarch, pc))
        iclass = BTRACE_INSN_CALL;
      else if (gdbarch_insn_is_ret (gdbarch, pc))
        iclass = BTRACE_INSN_RETURN;
      else if (gdbarch_insn_is_jump (gdbarch, pc))
        iclass = BTRACE_INSN_JUMP;
    }
  catch (const gdb_exception_error &error)
    {
    }

  return iclass;
}
/* Try to match the back trace at LHS to the back trace at RHS.  Returns the
   number of matching function segments or zero if the back traces do not
   match.  BTINFO is the branch trace information for the current thread.  */

static int
ftrace_match_backtrace (struct btrace_thread_info *btinfo,
                        struct btrace_function *lhs,
                        struct btrace_function *rhs)
{
  int matches;

  for (matches = 0; lhs != NULL && rhs != NULL; ++matches)
    {
      if (ftrace_function_switched (lhs, rhs->msym, rhs->sym))
        return 0;

      lhs = ftrace_get_caller (btinfo, lhs);
      rhs = ftrace_get_caller (btinfo, rhs);
    }

  return matches;
}
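
/* For example, two back traces main > foo > bar would yield three matches,
   while a single differing caller anywhere in the chain yields zero.  */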
/* Add ADJUSTMENT to the level of BFUN and succeeding function segments.
   BTINFO is the branch trace information for the current thread.  */

static void
ftrace_fixup_level (struct btrace_thread_info *btinfo,
                    struct btrace_function *bfun, int adjustment)
{
  if (adjustment == 0)
    return;

  DEBUG_FTRACE ("fixup level (%+d)", adjustment);
  ftrace_debug (bfun, "..bfun");

  while (bfun != NULL)
    {
      bfun->level += adjustment;
      bfun = ftrace_find_call_by_number (btinfo, bfun->number + 1);
    }
}
/* Recompute the global level offset.  Traverse the function trace and compute
   the global level offset as the negative of the minimal function level.  */

static void
ftrace_compute_global_level_offset (struct btrace_thread_info *btinfo)
{
  int level = INT_MAX;

  if (btinfo == NULL)
    return;

  if (btinfo->functions.empty ())
    return;

  unsigned int length = btinfo->functions.size() - 1;
  for (unsigned int i = 0; i < length; ++i)
    level = std::min (level, btinfo->functions[i].level);

  /* The last function segment contains the current instruction, which is not
     really part of the trace.  If it contains just this one instruction, we
     ignore the segment.  */
  struct btrace_function *last = &btinfo->functions.back();
  if (last->insn.size () != 1)
    level = std::min (level, last->level);

  DEBUG_FTRACE ("setting global level offset: %d", -level);
  btinfo->level = -level;
}
/* Connect the function segments PREV and NEXT in a bottom-to-top walk as in
   ftrace_connect_backtrace.  BTINFO is the branch trace information for the
   current thread.  */

static void
ftrace_connect_bfun (struct btrace_thread_info *btinfo,
                     struct btrace_function *prev,
                     struct btrace_function *next)
{
  DEBUG_FTRACE ("connecting...");
  ftrace_debug (prev, "..prev");
  ftrace_debug (next, "..next");

  /* The function segments are not yet connected.  */
  gdb_assert (prev->next == 0);
  gdb_assert (next->prev == 0);

  prev->next = next->number;
  next->prev = prev->number;

  /* We may have moved NEXT to a different function level.  */
  ftrace_fixup_level (btinfo, next, prev->level - next->level);

  /* If we run out of back trace for one, let's use the other's.  */
  if (prev->up == 0)
    {
      const btrace_function_flags flags = next->flags;

      next = ftrace_find_call_by_number (btinfo, next->up);
      if (next != NULL)
        {
          DEBUG_FTRACE ("using next's callers");
          ftrace_fixup_caller (btinfo, prev, next, flags);
        }
    }
  else if (next->up == 0)
    {
      const btrace_function_flags flags = prev->flags;

      prev = ftrace_find_call_by_number (btinfo, prev->up);
      if (prev != NULL)
        {
          DEBUG_FTRACE ("using prev's callers");
          ftrace_fixup_caller (btinfo, next, prev, flags);
        }
    }
  else
    {
      /* PREV may have a tailcall caller, NEXT can't.  If it does, fixup the up
         link to add the tail callers to NEXT's back trace.

         This removes NEXT->UP from NEXT's back trace.  It will be added back
         when connecting NEXT and PREV's callers - provided they exist.

         If PREV's back trace consists of a series of tail calls without an
         actual call, there will be no further connection and NEXT's caller will
         be removed for good.  To catch this case, we handle it here and connect
         the top of PREV's back trace to NEXT's caller.  */
      if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
        {
          struct btrace_function *caller;
          btrace_function_flags next_flags, prev_flags;

          /* We checked NEXT->UP above so CALLER can't be NULL.  */
          caller = ftrace_find_call_by_number (btinfo, next->up);
          next_flags = next->flags;
          prev_flags = prev->flags;

          DEBUG_FTRACE ("adding prev's tail calls to next");

          prev = ftrace_find_call_by_number (btinfo, prev->up);
          ftrace_fixup_caller (btinfo, next, prev, prev_flags);

          for (; prev != NULL; prev = ftrace_find_call_by_number (btinfo,
                                                                  prev->up))
            {
              /* At the end of PREV's back trace, continue with CALLER.  */
              if (prev->up == 0)
                {
                  DEBUG_FTRACE ("fixing up link for tailcall chain");
                  ftrace_debug (prev, "..top");
                  ftrace_debug (caller, "..up");

                  ftrace_fixup_caller (btinfo, prev, caller, next_flags);

                  /* If we skipped any tail calls, this may move CALLER to a
                     different function level.

                     Note that changing CALLER's level is only OK because we
                     know that this is the last iteration of the bottom-to-top
                     walk in ftrace_connect_backtrace.

                     Otherwise we will fix up CALLER's level when we connect it
                     to PREV's caller in the next iteration.  */
                  ftrace_fixup_level (btinfo, caller,
                                      prev->level - caller->level - 1);
                  break;
                }

              /* There's nothing to do if we find a real call.  */
              if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
                {
                  DEBUG_FTRACE ("will fix up link in next iteration");
                  break;
                }
            }
        }
    }
}
/* Connect function segments on the same level in the back trace at LHS and RHS.
   The back traces at LHS and RHS are expected to match according to
   ftrace_match_backtrace.  BTINFO is the branch trace information for the
   current thread.  */

static void
ftrace_connect_backtrace (struct btrace_thread_info *btinfo,
                          struct btrace_function *lhs,
                          struct btrace_function *rhs)
{
  while (lhs != NULL && rhs != NULL)
    {
      struct btrace_function *prev, *next;

      gdb_assert (!ftrace_function_switched (lhs, rhs->msym, rhs->sym));

      /* Connecting LHS and RHS may change the up link.  */
      prev = lhs;
      next = rhs;

      lhs = ftrace_get_caller (btinfo, lhs);
      rhs = ftrace_get_caller (btinfo, rhs);

      ftrace_connect_bfun (btinfo, prev, next);
    }
}
/* Bridge the gap between two function segments left and right of a gap if their
   respective back traces match in at least MIN_MATCHES functions.  BTINFO is
   the branch trace information for the current thread.

   Returns non-zero if the gap could be bridged, zero otherwise.  */

static int
ftrace_bridge_gap (struct btrace_thread_info *btinfo,
                   struct btrace_function *lhs, struct btrace_function *rhs,
                   int min_matches)
{
  struct btrace_function *best_l, *best_r, *cand_l, *cand_r;
  int best_matches;

  DEBUG_FTRACE ("checking gap at insn %u (req matches: %d)",
                rhs->insn_offset - 1, min_matches);

  best_matches = 0;
  best_l = NULL;
  best_r = NULL;

  /* We search the back traces of LHS and RHS for valid connections and connect
     the two function segments that give the longest combined back trace.  */

  for (cand_l = lhs; cand_l != NULL;
       cand_l = ftrace_get_caller (btinfo, cand_l))
    for (cand_r = rhs; cand_r != NULL;
         cand_r = ftrace_get_caller (btinfo, cand_r))
      {
        int matches;

        matches = ftrace_match_backtrace (btinfo, cand_l, cand_r);
        if (best_matches < matches)
          {
            best_matches = matches;
            best_l = cand_l;
            best_r = cand_r;
          }
      }

  /* We need at least MIN_MATCHES matches.  */
  gdb_assert (min_matches > 0);
  if (best_matches < min_matches)
    return 0;

  DEBUG_FTRACE ("..matches: %d", best_matches);

  /* We will fix up the level of BEST_R and succeeding function segments such
     that BEST_R's level matches BEST_L's when we connect BEST_L to BEST_R.

     This will ignore the level of RHS and following if BEST_R != RHS.  I.e. if
     BEST_R is a successor of RHS in the back trace of RHS (phases 1 and 3).

     To catch this, we already fix up the level here where we can start at RHS
     instead of at BEST_R.  We will ignore the level fixup when connecting
     BEST_L to BEST_R as they will already be on the same level.  */
  ftrace_fixup_level (btinfo, rhs, best_l->level - best_r->level);

  ftrace_connect_backtrace (btinfo, best_l, best_r);

  return best_matches;
}
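
/* On success, the segments left and right of the gap are linked via their
   PREV/NEXT and up links so that iteration and unwinding can cross the gap
   as if the trace were contiguous.  */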
/* Try to bridge gaps due to overflow or decode errors by connecting the
   function segments that are separated by the gap.  */

static void
btrace_bridge_gaps (struct thread_info *tp, std::vector<unsigned int> &gaps)
{
  struct btrace_thread_info *btinfo = &tp->btrace;
  std::vector<unsigned int> remaining;
  int min_matches;

  DEBUG ("bridge gaps");

  /* We require a minimum amount of matches for bridging a gap.  The number of
     required matches will be lowered with each iteration.

     The more matches the higher our confidence that the bridging is correct.
     For big gaps or small traces, however, it may not be feasible to require a
     high number of matches.  */
  for (min_matches = 5; min_matches > 0; --min_matches)
    {
      /* Let's try to bridge as many gaps as we can.  In some cases, we need to
         skip a gap and revisit it again after we closed later gaps.  */
      while (!gaps.empty ())
        {
          for (const unsigned int number : gaps)
            {
              struct btrace_function *gap, *lhs, *rhs;
              int bridged;

              gap = ftrace_find_call_by_number (btinfo, number);

              /* We may have a sequence of gaps if we run from one error into
                 the next as we try to re-sync onto the trace stream.  Ignore
                 all but the leftmost gap in such a sequence.

                 Also ignore gaps at the beginning of the trace.  */
              lhs = ftrace_find_call_by_number (btinfo, gap->number - 1);
              if (lhs == NULL || lhs->errcode != 0)
                continue;

              /* Skip gaps to the right.  */
              rhs = ftrace_find_call_by_number (btinfo, gap->number + 1);
              while (rhs != NULL && rhs->errcode != 0)
                rhs = ftrace_find_call_by_number (btinfo, rhs->number + 1);

              /* Ignore gaps at the end of the trace.  */
              if (rhs == NULL)
                continue;

              bridged = ftrace_bridge_gap (btinfo, lhs, rhs, min_matches);

              /* Keep track of gaps we were not able to bridge and try again.
                 If we just pushed them to the end of GAPS we would risk an
                 infinite loop in case we simply cannot bridge a gap.  */
              if (bridged == 0)
                remaining.push_back (number);
            }

          /* Let's see if we made any progress.  */
          if (remaining.size () == gaps.size ())
            break;

          gaps.clear ();
          gaps.swap (remaining);
        }

      /* We get here if either GAPS is empty or if GAPS equals REMAINING.  */
      if (gaps.empty ())
        break;

      remaining.clear ();
    }

  /* We may omit this in some cases.  Not sure it is worth the extra
     complication, though.  */
  ftrace_compute_global_level_offset (btinfo);
}
/* Compute the function branch trace from BTS trace.  */

static void
btrace_compute_ftrace_bts (struct thread_info *tp,
                           const struct btrace_data_bts *btrace,
                           std::vector<unsigned int> &gaps)
{
  /* We may end up doing target calls that require the current thread to be TP,
     for example reading memory through gdb_insn_length.  Make sure TP is the
     current thread.  */
  scoped_restore_current_thread restore_thread;
  switch_to_thread (tp);

  struct btrace_thread_info *btinfo;
  unsigned int blk;
  int level;

  gdbarch *gdbarch = current_inferior ()->arch ();
  btinfo = &tp->btrace;
  blk = btrace->blocks->size ();

  if (btinfo->functions.empty ())
    level = INT_MAX;
  else
    level = -btinfo->level;

  while (blk != 0)
    {
      CORE_ADDR pc;

      blk -= 1;

      const btrace_block &block = btrace->blocks->at (blk);
      pc = block.begin;

      for (;;)
        {
          struct btrace_function *bfun;
          struct btrace_insn insn;
          int size;

          /* We should hit the end of the block.  Warn if we went too far.  */
          if (block.end < pc)
            {
              /* Indicate the gap in the trace.  */
              bfun = ftrace_new_gap (btinfo, BDE_BTS_OVERFLOW, gaps);

              warning (_("Recorded trace may be corrupted at instruction "
                         "%u (pc = %s)."), bfun->insn_offset - 1,
                       core_addr_to_string_nz (pc));

              break;
            }

          bfun = ftrace_update_function (btinfo, pc);

          /* Maintain the function level offset.
             For all but the last block, we do it here.  */
          if (blk != 0)
            level = std::min (level, bfun->level);

          size = 0;
          try
            {
              size = gdb_insn_length (gdbarch, pc);
            }
          catch (const gdb_exception_error &error)
            {
            }

          insn.pc = pc;
          insn.size = size;
          insn.iclass = ftrace_classify_insn (gdbarch, pc);
          insn.flags = 0;

          ftrace_update_insns (bfun, insn);

          /* We're done once we pushed the instruction at the end.  */
          if (block.end == pc)
            break;

          /* We can't continue if we fail to compute the size.  */
          if (size <= 0)
            {
              /* Indicate the gap in the trace.  We just added INSN so we're
                 not at the beginning.  */
              bfun = ftrace_new_gap (btinfo, BDE_BTS_INSN_SIZE, gaps);

              warning (_("Recorded trace may be incomplete at instruction %u "
                         "(pc = %s)."), bfun->insn_offset - 1,
                       core_addr_to_string_nz (pc));

              break;
            }

          pc += size;

          /* Maintain the function level offset.
             For the last block, we do it here to not consider the last
             instruction.
             Since the last instruction corresponds to the current instruction
             and is not really part of the execution history, it shouldn't
             affect the level.  */
          if (blk == 0)
            level = std::min (level, bfun->level);
        }
    }

  /* LEVEL is the minimal function level of all btrace function segments.
     Define the global level offset to -LEVEL so all function levels are
     normalized to start at zero.  */
  btinfo->level = -level;
}
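
/* Note that BTS stores the most recent block first; the loop above therefore
   runs BLK from the end of the block vector down to zero so blocks are
   processed in chronological order.  */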
#if defined (HAVE_LIBIPT)

static enum btrace_insn_class
pt_reclassify_insn (enum pt_insn_class iclass)
{
  switch (iclass)
    {
    case ptic_call:
      return BTRACE_INSN_CALL;

    case ptic_return:
      return BTRACE_INSN_RETURN;

    case ptic_jump:
      return BTRACE_INSN_JUMP;

    default:
      return BTRACE_INSN_OTHER;
    }
}
/* Return the btrace instruction flags for INSN.  */

static btrace_insn_flags
pt_btrace_insn_flags (const struct pt_insn &insn)
{
  btrace_insn_flags flags = 0;

  if (insn.speculative)
    flags |= BTRACE_INSN_FLAG_SPECULATIVE;

  return flags;
}
/* Return the btrace instruction for INSN.  */

static btrace_insn
pt_btrace_insn (const struct pt_insn &insn)
{
  return {(CORE_ADDR) insn.ip, (gdb_byte) insn.size,
          pt_reclassify_insn (insn.iclass),
          pt_btrace_insn_flags (insn)};
}
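
/* This narrows a libipt instruction down to the fields btrace keeps: the
   address, the size, our instruction classification, and the speculation
   flag.  */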
/* Handle instruction decode events (libipt-v2).  */

static int
handle_pt_insn_events (struct btrace_thread_info *btinfo,
                       struct pt_insn_decoder *decoder,
                       std::vector<unsigned int> &gaps, int status)
{
#if defined (HAVE_PT_INSN_EVENT)
  while (status & pts_event_pending)
    {
      struct btrace_function *bfun;
      struct pt_event event;
      uint64_t offset;

      status = pt_insn_event (decoder, &event, sizeof (event));
      if (status < 0)
        break;

      switch (event.type)
        {
        default:
          break;

        case ptev_enabled:
          if (event.status_update != 0)
            break;

          if (event.variant.enabled.resumed == 0 && !btinfo->functions.empty ())
            {
              bfun = ftrace_new_gap (btinfo, BDE_PT_DISABLED, gaps);

              pt_insn_get_offset (decoder, &offset);

              warning (_("Non-contiguous trace at instruction %u (offset = 0x%"
                         PRIx64 ")."), bfun->insn_offset - 1, offset);
            }

          break;

        case ptev_overflow:
          bfun = ftrace_new_gap (btinfo, BDE_PT_OVERFLOW, gaps);

          pt_insn_get_offset (decoder, &offset);

          warning (_("Overflow at instruction %u (offset = 0x%" PRIx64 ")."),
                   bfun->insn_offset - 1, offset);

          break;
        }
    }
#endif /* defined (HAVE_PT_INSN_EVENT) */

  return status;
}
/* Handle events indicated by flags in INSN (libipt-v1).  */

static void
handle_pt_insn_event_flags (struct btrace_thread_info *btinfo,
                            struct pt_insn_decoder *decoder,
                            const struct pt_insn &insn,
                            std::vector<unsigned int> &gaps)
{
#if defined (HAVE_STRUCT_PT_INSN_ENABLED)
  /* Tracing is disabled and re-enabled each time we enter the kernel.  Most
     times, we continue from the same instruction we stopped before.  This is
     indicated via the RESUMED instruction flag.  The ENABLED instruction flag
     means that we continued from some other instruction.  Indicate this as a
     trace gap except when tracing just started.  */
  if (insn.enabled && !btinfo->functions.empty ())
    {
      struct btrace_function *bfun;
      uint64_t offset;

      bfun = ftrace_new_gap (btinfo, BDE_PT_DISABLED, gaps);

      pt_insn_get_offset (decoder, &offset);

      warning (_("Non-contiguous trace at instruction %u (offset = 0x%" PRIx64
                 ", pc = 0x%" PRIx64 ")."), bfun->insn_offset - 1, offset,
               insn.ip);
    }
#endif /* defined (HAVE_STRUCT_PT_INSN_ENABLED) */

#if defined (HAVE_STRUCT_PT_INSN_RESYNCED)
  /* Indicate trace overflows.  */
  if (insn.resynced)
    {
      struct btrace_function *bfun;
      uint64_t offset;

      bfun = ftrace_new_gap (btinfo, BDE_PT_OVERFLOW, gaps);

      pt_insn_get_offset (decoder, &offset);

      warning (_("Overflow at instruction %u (offset = 0x%" PRIx64 ", pc = 0x%"
                 PRIx64 ")."), bfun->insn_offset - 1, offset, insn.ip);
    }
#endif /* defined (HAVE_STRUCT_PT_INSN_RESYNCED) */
}
/* Add function branch trace to BTINFO using DECODER.  */

static void
ftrace_add_pt (struct btrace_thread_info *btinfo,
               struct pt_insn_decoder *decoder,
               int *plevel,
               std::vector<unsigned int> &gaps)
{
  struct btrace_function *bfun;
  uint64_t offset;
  int status;

  for (;;)
    {
      struct pt_insn insn;

      status = pt_insn_sync_forward (decoder);
      if (status < 0)
        {
          if (status != -pte_eos)
            warning (_("Failed to synchronize onto the Intel Processor "
                       "Trace stream: %s."), pt_errstr (pt_errcode (status)));
          break;
        }

      for (;;)
        {
          /* Handle events from the previous iteration or synchronization.  */
          status = handle_pt_insn_events (btinfo, decoder, gaps, status);
          if (status < 0)
            break;

          status = pt_insn_next (decoder, &insn, sizeof(insn));
          if (status < 0)
            break;

          /* Handle events indicated by flags in INSN.  */
          handle_pt_insn_event_flags (btinfo, decoder, insn, gaps);

          bfun = ftrace_update_function (btinfo, insn.ip);

          /* Maintain the function level offset.  */
          *plevel = std::min (*plevel, bfun->level);

          ftrace_update_insns (bfun, pt_btrace_insn (insn));
        }

      if (status == -pte_eos)
        break;

      /* Indicate the gap in the trace.  */
      bfun = ftrace_new_gap (btinfo, status, gaps);

      pt_insn_get_offset (decoder, &offset);

      warning (_("Decode error (%d) at instruction %u (offset = 0x%" PRIx64
                 ", pc = 0x%" PRIx64 "): %s."), status, bfun->insn_offset - 1,
               offset, insn.ip, pt_errstr (pt_errcode (status)));
    }
}
/* A callback function to allow the trace decoder to read the inferior's
   memory.  */

static int
btrace_pt_readmem_callback (gdb_byte *buffer, size_t size,
                            const struct pt_asid *asid, uint64_t pc,
                            void *context)
{
  int result, errcode;

  result = (int) size;
  try
    {
      errcode = target_read_code ((CORE_ADDR) pc, buffer, size);
      if (errcode != 0)
        result = -pte_nomap;
    }
  catch (const gdb_exception_error &error)
    {
      result = -pte_nomap;
    }

  return result;
}
/* Translate the vendor from one enum to another.  */

static enum pt_cpu_vendor
pt_translate_cpu_vendor (enum btrace_cpu_vendor vendor)
{
  switch (vendor)
    {
    default:
      return pcv_unknown;

    case CV_INTEL:
      return pcv_intel;
    }
}
/* Finalize the function branch trace after decode.  */

static void btrace_finalize_ftrace_pt (struct pt_insn_decoder *decoder,
                                       struct thread_info *tp, int level)
{
  pt_insn_free_decoder (decoder);

  /* LEVEL is the minimal function level of all btrace function segments.
     Define the global level offset to -LEVEL so all function levels are
     normalized to start at zero.  */
  tp->btrace.level = -level;

  /* Add a single last instruction entry for the current PC.
     This allows us to compute the backtrace at the current PC using both
     standard unwind and btrace unwind.
     This extra entry is ignored by all record commands.  */
  btrace_add_pc (tp);
}
/* Compute the function branch trace from Intel Processor Trace
   format.  */

static void
btrace_compute_ftrace_pt (struct thread_info *tp,
                          const struct btrace_data_pt *btrace,
                          std::vector<unsigned int> &gaps)
{
  /* We may end up doing target calls that require the current thread to be TP,
     for example reading memory through btrace_pt_readmem_callback.  Make sure
     TP is the current thread.  */
  scoped_restore_current_thread restore_thread;
  switch_to_thread (tp);

  struct btrace_thread_info *btinfo;
  struct pt_insn_decoder *decoder;
  struct pt_config config;
  int level, errcode;

  if (btrace->size == 0)
    return;

  btinfo = &tp->btrace;
  if (btinfo->functions.empty ())
    level = INT_MAX;
  else
    level = -btinfo->level;

  pt_config_init(&config);
  config.begin = btrace->data;
  config.end = btrace->data + btrace->size;

  /* We treat an unknown vendor as 'no errata'.  */
  if (btrace->config.cpu.vendor != CV_UNKNOWN)
    {
      config.cpu.vendor
        = pt_translate_cpu_vendor (btrace->config.cpu.vendor);
      config.cpu.family = btrace->config.cpu.family;
      config.cpu.model = btrace->config.cpu.model;
      config.cpu.stepping = btrace->config.cpu.stepping;

      errcode = pt_cpu_errata (&config.errata, &config.cpu);
      if (errcode < 0)
        error (_("Failed to configure the Intel Processor Trace "
                 "decoder: %s."), pt_errstr (pt_errcode (errcode)));
    }

  decoder = pt_insn_alloc_decoder (&config);
  if (decoder == NULL)
    error (_("Failed to allocate the Intel Processor Trace decoder."));

  try
    {
      struct pt_image *image;

      image = pt_insn_get_image(decoder);
      if (image == NULL)
        error (_("Failed to configure the Intel Processor Trace decoder."));

      errcode = pt_image_set_callback(image, btrace_pt_readmem_callback, NULL);
      if (errcode < 0)
        error (_("Failed to configure the Intel Processor Trace decoder: "
                 "%s."), pt_errstr (pt_errcode (errcode)));

      ftrace_add_pt (btinfo, decoder, &level, gaps);
    }
  catch (const gdb_exception &error)
    {
      /* Indicate a gap in the trace if we quit trace processing.  */
      if (error.reason == RETURN_QUIT && !btinfo->functions.empty ())
        ftrace_new_gap (btinfo, BDE_PT_USER_QUIT, gaps);

      btrace_finalize_ftrace_pt (decoder, tp, level);

      throw;
    }

  btrace_finalize_ftrace_pt (decoder, tp, level);
}
#else /* defined (HAVE_LIBIPT) */

static void
btrace_compute_ftrace_pt (struct thread_info *tp,
                          const struct btrace_data_pt *btrace,
                          std::vector<unsigned int> &gaps)
{
  internal_error (_("Unexpected branch trace format."));
}

#endif /* defined (HAVE_LIBIPT) */
/* Compute the function branch trace from a block branch trace BTRACE for
   a thread given by BTINFO.  If CPU is not NULL, overwrite the cpu in the
   branch trace configuration.  This is currently only used for the PT
   format.  */

static void
btrace_compute_ftrace_1 (struct thread_info *tp,
                         struct btrace_data *btrace,
                         const struct btrace_cpu *cpu,
                         std::vector<unsigned int> &gaps)
{
  DEBUG ("compute ftrace");

  switch (btrace->format)
    {
    case BTRACE_FORMAT_NONE:
      return;

    case BTRACE_FORMAT_BTS:
      btrace_compute_ftrace_bts (tp, &btrace->variant.bts, gaps);
      return;

    case BTRACE_FORMAT_PT:
      /* Overwrite the cpu we use for enabling errata workarounds.  */
      if (cpu != nullptr)
        btrace->variant.pt.config.cpu = *cpu;

      btrace_compute_ftrace_pt (tp, &btrace->variant.pt, gaps);
      return;
    }

  internal_error (_("Unknown branch trace format."));
}
static void
btrace_finalize_ftrace (struct thread_info *tp, std::vector<unsigned int> &gaps)
{
  if (!gaps.empty ())
    {
      tp->btrace.ngaps += gaps.size ();
      btrace_bridge_gaps (tp, gaps);
    }
}
static void
btrace_compute_ftrace (struct thread_info *tp, struct btrace_data *btrace,
                       const struct btrace_cpu *cpu)
{
  std::vector<unsigned int> gaps;

  try
    {
      btrace_compute_ftrace_1 (tp, btrace, cpu, gaps);
    }
  catch (const gdb_exception &error)
    {
      btrace_finalize_ftrace (tp, gaps);

      throw;
    }

  btrace_finalize_ftrace (tp, gaps);
}
1584 btrace_add_pc (struct thread_info
*tp
)
1586 struct btrace_data btrace
;
1587 struct regcache
*regcache
;
1590 regcache
= get_thread_regcache (tp
);
1591 pc
= regcache_read_pc (regcache
);
1593 btrace
.format
= BTRACE_FORMAT_BTS
;
1594 btrace
.variant
.bts
.blocks
= new std::vector
<btrace_block
>;
1596 btrace
.variant
.bts
.blocks
->emplace_back (pc
, pc
);
1598 btrace_compute_ftrace (tp
, &btrace
, NULL
);
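
/* The single empty block [pc; pc] above decodes to exactly one instruction,
   which gives the new trace a function segment for the current PC.  */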
/* See btrace.h.  */

void
btrace_enable (struct thread_info *tp, const struct btrace_config *conf)
{
  if (tp->btrace.target != NULL)
    error (_("Recording already enabled on thread %s (%s)."),
           print_thread_id (tp), target_pid_to_str (tp->ptid).c_str ());

#if !defined (HAVE_LIBIPT)
  if (conf->format == BTRACE_FORMAT_PT)
    error (_("Intel Processor Trace support was disabled at compile time."));
#endif /* !defined (HAVE_LIBIPT) */

  DEBUG ("enable thread %s (%s)", print_thread_id (tp),
         tp->ptid.to_string ().c_str ());

  tp->btrace.target = target_enable_btrace (tp, conf);

  if (tp->btrace.target == NULL)
    error (_("Failed to enable recording on thread %s (%s)."),
           print_thread_id (tp), target_pid_to_str (tp->ptid).c_str ());

  /* We need to undo the enable in case of errors.  */
  try
    {
      /* Add an entry for the current PC so we start tracing from where we
         enabled it.

         If we can't access TP's registers, TP is most likely running.  In this
         case, we can't really say where tracing was enabled so it should be
         safe to simply skip this step.

         This is not relevant for BTRACE_FORMAT_PT since the trace will already
         start at the PC at which tracing was enabled.  */
      if (conf->format != BTRACE_FORMAT_PT
          && can_access_registers_thread (tp))
        btrace_add_pc (tp);
    }
  catch (const gdb_exception &exception)
    {
      btrace_disable (tp);

      throw;
    }
}
/* See btrace.h.  */

const struct btrace_config *
btrace_conf (const struct btrace_thread_info *btinfo)
{
  if (btinfo->target == NULL)
    return NULL;

  return target_btrace_conf (btinfo->target);
}
/* See btrace.h.  */

void
btrace_disable (struct thread_info *tp)
{
  struct btrace_thread_info *btp = &tp->btrace;

  if (btp->target == NULL)
    error (_("Recording not enabled on thread %s (%s)."),
           print_thread_id (tp), target_pid_to_str (tp->ptid).c_str ());

  DEBUG ("disable thread %s (%s)", print_thread_id (tp),
         tp->ptid.to_string ().c_str ());

  target_disable_btrace (btp->target);
  btp->target = NULL;

  btrace_clear (tp);
}
/* See btrace.h.  */

void
btrace_teardown (struct thread_info *tp)
{
  struct btrace_thread_info *btp = &tp->btrace;

  if (btp->target == NULL)
    return;

  DEBUG ("teardown thread %s (%s)", print_thread_id (tp),
         tp->ptid.to_string ().c_str ());

  target_teardown_btrace (btp->target);
  btp->target = NULL;

  btrace_clear (tp);
}
/* Stitch branch trace in BTS format.  */

static int
btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_function *last_bfun;
  btrace_block *first_new_block;

  btinfo = &tp->btrace;
  gdb_assert (!btinfo->functions.empty ());
  gdb_assert (!btrace->blocks->empty ());

  last_bfun = &btinfo->functions.back ();

  /* If the existing trace ends with a gap, we just glue the traces
     together.  We need to drop the last (i.e. chronologically first) block
     of the new trace, though, since we can't fill in the start address.  */
  if (last_bfun->insn.empty ())
    {
      btrace->blocks->pop_back ();
      return 0;
    }

  /* Beware that block trace starts with the most recent block, so the
     chronologically first block in the new trace is the last block in
     the new trace's block vector.  */
  first_new_block = &btrace->blocks->back ();
  const btrace_insn &last_insn = last_bfun->insn.back ();

  /* If the current PC at the end of the block is the same as in our current
     trace, there are two explanations:
       1. we executed the instruction and some branch brought us back.
       2. we have not made any progress.
     In the first case, the delta trace vector should contain at least two
     entries.
     In the second case, the delta trace vector should contain exactly one
     entry for the partial block containing the current PC.  Remove it.  */
  if (first_new_block->end == last_insn.pc && btrace->blocks->size () == 1)
    {
      btrace->blocks->pop_back ();
      return 0;
    }

  DEBUG ("stitching %s to %s", ftrace_print_insn_addr (&last_insn),
         core_addr_to_string_nz (first_new_block->end));

  /* Do a simple sanity check to make sure we don't accidentally end up
     with a bad block.  This should not occur in practice.  */
  if (first_new_block->end < last_insn.pc)
    {
      warning (_("Error while trying to read delta trace.  Falling back to "
                 "a full read."));
      return -1;
    }

  /* We adjust the last block to start at the end of our current trace.  */
  gdb_assert (first_new_block->begin == 0);
  first_new_block->begin = last_insn.pc;

  /* We simply pop the last insn so we can insert it again as part of
     the normal branch trace computation.
     Since instruction iterators are based on indices in the instructions
     vector, we don't leave any pointers dangling.  */
  DEBUG ("pruning insn at %s for stitching",
         ftrace_print_insn_addr (&last_insn));

  last_bfun->insn.pop_back ();

  /* The instructions vector may become empty temporarily if this has
     been the only instruction in this function segment.
     This violates the invariant but will be remedied shortly by
     btrace_compute_ftrace when we add the new trace.  */

  /* The only case where this would hurt is if the entire trace consisted
     of just that one instruction.  If we remove it, we might turn the now
     empty btrace function segment into a gap.  But we don't want gaps at
     the beginning.  To avoid this, we remove the entire old trace.  */
  if (last_bfun->number == 1 && last_bfun->insn.empty ())
    btrace_clear (tp);

  return 0;
}
/* Adjust the block trace in order to stitch old and new trace together.
   BTRACE is the new delta trace between the last and the current stop.
   TP is the traced thread.
   May modify BTRACE as well as the existing trace in TP.
   Return 0 on success, -1 otherwise.  */

static int
btrace_stitch_trace (struct btrace_data *btrace, struct thread_info *tp)
{
  /* If we don't have trace, there's nothing to do.  */
  if (btrace->empty ())
    return 0;

  switch (btrace->format)
    {
    case BTRACE_FORMAT_NONE:
      return 0;

    case BTRACE_FORMAT_BTS:
      return btrace_stitch_bts (&btrace->variant.bts, tp);

    case BTRACE_FORMAT_PT:
      /* Delta reads are not supported.  */
      return -1;
    }

  internal_error (_("Unknown branch trace format."));
}
/* Clear the branch trace histories in BTINFO.  */

static void
btrace_clear_history (struct btrace_thread_info *btinfo)
{
  xfree (btinfo->insn_history);
  xfree (btinfo->call_history);
  xfree (btinfo->replay);

  btinfo->insn_history = NULL;
  btinfo->call_history = NULL;
  btinfo->replay = NULL;
}
/* Clear the branch trace maintenance histories in BTINFO.  */

static void
btrace_maint_clear (struct btrace_thread_info *btinfo)
{
  switch (btinfo->data.format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      btinfo->maint.variant.bts.packet_history.begin = 0;
      btinfo->maint.variant.bts.packet_history.end = 0;
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      delete btinfo->maint.variant.pt.packets;

      btinfo->maint.variant.pt.packets = NULL;
      btinfo->maint.variant.pt.packet_history.begin = 0;
      btinfo->maint.variant.pt.packet_history.end = 0;
      break;
#endif /* defined (HAVE_LIBIPT) */
    }
}
/* See btrace.h.  */

const char *
btrace_decode_error (enum btrace_format format, int errcode)
{
  switch (format)
    {
    case BTRACE_FORMAT_BTS:
      switch (errcode)
        {
        case BDE_BTS_OVERFLOW:
          return _("instruction overflow");

        case BDE_BTS_INSN_SIZE:
          return _("unknown instruction");

        default:
          break;
        }
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      switch (errcode)
        {
        case BDE_PT_USER_QUIT:
          return _("trace decode cancelled");

        case BDE_PT_DISABLED:
          return _("disabled");

        case BDE_PT_OVERFLOW:
          return _("overflow");

        default:
          if (errcode < 0)
            return pt_errstr (pt_errcode (errcode));
          break;
        }
      break;
#endif /* defined (HAVE_LIBIPT) */

    default:
      break;
    }

  return _("unknown");
}
/* See btrace.h.  */

void
btrace_fetch (struct thread_info *tp, const struct btrace_cpu *cpu)
{
  struct btrace_thread_info *btinfo;
  struct btrace_target_info *tinfo;
  struct btrace_data btrace;
  int errcode;

  DEBUG ("fetch thread %s (%s)", print_thread_id (tp),
         tp->ptid.to_string ().c_str ());

  btinfo = &tp->btrace;
  tinfo = btinfo->target;
  if (tinfo == NULL)
    return;

  /* There's no way we could get new trace while replaying.
     On the other hand, delta trace would return a partial record with the
     current PC, which is the replay PC, not the last PC, as expected.  */
  if (btinfo->replay != NULL)
    return;

  /* With CLI usage, TP is always the current thread when we get here.
     However, since we can also store a gdb.Record object in Python
     referring to a different thread than the current one, we need to
     temporarily set the current thread.  */
  scoped_restore_current_thread restore_thread;
  switch_to_thread (tp);

  /* We should not be called on running or exited threads.  */
  gdb_assert (can_access_registers_thread (tp));

  /* Let's first try to extend the trace we already have.  */
  if (!btinfo->functions.empty ())
    {
      errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_DELTA);
      if (errcode == 0)
        {
          /* Success.  Let's try to stitch the traces together.  */
          errcode = btrace_stitch_trace (&btrace, tp);
        }
      else
        {
          /* We failed to read delta trace.  Let's try to read new trace.  */
          errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_NEW);

          /* If we got any new trace, discard what we have.  */
          if (errcode == 0 && !btrace.empty ())
            btrace_clear (tp);
        }

      /* If we were not able to read the trace, we start over.  */
      if (errcode != 0)
        {
          btrace_clear (tp);
          errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
        }
    }
  else
    errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);

  /* If we were not able to read the branch trace, signal an error.  */
  if (errcode != 0)
    error (_("Failed to read branch trace."));

  /* Compute the trace, provided we have any.  */
  if (!btrace.empty ())
    {
      /* Store the raw trace data.  The stored data will be cleared in
         btrace_clear, so we always append the new trace.  */
      btrace_data_append (&btinfo->data, &btrace);
      btrace_maint_clear (btinfo);

      btrace_clear_history (btinfo);
      btrace_compute_ftrace (tp, &btrace, cpu);
    }
}
/* See btrace.h.  */

void
btrace_clear (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;

  DEBUG ("clear thread %s (%s)", print_thread_id (tp),
         tp->ptid.to_string ().c_str ());

  /* Make sure btrace frames that may hold a pointer into the branch
     trace data are destroyed.  */
  reinit_frame_cache ();

  btinfo = &tp->btrace;

  btinfo->functions.clear ();
  btinfo->ngaps = 0;

  /* Must clear the maint data before - it depends on BTINFO->DATA.  */
  btrace_maint_clear (btinfo);
  btinfo->data.clear ();
  btrace_clear_history (btinfo);
}
/* See btrace.h.  */

void
btrace_free_objfile (struct objfile *objfile)
{
  DEBUG ("free objfile");

  for (thread_info *tp : all_non_exited_threads ())
    btrace_clear (tp);
}
/* See btrace.h.  */

const struct btrace_insn *
btrace_insn_get (const struct btrace_insn_iterator *it)
{
  const struct btrace_function *bfun;
  unsigned int index, end;

  index = it->insn_index;
  bfun = &it->btinfo->functions[it->call_index];

  /* Check if the iterator points to a gap in the trace.  */
  if (bfun->errcode != 0)
    return NULL;

  /* The index is within the bounds of this function's instruction vector.  */
  end = bfun->insn.size ();
  gdb_assert (0 < end);
  gdb_assert (index < end);

  return &bfun->insn[index];
}
/* See btrace.h.  */

int
btrace_insn_get_error (const struct btrace_insn_iterator *it)
{
  return it->btinfo->functions[it->call_index].errcode;
}
/* See btrace.h.  */

unsigned int
btrace_insn_number (const struct btrace_insn_iterator *it)
{
  return it->btinfo->functions[it->call_index].insn_offset + it->insn_index;
}
/* See btrace.h.  */

void
btrace_insn_begin (struct btrace_insn_iterator *it,
                   const struct btrace_thread_info *btinfo)
{
  if (btinfo->functions.empty ())
    error (_("No trace."));

  it->btinfo = btinfo;
  it->call_index = 0;
  it->insn_index = 0;
}
/* See btrace.h.  */

void
btrace_insn_end (struct btrace_insn_iterator *it,
                 const struct btrace_thread_info *btinfo)
{
  const struct btrace_function *bfun;
  unsigned int length;

  if (btinfo->functions.empty ())
    error (_("No trace."));

  bfun = &btinfo->functions.back ();
  length = bfun->insn.size ();

  /* The last function may either be a gap or it contains the current
     instruction, which is one past the end of the execution trace; ignore
     it.  */
  if (length > 0)
    length -= 1;

  it->btinfo = btinfo;
  it->call_index = bfun->number - 1;
  it->insn_index = length;
}
/* See btrace.h.  */

unsigned int
btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = &it->btinfo->functions[it->call_index];
  steps = 0;
  index = it->insn_index;

  while (stride != 0)
    {
      unsigned int end, space, adv;

      end = bfun->insn.size ();

      /* An empty function segment represents a gap in the trace.  We count
         it as one instruction.  */
      if (end == 0)
        {
          const struct btrace_function *next;

          next = ftrace_find_call_by_number (it->btinfo, bfun->number + 1);
          if (next == NULL)
            break;

          stride -= 1;
          steps += 1;

          bfun = next;
          index = 0;

          continue;
        }

      gdb_assert (0 < end);
      gdb_assert (index < end);

      /* Compute the number of instructions remaining in this segment.  */
      space = end - index;

      /* Advance the iterator as far as possible within this segment.  */
      adv = std::min (space, stride);

      stride -= adv;
      index += adv;
      steps += adv;

      /* Move to the next function if we're at the end of this one.  */
      if (index == end)
        {
          const struct btrace_function *next;

          next = ftrace_find_call_by_number (it->btinfo, bfun->number + 1);
          if (next == NULL)
            {
              /* We stepped past the last function.

                 Let's adjust the index to point to the last instruction in
                 the previous function.  */
              index -= 1;
              steps -= 1;
              break;
            }

          /* We now point to the first instruction in the new function.  */
          bfun = next;
          index = 0;
        }

      /* We did make progress.  */
      gdb_assert (adv > 0);
    }

  /* Update the iterator.  */
  it->call_index = bfun->number - 1;
  it->insn_index = index;

  return steps;
}
/* See btrace.h.  */

unsigned int
btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = &it->btinfo->functions[it->call_index];
  steps = 0;
  index = it->insn_index;

  while (stride != 0)
    {
      unsigned int adv;

      /* Move to the previous function if we're at the start of this one.  */
      if (index == 0)
        {
          const struct btrace_function *prev;

          prev = ftrace_find_call_by_number (it->btinfo, bfun->number - 1);
          if (prev == NULL)
            break;

          /* We point to one after the last instruction in the new function.  */
          bfun = prev;
          index = bfun->insn.size ();

          /* An empty function segment represents a gap in the trace.  We count
             it as one instruction.  */
          if (index == 0)
            {
              stride -= 1;
              steps += 1;

              continue;
            }
        }

      /* Advance the iterator as far as possible within this segment.  */
      adv = std::min (index, stride);

      stride -= adv;
      index -= adv;
      steps += adv;

      /* We did make progress.  */
      gdb_assert (adv > 0);
    }

  /* Update the iterator.  */
  it->call_index = bfun->number - 1;
  it->insn_index = index;

  return steps;
}
/* See btrace.h.  */

int
btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
                 const struct btrace_insn_iterator *rhs)
{
  gdb_assert (lhs->btinfo == rhs->btinfo);

  if (lhs->call_index != rhs->call_index)
    return lhs->call_index - rhs->call_index;

  return lhs->insn_index - rhs->insn_index;
}
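
/* The comparison above orders iterators first by function segment and then
   by position within the segment, which matches the global instruction
   numbering.  */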
/* See btrace.h.  */

int
btrace_find_insn_by_number (struct btrace_insn_iterator *it,
			    const struct btrace_thread_info *btinfo,
			    unsigned int number)
{
  const struct btrace_function *bfun;
  unsigned int upper, lower;

  if (btinfo->functions.empty ())
    return 0;

  lower = 0;
  bfun = &btinfo->functions[lower];
  if (number < bfun->insn_offset)
    return 0;

  upper = btinfo->functions.size () - 1;
  bfun = &btinfo->functions[upper];
  if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun))
    return 0;

  /* We assume that there are no holes in the numbering.  */
  for (;;)
    {
      const unsigned int average = lower + (upper - lower) / 2;

      bfun = &btinfo->functions[average];

      if (number < bfun->insn_offset)
	{
	  upper = average - 1;
	  continue;
	}

      if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun))
	{
	  lower = average + 1;
	  continue;
	}

      break;
    }

  it->btinfo = btinfo;
  it->call_index = bfun->number - 1;
  it->insn_index = number - bfun->insn_offset;
  return 1;
}
/* Returns true if the recording ends with a function segment that
   contains only a single (i.e. the current) instruction.  */

static bool
btrace_ends_with_single_insn (const struct btrace_thread_info *btinfo)
{
  const btrace_function *bfun;

  if (btinfo->functions.empty ())
    return false;

  bfun = &btinfo->functions.back ();
  if (bfun->errcode != 0)
    return false;

  return ftrace_call_num_insn (bfun) == 1;
}
/* See btrace.h.  */

const struct btrace_function *
btrace_call_get (const struct btrace_call_iterator *it)
{
  if (it->index >= it->btinfo->functions.size ())
    return NULL;

  return &it->btinfo->functions[it->index];
}
/* See btrace.h.  */

unsigned int
btrace_call_number (const struct btrace_call_iterator *it)
{
  const unsigned int length = it->btinfo->functions.size ();

  /* If the last function segment contains only a single instruction (i.e.
     the current instruction), skip it.  */
  if ((it->index == length) && btrace_ends_with_single_insn (it->btinfo))
    return length;

  return it->index + 1;
}
/* See btrace.h.  */

void
btrace_call_begin (struct btrace_call_iterator *it,
		   const struct btrace_thread_info *btinfo)
{
  if (btinfo->functions.empty ())
    error (_("No trace."));

  it->btinfo = btinfo;
  it->index = 0;
}
/* See btrace.h.  */

void
btrace_call_end (struct btrace_call_iterator *it,
		 const struct btrace_thread_info *btinfo)
{
  if (btinfo->functions.empty ())
    error (_("No trace."));

  it->btinfo = btinfo;
  it->index = btinfo->functions.size ();
}
/* See btrace.h.  */

unsigned int
btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
{
  const unsigned int length = it->btinfo->functions.size ();

  if (it->index + stride < length - 1)
    /* Default case: Simply advance the iterator.  */
    it->index += stride;
  else if (it->index + stride == length - 1)
    {
      /* We land exactly at the last function segment.  If it contains only
	 one instruction (i.e. the current instruction) it is not actually
	 part of the trace.  */
      if (btrace_ends_with_single_insn (it->btinfo))
	it->index = length;
      else
	it->index = length - 1;
    }
  else
    {
      /* We land past the last function segment and have to adjust the
	 stride.  If the last function segment contains only one instruction
	 (i.e. the current instruction) it is not actually part of the
	 trace.  */
      if (btrace_ends_with_single_insn (it->btinfo))
	stride = length - it->index - 1;
      else
	stride = length - it->index;

      it->index = length;
    }

  return stride;
}
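
/* Illustrative note (not from the original sources): the end position of the
   call iterator is index == length, one past the last segment.  If the trace
   ends in a single-instruction segment holding only the current instruction,
   that segment is treated as if it were not part of the trace, so a
   hypothetical sketch like

     struct btrace_call_iterator it;
     btrace_call_begin (&it, btinfo);
     btrace_call_next (&it, 1000000);
     // it.index == btinfo->functions.size ()

   saturates at the end position rather than stepping past it; the return
   value is the number of steps actually taken.  */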
/* See btrace.h.  */

unsigned int
btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
{
  const unsigned int length = it->btinfo->functions.size ();
  int steps = 0;

  gdb_assert (it->index <= length);

  if (stride == 0 || it->index == 0)
    return 0;

  /* If we are at the end, the first step is a special case.  If the last
     function segment contains only one instruction (i.e. the current
     instruction) it is not actually part of the trace.  To be able to step
     over this instruction, we need at least one more function segment.  */
  if ((it->index == length) && (length > 1))
    {
      if (btrace_ends_with_single_insn (it->btinfo))
	it->index = length - 2;
      else
	it->index = length - 1;

      steps = 1;
      stride -= 1;
    }

  stride = std::min (stride, it->index);

  it->index -= stride;
  return steps + stride;
}
/* See btrace.h.  */

int
btrace_call_cmp (const struct btrace_call_iterator *lhs,
		 const struct btrace_call_iterator *rhs)
{
  gdb_assert (lhs->btinfo == rhs->btinfo);
  return (int) (lhs->index - rhs->index);
}
/* See btrace.h.  */

int
btrace_find_call_by_number (struct btrace_call_iterator *it,
			    const struct btrace_thread_info *btinfo,
			    unsigned int number)
{
  const unsigned int length = btinfo->functions.size ();

  if ((number == 0) || (number > length))
    return 0;

  it->btinfo = btinfo;
  it->index = number - 1;
  return 1;
}
/* See btrace.h.  */

void
btrace_set_insn_history (struct btrace_thread_info *btinfo,
			 const struct btrace_insn_iterator *begin,
			 const struct btrace_insn_iterator *end)
{
  if (btinfo->insn_history == NULL)
    btinfo->insn_history = XCNEW (struct btrace_insn_history);

  btinfo->insn_history->begin = *begin;
  btinfo->insn_history->end = *end;
}
/* See btrace.h.  */

void
btrace_set_call_history (struct btrace_thread_info *btinfo,
			 const struct btrace_call_iterator *begin,
			 const struct btrace_call_iterator *end)
{
  gdb_assert (begin->btinfo == end->btinfo);

  if (btinfo->call_history == NULL)
    btinfo->call_history = XCNEW (struct btrace_call_history);

  btinfo->call_history->begin = *begin;
  btinfo->call_history->end = *end;
}
/* See btrace.h.  */

int
btrace_is_replaying (struct thread_info *tp)
{
  return tp->btrace.replay != NULL;
}
/* See btrace.h.  */

int
btrace_is_empty (struct thread_info *tp)
{
  struct btrace_insn_iterator begin, end;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  if (btinfo->functions.empty ())
    return 1;

  btrace_insn_begin (&begin, btinfo);
  btrace_insn_end (&end, btinfo);

  return btrace_insn_cmp (&begin, &end) == 0;
}
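
/* Illustrative note (not from the original sources): a trace that contains
   only the current instruction compares as empty here, since the begin and
   end iterators then address the same position.  A hypothetical guard in a
   caller might read:

     if (btrace_is_empty (tp))
       error (_("No trace."));  */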
#if defined (HAVE_LIBIPT)

/* Print a single packet.  */

static void
pt_print_packet (const struct pt_packet *packet)
{
  switch (packet->type)
    {
    default:
      gdb_printf (("[??: %x]"), packet->type);
      break;

    case ppt_psb:
      gdb_printf (("psb"));
      break;

    case ppt_psbend:
      gdb_printf (("psbend"));
      break;

    case ppt_pad:
      gdb_printf (("pad"));
      break;

    case ppt_tip:
      gdb_printf (("tip %u: 0x%" PRIx64 ""),
		  packet->payload.ip.ipc,
		  packet->payload.ip.ip);
      break;

    case ppt_tip_pge:
      gdb_printf (("tip.pge %u: 0x%" PRIx64 ""),
		  packet->payload.ip.ipc,
		  packet->payload.ip.ip);
      break;

    case ppt_tip_pgd:
      gdb_printf (("tip.pgd %u: 0x%" PRIx64 ""),
		  packet->payload.ip.ipc,
		  packet->payload.ip.ip);
      break;

    case ppt_fup:
      gdb_printf (("fup %u: 0x%" PRIx64 ""),
		  packet->payload.ip.ipc,
		  packet->payload.ip.ip);
      break;

    case ppt_tnt_8:
      gdb_printf (("tnt-8 %u: 0x%" PRIx64 ""),
		  packet->payload.tnt.bit_size,
		  packet->payload.tnt.payload);
      break;

    case ppt_tnt_64:
      gdb_printf (("tnt-64 %u: 0x%" PRIx64 ""),
		  packet->payload.tnt.bit_size,
		  packet->payload.tnt.payload);
      break;

    case ppt_pip:
      gdb_printf (("pip %" PRIx64 "%s"), packet->payload.pip.cr3,
		  packet->payload.pip.nr ? (" nr") : (""));
      break;

    case ppt_tsc:
      gdb_printf (("tsc %" PRIx64 ""), packet->payload.tsc.tsc);
      break;

    case ppt_cbr:
      gdb_printf (("cbr %u"), packet->payload.cbr.ratio);
      break;

    case ppt_mode:
      switch (packet->payload.mode.leaf)
	{
	default:
	  gdb_printf (("mode %u"), packet->payload.mode.leaf);
	  break;

	case pt_mol_exec:
	  gdb_printf (("mode.exec%s%s"),
		      packet->payload.mode.bits.exec.csl
		      ? (" cs.l") : (""),
		      packet->payload.mode.bits.exec.csd
		      ? (" cs.d") : (""));
	  break;

	case pt_mol_tsx:
	  gdb_printf (("mode.tsx%s%s"),
		      packet->payload.mode.bits.tsx.intx
		      ? (" intx") : (""),
		      packet->payload.mode.bits.tsx.abrt
		      ? (" abrt") : (""));
	  break;
	}
      break;

    case ppt_ovf:
      gdb_printf (("ovf"));
      break;

    case ppt_stop:
      gdb_printf (("stop"));
      break;

    case ppt_vmcs:
      gdb_printf (("vmcs %" PRIx64 ""), packet->payload.vmcs.base);
      break;

    case ppt_tma:
      gdb_printf (("tma %x %x"), packet->payload.tma.ctc,
		  packet->payload.tma.fc);
      break;

    case ppt_mtc:
      gdb_printf (("mtc %x"), packet->payload.mtc.ctc);
      break;

    case ppt_cyc:
      gdb_printf (("cyc %" PRIx64 ""), packet->payload.cyc.value);
      break;

    case ppt_mnt:
      gdb_printf (("mnt %" PRIx64 ""), packet->payload.mnt.payload);
      break;
    }
}
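
/* For illustration (hypothetical output, not from the original sources),
   given the format strings above a short packet stream might print as:

     psb
     mode.exec cs.l
     tip 3: 0x401130
     tnt-8 2: 0x3
     psbend

   One packet is printed per pt_print_packet call; the caller supplies any
   separators or line breaks.  */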
/* Decode packets into MAINT using DECODER.  */

static void
btrace_maint_decode_pt (struct btrace_maint_info *maint,
			struct pt_packet_decoder *decoder)
{
  int errcode;

  if (maint->variant.pt.packets == NULL)
    maint->variant.pt.packets = new std::vector<btrace_pt_packet>;

  for (;;)
    {
      struct btrace_pt_packet packet;

      errcode = pt_pkt_sync_forward (decoder);
      if (errcode < 0)
	break;

      for (;;)
	{
	  pt_pkt_get_offset (decoder, &packet.offset);

	  errcode = pt_pkt_next (decoder, &packet.packet,
				 sizeof(packet.packet));
	  if (errcode < 0)
	    break;

	  if (maint_btrace_pt_skip_pad == 0 || packet.packet.type != ppt_pad)
	    {
	      packet.errcode = pt_errcode (errcode);
	      maint->variant.pt.packets->push_back (packet);
	    }
	}

      if (errcode == -pte_eos)
	break;

      packet.errcode = pt_errcode (errcode);
      maint->variant.pt.packets->push_back (packet);

      warning (_("Error at trace offset 0x%" PRIx64 ": %s."),
	       packet.offset, pt_errstr (packet.errcode));
    }

  if (errcode != -pte_eos)
    warning (_("Failed to synchronize onto the Intel Processor Trace "
	       "stream: %s."), pt_errstr (pt_errcode (errcode)));
}
/* Update the packet history in BTINFO.  */

static void
btrace_maint_update_pt_packets (struct btrace_thread_info *btinfo)
{
  struct pt_packet_decoder *decoder;
  const struct btrace_cpu *cpu;
  struct btrace_data_pt *pt;
  struct pt_config config;
  int errcode;

  pt = &btinfo->data.variant.pt;

  /* Nothing to do if there is no trace.  */
  if (pt->size == 0)
    return;

  memset (&config, 0, sizeof(config));

  config.size = sizeof (config);
  config.begin = pt->data;
  config.end = pt->data + pt->size;

  cpu = record_btrace_get_cpu ();
  if (cpu == nullptr)
    cpu = &pt->config.cpu;

  /* We treat an unknown vendor as 'no errata'.  */
  if (cpu->vendor != CV_UNKNOWN)
    {
      config.cpu.vendor = pt_translate_cpu_vendor (cpu->vendor);
      config.cpu.family = cpu->family;
      config.cpu.model = cpu->model;
      config.cpu.stepping = cpu->stepping;

      errcode = pt_cpu_errata (&config.errata, &config.cpu);
      if (errcode < 0)
	error (_("Failed to configure the Intel Processor Trace "
		 "decoder: %s."), pt_errstr (pt_errcode (errcode)));
    }

  decoder = pt_pkt_alloc_decoder (&config);
  if (decoder == NULL)
    error (_("Failed to allocate the Intel Processor Trace decoder."));

  try
    {
      btrace_maint_decode_pt (&btinfo->maint, decoder);
    }
  catch (const gdb_exception &except)
    {
      pt_pkt_free_decoder (decoder);

      if (except.reason < 0)
	throw;
    }

  pt_pkt_free_decoder (decoder);
}
#endif /* !defined (HAVE_LIBIPT)  */
/* Update the packet maintenance information for BTINFO and store the
   low and high bounds into BEGIN and END, respectively.
   Store the current iterator state into FROM and TO.  */

static void
btrace_maint_update_packets (struct btrace_thread_info *btinfo,
			     unsigned int *begin, unsigned int *end,
			     unsigned int *from, unsigned int *to)
{
  switch (btinfo->data.format)
    {
    default:
      *begin = 0;
      *end = 0;
      *from = 0;
      *to = 0;
      break;

    case BTRACE_FORMAT_BTS:
      /* Nothing to do - we operate directly on BTINFO->DATA.  */
      *begin = 0;
      *end = btinfo->data.variant.bts.blocks->size ();
      *from = btinfo->maint.variant.bts.packet_history.begin;
      *to = btinfo->maint.variant.bts.packet_history.end;
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      if (btinfo->maint.variant.pt.packets == nullptr)
	btinfo->maint.variant.pt.packets = new std::vector<btrace_pt_packet>;

      if (btinfo->maint.variant.pt.packets->empty ())
	btrace_maint_update_pt_packets (btinfo);

      *begin = 0;
      *end = btinfo->maint.variant.pt.packets->size ();
      *from = btinfo->maint.variant.pt.packet_history.begin;
      *to = btinfo->maint.variant.pt.packet_history.end;
      break;
#endif /* defined (HAVE_LIBIPT)  */
    }
}
/* Print packets in BTINFO from BEGIN (inclusive) until END (exclusive) and
   update the current iterator position.  */

static void
btrace_maint_print_packets (struct btrace_thread_info *btinfo,
			    unsigned int begin, unsigned int end)
{
  switch (btinfo->data.format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      {
	const std::vector<btrace_block> &blocks
	  = *btinfo->data.variant.bts.blocks;
	unsigned int blk;

	for (blk = begin; blk < end; ++blk)
	  {
	    const btrace_block &block = blocks.at (blk);

	    gdb_printf ("%u\tbegin: %s, end: %s\n", blk,
			core_addr_to_string_nz (block.begin),
			core_addr_to_string_nz (block.end));
	  }

	btinfo->maint.variant.bts.packet_history.begin = begin;
	btinfo->maint.variant.bts.packet_history.end = end;
      }
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      {
	const std::vector<btrace_pt_packet> &packets
	  = *btinfo->maint.variant.pt.packets;
	unsigned int pkt;

	for (pkt = begin; pkt < end; ++pkt)
	  {
	    const struct btrace_pt_packet &packet = packets.at (pkt);

	    gdb_printf ("%u\t", pkt);
	    gdb_printf ("0x%" PRIx64 "\t", packet.offset);

	    if (packet.errcode == pte_ok)
	      pt_print_packet (&packet.packet);
	    else
	      gdb_printf ("[error: %s]", pt_errstr (packet.errcode));

	    gdb_printf ("\n");
	  }

	btinfo->maint.variant.pt.packet_history.begin = begin;
	btinfo->maint.variant.pt.packet_history.end = end;
      }
      break;
#endif /* defined (HAVE_LIBIPT)  */
    }
}
/* Read a number from an argument string.  */

static unsigned int
get_uint (const char **arg)
{
  const char *begin, *pos;
  char *end;
  unsigned long number;

  begin = *arg;
  pos = skip_spaces (begin);

  if (!isdigit (*pos))
    error (_("Expected positive number, got: %s."), pos);

  number = strtoul (pos, &end, 10);
  if (number > UINT_MAX)
    error (_("Number too big."));

  *arg += (end - begin);

  return (unsigned int) number;
}
/* Read a context size from an argument string.  */

static int
get_context_size (const char **arg)
{
  const char *pos = skip_spaces (*arg);

  if (!isdigit (*pos))
    error (_("Expected positive number, got: %s."), pos);

  char *end;
  long result = strtol (pos, &end, 10);
  *arg = end;
  return result;
}
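
/* Hypothetical parsing example (not from the original sources): for the
   argument string "10,+5", get_uint consumes "10" and leaves *arg at ",+5";
   after skipping the comma and the '+', get_context_size consumes "5".  The
   packet-history command below combines the two helpers in exactly this
   way.  */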
/* Complain about junk at the end of an argument string.  */

static void
no_chunk (const char *arg)
{
  if (*arg != 0)
    error (_("Junk after argument: %s."), arg);
}
/* The "maintenance btrace packet-history" command.  */

static void
maint_btrace_packet_history_cmd (const char *arg, int from_tty)
{
  struct btrace_thread_info *btinfo;
  unsigned int size, begin, end, from, to;

  thread_info *tp = current_inferior ()->find_thread (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  size = 10;
  btinfo = &tp->btrace;

  btrace_maint_update_packets (btinfo, &begin, &end, &from, &to);
  if (begin == end)
    {
      gdb_printf (_("No trace.\n"));
      return;
    }

  if (arg == NULL || *arg == 0 || strcmp (arg, "+") == 0)
    {
      from = to;

      if (end - from < size)
	size = end - from;
      to = from + size;
    }
  else if (strcmp (arg, "-") == 0)
    {
      to = from;

      if (to - begin < size)
	size = to - begin;
      from = to - size;
    }
  else
    {
      from = get_uint (&arg);
      if (from >= end)
	error (_("'%u' is out of range."), from);

      arg = skip_spaces (arg);
      if (*arg == ',')
	{
	  arg = skip_spaces (++arg);

	  if (*arg == '+')
	    {
	      arg += 1;
	      size = get_context_size (&arg);

	      no_chunk (arg);

	      if (end - from < size)
		size = end - from;
	      to = from + size;
	    }
	  else if (*arg == '-')
	    {
	      arg += 1;
	      size = get_context_size (&arg);

	      no_chunk (arg);

	      /* Include the packet given as first argument.  */
	      from += 1;
	      to = from;

	      if (to - begin < size)
		size = to - begin;
	      from = to - size;
	    }
	  else
	    {
	      to = get_uint (&arg);

	      /* Include the packet at the second argument and silently
		 truncate the range.  */
	      if (to < end)
		to += 1;
	      else
		to = end;

	      no_chunk (arg);
	    }
	}
      else
	{
	  no_chunk (arg);

	  if (end - from < size)
	    size = end - from;
	  to = from + size;
	}
    }

  btrace_maint_print_packets (btinfo, from, to);
}
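
/* Hypothetical usage examples for the command above (illustrative, not from
   the original sources):

     (gdb) maint btrace packet-history          # next ten packets
     (gdb) maint btrace packet-history -        # previous ten packets
     (gdb) maint btrace packet-history 100      # ten packets starting at 100
     (gdb) maint btrace packet-history 100,+5   # packets [100; 105)
     (gdb) maint btrace packet-history 100,110  # packets 100 through 110  */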
/* The "maintenance btrace clear-packet-history" command.  */

static void
maint_btrace_clear_packet_history_cmd (const char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  if (inferior_ptid == null_ptid)
    error (_("No thread."));

  thread_info *tp = inferior_thread ();
  btrace_thread_info *btinfo = &tp->btrace;

  /* Must clear the maint data before - it depends on BTINFO->DATA.  */
  btrace_maint_clear (btinfo);
  btinfo->data.clear ();
}
/* The "maintenance btrace clear" command.  */

static void
maint_btrace_clear_cmd (const char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  if (inferior_ptid == null_ptid)
    error (_("No thread."));

  thread_info *tp = inferior_thread ();
  btrace_clear (tp);
}
/* The "maintenance info btrace" command.  */

static void
maint_info_btrace_cmd (const char *args, int from_tty)
{
  struct btrace_thread_info *btinfo;
  const struct btrace_config *conf;

  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  if (inferior_ptid == null_ptid)
    error (_("No thread."));

  thread_info *tp = inferior_thread ();

  btinfo = &tp->btrace;

  conf = btrace_conf (btinfo);
  if (conf == NULL)
    error (_("No btrace configuration."));

  gdb_printf (_("Format: %s.\n"),
	      btrace_format_string (conf->format));

  switch (conf->format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      gdb_printf (_("Number of packets: %zu.\n"),
		  btinfo->data.variant.bts.blocks->size ());
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      {
	struct pt_version version;

	version = pt_library_version ();
	gdb_printf (_("Version: %u.%u.%u%s.\n"), version.major,
		    version.minor, version.build,
		    version.ext != NULL ? version.ext : "");

	btrace_maint_update_pt_packets (btinfo);
	gdb_printf (_("Number of packets: %zu.\n"),
		    ((btinfo->maint.variant.pt.packets == nullptr)
		     ? 0 : btinfo->maint.variant.pt.packets->size ()));
      }
      break;
#endif /* defined (HAVE_LIBIPT)  */
    }
}
/* The "maint show btrace pt skip-pad" show value function.  */

static void
show_maint_btrace_pt_skip_pad (struct ui_file *file, int from_tty,
			       struct cmd_list_element *c,
			       const char *value)
{
  gdb_printf (file, _("Skip PAD packets is %s.\n"), value);
}
/* Initialize btrace maintenance commands.  */

void _initialize_btrace ();
void
_initialize_btrace ()
{
  add_cmd ("btrace", class_maintenance, maint_info_btrace_cmd,
	   _("Info about branch tracing data."), &maintenanceinfolist);

  add_basic_prefix_cmd ("btrace", class_maintenance,
			_("Branch tracing maintenance commands."),
			&maint_btrace_cmdlist, 0, &maintenancelist);

  add_setshow_prefix_cmd ("btrace", class_maintenance,
			  _("Set branch tracing specific variables."),
			  _("Show branch tracing specific variables."),
			  &maint_btrace_set_cmdlist,
			  &maint_btrace_show_cmdlist,
			  &maintenance_set_cmdlist,
			  &maintenance_show_cmdlist);

  add_setshow_prefix_cmd ("pt", class_maintenance,
			  _("Set Intel Processor Trace specific variables."),
			  _("Show Intel Processor Trace specific variables."),
			  &maint_btrace_pt_set_cmdlist,
			  &maint_btrace_pt_show_cmdlist,
			  &maint_btrace_set_cmdlist,
			  &maint_btrace_show_cmdlist);

  add_setshow_boolean_cmd ("skip-pad", class_maintenance,
			   &maint_btrace_pt_skip_pad, _("\
Set whether PAD packets should be skipped in the btrace packet history."), _("\
Show whether PAD packets should be skipped in the btrace packet history."), _("\
When enabled, PAD packets are ignored in the btrace packet history."),
			   NULL, show_maint_btrace_pt_skip_pad,
			   &maint_btrace_pt_set_cmdlist,
			   &maint_btrace_pt_show_cmdlist);

  add_cmd ("packet-history", class_maintenance, maint_btrace_packet_history_cmd,
	   _("Print the raw branch tracing data.\n\
With no argument, print ten more packets after the previous ten-line print.\n\
With '-' as argument print ten packets before a previous ten-line print.\n\
One argument specifies the starting packet of a ten-line print.\n\
Two arguments with comma between specify starting and ending packets to \
print.\n\
Preceded with '+'/'-' the second argument specifies the distance from the \
first."),
	   &maint_btrace_cmdlist);

  add_cmd ("clear-packet-history", class_maintenance,
	   maint_btrace_clear_packet_history_cmd,
	   _("Clears the branch tracing packet history.\n\
Discards the raw branch tracing data but not the execution history data."),
	   &maint_btrace_cmdlist);

  add_cmd ("clear", class_maintenance, maint_btrace_clear_cmd,
	   _("Clears the branch tracing data.\n\
Discards the raw branch tracing data and the execution history data.\n\
The next 'record' command will fetch the branch tracing data anew."),
	   &maint_btrace_cmdlist);
}