Regenerate AArch64 opcodes files
[binutils-gdb.git] / gdb / record-btrace.c
blob6350400c318ad583e06bfece2a7668cf16c52974
1 /* Branch trace support for GDB, the GNU debugger.
3 Copyright (C) 2013-2024 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
22 #include "defs.h"
23 #include "record.h"
24 #include "record-btrace.h"
25 #include "gdbthread.h"
26 #include "target.h"
27 #include "gdbcmd.h"
28 #include "disasm.h"
29 #include "observable.h"
30 #include "cli/cli-utils.h"
31 #include "source.h"
32 #include "ui-out.h"
33 #include "symtab.h"
34 #include "filenames.h"
35 #include "regcache.h"
36 #include "frame-unwind.h"
37 #include "hashtab.h"
38 #include "infrun.h"
39 #include "gdbsupport/event-loop.h"
40 #include "inf-loop.h"
41 #include "inferior.h"
42 #include <algorithm>
43 #include "gdbarch.h"
44 #include "cli/cli-style.h"
45 #include "async-event.h"
46 #include <forward_list>
47 #include "objfiles.h"
48 #include "interps.h"
/* Target description for "record-btrace": the short name used with the
   "target" command plus the doc strings shown by "help target".  */

static const target_info record_btrace_target_info = {
  "record-btrace",
  N_("Branch tracing target"),
  N_("Collect control-flow trace and provide the execution history.")
};
/* The target_ops of record-btrace.

   This target sits at the record stratum.  Trivial methods are defined
   inline and forward to the generic record_* helpers; everything else is
   implemented out of line further down in this file.  */

class record_btrace_target final : public target_ops
{
public:
  const target_info &info () const override
  { return record_btrace_target_info; }

  strata stratum () const override { return record_stratum; }

  void close () override;
  void async (bool) override;

  void detach (inferior *inf, int from_tty) override
  { record_detach (this, inf, from_tty); }

  void disconnect (const char *, int) override;

  void mourn_inferior () override
  { record_mourn_inferior (this); }

  void kill () override
  { record_kill (this); }

  enum record_method record_method (ptid_t ptid) override;

  void stop_recording () override;
  void info_record () override;

  /* Instruction- and function-call-history browsing.  */
  void insn_history (int size, gdb_disassembly_flags flags) override;
  void insn_history_from (ULONGEST from, int size,
			  gdb_disassembly_flags flags) override;
  void insn_history_range (ULONGEST begin, ULONGEST end,
			   gdb_disassembly_flags flags) override;
  void call_history (int size, record_print_flags flags) override;
  void call_history_from (ULONGEST begin, int size, record_print_flags flags)
    override;
  void call_history_range (ULONGEST begin, ULONGEST end, record_print_flags flags)
    override;

  /* Replay state queries and control.  */
  bool record_is_replaying (ptid_t ptid) override;
  bool record_will_replay (ptid_t ptid, int dir) override;
  void record_stop_replaying () override;

  enum target_xfer_status xfer_partial (enum target_object object,
					const char *annex,
					gdb_byte *readbuf,
					const gdb_byte *writebuf,
					ULONGEST offset, ULONGEST len,
					ULONGEST *xfered_len) override;

  int insert_breakpoint (struct gdbarch *,
			 struct bp_target_info *) override;
  int remove_breakpoint (struct gdbarch *, struct bp_target_info *,
			 enum remove_bp_reason) override;

  void fetch_registers (struct regcache *, int) override;

  void store_registers (struct regcache *, int) override;
  void prepare_to_store (struct regcache *) override;

  const struct frame_unwind *get_unwinder () override;

  const struct frame_unwind *get_tailcall_unwinder () override;

  void resume (ptid_t, int, enum gdb_signal) override;
  ptid_t wait (ptid_t, struct target_waitstatus *, target_wait_flags) override;

  void stop (ptid_t) override;
  void update_thread_list () override;
  bool thread_alive (ptid_t ptid) override;

  /* Navigation within the recorded trace.  */
  void goto_record_begin () override;
  void goto_record_end () override;
  void goto_record (ULONGEST insn) override;

  bool can_execute_reverse () override;

  bool stopped_by_sw_breakpoint () override;
  bool supports_stopped_by_sw_breakpoint () override;

  bool stopped_by_hw_breakpoint () override;
  bool supports_stopped_by_hw_breakpoint () override;

  enum exec_direction_kind execution_direction () override;
  void prepare_to_generate_core () override;
  void done_generating_core () override;
};
/* The single instance of the record-btrace target.  */
static record_btrace_target record_btrace_ops;

/* Initialize the record-btrace target ops.  */

/* Token associated with a new-thread observer enabling branch tracing
   for the new thread.  */
static const gdb::observers::token record_btrace_thread_observer_token {};

/* Memory access types used in set/show record btrace replay-memory-access.  */
static const char replay_memory_access_read_only[] = "read-only";
static const char replay_memory_access_read_write[] = "read-write";
static const char *const replay_memory_access_types[] =
{
  replay_memory_access_read_only,
  replay_memory_access_read_write,
  NULL
};

/* The currently allowed replay memory access type.  Defaults to
   read-only.  */
static const char *replay_memory_access = replay_memory_access_read_only;

/* The cpu state kinds: auto-detect, no cpu, or an explicit cpu.  */
enum record_btrace_cpu_state_kind
{
  CS_AUTO,
  CS_NONE,
  CS_CPU
};

/* The current cpu state.  */
static enum record_btrace_cpu_state_kind record_btrace_cpu_state = CS_AUTO;

/* The current cpu for trace decode.  Only meaningful when
   record_btrace_cpu_state is CS_CPU or CS_NONE (see
   record_btrace_get_cpu).  */
static struct btrace_cpu record_btrace_cpu;

/* Command lists for "set/show record btrace".  */
static struct cmd_list_element *set_record_btrace_cmdlist;
static struct cmd_list_element *show_record_btrace_cmdlist;

/* The execution direction of the last resume we got.  See record-full.c.  */
static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;

/* The async event handler for reverse/replay execution.  */
static struct async_event_handler *record_btrace_async_inferior_event_handler;

/* A flag indicating that we are currently generating a core file.  */
static int record_btrace_generating_corefile;

/* The current branch trace configuration.  */
static struct btrace_config record_btrace_conf;

/* Command list for "record btrace".  */
static struct cmd_list_element *record_btrace_cmdlist;

/* Command lists for "set/show record btrace bts".  */
static struct cmd_list_element *set_record_btrace_bts_cmdlist;
static struct cmd_list_element *show_record_btrace_bts_cmdlist;

/* Command lists for "set/show record btrace pt".  */
static struct cmd_list_element *set_record_btrace_pt_cmdlist;
static struct cmd_list_element *show_record_btrace_pt_cmdlist;

/* Command list for "set record btrace cpu".  */
static struct cmd_list_element *set_record_btrace_cpu_cmdlist;
/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.  Only prints when the
   "set debug record" variable (record_debug) is non-zero.  */

#define DEBUG(msg, args...) \
  do \
    { \
      if (record_debug != 0) \
	gdb_printf (gdb_stdlog, \
		    "[record-btrace] " msg "\n", ##args); \
    } \
  while (0)
222 /* Return the cpu configured by the user. Returns NULL if the cpu was
223 configured as auto. */
224 const struct btrace_cpu *
225 record_btrace_get_cpu (void)
227 switch (record_btrace_cpu_state)
229 case CS_AUTO:
230 return nullptr;
232 case CS_NONE:
233 record_btrace_cpu.vendor = CV_UNKNOWN;
234 [[fallthrough]];
235 case CS_CPU:
236 return &record_btrace_cpu;
239 error (_("Internal error: bad record btrace cpu state."));
242 /* Update the branch trace for the current thread and return a pointer to its
243 thread_info.
245 Throws an error if there is no thread or no trace. This function never
246 returns NULL. */
248 static struct thread_info *
249 require_btrace_thread (void)
251 DEBUG ("require");
253 if (inferior_ptid == null_ptid)
254 error (_("No thread."));
256 thread_info *tp = inferior_thread ();
258 validate_registers_access ();
260 btrace_fetch (tp, record_btrace_get_cpu ());
262 if (btrace_is_empty (tp))
263 error (_("No trace."));
265 return tp;
268 /* Update the branch trace for the current thread and return a pointer to its
269 branch trace information struct.
271 Throws an error if there is no thread or no trace. This function never
272 returns NULL. */
274 static struct btrace_thread_info *
275 require_btrace (void)
277 struct thread_info *tp;
279 tp = require_btrace_thread ();
281 return &tp->btrace;
284 /* The new thread observer. */
286 static void
287 record_btrace_on_new_thread (struct thread_info *tp)
289 /* Ignore this thread if its inferior is not recorded by us. */
290 target_ops *rec = tp->inf->target_at (record_stratum);
291 if (rec != &record_btrace_ops)
292 return;
296 btrace_enable (tp, &record_btrace_conf);
298 catch (const gdb_exception_error &error)
300 warning ("%s", error.what ());
304 /* Enable automatic tracing of new threads. */
306 static void
307 record_btrace_auto_enable (void)
309 DEBUG ("attach thread observer");
311 gdb::observers::new_thread.attach (record_btrace_on_new_thread,
312 record_btrace_thread_observer_token,
313 "record-btrace");
316 /* Disable automatic tracing of new threads. */
318 static void
319 record_btrace_auto_disable (void)
321 DEBUG ("detach thread observer");
323 gdb::observers::new_thread.detach (record_btrace_thread_observer_token);
326 /* The record-btrace async event handler function. */
328 static void
329 record_btrace_handle_async_inferior_event (gdb_client_data data)
331 inferior_event_handler (INF_REG_EVENT);
334 /* See record-btrace.h. */
336 void
337 record_btrace_push_target (void)
339 const char *format;
341 record_btrace_auto_enable ();
343 current_inferior ()->push_target (&record_btrace_ops);
345 record_btrace_async_inferior_event_handler
346 = create_async_event_handler (record_btrace_handle_async_inferior_event,
347 NULL, "record-btrace");
348 record_btrace_generating_corefile = 0;
350 format = btrace_format_short_string (record_btrace_conf.format);
351 interps_notify_record_changed (current_inferior (), 1, "btrace", format);
354 /* Disable btrace on a set of threads on scope exit. */
356 struct scoped_btrace_disable
358 scoped_btrace_disable () = default;
360 DISABLE_COPY_AND_ASSIGN (scoped_btrace_disable);
362 ~scoped_btrace_disable ()
364 for (thread_info *tp : m_threads)
365 btrace_disable (tp);
368 void add_thread (thread_info *thread)
370 m_threads.push_front (thread);
373 void discard ()
375 m_threads.clear ();
378 private:
379 std::forward_list<thread_info *> m_threads;
382 /* Open target record-btrace. */
384 static void
385 record_btrace_target_open (const char *args, int from_tty)
387 /* If we fail to enable btrace for one thread, disable it for the threads for
388 which it was successfully enabled. */
389 scoped_btrace_disable btrace_disable;
391 DEBUG ("open");
393 record_preopen ();
395 if (!target_has_execution ())
396 error (_("The program is not being run."));
398 for (thread_info *tp : current_inferior ()->non_exited_threads ())
399 if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
401 btrace_enable (tp, &record_btrace_conf);
403 btrace_disable.add_thread (tp);
406 record_btrace_push_target ();
408 btrace_disable.discard ();
411 /* The stop_recording method of target record-btrace. */
413 void
414 record_btrace_target::stop_recording ()
416 DEBUG ("stop recording");
418 record_btrace_auto_disable ();
420 for (thread_info *tp : current_inferior ()->non_exited_threads ())
421 if (tp->btrace.target != NULL)
422 btrace_disable (tp);
425 /* The disconnect method of target record-btrace. */
427 void
428 record_btrace_target::disconnect (const char *args,
429 int from_tty)
431 struct target_ops *beneath = this->beneath ();
433 /* Do not stop recording, just clean up GDB side. */
434 current_inferior ()->unpush_target (this);
436 /* Forward disconnect. */
437 beneath->disconnect (args, from_tty);
440 /* The close method of target record-btrace. */
442 void
443 record_btrace_target::close ()
445 if (record_btrace_async_inferior_event_handler != NULL)
446 delete_async_event_handler (&record_btrace_async_inferior_event_handler);
448 /* Make sure automatic recording gets disabled even if we did not stop
449 recording before closing the record-btrace target. */
450 record_btrace_auto_disable ();
452 /* We should have already stopped recording.
453 Tear down btrace in case we have not. */
454 for (thread_info *tp : current_inferior ()->non_exited_threads ())
455 btrace_teardown (tp);
458 /* The async method of target record-btrace. */
460 void
461 record_btrace_target::async (bool enable)
463 if (enable)
464 mark_async_event_handler (record_btrace_async_inferior_event_handler);
465 else
466 clear_async_event_handler (record_btrace_async_inferior_event_handler);
468 this->beneath ()->async (enable);
/* Adjust *SIZE down to the largest unit (GB, MB, kB) that divides it
   exactly, and return the matching human-readable suffix.  If *SIZE is
   not a multiple of 1024, it is left unchanged and "" is returned.  */

static const char *
record_btrace_adjust_size (unsigned int *size)
{
  const unsigned int value = *size;

  if ((value & ((1u << 30) - 1)) == 0)
    {
      *size = value >> 30;
      return "GB";
    }

  if ((value & ((1u << 20) - 1)) == 0)
    {
      *size = value >> 20;
      return "MB";
    }

  if ((value & ((1u << 10) - 1)) == 0)
    {
      *size = value >> 10;
      return "kB";
    }

  return "";
}
499 /* Print a BTS configuration. */
501 static void
502 record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
504 const char *suffix;
505 unsigned int size;
507 size = conf->size;
508 if (size > 0)
510 suffix = record_btrace_adjust_size (&size);
511 gdb_printf (_("Buffer size: %u%s.\n"), size, suffix);
515 /* Print an Intel Processor Trace configuration. */
517 static void
518 record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
520 const char *suffix;
521 unsigned int size;
523 size = conf->size;
524 if (size > 0)
526 suffix = record_btrace_adjust_size (&size);
527 gdb_printf (_("Buffer size: %u%s.\n"), size, suffix);
531 /* Print a branch tracing configuration. */
533 static void
534 record_btrace_print_conf (const struct btrace_config *conf)
536 gdb_printf (_("Recording format: %s.\n"),
537 btrace_format_string (conf->format));
539 switch (conf->format)
541 case BTRACE_FORMAT_NONE:
542 return;
544 case BTRACE_FORMAT_BTS:
545 record_btrace_print_bts_conf (&conf->bts);
546 return;
548 case BTRACE_FORMAT_PT:
549 record_btrace_print_pt_conf (&conf->pt);
550 return;
553 internal_error (_("Unknown branch trace format."));
/* The info_record method of target record-btrace.

   Prints the recording configuration, the number of recorded
   instructions, functions and gaps for the current thread, and the
   replay position if replaying.  */

void
record_btrace_target::info_record ()
{
  struct btrace_thread_info *btinfo;
  const struct btrace_config *conf;
  struct thread_info *tp;
  unsigned int insns, calls, gaps;

  DEBUG ("info");

  if (inferior_ptid == null_ptid)
    error (_("No thread."));

  tp = inferior_thread ();

  validate_registers_access ();

  btinfo = &tp->btrace;

  /* Print the configuration before fetching so the user sees it even if
     there is no trace.  */
  conf = ::btrace_conf (btinfo);
  if (conf != NULL)
    record_btrace_print_conf (conf);

  btrace_fetch (tp, record_btrace_get_cpu ());

  insns = 0;
  calls = 0;
  gaps = 0;

  if (!btrace_is_empty (tp))
    {
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      /* Count functions by looking at the number of the last call.  */
      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      /* Count instructions by looking at the number of the end iterator.  */
      btrace_insn_end (&insn, btinfo);
      insns = btrace_insn_number (&insn);

      /* If the last instruction is not a gap, it is the current instruction
	 that is not actually part of the record.  */
      if (btrace_insn_get (&insn) != NULL)
	insns -= 1;

      gaps = btinfo->ngaps;
    }

  gdb_printf (_("Recorded %u instructions in %u functions (%u gaps) "
		"for thread %s (%s).\n"), insns, calls, gaps,
	      print_thread_id (tp),
	      target_pid_to_str (tp->ptid).c_str ());

  if (btrace_is_replaying (tp))
    gdb_printf (_("Replay in progress. At instruction %u.\n"),
		btrace_insn_number (btinfo->replay));
}
617 /* Print a decode error. */
619 static void
620 btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
621 enum btrace_format format)
623 const char *errstr = btrace_decode_error (format, errcode);
625 uiout->text (_("["));
626 /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT. */
627 if (!(format == BTRACE_FORMAT_PT && errcode > 0))
629 uiout->text (_("decode error ("));
630 uiout->field_signed ("errcode", errcode);
631 uiout->text (_("): "));
633 uiout->text (errstr);
634 uiout->text (_("]\n"));
/* A range of source lines within a single symtab.  */

struct btrace_line_range
{
  /* The symtab this line is from.  */
  struct symtab *symtab;

  /* The first line (inclusive).  */
  int begin;

  /* The last line (exclusive).  An empty range has end <= begin; see
     btrace_line_range_is_empty.  */
  int end;
};
651 /* Construct a line range. */
653 static struct btrace_line_range
654 btrace_mk_line_range (struct symtab *symtab, int begin, int end)
656 struct btrace_line_range range;
658 range.symtab = symtab;
659 range.begin = begin;
660 range.end = end;
662 return range;
665 /* Add a line to a line range. */
667 static struct btrace_line_range
668 btrace_line_range_add (struct btrace_line_range range, int line)
670 if (range.end <= range.begin)
672 /* This is the first entry. */
673 range.begin = line;
674 range.end = line + 1;
676 else if (line < range.begin)
677 range.begin = line;
678 else if (range.end < line)
679 range.end = line;
681 return range;
684 /* Return non-zero if RANGE is empty, zero otherwise. */
686 static int
687 btrace_line_range_is_empty (struct btrace_line_range range)
689 return range.end <= range.begin;
692 /* Return non-zero if LHS contains RHS, zero otherwise. */
694 static int
695 btrace_line_range_contains_range (struct btrace_line_range lhs,
696 struct btrace_line_range rhs)
698 return ((lhs.symtab == rhs.symtab)
699 && (lhs.begin <= rhs.begin)
700 && (rhs.end <= lhs.end));
/* Find the line range associated with PC.  Returns an empty range (with
   a NULL or valid symtab) when PC has no symtab, no line table, or no
   matching line-table entries.  */

static struct btrace_line_range
btrace_find_line_range (CORE_ADDR pc)
{
  struct btrace_line_range range;
  const linetable_entry *lines;
  const linetable *ltable;
  struct symtab *symtab;
  int nlines, i;

  symtab = find_pc_line_symtab (pc);
  if (symtab == NULL)
    return btrace_mk_line_range (NULL, 0, 0);

  ltable = symtab->linetable ();
  if (ltable == NULL)
    return btrace_mk_line_range (symtab, 0, 0);

  nlines = ltable->nitems;
  lines = ltable->item;
  if (nlines <= 0)
    return btrace_mk_line_range (symtab, 0, 0);

  /* Line-table addresses are unrelocated; shift PC accordingly before
     comparing.  */
  struct objfile *objfile = symtab->compunit ()->objfile ();
  unrelocated_addr unrel_pc
    = unrelocated_addr (pc - objfile->text_section_offset ());

  /* Collect every is_stmt line-table entry at exactly UNREL_PC.  Note
     the loop deliberately stops at nlines - 1, skipping the final
     entry.  */
  range = btrace_mk_line_range (symtab, 0, 0);
  for (i = 0; i < nlines - 1; i++)
    {
      /* The test of is_stmt here was added when the is_stmt field was
	 introduced to the 'struct linetable_entry' structure.  This
	 ensured that this loop maintained the same behaviour as before we
	 introduced is_stmt.  That said, it might be that we would be
	 better off not checking is_stmt here, this would lead to us
	 possibly adding more line numbers to the range.  At the time this
	 change was made I was unsure how to test this so chose to go with
	 maintaining the existing experience.  */
      if (lines[i].unrelocated_pc () == unrel_pc && lines[i].line != 0
	  && lines[i].is_stmt)
	range = btrace_line_range_add (range, lines[i].line);
    }

  return range;
}
/* Print source lines in LINES to UIOUT.

   SRC_AND_ASM_TUPLE and ASM_LIST track the currently-open ui-out tuple
   for the last printed source line and the instruction list nested
   inside it.  When printing a new source line, we close the open pair
   and open a new one.  If the source line range in LINES is not empty,
   this function leaves the pair for the last printed source line open
   so instructions can be added to it by the caller.  */

static void
btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
		    std::optional<ui_out_emit_tuple> *src_and_asm_tuple,
		    std::optional<ui_out_emit_list> *asm_list,
		    gdb_disassembly_flags flags)
{
  print_source_lines_flags psl_flags;

  if (flags & DISASSEMBLY_FILENAME)
    psl_flags |= PRINT_SOURCE_LINES_FILENAME;

  for (int line = lines.begin; line < lines.end; ++line)
    {
      /* Order matters: close the inner instruction list before replacing
	 the enclosing tuple, then reopen the list inside the new tuple.  */
      asm_list->reset ();

      src_and_asm_tuple->emplace (uiout, "src_and_asm_line");

      print_source_lines (lines.symtab, line, line + 1, psl_flags);

      asm_list->emplace (uiout, "line_asm_insn");
    }
}
/* Disassemble the recorded instruction trace in [BEGIN, END) to UIOUT,
   interleaving source lines when DISASSEMBLY_SOURCE is set in FLAGS and
   printing decode errors for gaps in the trace.  */

static void
btrace_insn_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_insn_iterator *begin,
		     const struct btrace_insn_iterator *end,
		     gdb_disassembly_flags flags)
{
  DEBUG ("itrace (0x%x): [%u; %u)", (unsigned) flags,
	 btrace_insn_number (begin), btrace_insn_number (end));

  /* Always mark speculatively-executed instructions in the output.  */
  flags |= DISASSEMBLY_SPECULATIVE;

  gdbarch *gdbarch = current_inferior ()->arch ();
  btrace_line_range last_lines = btrace_mk_line_range (NULL, 0, 0);

  ui_out_emit_list list_emitter (uiout, "asm_insns");

  /* The open source-line tuple/instruction-list pair; managed by
     btrace_print_lines and the no-source fallback below.  */
  std::optional<ui_out_emit_tuple> src_and_asm_tuple;
  std::optional<ui_out_emit_list> asm_list;

  gdb_pretty_print_disassembler disasm (gdbarch, uiout);

  for (btrace_insn_iterator it = *begin; btrace_insn_cmp (&it, end) != 0;
       btrace_insn_next (&it, 1))
    {
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* A NULL instruction indicates a gap in the trace.  */
      if (insn == NULL)
	{
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  uiout->field_fmt ("insn-number", "%u",
			    btrace_insn_number (&it));
	  uiout->text ("\t");

	  btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it),
				      conf->format);
	}
      else
	{
	  struct disasm_insn dinsn;

	  if ((flags & DISASSEMBLY_SOURCE) != 0)
	    {
	      struct btrace_line_range lines;

	      /* Print new source lines only when we move to a line range
		 not already covered by the last one we printed.  */
	      lines = btrace_find_line_range (insn->pc);
	      if (!btrace_line_range_is_empty (lines)
		  && !btrace_line_range_contains_range (last_lines, lines))
		{
		  btrace_print_lines (lines, uiout, &src_and_asm_tuple,
				      &asm_list, flags);
		  last_lines = lines;
		}
	      else if (!src_and_asm_tuple.has_value ())
		{
		  gdb_assert (!asm_list.has_value ());

		  src_and_asm_tuple.emplace (uiout, "src_and_asm_line");

		  /* No source information.  */
		  asm_list.emplace (uiout, "line_asm_insn");
		}

	      gdb_assert (src_and_asm_tuple.has_value ());
	      gdb_assert (asm_list.has_value ());
	    }

	  memset (&dinsn, 0, sizeof (dinsn));
	  dinsn.number = btrace_insn_number (&it);
	  dinsn.addr = insn->pc;

	  if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
	    dinsn.is_speculative = 1;

	  disasm.pretty_print_insn (&dinsn, flags);
	}
    }
}
/* The insn_history method of target record-btrace.

   Print abs(SIZE) instructions, continuing from the previously-printed
   window if there was one, otherwise starting from the replay position
   (or the trace tail when not replaying).  Negative SIZE means go
   backwards.  */

void
record_btrace_target::insn_history (int size, gdb_disassembly_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", (unsigned) flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	begin = *replay;
      else
	btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_insn_next (&end, 1);
	  covered += btrace_insn_prev (&begin, context - covered);
	  covered += btrace_insn_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_insn_next (&end, context);
	  covered += btrace_insn_prev (&begin, context - covered);
	}
    }
  else
    {
      /* Continue from the last printed window, in the requested
	 direction.  */
      begin = history->begin;
      end = history->end;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", (unsigned) flags, size,
	     btrace_insn_number (&begin), btrace_insn_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_insn_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_insn_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
	gdb_printf (_("At the start of the branch trace record.\n"));
      else
	gdb_printf (_("At the end of the branch trace record.\n"));
    }

  /* Remember the printed window for the next invocation.  */
  btrace_set_insn_history (btinfo, &begin, &end);
}
955 /* The insn_history_range method of target record-btrace. */
957 void
958 record_btrace_target::insn_history_range (ULONGEST from, ULONGEST to,
959 gdb_disassembly_flags flags)
961 struct btrace_thread_info *btinfo;
962 struct btrace_insn_iterator begin, end;
963 struct ui_out *uiout;
964 unsigned int low, high;
965 int found;
967 uiout = current_uiout;
968 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
969 low = from;
970 high = to;
972 DEBUG ("insn-history (0x%x): [%u; %u)", (unsigned) flags, low, high);
974 /* Check for wrap-arounds. */
975 if (low != from || high != to)
976 error (_("Bad range."));
978 if (high < low)
979 error (_("Bad range."));
981 btinfo = require_btrace ();
983 found = btrace_find_insn_by_number (&begin, btinfo, low);
984 if (found == 0)
985 error (_("Range out of bounds."));
987 found = btrace_find_insn_by_number (&end, btinfo, high);
988 if (found == 0)
990 /* Silently truncate the range. */
991 btrace_insn_end (&end, btinfo);
993 else
995 /* We want both begin and end to be inclusive. */
996 btrace_insn_next (&end, 1);
999 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
1000 btrace_set_insn_history (btinfo, &begin, &end);
1003 /* The insn_history_from method of target record-btrace. */
1005 void
1006 record_btrace_target::insn_history_from (ULONGEST from, int size,
1007 gdb_disassembly_flags flags)
1009 ULONGEST begin, end, context;
1011 context = abs (size);
1012 if (context == 0)
1013 error (_("Bad record instruction-history-size."));
1015 if (size < 0)
1017 end = from;
1019 if (from < context)
1020 begin = 0;
1021 else
1022 begin = from - context + 1;
1024 else
1026 begin = from;
1027 end = from + context - 1;
1029 /* Check for wrap-around. */
1030 if (end < begin)
1031 end = ULONGEST_MAX;
1034 insn_history_range (begin, end, flags);
1037 /* Print the instruction number range for a function call history line. */
1039 static void
1040 btrace_call_history_insn_range (struct ui_out *uiout,
1041 const struct btrace_function *bfun)
1043 unsigned int begin, end, size;
1045 size = bfun->insn.size ();
1046 gdb_assert (size > 0);
1048 begin = bfun->insn_offset;
1049 end = begin + size - 1;
1051 uiout->field_unsigned ("insn begin", begin);
1052 uiout->text (",");
1053 uiout->field_unsigned ("insn end", end);
1056 /* Compute the lowest and highest source line for the instructions in BFUN
1057 and return them in PBEGIN and PEND.
1058 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
1059 result from inlining or macro expansion. */
1061 static void
1062 btrace_compute_src_line_range (const struct btrace_function *bfun,
1063 int *pbegin, int *pend)
1065 struct symtab *symtab;
1066 struct symbol *sym;
1067 int begin, end;
1069 begin = INT_MAX;
1070 end = INT_MIN;
1072 sym = bfun->sym;
1073 if (sym == NULL)
1074 goto out;
1076 symtab = sym->symtab ();
1078 for (const btrace_insn &insn : bfun->insn)
1080 struct symtab_and_line sal;
1082 sal = find_pc_line (insn.pc, 0);
1083 if (sal.symtab != symtab || sal.line == 0)
1084 continue;
1086 begin = std::min (begin, sal.line);
1087 end = std::max (end, sal.line);
1090 out:
1091 *pbegin = begin;
1092 *pend = end;
1095 /* Print the source line information for a function call history line. */
1097 static void
1098 btrace_call_history_src_line (struct ui_out *uiout,
1099 const struct btrace_function *bfun)
1101 struct symbol *sym;
1102 int begin, end;
1104 sym = bfun->sym;
1105 if (sym == NULL)
1106 return;
1108 uiout->field_string ("file",
1109 symtab_to_filename_for_display (sym->symtab ()),
1110 file_name_style.style ());
1112 btrace_compute_src_line_range (bfun, &begin, &end);
1113 if (end < begin)
1114 return;
1116 uiout->text (":");
1117 uiout->field_signed ("min line", begin);
1119 if (end == begin)
1120 return;
1122 uiout->text (",");
1123 uiout->field_signed ("max line", end);
1126 /* Get the name of a branch trace function. */
1128 static const char *
1129 btrace_get_bfun_name (const struct btrace_function *bfun)
1131 struct minimal_symbol *msym;
1132 struct symbol *sym;
1134 if (bfun == NULL)
1135 return "??";
1137 msym = bfun->msym;
1138 sym = bfun->sym;
1140 if (sym != NULL)
1141 return sym->print_name ();
1142 else if (msym != NULL)
1143 return msym->print_name ();
1144 else
1145 return "??";
/* Print the recorded function trace in [BEGIN, END) to UIOUT, one line
   per function, optionally with indentation, instruction ranges and
   source lines depending on INT_FLAGS.  Gaps in the trace are printed
   as decode errors.  */

static void
btrace_call_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_call_iterator *begin,
		     const struct btrace_call_iterator *end,
		     int int_flags)
{
  struct btrace_call_iterator it;
  record_print_flags flags = (enum record_print_flag) int_flags;

  DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
	 btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
    {
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;
      struct symbol *sym;

      bfun = btrace_call_get (&it);
      sym = bfun->sym;
      msym = bfun->msym;

      /* Print the function index.  */
      uiout->field_unsigned ("index", bfun->number);
      uiout->text ("\t");

      /* Indicate gaps in the trace.  */
      if (bfun->errcode != 0)
	{
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);

	  continue;
	}

      /* Indent by call depth when requested.  */
      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
	{
	  int level = bfun->level + btinfo->level, i;

	  for (i = 0; i < level; ++i)
	    uiout->text ("  ");
	}

      /* Print the function name, preferring the full symbol.  */
      if (sym != NULL)
	uiout->field_string ("function", sym->print_name (),
			     function_name_style.style ());
      else if (msym != NULL)
	uiout->field_string ("function", msym->print_name (),
			     function_name_style.style ());
      else if (!uiout->is_mi_like_p ())
	uiout->field_string ("function", "??",
			     function_name_style.style ());

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
	{
	  uiout->text (_("\tinst "));
	  btrace_call_history_insn_range (uiout, bfun);
	}

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
	{
	  uiout->text (_("\tat "));
	  btrace_call_history_src_line (uiout, bfun);
	}

      uiout->text ("\n");
    }
}
/* The call_history method of target record-btrace.

   Print abs(SIZE) function-call history lines, continuing from the
   previously-printed window if there was one, otherwise starting from
   the replay position (or the trace tail when not replaying).  Negative
   SIZE means go backwards.  Mirrors insn_history above.  */

void
record_btrace_target::call_history (int size, record_print_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", (int) flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	{
	  begin.btinfo = btinfo;
	  begin.index = replay->call_index;
	}
      else
	btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_call_next (&end, 1);
	  covered += btrace_call_prev (&begin, context - covered);
	  covered += btrace_call_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_call_next (&end, context);
	  covered += btrace_call_prev (&begin, context - covered);
	}
    }
  else
    {
      /* Continue from the last printed window, in the requested
	 direction.  */
      begin = history->begin;
      end = history->end;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", (int) flags, size,
	     btrace_call_number (&begin), btrace_call_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_call_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_call_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_call_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
	gdb_printf (_("At the start of the branch trace record.\n"));
      else
	gdb_printf (_("At the end of the branch trace record.\n"));
    }

  /* Remember the printed window for the next invocation.  */
  btrace_set_call_history (btinfo, &begin, &end);
}
/* The call_history_range method of target record-btrace.

   Print the function-call history in the half-open range [FROM, TO).  */

void
record_btrace_target::call_history_range (ULONGEST from, ULONGEST to,
					  record_print_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_iterator begin, end;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "func history");
  /* Call numbers are unsigned int; narrowing from ULONGEST may lose
     bits, which the wrap-around check below detects.  */
  low = from;
  high = to;

  DEBUG ("call-history (0x%x): [%u; %u)", (int) flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  /* The beginning of the range must exist in the trace.  */
  found = btrace_find_call_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_call_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_call_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_call_next (&end, 1);
    }

  btrace_call_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_call_history (btinfo, &begin, &end);
}
1360 /* The call_history_from method of target record-btrace. */
1362 void
1363 record_btrace_target::call_history_from (ULONGEST from, int size,
1364 record_print_flags flags)
1366 ULONGEST begin, end, context;
1368 context = abs (size);
1369 if (context == 0)
1370 error (_("Bad record function-call-history-size."));
1372 if (size < 0)
1374 end = from;
1376 if (from < context)
1377 begin = 0;
1378 else
1379 begin = from - context + 1;
1381 else
1383 begin = from;
1384 end = from + context - 1;
1386 /* Check for wrap-around. */
1387 if (end < begin)
1388 end = ULONGEST_MAX;
1391 call_history_range ( begin, end, flags);
1394 /* The record_method method of target record-btrace. */
1396 enum record_method
1397 record_btrace_target::record_method (ptid_t ptid)
1399 process_stratum_target *proc_target = current_inferior ()->process_target ();
1400 thread_info *const tp = proc_target->find_thread (ptid);
1402 if (tp == NULL)
1403 error (_("No thread."));
1405 if (tp->btrace.target == NULL)
1406 return RECORD_METHOD_NONE;
1408 return RECORD_METHOD_BTRACE;
1411 /* The record_is_replaying method of target record-btrace. */
1413 bool
1414 record_btrace_target::record_is_replaying (ptid_t ptid)
1416 process_stratum_target *proc_target = current_inferior ()->process_target ();
1417 for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
1418 if (btrace_is_replaying (tp))
1419 return true;
1421 return false;
1424 /* The record_will_replay method of target record-btrace. */
1426 bool
1427 record_btrace_target::record_will_replay (ptid_t ptid, int dir)
1429 return dir == EXEC_REVERSE || record_is_replaying (ptid);
/* The xfer_partial method of target record-btrace.

   While replaying with read-only replay memory access, memory writes are
   rejected and reads are only allowed from read-only sections; anything
   else is reported as unavailable.  Requests that pass the filter fall
   through to the target beneath.  */

enum target_xfer_status
record_btrace_target::xfer_partial (enum target_object object,
				    const char *annex, gdb_byte *readbuf,
				    const gdb_byte *writebuf, ULONGEST offset,
				    ULONGEST len, ULONGEST *xfered_len)
{
  /* Filter out requests that don't make sense during replay.  */
  if (replay_memory_access == replay_memory_access_read_only
      && !record_btrace_generating_corefile
      && record_is_replaying (inferior_ptid))
    {
      switch (object)
	{
	case TARGET_OBJECT_MEMORY:
	  {
	    const struct target_section *section;

	    /* We do not allow writing memory in general.  */
	    if (writebuf != NULL)
	      {
		*xfered_len = len;
		return TARGET_XFER_UNAVAILABLE;
	      }

	    /* We allow reading readonly memory.  */
	    section = target_section_by_addr (this, offset);
	    if (section != NULL)
	      {
		/* Check if the section we found is readonly.  */
		if ((bfd_section_flags (section->the_bfd_section)
		     & SEC_READONLY) != 0)
		  {
		    /* Truncate the request to fit into this section.  */
		    len = std::min (len, section->endaddr - offset);
		    /* Breaking out of the switch forwards the (possibly
		       truncated) read to the target beneath.  */
		    break;
		  }
	      }

	    *xfered_len = len;
	    return TARGET_XFER_UNAVAILABLE;
	  }
	}
    }

  /* Forward the request.  */
  return this->beneath ()->xfer_partial (object, annex, readbuf, writebuf,
					 offset, len, xfered_len);
}
1483 /* The insert_breakpoint method of target record-btrace. */
1486 record_btrace_target::insert_breakpoint (struct gdbarch *gdbarch,
1487 struct bp_target_info *bp_tgt)
1489 const char *old;
1490 int ret;
1492 /* Inserting breakpoints requires accessing memory. Allow it for the
1493 duration of this function. */
1494 old = replay_memory_access;
1495 replay_memory_access = replay_memory_access_read_write;
1497 ret = 0;
1500 ret = this->beneath ()->insert_breakpoint (gdbarch, bp_tgt);
1502 catch (const gdb_exception &except)
1504 replay_memory_access = old;
1505 throw;
1507 replay_memory_access = old;
1509 return ret;
1512 /* The remove_breakpoint method of target record-btrace. */
1515 record_btrace_target::remove_breakpoint (struct gdbarch *gdbarch,
1516 struct bp_target_info *bp_tgt,
1517 enum remove_bp_reason reason)
1519 const char *old;
1520 int ret;
1522 /* Removing breakpoints requires accessing memory. Allow it for the
1523 duration of this function. */
1524 old = replay_memory_access;
1525 replay_memory_access = replay_memory_access_read_write;
1527 ret = 0;
1530 ret = this->beneath ()->remove_breakpoint (gdbarch, bp_tgt, reason);
1532 catch (const gdb_exception &except)
1534 replay_memory_access = old;
1535 throw;
1537 replay_memory_access = old;
1539 return ret;
/* The fetch_registers method of target record-btrace.

   While replaying, only the PC can be reconstructed (from the current
   trace instruction); all other register requests are ignored.  When not
   replaying, the request is forwarded to the target beneath.  */

void
record_btrace_target::fetch_registers (struct regcache *regcache, int regno)
{
  btrace_insn_iterator *replay = nullptr;

  /* Thread-db may ask for a thread's registers before GDB knows about the
     thread.  We forward the request to the target beneath in this
     case.  */
  thread_info *tp
    = current_inferior ()->process_target ()->find_thread (regcache->ptid ());
  if (tp != nullptr)
    replay = tp->btrace.replay;

  if (replay != nullptr && !record_btrace_generating_corefile)
    {
      const struct btrace_insn *insn;
      struct gdbarch *gdbarch;
      int pcreg;

      gdbarch = regcache->arch ();
      pcreg = gdbarch_pc_regnum (gdbarch);
      if (pcreg < 0)
	return;

      /* We can only provide the PC register.  */
      if (regno >= 0 && regno != pcreg)
	return;

      /* The replay iterator points at the current instruction; its PC is
	 the value we supply.  */
      insn = btrace_insn_get (replay);
      gdb_assert (insn != NULL);

      /* NOTE(review): for REGNO == -1 ("all registers") RAW_SUPPLY
	 receives -1 — presumably regcache handles this, or callers only
	 pass PCREG while replaying; confirm against regcache.  */
      regcache->raw_supply (regno, &insn->pc);
    }
  else
    this->beneath ()->fetch_registers (regcache, regno);
}
/* The store_registers method of target record-btrace.

   Register writes are refused while replaying, since registers are
   reconstructed from the recorded trace; otherwise the request is
   forwarded to the target beneath.  */

void
record_btrace_target::store_registers (struct regcache *regcache, int regno)
{
  if (!record_btrace_generating_corefile
      && record_is_replaying (regcache->ptid ()))
    error (_("Cannot write registers while replaying."));

  gdb_assert (may_write_registers);

  this->beneath ()->store_registers (regcache, regno);
}
1595 /* The prepare_to_store method of target record-btrace. */
1597 void
1598 record_btrace_target::prepare_to_store (struct regcache *regcache)
1600 if (!record_btrace_generating_corefile
1601 && record_is_replaying (regcache->ptid ()))
1602 return;
1604 this->beneath ()->prepare_to_store (regcache);
/* The branch trace frame cache.

   Associates an unwound frame with the btrace function segment it was
   built from, so the unwinder callbacks can get back to the trace
   data.  Looked up via the BFCACHE hash table below.  */

struct btrace_frame_cache
{
  /* The thread.  */
  struct thread_info *tp;

  /* The frame info.  */
  frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;
};
/* A struct btrace_frame_cache hash table indexed by NEXT.
   Entries are keyed on the FRAME pointer (see bfcache_hash and
   bfcache_eq below).  */

static htab_t bfcache;
1625 /* hash_f for htab_create_alloc of bfcache. */
1627 static hashval_t
1628 bfcache_hash (const void *arg)
1630 const struct btrace_frame_cache *cache
1631 = (const struct btrace_frame_cache *) arg;
1633 return htab_hash_pointer (cache->frame);
1636 /* eq_f for htab_create_alloc of bfcache. */
1638 static int
1639 bfcache_eq (const void *arg1, const void *arg2)
1641 const struct btrace_frame_cache *cache1
1642 = (const struct btrace_frame_cache *) arg1;
1643 const struct btrace_frame_cache *cache2
1644 = (const struct btrace_frame_cache *) arg2;
1646 return cache1->frame == cache2->frame;
/* Create a new btrace frame cache.

   The cache is allocated on the frame obstack and registered in the
   BFCACHE hash table keyed by FRAME; FRAME must not be in the table
   yet.  The TP and BFUN members are left for the caller to fill in.  */

static struct btrace_frame_cache *
bfcache_new (const frame_info_ptr &frame)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
  cache->frame = frame.get ();

  slot = htab_find_slot (bfcache, cache, INSERT);
  gdb_assert (*slot == NULL);
  *slot = cache;

  return cache;
}
1667 /* Extract the branch trace function from a branch trace frame. */
1669 static const struct btrace_function *
1670 btrace_get_frame_function (const frame_info_ptr &frame)
1672 const struct btrace_frame_cache *cache;
1673 struct btrace_frame_cache pattern;
1674 void **slot;
1676 pattern.frame = frame.get ();
1678 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1679 if (slot == NULL)
1680 return NULL;
1682 cache = (const struct btrace_frame_cache *) *slot;
1683 return cache->bfun;
1686 /* Implement stop_reason method for record_btrace_frame_unwind. */
1688 static enum unwind_stop_reason
1689 record_btrace_frame_unwind_stop_reason (const frame_info_ptr &this_frame,
1690 void **this_cache)
1692 const struct btrace_frame_cache *cache;
1693 const struct btrace_function *bfun;
1695 cache = (const struct btrace_frame_cache *) *this_cache;
1696 bfun = cache->bfun;
1697 gdb_assert (bfun != NULL);
1699 if (bfun->up == 0)
1700 return UNWIND_UNAVAILABLE;
1702 return UNWIND_NO_REASON;
/* Implement this_id method for record_btrace_frame_unwind.

   Builds an unavailable-stack frame id from the frame's function start
   address and the number of the function segment.  */

static void
record_btrace_frame_this_id (const frame_info_ptr &this_frame, void **this_cache,
			     struct frame_id *this_id)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  struct btrace_call_iterator it;
  CORE_ADDR code, special;

  cache = (const struct btrace_frame_cache *) *this_cache;

  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  /* Follow PREV links back to the first segment of this function
     instance, so every segment yields the same SPECIAL value.  */
  while (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->prev) != 0)
    bfun = btrace_call_get (&it);

  code = get_frame_func (this_frame);
  special = bfun->number;

  /* The stack is not available while replaying, hence the special
     frame id.  */
  *this_id = frame_id_build_unavailable_stack_special (code, special);

  DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
	 btrace_get_bfun_name (cache->bfun),
	 core_addr_to_string_nz (this_id->code_addr),
	 core_addr_to_string_nz (this_id->special_addr));
}
/* Implement prev_register method for record_btrace_frame_unwind.

   Only the PC can be unwound; it is reconstructed from the caller
   segment recorded in the branch trace.  */

static struct value *
record_btrace_frame_prev_register (const frame_info_ptr &this_frame,
				   void **this_cache,
				   int regnum)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  struct btrace_call_iterator it;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  int pcreg;

  /* Anything but the PC is unavailable in btrace history.  */
  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("Registers are not available in btrace record history"));

  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  if (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->up) == 0)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("No caller in btrace record history"));

  caller = btrace_call_get (&it);

  /* If the UP link is a return link, the caller resumes at the first
     instruction of its segment; otherwise it resumes right after the
     call instruction (last insn of the caller segment + its length).  */
  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    pc = caller->insn.front ().pc;
  else
    {
      pc = caller->insn.back ().pc;
      pc += gdb_insn_length (gdbarch, pc);
    }

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
	 btrace_get_bfun_name (bfun), bfun->level,
	 core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
}
/* Implement sniffer method for record_btrace_frame_unwind.

   Claims a frame while replaying: the innermost frame is taken from the
   current replay position, outer frames are derived from the callee's
   UP link in the trace.  */

static int
record_btrace_frame_sniffer (const struct frame_unwind *self,
			     const frame_info_ptr &this_frame,
			     void **this_cache)
{
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  frame_info_ptr next;

  /* THIS_FRAME does not contain a reference to its thread.  */
  tp = inferior_thread ();

  bfun = NULL;
  next = get_next_frame (this_frame);
  if (next == NULL)
    {
      /* Innermost frame: use the current replay position, if any.  */
      const struct btrace_insn_iterator *replay;

      replay = tp->btrace.replay;
      if (replay != NULL)
	bfun = &replay->btinfo->functions[replay->call_index];
    }
  else
    {
      /* Outer frame: follow the callee's UP link.  Tail-call callees
	 are left for the tailcall sniffer.  */
      const struct btrace_function *callee;
      struct btrace_call_iterator it;

      callee = btrace_get_frame_function (next);
      if (callee == NULL || (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
	return 0;

      if (btrace_find_call_by_number (&it, &tp->btrace, callee->up) == 0)
	return 0;

      bfun = btrace_call_get (&it);
    }

  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tp;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}
/* Implement sniffer method for record_btrace_tailcall_frame_unwind.

   Claims an outer frame only when the callee was entered via a tail
   call according to the trace.  */

static int
record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
				      const frame_info_ptr &this_frame,
				      void **this_cache)
{
  const struct btrace_function *bfun, *callee;
  struct btrace_frame_cache *cache;
  struct btrace_call_iterator it;
  frame_info_ptr next;
  struct thread_info *tinfo;

  /* Tail-call frames are never innermost; there must be a callee.  */
  next = get_next_frame (this_frame);
  if (next == NULL)
    return 0;

  callee = btrace_get_frame_function (next);
  if (callee == NULL)
    return 0;

  /* Only claim the frame for tail-call links.  */
  if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
    return 0;

  tinfo = inferior_thread ();
  if (btrace_find_call_by_number (&it, &tinfo->btrace, callee->up) == 0)
    return 0;

  bfun = btrace_call_get (&it);

  DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tinfo;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}
1877 static void
1878 record_btrace_frame_dealloc_cache (frame_info *self, void *this_cache)
1880 struct btrace_frame_cache *cache;
1881 void **slot;
1883 cache = (struct btrace_frame_cache *) this_cache;
1885 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1886 gdb_assert (slot != NULL);
1888 htab_remove_elt (bfcache, cache);
/* btrace recording does not store previous memory content, neither the stack
   frames content.  Any unwinding would return erroneous results as the stack
   contents no longer matches the changed PC value restored from history.
   Therefore this unwinder reports any possibly unwound registers as
   <unavailable>.

   The tailcall unwinder below shares all callbacks except the frame
   type and the sniffer.  */

const struct frame_unwind record_btrace_frame_unwind =
{
  "record-btrace",
  NORMAL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_frame_sniffer,
  record_btrace_frame_dealloc_cache
};
/* Like record_btrace_frame_unwind, but for frames reached via a tail
   call; only the frame type and the sniffer differ.  */

const struct frame_unwind record_btrace_tailcall_frame_unwind =
{
  "record-btrace tailcall",
  TAILCALL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_tailcall_frame_sniffer,
  record_btrace_frame_dealloc_cache
};
/* Implement the get_unwinder method.

   Normal frames are unwound from the recorded branch trace.  */

const struct frame_unwind *
record_btrace_target::get_unwinder ()
{
  return &record_btrace_frame_unwind;
}
/* Implement the get_tailcall_unwinder method.

   Tail-call frames are unwound from the recorded branch trace.  */

const struct frame_unwind *
record_btrace_target::get_tailcall_unwinder ()
{
  return &record_btrace_tailcall_frame_unwind;
}
1937 /* Return a human-readable string for FLAG. */
1939 static const char *
1940 btrace_thread_flag_to_str (btrace_thread_flags flag)
1942 switch (flag)
1944 case BTHR_STEP:
1945 return "step";
1947 case BTHR_RSTEP:
1948 return "reverse-step";
1950 case BTHR_CONT:
1951 return "cont";
1953 case BTHR_RCONT:
1954 return "reverse-cont";
1956 case BTHR_STOP:
1957 return "stop";
1960 return "<invalid>";
/* Indicate that TP should be resumed according to FLAG.

   This only records the intent in TP's btrace flags; the actual
   stepping is done later in the wait method.  */

static void
record_btrace_resume_thread (struct thread_info *tp,
			     enum btrace_thread_flag flag)
{
  struct btrace_thread_info *btinfo;

  DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
	 tp->ptid.to_string ().c_str (), flag,
	 btrace_thread_flag_to_str (flag));

  btinfo = &tp->btrace;

  /* Fetch the latest branch trace.  */
  btrace_fetch (tp, record_btrace_get_cpu ());

  /* A resume request overwrites a preceding resume or stop request.  */
  btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
  btinfo->flags |= flag;
}
/* Get the current frame for TP.  */

static struct frame_id
get_thread_current_frame_id (struct thread_info *tp)
{
  /* Set current thread, which is implicitly used by
     get_current_frame.  */
  scoped_restore_current_thread restore_thread;

  switch_to_thread (tp);

  process_stratum_target *proc_target = tp->inf->process_target ();

  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet.  We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in wait, about to
     move the thread.  Since we need to recompute the stack, we temporarily
     set EXECUTING to false.  */
  bool executing = tp->executing ();
  set_executing (proc_target, inferior_ptid, false);
  /* Restore the original executing state no matter how we return.  */
  SCOPE_EXIT
    {
      set_executing (proc_target, inferior_ptid, executing);
    };

  return get_frame_id (get_current_frame ());
}
/* Start replaying a thread.

   Returns the newly-installed replay iterator (also stored in
   TP->btrace.replay).  On error, any partially-installed replay state
   is torn down and the exception is re-thrown.  */

static struct btrace_insn_iterator *
record_btrace_start_replaying (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = NULL;

  /* We can't start replaying without trace.  */
  if (btinfo->functions.empty ())
    error (_("No trace."));

  /* GDB stores the current frame_id when stepping in order to detects steps
     into subroutines.
     Since frames are computed differently when we're replaying, we need to
     recompute those stored frames and fix them up so we can still detect
     subroutines after we started replaying.  */
  try
    {
      struct frame_id frame_id;
      int upd_step_frame_id, upd_step_stack_frame_id;

      /* The current frame without replaying - computed via normal unwind.  */
      frame_id = get_thread_current_frame_id (tp);

      /* Check if we need to update any stepping-related frame id's.  */
      upd_step_frame_id = (frame_id == tp->control.step_frame_id);
      upd_step_stack_frame_id = (frame_id
				 == tp->control.step_stack_frame_id);

      /* We start replaying at the end of the branch trace.  This corresponds
	 to the current instruction.  */
      replay = XNEW (struct btrace_insn_iterator);
      btrace_insn_end (replay, btinfo);

      /* Skip gaps at the end of the trace.  */
      while (btrace_insn_get (replay) == NULL)
	{
	  unsigned int steps;

	  steps = btrace_insn_prev (replay, 1);
	  if (steps == 0)
	    error (_("No trace."));
	}

      /* We're not replaying, yet.  */
      gdb_assert (btinfo->replay == NULL);
      btinfo->replay = replay;

      /* Make sure we're not using any stale registers.  */
      registers_changed_thread (tp);

      /* The current frame with replaying - computed via btrace unwind.  */
      frame_id = get_thread_current_frame_id (tp);

      /* Replace stepping related frames where necessary.  */
      if (upd_step_frame_id)
	tp->control.step_frame_id = frame_id;
      if (upd_step_stack_frame_id)
	tp->control.step_stack_frame_id = frame_id;
    }
  catch (const gdb_exception &except)
    {
      /* Undo the partial replay setup before propagating the error.  */
      xfree (btinfo->replay);
      btinfo->replay = NULL;

      registers_changed_thread (tp);

      throw;
    }

  return replay;
}
2090 /* Stop replaying a thread. */
2092 static void
2093 record_btrace_stop_replaying (struct thread_info *tp)
2095 struct btrace_thread_info *btinfo;
2097 btinfo = &tp->btrace;
2099 xfree (btinfo->replay);
2100 btinfo->replay = NULL;
2102 /* Make sure we're not leaving any stale registers. */
2103 registers_changed_thread (tp);
2106 /* Stop replaying TP if it is at the end of its execution history. */
2108 static void
2109 record_btrace_stop_replaying_at_end (struct thread_info *tp)
2111 struct btrace_insn_iterator *replay, end;
2112 struct btrace_thread_info *btinfo;
2114 btinfo = &tp->btrace;
2115 replay = btinfo->replay;
2117 if (replay == NULL)
2118 return;
2120 btrace_insn_end (&end, btinfo);
2122 if (btrace_insn_cmp (replay, &end) == 0)
2123 record_btrace_stop_replaying (tp);
/* The resume method of target record-btrace.

   When not replaying (and moving forwards), the request is forwarded to
   the target beneath.  Otherwise the move intent is recorded per thread
   and carried out later in the wait method.  */

void
record_btrace_target::resume (ptid_t ptid, int step, enum gdb_signal signal)
{
  enum btrace_thread_flag flag, cflag;

  DEBUG ("resume %s: %s%s", ptid.to_string ().c_str (),
	 ::execution_direction == EXEC_REVERSE ? "reverse-" : "",
	 step ? "step" : "cont");

  /* Store the execution direction of the last resume.

     If there is more than one resume call, we have to rely on infrun
     to not change the execution direction in-between.  */
  record_btrace_resume_exec_dir = ::execution_direction;

  /* As long as we're not replaying, just forward the request.

     For non-stop targets this means that no thread is replaying.  In order to
     make progress, we may need to explicitly move replaying threads to the end
     of their execution history.  */
  if ((::execution_direction != EXEC_REVERSE)
      && !record_is_replaying (minus_one_ptid))
    {
      this->beneath ()->resume (ptid, step, signal);
      return;
    }

  /* Compute the btrace thread flag for the requested move.  FLAG is for
     the stepped thread, CFLAG for threads that are merely continued.  */
  if (::execution_direction == EXEC_REVERSE)
    {
      flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
      cflag = BTHR_RCONT;
    }
  else
    {
      flag = step == 0 ? BTHR_CONT : BTHR_STEP;
      cflag = BTHR_CONT;
    }

  /* We just indicate the resume intent here.  The actual stepping happens in
     record_btrace_wait below.

     For all-stop targets, we only step INFERIOR_PTID and continue others.  */

  process_stratum_target *proc_target = current_inferior ()->process_target ();

  if (!target_is_non_stop_p ())
    {
      gdb_assert (inferior_ptid.matches (ptid));

      for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
	{
	  if (tp->ptid.matches (inferior_ptid))
	    record_btrace_resume_thread (tp, flag);
	  else
	    record_btrace_resume_thread (tp, cflag);
	}
    }
  else
    {
      for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
	record_btrace_resume_thread (tp, flag);
    }

  /* Async support.  */
  if (target_can_async_p ())
    {
      target_async (true);
      mark_async_event_handler (record_btrace_async_inferior_event_handler);
    }
}
/* Cancel resuming TP.

   Clears any pending move/stop request and leaves replay mode if TP
   already reached the end of its history.  */

static void
record_btrace_cancel_resume (struct thread_info *tp)
{
  btrace_thread_flags flags;

  flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
  if (flags == 0)
    return;

  DEBUG ("cancel resume thread %s (%s): %x (%s)",
	 print_thread_id (tp),
	 tp->ptid.to_string ().c_str (), flags.raw (),
	 btrace_thread_flag_to_str (flags));

  tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
  record_btrace_stop_replaying_at_end (tp);
}
2220 /* Return a target_waitstatus indicating that we ran out of history. */
2222 static struct target_waitstatus
2223 btrace_step_no_history (void)
2225 struct target_waitstatus status;
2227 status.set_no_history ();
2229 return status;
2232 /* Return a target_waitstatus indicating that a step finished. */
2234 static struct target_waitstatus
2235 btrace_step_stopped (void)
2237 struct target_waitstatus status;
2239 status.set_stopped (GDB_SIGNAL_TRAP);
2241 return status;
2244 /* Return a target_waitstatus indicating that a thread was stopped as
2245 requested. */
2247 static struct target_waitstatus
2248 btrace_step_stopped_on_request (void)
2250 struct target_waitstatus status;
2252 status.set_stopped (GDB_SIGNAL_0);
2254 return status;
2257 /* Return a target_waitstatus indicating a spurious stop. */
2259 static struct target_waitstatus
2260 btrace_step_spurious (void)
2262 struct target_waitstatus status;
2264 status.set_spurious ();
2266 return status;
2269 /* Return a target_waitstatus indicating that the thread was not resumed. */
2271 static struct target_waitstatus
2272 btrace_step_no_resumed (void)
2274 struct target_waitstatus status;
2276 status.set_no_resumed ();
2278 return status;
2281 /* Return a target_waitstatus indicating that we should wait again. */
2283 static struct target_waitstatus
2284 btrace_step_again (void)
2286 struct target_waitstatus status;
2288 status.set_ignore ();
2290 return status;
2293 /* Clear the record histories. */
2295 static void
2296 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2298 xfree (btinfo->insn_history);
2299 xfree (btinfo->call_history);
2301 btinfo->insn_history = NULL;
2302 btinfo->call_history = NULL;
/* Check whether TP's current replay position is at a breakpoint.

   Returns non-zero if so; as a side effect, records the stop reason in
   TP->btrace.stop_reason.  */

static int
record_btrace_replay_at_breakpoint (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;
  const struct btrace_insn *insn;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  /* Not replaying - nothing to check.  */
  if (replay == NULL)
    return 0;

  /* A NULL instruction indicates a gap in the trace.  */
  insn = btrace_insn_get (replay);
  if (insn == NULL)
    return 0;

  return record_check_stopped_by_breakpoint (tp->inf->aspace.get (), insn->pc,
					     &btinfo->stop_reason);
}
/* Step one instruction in forward direction.

   Returns NO_HISTORY when not replaying or when the end of the trace is
   reached, STOPPED when stepping onto a breakpoint, SPURIOUS on an
   ordinary successful step.  */

static struct target_waitstatus
record_btrace_single_step_forward (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay, end, start;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  /* We're done if we're not replaying.  */
  if (replay == NULL)
    return btrace_step_no_history ();

  /* Check if we're stepping a breakpoint.  */
  if (record_btrace_replay_at_breakpoint (tp))
    return btrace_step_stopped ();

  /* Skip gaps during replay.  If we end up at a gap (at the end of the trace),
     jump back to the instruction at which we started.  */
  start = *replay;

  do
    {
      unsigned int steps;

      /* We will bail out here if we continue stepping after reaching the end
	 of the execution history.  */
      steps = btrace_insn_next (replay, 1);
      if (steps == 0)
	{
	  *replay = start;
	  return btrace_step_no_history ();
	}
    }
  while (btrace_insn_get (replay) == NULL);

  /* Determine the end of the instruction trace.  */
  btrace_insn_end (&end, btinfo);

  /* The execution trace contains (and ends with) the current instruction.
     This instruction has not been executed, yet, so the trace really ends
     one instruction earlier.  */
  if (btrace_insn_cmp (replay, &end) == 0)
    return btrace_step_no_history ();

  return btrace_step_spurious ();
}
/* Step one instruction in backward direction.

   Starts replaying if necessary.  Returns NO_HISTORY when the beginning
   of the trace is reached, STOPPED when landing on a breakpoint,
   SPURIOUS on an ordinary successful step.  */

static struct target_waitstatus
record_btrace_single_step_backward (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay, start;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  /* Start replaying if we're not already doing so.  */
  if (replay == NULL)
    replay = record_btrace_start_replaying (tp);

  /* If we can't step any further, we reached the end of the history.
     Skip gaps during replay.  If we end up at a gap (at the beginning of
     the trace), jump back to the instruction at which we started.  */
  start = *replay;

  do
    {
      unsigned int steps;

      steps = btrace_insn_prev (replay, 1);
      if (steps == 0)
	{
	  *replay = start;
	  return btrace_step_no_history ();
	}
    }
  while (btrace_insn_get (replay) == NULL);

  /* Check if we're stepping a breakpoint.

     For reverse-stepping, this check is after the step.  There is logic in
     infrun.c that handles reverse-stepping separately.  See, for example,
     proceed and adjust_pc_after_break.

     This code assumes that for reverse-stepping, PC points to the last
     de-executed instruction, whereas for forward-stepping PC points to the
     next to-be-executed instruction.  */
  if (record_btrace_replay_at_breakpoint (tp))
    return btrace_step_stopped ();

  return btrace_step_spurious ();
}
/* Step a single thread.

   Consumes TP's pending move/stop request and performs one step in the
   requested direction.  Continue requests re-arm themselves until an
   event occurs; threads that ran out of history also stay armed so the
   wait method can report the event.  */

static struct target_waitstatus
record_btrace_step_thread (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct target_waitstatus status;
  btrace_thread_flags flags;

  btinfo = &tp->btrace;

  /* Consume the request; it may be re-armed below.  */
  flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
  btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);

  DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
	 tp->ptid.to_string ().c_str (), flags.raw (),
	 btrace_thread_flag_to_str (flags));

  /* We can't step without an execution history.  */
  if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
    return btrace_step_no_history ();

  switch (flags)
    {
    default:
      internal_error (_("invalid stepping type."));

    case BTHR_STOP:
      return btrace_step_stopped_on_request ();

    case BTHR_STEP:
      status = record_btrace_single_step_forward (tp);
      if (status.kind () != TARGET_WAITKIND_SPURIOUS)
	break;

      return btrace_step_stopped ();

    case BTHR_RSTEP:
      status = record_btrace_single_step_backward (tp);
      if (status.kind () != TARGET_WAITKIND_SPURIOUS)
	break;

      return btrace_step_stopped ();

    case BTHR_CONT:
      status = record_btrace_single_step_forward (tp);
      if (status.kind () != TARGET_WAITKIND_SPURIOUS)
	break;

      /* Keep continuing until an event is reported.  */
      btinfo->flags |= flags;
      return btrace_step_again ();

    case BTHR_RCONT:
      status = record_btrace_single_step_backward (tp);
      if (status.kind () != TARGET_WAITKIND_SPURIOUS)
	break;

      /* Keep continuing until an event is reported.  */
      btinfo->flags |= flags;
      return btrace_step_again ();
    }

  /* We keep threads moving at the end of their execution history.  The wait
     method will stop the thread for whom the event is reported.  */
  if (status.kind () == TARGET_WAITKIND_NO_HISTORY)
    btinfo->flags |= flags;

  return status;
}
2493 /* Announce further events if necessary. */
2495 static void
2496 record_btrace_maybe_mark_async_event
2497 (const std::vector<thread_info *> &moving,
2498 const std::vector<thread_info *> &no_history)
2500 bool more_moving = !moving.empty ();
2501 bool more_no_history = !no_history.empty ();;
2503 if (!more_moving && !more_no_history)
2504 return;
2506 if (more_moving)
2507 DEBUG ("movers pending");
2509 if (more_no_history)
2510 DEBUG ("no-history pending");
2512 mark_async_event_handler (record_btrace_async_inferior_event_handler);
/* The wait method of target record-btrace.

   While not replaying, forwards to the target beneath.  While replaying,
   steps all resumed threads round-robin until one of them reports an
   event, delaying "no history" reports until nothing else is pending.  */

ptid_t
record_btrace_target::wait (ptid_t ptid, struct target_waitstatus *status,
			    target_wait_flags options)
{
  std::vector<thread_info *> moving;
  std::vector<thread_info *> no_history;

  /* Clear this, if needed we'll re-mark it below.  */
  clear_async_event_handler (record_btrace_async_inferior_event_handler);

  DEBUG ("wait %s (0x%x)", ptid.to_string ().c_str (),
	 (unsigned) options);

  /* As long as we're not replaying, just forward the request.  */
  if ((::execution_direction != EXEC_REVERSE)
      && !record_is_replaying (minus_one_ptid))
    {
      return this->beneath ()->wait (ptid, status, options);
    }

  /* Keep a work list of moving threads.  */
  process_stratum_target *proc_target = current_inferior ()->process_target ();
  for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
    if ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0)
      moving.push_back (tp);

  if (moving.empty ())
    {
      /* Nothing was resumed; report TARGET_WAITKIND_NO_RESUMED.  */
      *status = btrace_step_no_resumed ();

      DEBUG ("wait ended by %s: %s", null_ptid.to_string ().c_str (),
	     status->to_string ().c_str ());

      return null_ptid;
    }

  /* Step moving threads one by one, one step each, until either one thread
     reports an event or we run out of threads to step.

     When stepping more than one thread, chances are that some threads reach
     the end of their execution history earlier than others.  If we reported
     this immediately, all-stop on top of non-stop would stop all threads and
     resume the same threads next time.  And we would report the same thread
     having reached the end of its execution history again.

     In the worst case, this would starve the other threads.  But even if other
     threads would be allowed to make progress, this would result in far too
     many intermediate stops.

     We therefore delay the reporting of "no execution history" until we have
     nothing else to report.  By this time, all threads should have moved to
     either the beginning or the end of their execution history.  There will
     be a single user-visible stop.  */
  struct thread_info *eventing = NULL;
  while ((eventing == NULL) && !moving.empty ())
    {
      for (unsigned int ix = 0; eventing == NULL && ix < moving.size ();)
	{
	  thread_info *tp = moving[ix];

	  *status = record_btrace_step_thread (tp);

	  switch (status->kind ())
	    {
	    case TARGET_WAITKIND_IGNORE:
	      /* Thread made progress without an event; step it again on
		 the next inner-loop pass.  */
	      ix++;
	      break;

	    case TARGET_WAITKIND_NO_HISTORY:
	      /* Park the thread; its report is delayed (see above).
		 ordered_remove keeps the stepping order stable.  */
	      no_history.push_back (ordered_remove (moving, ix));
	      break;

	    default:
	      /* A real event ends the stepping loop.  */
	      eventing = unordered_remove (moving, ix);
	      break;
	    }
	}
    }

  if (eventing == NULL)
    {
      /* We started with at least one moving thread.  This thread must have
	 either stopped or reached the end of its execution history.

	 In the former case, EVENTING must not be NULL.
	 In the latter case, NO_HISTORY must not be empty.  */
      gdb_assert (!no_history.empty ());

      /* We kept threads moving at the end of their execution history.  Stop
	 EVENTING now that we are going to report its stop.  */
      eventing = unordered_remove (no_history, 0);
      eventing->btrace.flags &= ~BTHR_MOVE;

      *status = btrace_step_no_history ();
    }

  gdb_assert (eventing != NULL);

  /* We kept threads replaying at the end of their execution history.  Stop
     replaying EVENTING now that we are going to report its stop.  */
  record_btrace_stop_replaying_at_end (eventing);

  /* Stop all other threads.  */
  if (!target_is_non_stop_p ())
    {
      for (thread_info *tp : current_inferior ()->non_exited_threads ())
	record_btrace_cancel_resume (tp);
    }

  /* In async mode, we need to announce further events.  */
  if (target_is_async_p ())
    record_btrace_maybe_mark_async_event (moving, no_history);

  /* Start record histories anew from the current position.  */
  record_btrace_clear_histories (&eventing->btrace);

  /* We moved the replay position but did not update registers.  */
  registers_changed_thread (eventing);

  DEBUG ("wait ended by thread %s (%s): %s",
	 print_thread_id (eventing),
	 eventing->ptid.to_string ().c_str (),
	 status->to_string ().c_str ());

  return eventing->ptid;
}
2644 /* The stop method of target record-btrace. */
2646 void
2647 record_btrace_target::stop (ptid_t ptid)
2649 DEBUG ("stop %s", ptid.to_string ().c_str ());
2651 /* As long as we're not replaying, just forward the request. */
2652 if ((::execution_direction != EXEC_REVERSE)
2653 && !record_is_replaying (minus_one_ptid))
2655 this->beneath ()->stop (ptid);
2657 else
2659 process_stratum_target *proc_target
2660 = current_inferior ()->process_target ();
2662 for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
2664 tp->btrace.flags &= ~BTHR_MOVE;
2665 tp->btrace.flags |= BTHR_STOP;
/* The can_execute_reverse method of target record-btrace.

   Branch tracing records the execution history, so reverse execution is
   always available on this target.  */

bool
record_btrace_target::can_execute_reverse ()
{
  return true;
}
2678 /* The stopped_by_sw_breakpoint method of target record-btrace. */
2680 bool
2681 record_btrace_target::stopped_by_sw_breakpoint ()
2683 if (record_is_replaying (minus_one_ptid))
2685 struct thread_info *tp = inferior_thread ();
2687 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2690 return this->beneath ()->stopped_by_sw_breakpoint ();
2693 /* The supports_stopped_by_sw_breakpoint method of target
2694 record-btrace. */
2696 bool
2697 record_btrace_target::supports_stopped_by_sw_breakpoint ()
2699 if (record_is_replaying (minus_one_ptid))
2700 return true;
2702 return this->beneath ()->supports_stopped_by_sw_breakpoint ();
/* The stopped_by_hw_breakpoint method of target record-btrace.
   (The original comment said "sw" by mistake.)

   While replaying, consult the recorded stop reason of the current
   thread; otherwise defer to the target beneath.  */

bool
record_btrace_target::stopped_by_hw_breakpoint ()
{
  if (record_is_replaying (minus_one_ptid))
    {
      struct thread_info *tp = inferior_thread ();

      return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
    }

  return this->beneath ()->stopped_by_hw_breakpoint ();
}
2720 /* The supports_stopped_by_hw_breakpoint method of target
2721 record-btrace. */
2723 bool
2724 record_btrace_target::supports_stopped_by_hw_breakpoint ()
2726 if (record_is_replaying (minus_one_ptid))
2727 return true;
2729 return this->beneath ()->supports_stopped_by_hw_breakpoint ();
2732 /* The update_thread_list method of target record-btrace. */
2734 void
2735 record_btrace_target::update_thread_list ()
2737 /* We don't add or remove threads during replay. */
2738 if (record_is_replaying (minus_one_ptid))
2739 return;
2741 /* Forward the request. */
2742 this->beneath ()->update_thread_list ();
2745 /* The thread_alive method of target record-btrace. */
2747 bool
2748 record_btrace_target::thread_alive (ptid_t ptid)
2750 /* We don't add or remove threads during replay. */
2751 if (record_is_replaying (minus_one_ptid))
2752 return true;
2754 /* Forward the request. */
2755 return this->beneath ()->thread_alive (ptid);
/* Set the replay branch trace instruction iterator.  If IT is NULL, replay
   is stopped.

   After moving the replay position, the record histories are reset, TP's
   stop PC is refreshed, and the new frame is printed.  Returns early (with
   no side effects) if IT already equals the current replay position.  */

static void
record_btrace_set_replay (struct thread_info *tp,
			  const struct btrace_insn_iterator *it)
{
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  if (it == NULL)
    record_btrace_stop_replaying (tp);
  else
    {
      /* Start replaying if we weren't; if we already are at IT, there is
	 nothing to do at all.  */
      if (btinfo->replay == NULL)
	record_btrace_start_replaying (tp);
      else if (btrace_insn_cmp (btinfo->replay, it) == 0)
	return;

      *btinfo->replay = *it;
      /* The replay position moved; cached register values are stale.  */
      registers_changed_thread (tp);
    }

  /* Start anew from the new replay position.  */
  record_btrace_clear_histories (btinfo);

  tp->set_stop_pc (regcache_read_pc (get_thread_regcache (tp)));
  print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
}
/* The goto_record_begin method of target record-btrace.

   Moves the replay position to the first real instruction in the trace,
   skipping any decode gaps at the very beginning.  Errors out if the
   trace consists only of gaps.  */

void
record_btrace_target::goto_record_begin ()
{
  struct thread_info *tp;
  struct btrace_insn_iterator begin;

  tp = require_btrace_thread ();

  btrace_insn_begin (&begin, &tp->btrace);

  /* Skip gaps at the beginning of the trace.  */
  while (btrace_insn_get (&begin) == NULL)
    {
      unsigned int steps;

      /* btrace_insn_next returns the number of instructions actually
	 stepped; zero means we ran off the end without finding one.  */
      steps = btrace_insn_next (&begin, 1);
      if (steps == 0)
	error (_("No trace."));
    }

  record_btrace_set_replay (tp, &begin);
}
2814 /* The goto_record_end method of target record-btrace. */
2816 void
2817 record_btrace_target::goto_record_end ()
2819 struct thread_info *tp;
2821 tp = require_btrace_thread ();
2823 record_btrace_set_replay (tp, NULL);
/* The goto_record method of target record-btrace.

   Moves the replay position to the recorded instruction numbered INSN.
   Errors out if INSN does not fit in an unsigned int, does not exist in
   the trace, or refers to a gap.  */

void
record_btrace_target::goto_record (ULONGEST insn)
{
  struct thread_info *tp;
  struct btrace_insn_iterator it;
  unsigned int number;
  int found;

  number = insn;

  /* Check for wrap-arounds: if the narrowing to unsigned int lost bits,
     converting back will not reproduce INSN.  */
  if (number != insn)
    error (_("Instruction number out of range."));

  tp = require_btrace_thread ();

  found = btrace_find_insn_by_number (&it, &tp->btrace, number);

  /* Check if the instruction could not be found or is a gap.  */
  if (found == 0 || btrace_insn_get (&it) == NULL)
    error (_("No such instruction."));

  record_btrace_set_replay (tp, &it);
}
2853 /* The record_stop_replaying method of target record-btrace. */
2855 void
2856 record_btrace_target::record_stop_replaying ()
2858 for (thread_info *tp : current_inferior ()->non_exited_threads ())
2859 record_btrace_stop_replaying (tp);
/* The execution_direction target method.

   Reports the direction the last resume request was issued in.  */

enum exec_direction_kind
record_btrace_target::execution_direction ()
{
  return record_btrace_resume_exec_dir;
}
/* The prepare_to_generate_core target method.

   While this flag is set, memory accesses bypass the replay restrictions
   so the core file reflects the live target.  */

void
record_btrace_target::prepare_to_generate_core ()
{
  record_btrace_generating_corefile = 1;
}
/* The done_generating_core target method.

   Clears the flag set by prepare_to_generate_core, restoring normal
   replay memory-access behavior.  */

void
record_btrace_target::done_generating_core ()
{
  record_btrace_generating_corefile = 0;
}
/* Start recording in BTS format.

   Implements "record btrace bts".  Rejects any argument.  On failure to
   push the target, the requested format is rolled back to NONE before
   re-throwing.  */

static void
cmd_record_btrace_bts_start (const char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_BTS;

  try
    {
      execute_command ("target record-btrace", from_tty);
    }
  catch (const gdb_exception &exception)
    {
      /* Undo the format selection so a failed start leaves no trace.  */
      record_btrace_conf.format = BTRACE_FORMAT_NONE;
      throw;
    }
}
/* Start recording in Intel Processor Trace format.

   Implements "record btrace pt".  Rejects any argument.  On failure to
   push the target, the requested format is rolled back to NONE before
   re-throwing.  */

static void
cmd_record_btrace_pt_start (const char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_PT;

  try
    {
      execute_command ("target record-btrace", from_tty);
    }
  catch (const gdb_exception &exception)
    {
      /* Undo the format selection so a failed start leaves no trace.  */
      record_btrace_conf.format = BTRACE_FORMAT_NONE;
      throw;
    }
}
/* Alias for "target record".

   Implements "record btrace" with no explicit format: try Intel PT
   first; if that fails, fall back to BTS; if that also fails, reset the
   format to NONE and re-throw the BTS failure.  */

static void
cmd_record_btrace_start (const char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_PT;

  try
    {
      execute_command ("target record-btrace", from_tty);
    }
  catch (const gdb_exception_error &exception)
    {
      /* PT is not available; fall back to BTS.  */
      record_btrace_conf.format = BTRACE_FORMAT_BTS;

      try
	{
	  execute_command ("target record-btrace", from_tty);
	}
      catch (const gdb_exception &ex)
	{
	  /* Neither format worked; leave no format selected.  */
	  record_btrace_conf.format = BTRACE_FORMAT_NONE;
	  throw;
	}
    }
}
/* The "show record btrace replay-memory-access" command.

   Prints the current replay-memory-access setting to FILE.  */

static void
cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
			       struct cmd_list_element *c, const char *value)
{
  gdb_printf (file, _("Replay memory access is %s.\n"),
	      replay_memory_access);
}
/* The "set record btrace cpu none" command.

   Disables cpu-specific errata workarounds for trace decode.  Rejects
   trailing arguments.  */

static void
cmd_set_record_btrace_cpu_none (const char *args, int from_tty)
{
  if (args != nullptr && *args != 0)
    error (_("Trailing junk: '%s'."), args);

  record_btrace_cpu_state = CS_NONE;
}
/* The "set record btrace cpu auto" command.

   Selects automatic cpu detection for trace decode.  Rejects trailing
   arguments.  */

static void
cmd_set_record_btrace_cpu_auto (const char *args, int from_tty)
{
  if (args != nullptr && *args != 0)
    error (_("Trailing junk: '%s'."), args);

  record_btrace_cpu_state = CS_AUTO;
}
/* The "set record btrace cpu" command.

   Parses ARGS of the form "intel: FAMILY/MODEL[/STEPPING]" and stores
   the result in RECORD_BTRACE_CPU, switching the cpu state to CS_CPU.
   Errors out on bad syntax, trailing junk, or out-of-range values.  */

static void
cmd_set_record_btrace_cpu (const char *args, int from_tty)
{
  if (args == nullptr)
    args = "";

  /* We use a hard-coded vendor string for now.  */
  unsigned int family, model, stepping;
  /* %n stores the number of characters consumed so far: L1 after
     FAMILY/MODEL, L2 after the optional /STEPPING.  MATCHES counts only
     the converted values (%n does not count), so 2 means no stepping.  */
  int l1, l2, matches = sscanf (args, "intel: %u/%u%n/%u%n", &family,
				&model, &l1, &stepping, &l2);
  if (matches == 3)
    {
      if (strlen (args) != l2)
	error (_("Trailing junk: '%s'."), args + l2);
    }
  else if (matches == 2)
    {
      if (strlen (args) != l1)
	error (_("Trailing junk: '%s'."), args + l1);

      /* Stepping was omitted; it defaults to zero.  */
      stepping = 0;
    }
  else
    error (_("Bad format. See \"help set record btrace cpu\"."));

  /* Range-check against the widths of the btrace_cpu fields.  */
  if (USHRT_MAX < family)
    error (_("Cpu family too big."));

  if (UCHAR_MAX < model)
    error (_("Cpu model too big."));

  if (UCHAR_MAX < stepping)
    error (_("Cpu stepping too big."));

  record_btrace_cpu.vendor = CV_INTEL;
  record_btrace_cpu.family = family;
  record_btrace_cpu.model = model;
  record_btrace_cpu.stepping = stepping;

  record_btrace_cpu_state = CS_CPU;
}
/* The "show record btrace cpu" command.

   Prints the current cpu selection.  Any unhandled state or vendor
   falls through to an internal error.  */

static void
cmd_show_record_btrace_cpu (const char *args, int from_tty)
{
  if (args != nullptr && *args != 0)
    error (_("Trailing junk: '%s'."), args);

  switch (record_btrace_cpu_state)
    {
    case CS_AUTO:
      gdb_printf (_("btrace cpu is 'auto'.\n"));
      return;

    case CS_NONE:
      gdb_printf (_("btrace cpu is 'none'.\n"));
      return;

    case CS_CPU:
      switch (record_btrace_cpu.vendor)
	{
	case CV_INTEL:
	  /* Omit the stepping when it is zero, matching the set-command
	     syntax where stepping is optional.  */
	  if (record_btrace_cpu.stepping == 0)
	    gdb_printf (_("btrace cpu is 'intel: %u/%u'.\n"),
			record_btrace_cpu.family,
			record_btrace_cpu.model);
	  else
	    gdb_printf (_("btrace cpu is 'intel: %u/%u/%u'.\n"),
			record_btrace_cpu.family,
			record_btrace_cpu.model,
			record_btrace_cpu.stepping);
	  return;
	}
    }

  /* Reached only for an unknown state or vendor.  */
  error (_("Internal error: bad cpu state."));
}
/* The "record bts buffer-size" show value function.

   Prints the configured BTS trace buffer size to FILE.  */

static void
show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
				   struct cmd_list_element *c,
				   const char *value)
{
  gdb_printf (file, _("The record/replay bts buffer size is %s.\n"),
	      value);
}
/* The "record pt buffer-size" show value function.

   Prints the configured Intel PT trace buffer size to FILE.  */

static void
show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
				  struct cmd_list_element *c,
				  const char *value)
{
  gdb_printf (file, _("The record/replay pt buffer size is %s.\n"),
	      value);
}
3094 /* Initialize btrace commands. */
3096 void _initialize_record_btrace ();
3097 void
3098 _initialize_record_btrace ()
3100 cmd_list_element *record_btrace_cmd
3101 = add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3102 _("Start branch trace recording."),
3103 &record_btrace_cmdlist, 0, &record_cmdlist);
3104 add_alias_cmd ("b", record_btrace_cmd, class_obscure, 1, &record_cmdlist);
3106 cmd_list_element *record_btrace_bts_cmd
3107 = add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3108 _("\
3109 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3110 The processor stores a from/to record for each branch into a cyclic buffer.\n\
3111 This format may not be available on all processors."),
3112 &record_btrace_cmdlist);
3113 add_alias_cmd ("bts", record_btrace_bts_cmd, class_obscure, 1,
3114 &record_cmdlist);
3116 cmd_list_element *record_btrace_pt_cmd
3117 = add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3118 _("\
3119 Start branch trace recording in Intel Processor Trace format.\n\n\
3120 This format may not be available on all processors."),
3121 &record_btrace_cmdlist);
3122 add_alias_cmd ("pt", record_btrace_pt_cmd, class_obscure, 1, &record_cmdlist);
3124 add_setshow_prefix_cmd ("btrace", class_support,
3125 _("Set record options."),
3126 _("Show record options."),
3127 &set_record_btrace_cmdlist,
3128 &show_record_btrace_cmdlist,
3129 &set_record_cmdlist, &show_record_cmdlist);
3131 add_setshow_enum_cmd ("replay-memory-access", no_class,
3132 replay_memory_access_types, &replay_memory_access, _("\
3133 Set what memory accesses are allowed during replay."), _("\
3134 Show what memory accesses are allowed during replay."),
3135 _("Default is READ-ONLY.\n\n\
3136 The btrace record target does not trace data.\n\
3137 The memory therefore corresponds to the live target and not \
3138 to the current replay position.\n\n\
3139 When READ-ONLY, allow accesses to read-only memory during replay.\n\
3140 When READ-WRITE, allow accesses to read-only and read-write memory during \
3141 replay."),
3142 NULL, cmd_show_replay_memory_access,
3143 &set_record_btrace_cmdlist,
3144 &show_record_btrace_cmdlist);
3146 add_prefix_cmd ("cpu", class_support, cmd_set_record_btrace_cpu,
3147 _("\
3148 Set the cpu to be used for trace decode.\n\n\
3149 The format is \"VENDOR:IDENTIFIER\" or \"none\" or \"auto\" (default).\n\
3150 For vendor \"intel\" the format is \"FAMILY/MODEL[/STEPPING]\".\n\n\
3151 When decoding branch trace, enable errata workarounds for the specified cpu.\n\
3152 The default is \"auto\", which uses the cpu on which the trace was recorded.\n\
3153 When GDB does not support that cpu, this option can be used to enable\n\
3154 workarounds for a similar cpu that GDB supports.\n\n\
3155 When set to \"none\", errata workarounds are disabled."),
3156 &set_record_btrace_cpu_cmdlist,
3158 &set_record_btrace_cmdlist);
3160 add_cmd ("auto", class_support, cmd_set_record_btrace_cpu_auto, _("\
3161 Automatically determine the cpu to be used for trace decode."),
3162 &set_record_btrace_cpu_cmdlist);
3164 add_cmd ("none", class_support, cmd_set_record_btrace_cpu_none, _("\
3165 Do not enable errata workarounds for trace decode."),
3166 &set_record_btrace_cpu_cmdlist);
3168 add_cmd ("cpu", class_support, cmd_show_record_btrace_cpu, _("\
3169 Show the cpu to be used for trace decode."),
3170 &show_record_btrace_cmdlist);
3172 add_setshow_prefix_cmd ("bts", class_support,
3173 _("Set record btrace bts options."),
3174 _("Show record btrace bts options."),
3175 &set_record_btrace_bts_cmdlist,
3176 &show_record_btrace_bts_cmdlist,
3177 &set_record_btrace_cmdlist,
3178 &show_record_btrace_cmdlist);
3180 add_setshow_uinteger_cmd ("buffer-size", no_class,
3181 &record_btrace_conf.bts.size,
3182 _("Set the record/replay bts buffer size."),
3183 _("Show the record/replay bts buffer size."), _("\
3184 When starting recording request a trace buffer of this size. \
3185 The actual buffer size may differ from the requested size. \
3186 Use \"info record\" to see the actual buffer size.\n\n\
3187 Bigger buffers allow longer recording but also take more time to process \
3188 the recorded execution trace.\n\n\
3189 The trace buffer size may not be changed while recording."), NULL,
3190 show_record_bts_buffer_size_value,
3191 &set_record_btrace_bts_cmdlist,
3192 &show_record_btrace_bts_cmdlist);
3194 add_setshow_prefix_cmd ("pt", class_support,
3195 _("Set record btrace pt options."),
3196 _("Show record btrace pt options."),
3197 &set_record_btrace_pt_cmdlist,
3198 &show_record_btrace_pt_cmdlist,
3199 &set_record_btrace_cmdlist,
3200 &show_record_btrace_cmdlist);
3202 add_setshow_uinteger_cmd ("buffer-size", no_class,
3203 &record_btrace_conf.pt.size,
3204 _("Set the record/replay pt buffer size."),
3205 _("Show the record/replay pt buffer size."), _("\
3206 Bigger buffers allow longer recording but also take more time to process \
3207 the recorded execution.\n\
3208 The actual buffer size may differ from the requested size. Use \"info record\" \
3209 to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3210 &set_record_btrace_pt_cmdlist,
3211 &show_record_btrace_pt_cmdlist);
3213 add_target (record_btrace_target_info, record_btrace_target_open);
3215 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3216 xcalloc, xfree);
3218 record_btrace_conf.bts.size = 64 * 1024;
3219 record_btrace_conf.pt.size = 16 * 1024;