/* Branch trace support for GDB, the GNU debugger.

   Copyright (C) 2013-2014 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "record.h"
#include "gdbthread.h"
#include "target.h"
#include "gdbcmd.h"
#include "disasm.h"
#include "observer.h"
#include "exceptions.h"
#include "cli/cli-utils.h"
#include "source.h"
#include "ui-out.h"
#include "symtab.h"
#include "filenames.h"
#include "regcache.h"
#include "frame-unwind.h"
#include "hashtab.h"

/* The target_ops of record-btrace.  */
static struct target_ops record_btrace_ops;

/* A new thread observer enabling branch tracing for the new thread.  */
static struct observer *record_btrace_thread_observer;

/* Temporarily allow memory accesses.  */
static int record_btrace_allow_memory_access;

/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.  */

#define DEBUG(msg, args...)						\
  do									\
    {									\
      if (record_debug != 0)						\
	fprintf_unfiltered (gdb_stdlog,					\
			    "[record-btrace] " msg "\n", ##args);	\
    }									\
  while (0)

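/* For example, DEBUG ("enable thread %d", tp->num) prints
   "[record-btrace] enable thread 1" to gdb_stdlog, but only while record
   debugging has been turned on with "set debug record 1".  */
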
/* Update the branch trace for the current thread and return a pointer to its
   thread_info.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct thread_info *
require_btrace_thread (void)
{
  struct thread_info *tp;

  DEBUG ("require");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btrace_fetch (tp);

  if (btrace_is_empty (tp))
    error (_("No trace."));

  return tp;
}

/* Update the branch trace for the current thread and return a pointer to its
   branch trace information struct.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct btrace_thread_info *
require_btrace (void)
{
  struct thread_info *tp;

  tp = require_btrace_thread ();

  return &tp->btrace;
}

/* Enable branch tracing for one thread.  Warn on errors.  */

static void
record_btrace_enable_warn (struct thread_info *tp)
{
  volatile struct gdb_exception error;

  TRY_CATCH (error, RETURN_MASK_ERROR)
    btrace_enable (tp);

  if (error.message != NULL)
    warning ("%s", error.message);
}

/* Callback function to disable branch tracing for one thread.  */

static void
record_btrace_disable_callback (void *arg)
{
  struct thread_info *tp;

  tp = arg;

  btrace_disable (tp);
}

/* Enable automatic tracing of new threads.  */

static void
record_btrace_auto_enable (void)
{
  DEBUG ("attach thread observer");

  record_btrace_thread_observer
    = observer_attach_new_thread (record_btrace_enable_warn);
}

/* Disable automatic tracing of new threads.  */

static void
record_btrace_auto_disable (void)
{
  /* The observer may have been detached already.  */
  if (record_btrace_thread_observer == NULL)
    return;

  DEBUG ("detach thread observer");

  observer_detach_new_thread (record_btrace_thread_observer);
  record_btrace_thread_observer = NULL;
}

/* The to_open method of target record-btrace.  */

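/* Recording is enabled either for all threads or, if ARGS holds a thread
   number list such as "1 3-5", only for the listed threads (see
   number_is_in_list in cli/cli-utils.c).  */
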
static void
record_btrace_open (char *args, int from_tty)
{
  struct cleanup *disable_chain;
  struct thread_info *tp;

  DEBUG ("open");

  record_preopen ();

  if (!target_has_execution)
    error (_("The program is not being run."));

  if (!target_supports_btrace ())
    error (_("Target does not support branch tracing."));

  if (non_stop)
    error (_("Record btrace can't debug inferior in non-stop mode."));

  gdb_assert (record_btrace_thread_observer == NULL);

  disable_chain = make_cleanup (null_cleanup, NULL);
  ALL_THREADS (tp)
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->num))
      {
	btrace_enable (tp);

	make_cleanup (record_btrace_disable_callback, tp);
      }

  record_btrace_auto_enable ();

  push_target (&record_btrace_ops);

  observer_notify_record_changed (current_inferior (), 1);

  discard_cleanups (disable_chain);
}

/* The to_stop_recording method of target record-btrace.  */

static void
record_btrace_stop_recording (struct target_ops *self)
{
  struct thread_info *tp;

  DEBUG ("stop recording");

  record_btrace_auto_disable ();

  ALL_THREADS (tp)
    if (tp->btrace.target != NULL)
      btrace_disable (tp);
}

/* The to_close method of target record-btrace.  */

static void
record_btrace_close (struct target_ops *self)
{
  struct thread_info *tp;

  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target.  */
  record_btrace_auto_disable ();

  /* We should have already stopped recording.
     Tear down btrace in case we have not.  */
  ALL_THREADS (tp)
    btrace_teardown (tp);
}

/* The to_info_record method of target record-btrace.  */

static void
record_btrace_info (struct target_ops *self)
{
  struct btrace_thread_info *btinfo;
  struct thread_info *tp;
  unsigned int insns, calls;

  DEBUG ("info");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btrace_fetch (tp);

  insns = 0;
  calls = 0;

  btinfo = &tp->btrace;

  if (!btrace_is_empty (tp))
    {
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);
      btrace_insn_prev (&insn, 1);
      insns = btrace_insn_number (&insn);
    }

  printf_unfiltered (_("Recorded %u instructions in %u functions for thread "
		       "%d (%s).\n"), insns, calls, tp->num,
		     target_pid_to_str (tp->ptid));

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress.  At instruction %u.\n"),
		       btrace_insn_number (btinfo->replay));
}

/* Print an unsigned int.  */

static void
ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
{
  ui_out_field_fmt (uiout, fld, "%u", val);
}

/* Disassemble a section of the recorded instruction trace.  */

static void
btrace_insn_history (struct ui_out *uiout,
		     const struct btrace_insn_iterator *begin,
		     const struct btrace_insn_iterator *end, int flags)
{
  struct gdbarch *gdbarch;
  struct btrace_insn_iterator it;

  DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
	 btrace_insn_number (end));

  gdbarch = target_gdbarch ();

  for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
    {
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* Print the instruction index.  */
      ui_out_field_uint (uiout, "index", btrace_insn_number (&it));
      ui_out_text (uiout, "\t");

      /* Disassembly with '/m' flag may not produce the expected result.
	 See PR gdb/11833.  */
      gdb_disassembly (gdbarch, uiout, NULL, flags, 1, insn->pc, insn->pc + 1);
    }
}

/* The to_insn_history method of target record-btrace.  */

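/* A positive SIZE prints the next SIZE instructions after the previously
   printed range; a negative SIZE prints the SIZE instructions before it.
   The printed range is remembered in BTINFO->insn_history so that repeated
   commands continue where the last one stopped.  */
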
static void
record_btrace_insn_history (struct target_ops *self, int size, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	begin = *replay;
      else
	btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_insn_next (&end, 1);
	  covered += btrace_insn_prev (&begin, context - covered);
	  covered += btrace_insn_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_insn_next (&end, context);
	  covered += btrace_insn_prev (&begin, context - covered);
	}
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
	     btrace_insn_number (&begin), btrace_insn_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_insn_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_insn_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_insn_history (uiout, &begin, &end, flags);
  else
    {
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_insn_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}

/* The to_insn_history_range method of target record-btrace.  */

static void
record_btrace_insn_history_range (struct target_ops *self,
				  ULONGEST from, ULONGEST to, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "insn history");
  low = from;
  high = to;

  DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_insn_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_insn_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_insn_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_insn_next (&end, 1);
    }

  btrace_insn_history (uiout, &begin, &end, flags);
  btrace_set_insn_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
}

/* The to_insn_history_from method of target record-btrace.  */

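/* For example, FROM == 10 with SIZE == -5 requests the inclusive range
   [6; 10], i.e. the five instructions up to and including instruction 10.  */
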
static void
record_btrace_insn_history_from (struct target_ops *self,
				 ULONGEST from, int size, int flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
	begin = 0;
      else
	begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
	end = ULONGEST_MAX;
    }

  record_btrace_insn_history_range (self, begin, end, flags);
}

/* Print the instruction number range for a function call history line.  */

static void
btrace_call_history_insn_range (struct ui_out *uiout,
				const struct btrace_function *bfun)
{
  unsigned int begin, end, size;

  size = VEC_length (btrace_insn_s, bfun->insn);
  gdb_assert (size > 0);

  begin = bfun->insn_offset;
  end = begin + size - 1;

  ui_out_field_uint (uiout, "insn begin", begin);
  ui_out_text (uiout, ",");
  ui_out_field_uint (uiout, "insn end", end);
}

/* Print the source line information for a function call history line.  */

static void
btrace_call_history_src_line (struct ui_out *uiout,
			      const struct btrace_function *bfun)
{
  struct symbol *sym;
  int begin, end;

  sym = bfun->sym;
  if (sym == NULL)
    return;

  ui_out_field_string (uiout, "file",
		       symtab_to_filename_for_display (sym->symtab));

  begin = bfun->lbegin;
  end = bfun->lend;

  if (end < begin)
    return;

  ui_out_text (uiout, ":");
  ui_out_field_int (uiout, "min line", begin);

  if (end == begin)
    return;

  ui_out_text (uiout, ",");
  ui_out_field_int (uiout, "max line", end);
}

/* Get the name of a branch trace function.  */

static const char *
btrace_get_bfun_name (const struct btrace_function *bfun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  if (bfun == NULL)
    return "??";

  msym = bfun->msym;
  sym = bfun->sym;

  if (sym != NULL)
    return SYMBOL_PRINT_NAME (sym);
  else if (msym != NULL)
    return MSYMBOL_PRINT_NAME (msym);
  else
    return "??";
}

/* Print a section of the recorded function trace.  */

static void
btrace_call_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_call_iterator *begin,
		     const struct btrace_call_iterator *end,
		     enum record_print_flag flags)
{
  struct btrace_call_iterator it;

  DEBUG ("ftrace (0x%x): [%u; %u)", flags, btrace_call_number (begin),
	 btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
    {
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;
      struct symbol *sym;

      bfun = btrace_call_get (&it);
      sym = bfun->sym;
      msym = bfun->msym;

      /* Print the function index.  */
      ui_out_field_uint (uiout, "index", bfun->number);
      ui_out_text (uiout, "\t");

      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
	{
	  int level = bfun->level + btinfo->level, i;

	  for (i = 0; i < level; ++i)
	    ui_out_text (uiout, "  ");
	}

      if (sym != NULL)
	ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
      else if (msym != NULL)
	ui_out_field_string (uiout, "function", MSYMBOL_PRINT_NAME (msym));
      else if (!ui_out_is_mi_like_p (uiout))
	ui_out_field_string (uiout, "function", "??");

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
	{
	  ui_out_text (uiout, _("\tinst "));
	  btrace_call_history_insn_range (uiout, bfun);
	}

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
	{
	  ui_out_text (uiout, _("\tat "));
	  btrace_call_history_src_line (uiout, bfun);
	}

      ui_out_text (uiout, "\n");
    }
}

/* The to_call_history method of target record-btrace.  */

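/* This works like record_btrace_insn_history above, except that SIZE counts
   function segments and the printed range is remembered in
   BTINFO->call_history.  */
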
static void
record_btrace_call_history (struct target_ops *self, int size, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	{
	  begin.function = replay->function;
	  begin.btinfo = btinfo;
	}
      else
	btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_call_next (&end, 1);
	  covered += btrace_call_prev (&begin, context - covered);
	  covered += btrace_call_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_call_next (&end, context);
	  covered += btrace_call_prev (&begin, context - covered);
	}
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags, size,
	     btrace_call_number (&begin), btrace_call_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_call_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_call_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_call_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_call_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}

/* The to_call_history_range method of target record-btrace.  */

static void
record_btrace_call_history_range (struct target_ops *self,
				  ULONGEST from, ULONGEST to, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "func history");
  low = from;
  high = to;

  DEBUG ("call-history (0x%x): [%u; %u)", flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_call_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_call_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_call_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_call_next (&end, 1);
    }

  btrace_call_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_call_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
}

/* The to_call_history_from method of target record-btrace.  */

static void
record_btrace_call_history_from (struct target_ops *self,
				 ULONGEST from, int size, int flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
	begin = 0;
      else
	begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
	end = ULONGEST_MAX;
    }

  record_btrace_call_history_range (self, begin, end, flags);
}

/* The to_record_is_replaying method of target record-btrace.  */

static int
record_btrace_is_replaying (struct target_ops *self)
{
  struct thread_info *tp;

  ALL_THREADS (tp)
    if (btrace_is_replaying (tp))
      return 1;

  return 0;
}

/* The to_xfer_partial method of target record-btrace.  */

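/* While replaying, memory requests are answered conservatively: writes are
   rejected and reads are limited to read-only sections, since btrace does
   not record memory contents.  The record_btrace_allow_memory_access flag
   temporarily lifts this restriction, e.g. for breakpoint insertion.  */
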
static enum target_xfer_status
record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
			    const char *annex, gdb_byte *readbuf,
			    const gdb_byte *writebuf, ULONGEST offset,
			    ULONGEST len, ULONGEST *xfered_len)
{
  struct target_ops *t;

  /* Filter out requests that don't make sense during replay.  */
  if (!record_btrace_allow_memory_access && record_btrace_is_replaying (ops))
    {
      switch (object)
	{
	case TARGET_OBJECT_MEMORY:
	  {
	    struct target_section *section;

	    /* We do not allow writing memory in general.  */
	    if (writebuf != NULL)
	      {
		*xfered_len = len;
		return TARGET_XFER_UNAVAILABLE;
	      }

	    /* We allow reading readonly memory.  */
	    section = target_section_by_addr (ops, offset);
	    if (section != NULL)
	      {
		/* Check if the section we found is readonly.  */
		if ((bfd_get_section_flags (section->the_bfd_section->owner,
					    section->the_bfd_section)
		     & SEC_READONLY) != 0)
		  {
		    /* Truncate the request to fit into this section.  */
		    len = min (len, section->endaddr - offset);
		    break;
		  }
	      }

	    *xfered_len = len;
	    return TARGET_XFER_UNAVAILABLE;
	  }
	}
    }

  /* Forward the request.  */
  for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
    if (ops->to_xfer_partial != NULL)
      return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
				   offset, len, xfered_len);

  *xfered_len = len;
  return TARGET_XFER_UNAVAILABLE;
}

/* The to_insert_breakpoint method of target record-btrace.  */

static int
record_btrace_insert_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt)
{
  volatile struct gdb_exception except;
  int old, ret;

  /* Inserting breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = record_btrace_allow_memory_access;
  record_btrace_allow_memory_access = 1;

  ret = 0;
  TRY_CATCH (except, RETURN_MASK_ALL)
    ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);

  record_btrace_allow_memory_access = old;

  if (except.reason < 0)
    throw_exception (except);

  return ret;
}

/* The to_remove_breakpoint method of target record-btrace.  */

static int
record_btrace_remove_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt)
{
  volatile struct gdb_exception except;
  int old, ret;

  /* Removing breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = record_btrace_allow_memory_access;
  record_btrace_allow_memory_access = 1;

  ret = 0;
  TRY_CATCH (except, RETURN_MASK_ALL)
    ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);

  record_btrace_allow_memory_access = old;

  if (except.reason < 0)
    throw_exception (except);

  return ret;
}

/* The to_fetch_registers method of target record-btrace.  */

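/* While replaying, only the PC is known; it is supplied from the instruction
   at the current replay position.  Requests for any other register return
   without supplying a value.  */
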
static void
record_btrace_fetch_registers (struct target_ops *ops,
			       struct regcache *regcache, int regno)
{
  struct btrace_insn_iterator *replay;
  struct thread_info *tp;

  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  replay = tp->btrace.replay;
  if (replay != NULL)
    {
      const struct btrace_insn *insn;
      struct gdbarch *gdbarch;
      int pcreg;

      gdbarch = get_regcache_arch (regcache);
      pcreg = gdbarch_pc_regnum (gdbarch);
      if (pcreg < 0)
	return;

      /* We can only provide the PC register.  */
      if (regno >= 0 && regno != pcreg)
	return;

      insn = btrace_insn_get (replay);
      gdb_assert (insn != NULL);

      regcache_raw_supply (regcache, regno, &insn->pc);
    }
  else
    {
      struct target_ops *t;

      for (t = ops->beneath; t != NULL; t = t->beneath)
	if (t->to_fetch_registers != NULL)
	  {
	    t->to_fetch_registers (t, regcache, regno);
	    break;
	  }
    }
}

/* The to_store_registers method of target record-btrace.  */

static void
record_btrace_store_registers (struct target_ops *ops,
			       struct regcache *regcache, int regno)
{
  struct target_ops *t;

  if (record_btrace_is_replaying (ops))
    error (_("This record target does not allow writing registers."));

  gdb_assert (may_write_registers != 0);

  for (t = ops->beneath; t != NULL; t = t->beneath)
    if (t->to_store_registers != NULL)
      {
	t->to_store_registers (t, regcache, regno);
	return;
      }

  noprocess ();
}

/* The to_prepare_to_store method of target record-btrace.  */

static void
record_btrace_prepare_to_store (struct target_ops *ops,
				struct regcache *regcache)
{
  struct target_ops *t;

  if (record_btrace_is_replaying (ops))
    return;

  for (t = ops->beneath; t != NULL; t = t->beneath)
    if (t->to_prepare_to_store != NULL)
      {
	t->to_prepare_to_store (t, regcache);
	return;
      }
}

/* The branch trace frame cache.  */

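/* One cache entry is created per frame sniffed by the btrace unwinders
   below.  Entries are allocated on the frame obstack and removed again in
   record_btrace_frame_dealloc_cache.  */
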
struct btrace_frame_cache
{
  /* The thread.  */
  struct thread_info *tp;

  /* The frame info.  */
  struct frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;
};

/* A struct btrace_frame_cache hash table indexed by FRAME.  */
static htab_t bfcache;

/* hash_f for htab_create_alloc of bfcache.  */

static hashval_t
bfcache_hash (const void *arg)
{
  const struct btrace_frame_cache *cache = arg;

  return htab_hash_pointer (cache->frame);
}

/* eq_f for htab_create_alloc of bfcache.  */

static int
bfcache_eq (const void *arg1, const void *arg2)
{
  const struct btrace_frame_cache *cache1 = arg1;
  const struct btrace_frame_cache *cache2 = arg2;

  return cache1->frame == cache2->frame;
}

/* Create a new btrace frame cache.  */

static struct btrace_frame_cache *
bfcache_new (struct frame_info *frame)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
  cache->frame = frame;

  slot = htab_find_slot (bfcache, cache, INSERT);
  gdb_assert (*slot == NULL);
  *slot = cache;

  return cache;
}

/* Extract the branch trace function from a branch trace frame.  */

static const struct btrace_function *
btrace_get_frame_function (struct frame_info *frame)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  struct btrace_frame_cache pattern;
  void **slot;

  pattern.frame = frame;

  slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
  if (slot == NULL)
    return NULL;

  cache = *slot;
  return cache->bfun;
}

/* Implement stop_reason method for record_btrace_frame_unwind.  */

static enum unwind_stop_reason
record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
					void **this_cache)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;

  cache = *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  if (bfun->up == NULL)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Implement this_id method for record_btrace_frame_unwind.  */

static void
record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
			     struct frame_id *this_id)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  CORE_ADDR code, special;

  cache = *this_cache;

  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  while (bfun->segment.prev != NULL)
    bfun = bfun->segment.prev;

  code = get_frame_func (this_frame);
  special = bfun->number;

  *this_id = frame_id_build_unavailable_stack_special (code, special);

  DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
	 btrace_get_bfun_name (cache->bfun),
	 core_addr_to_string_nz (this_id->code_addr),
	 core_addr_to_string_nz (this_id->special_addr));
}

/* Implement prev_register method for record_btrace_frame_unwind.  */

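/* We can only unwind the PC of the caller.  If the caller segment starts
   with the instruction we returned to (BFUN_UP_LINKS_TO_RET), the unwound PC
   is that segment's first instruction.  Otherwise, the caller's last
   instruction is the call itself and the unwound PC is the instruction
   following it.  */
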
static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
				   void **this_cache,
				   int regnum)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  const struct btrace_insn *insn;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  int pcreg;

  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("Registers are not available in btrace record history"));

  cache = *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  caller = bfun->up;
  if (caller == NULL)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("No caller in btrace record history"));

  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    {
      insn = VEC_index (btrace_insn_s, caller->insn, 0);
      pc = insn->pc;
    }
  else
    {
      insn = VEC_last (btrace_insn_s, caller->insn);
      pc = insn->pc;

      pc += gdb_insn_length (gdbarch, pc);
    }

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
	 btrace_get_bfun_name (bfun), bfun->level,
	 core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
}

/* Implement sniffer method for record_btrace_frame_unwind.  */

static int
record_btrace_frame_sniffer (const struct frame_unwind *self,
			     struct frame_info *this_frame,
			     void **this_cache)
{
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  struct frame_info *next;

  /* THIS_FRAME does not contain a reference to its thread.  */
  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  bfun = NULL;
  next = get_next_frame (this_frame);
  if (next == NULL)
    {
      const struct btrace_insn_iterator *replay;

      replay = tp->btrace.replay;
      if (replay != NULL)
	bfun = replay->function;
    }
  else
    {
      const struct btrace_function *callee;

      callee = btrace_get_frame_function (next);
      if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
	bfun = callee->up;
    }

  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tp;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}

/* Implement sniffer method for record_btrace_tailcall_frame_unwind.  */

static int
record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
				      struct frame_info *this_frame,
				      void **this_cache)
{
  const struct btrace_function *bfun, *callee;
  struct btrace_frame_cache *cache;
  struct frame_info *next;

  next = get_next_frame (this_frame);
  if (next == NULL)
    return 0;

  callee = btrace_get_frame_function (next);
  if (callee == NULL)
    return 0;

  if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
    return 0;

  bfun = callee->up;
  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = find_thread_ptid (inferior_ptid);
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}

static void
record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = this_cache;

  slot = htab_find_slot (bfcache, cache, NO_INSERT);
  gdb_assert (slot != NULL);

  htab_remove_elt (bfcache, cache);
}

/* btrace recording does not store previous memory content, nor the content
   of the stack frames.  Any unwinding would return erroneous results as the
   stack contents no longer match the changed PC value restored from history.
   Therefore this unwinder reports any possibly unwound registers as
   <unavailable>.  */

const struct frame_unwind record_btrace_frame_unwind =
{
  NORMAL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_frame_sniffer,
  record_btrace_frame_dealloc_cache
};

const struct frame_unwind record_btrace_tailcall_frame_unwind =
{
  TAILCALL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_tailcall_frame_sniffer,
  record_btrace_frame_dealloc_cache
};

/* Implement the to_get_unwinder method.  */

static const struct frame_unwind *
record_btrace_to_get_unwinder (struct target_ops *self)
{
  return &record_btrace_frame_unwind;
}

/* Implement the to_get_tailcall_unwinder method.  */

static const struct frame_unwind *
record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
{
  return &record_btrace_tailcall_frame_unwind;
}

/* Indicate that TP should be resumed according to FLAG.  */

static void
record_btrace_resume_thread (struct thread_info *tp,
			     enum btrace_thread_flag flag)
{
  struct btrace_thread_info *btinfo;

  DEBUG ("resuming %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flag);

  btinfo = &tp->btrace;

  if ((btinfo->flags & BTHR_MOVE) != 0)
    error (_("Thread already moving."));

  /* Fetch the latest branch trace.  */
  btrace_fetch (tp);

  btinfo->flags |= flag;
}

/* Find the thread to resume given a PTID.  */

static struct thread_info *
record_btrace_find_resume_thread (ptid_t ptid)
{
  struct thread_info *tp;

  /* When asked to resume everything, we pick the current thread.  */
  if (ptid_equal (minus_one_ptid, ptid) || ptid_is_pid (ptid))
    ptid = inferior_ptid;

  return find_thread_ptid (ptid);
}

/* Start replaying a thread.  */

static struct btrace_insn_iterator *
record_btrace_start_replaying (struct thread_info *tp)
{
  volatile struct gdb_exception except;
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;
  int executing;

  btinfo = &tp->btrace;
  replay = NULL;

  /* We can't start replaying without trace.  */
  if (btinfo->begin == NULL)
    return NULL;

  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet.  We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in to_wait, about to
     move the thread.  Since we need to recompute the stack, we temporarily
     set EXECUTING to false.  */
  executing = is_executing (tp->ptid);
  set_executing (tp->ptid, 0);

  /* GDB stores the current frame_id when stepping in order to detect steps
     into subroutines.
     Since frames are computed differently when we're replaying, we need to
     recompute those stored frames and fix them up so we can still detect
     subroutines after we started replaying.  */
  TRY_CATCH (except, RETURN_MASK_ALL)
    {
      struct frame_info *frame;
      struct frame_id frame_id;
      int upd_step_frame_id, upd_step_stack_frame_id;

      /* The current frame without replaying - computed via normal unwind.  */
      frame = get_current_frame ();
      frame_id = get_frame_id (frame);

      /* Check if we need to update any stepping-related frame id's.  */
      upd_step_frame_id = frame_id_eq (frame_id,
				       tp->control.step_frame_id);
      upd_step_stack_frame_id = frame_id_eq (frame_id,
					     tp->control.step_stack_frame_id);

      /* We start replaying at the end of the branch trace.  This corresponds
	 to the current instruction.  */
      replay = xmalloc (sizeof (*replay));
      btrace_insn_end (replay, btinfo);

      /* We're not replaying, yet.  */
      gdb_assert (btinfo->replay == NULL);
      btinfo->replay = replay;

      /* Make sure we're not using any stale registers.  */
      registers_changed_ptid (tp->ptid);

      /* The current frame with replaying - computed via btrace unwind.  */
      frame = get_current_frame ();
      frame_id = get_frame_id (frame);

      /* Replace stepping related frames where necessary.  */
      if (upd_step_frame_id)
	tp->control.step_frame_id = frame_id;
      if (upd_step_stack_frame_id)
	tp->control.step_stack_frame_id = frame_id;
    }

  /* Restore the previous execution state.  */
  set_executing (tp->ptid, executing);

  if (except.reason < 0)
    {
      xfree (btinfo->replay);
      btinfo->replay = NULL;

      registers_changed_ptid (tp->ptid);

      throw_exception (except);
    }

  return replay;
}

/* Stop replaying a thread.  */

static void
record_btrace_stop_replaying (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  xfree (btinfo->replay);
  btinfo->replay = NULL;

  /* Make sure we're not leaving any stale registers.  */
  registers_changed_ptid (tp->ptid);
}

/* The to_resume method of target record-btrace.  */

static void
record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
		      enum gdb_signal signal)
{
  struct thread_info *tp, *other;
  enum btrace_thread_flag flag;

  DEBUG ("resume %s: %s", target_pid_to_str (ptid), step ? "step" : "cont");

  tp = record_btrace_find_resume_thread (ptid);
  if (tp == NULL)
    error (_("Cannot find thread to resume."));

  /* Stop replaying other threads if the thread to resume is not replaying.  */
  if (!btrace_is_replaying (tp) && execution_direction != EXEC_REVERSE)
    ALL_THREADS (other)
      record_btrace_stop_replaying (other);

  /* As long as we're not replaying, just forward the request.  */
  if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
    {
      for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
	if (ops->to_resume != NULL)
	  return ops->to_resume (ops, ptid, step, signal);

      error (_("Cannot find target for stepping."));
    }

  /* Compute the btrace thread flag for the requested move.  */
  if (step == 0)
    flag = execution_direction == EXEC_REVERSE ? BTHR_RCONT : BTHR_CONT;
  else
    flag = execution_direction == EXEC_REVERSE ? BTHR_RSTEP : BTHR_STEP;

  /* At the moment, we only move a single thread.  We could also move
     all threads in parallel by single-stepping each resumed thread
     until the first runs into an event.
     When we do that, we would want to continue all other threads.
     For now, just resume one thread to not confuse to_wait.  */
  record_btrace_resume_thread (tp, flag);

  /* We just indicate the resume intent here.  The actual stepping happens in
     record_btrace_wait below.  */
}

/* Find a thread to move.  */

static struct thread_info *
record_btrace_find_thread_to_move (ptid_t ptid)
{
  struct thread_info *tp;

  /* First check the parameter thread.  */
  tp = find_thread_ptid (ptid);
  if (tp != NULL && (tp->btrace.flags & BTHR_MOVE) != 0)
    return tp;

  /* Otherwise, find one other thread that has been resumed.  */
  ALL_THREADS (tp)
    if ((tp->btrace.flags & BTHR_MOVE) != 0)
      return tp;

  return NULL;
}

/* Return a target_waitstatus indicating that we ran out of history.  */

static struct target_waitstatus
btrace_step_no_history (void)
{
  struct target_waitstatus status;

  status.kind = TARGET_WAITKIND_NO_HISTORY;

  return status;
}

/* Return a target_waitstatus indicating that a step finished.  */

static struct target_waitstatus
btrace_step_stopped (void)
{
  struct target_waitstatus status;

  status.kind = TARGET_WAITKIND_STOPPED;
  status.value.sig = GDB_SIGNAL_TRAP;

  return status;
}

/* Clear the record histories.  */

static void
record_btrace_clear_histories (struct btrace_thread_info *btinfo)
{
  xfree (btinfo->insn_history);
  xfree (btinfo->call_history);

  btinfo->insn_history = NULL;
  btinfo->call_history = NULL;
}

/* Step a single thread.  */

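/* The thread's flags select one of four moves: BTHR_STEP and BTHR_RSTEP
   single-step forward respectively backward in the recorded trace; BTHR_CONT
   and BTHR_RCONT keep stepping in the respective direction until a
   breakpoint is hit or the trace runs out.  */
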
static struct target_waitstatus
record_btrace_step_thread (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay, end;
  struct btrace_thread_info *btinfo;
  struct address_space *aspace;
  struct inferior *inf;
  enum btrace_thread_flag flags;
  unsigned int steps;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  flags = btinfo->flags & BTHR_MOVE;
  btinfo->flags &= ~BTHR_MOVE;

  DEBUG ("stepping %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flags);

  switch (flags)
    {
    default:
      internal_error (__FILE__, __LINE__, _("invalid stepping type."));

    case BTHR_STEP:
      /* We're done if we're not replaying.  */
      if (replay == NULL)
	return btrace_step_no_history ();

      /* We are always able to step at least once.  */
      steps = btrace_insn_next (replay, 1);
      gdb_assert (steps == 1);

      /* Determine the end of the instruction trace.  */
      btrace_insn_end (&end, btinfo);

      /* We stop replaying if we reached the end of the trace.  */
      if (btrace_insn_cmp (replay, &end) == 0)
	record_btrace_stop_replaying (tp);

      return btrace_step_stopped ();

    case BTHR_RSTEP:
      /* Start replaying if we're not already doing so.  */
      if (replay == NULL)
	replay = record_btrace_start_replaying (tp);

      /* If we can't step any further, we reached the end of the history.  */
      steps = btrace_insn_prev (replay, 1);
      if (steps == 0)
	return btrace_step_no_history ();

      return btrace_step_stopped ();

    case BTHR_CONT:
      /* We're done if we're not replaying.  */
      if (replay == NULL)
	return btrace_step_no_history ();

      inf = find_inferior_pid (ptid_get_pid (tp->ptid));
      aspace = inf->aspace;

      /* Determine the end of the instruction trace.  */
      btrace_insn_end (&end, btinfo);

      for (;;)
	{
	  const struct btrace_insn *insn;

	  /* We are always able to step at least once.  */
	  steps = btrace_insn_next (replay, 1);
	  gdb_assert (steps == 1);

	  /* We stop replaying if we reached the end of the trace.  */
	  if (btrace_insn_cmp (replay, &end) == 0)
	    {
	      record_btrace_stop_replaying (tp);
	      return btrace_step_no_history ();
	    }

	  insn = btrace_insn_get (replay);
	  gdb_assert (insn);

	  DEBUG ("stepping %d (%s) ... %s", tp->num,
		 target_pid_to_str (tp->ptid),
		 core_addr_to_string_nz (insn->pc));

	  if (breakpoint_here_p (aspace, insn->pc))
	    return btrace_step_stopped ();
	}

    case BTHR_RCONT:
      /* Start replaying if we're not already doing so.  */
      if (replay == NULL)
	replay = record_btrace_start_replaying (tp);

      inf = find_inferior_pid (ptid_get_pid (tp->ptid));
      aspace = inf->aspace;

      for (;;)
	{
	  const struct btrace_insn *insn;

	  /* If we can't step any further, we're done.  */
	  steps = btrace_insn_prev (replay, 1);
	  if (steps == 0)
	    return btrace_step_no_history ();

	  insn = btrace_insn_get (replay);
	  gdb_assert (insn);

	  DEBUG ("reverse-stepping %d (%s) ... %s", tp->num,
		 target_pid_to_str (tp->ptid),
		 core_addr_to_string_nz (insn->pc));

	  if (breakpoint_here_p (aspace, insn->pc))
	    return btrace_step_stopped ();
	}
    }
}

/* The to_wait method of target record-btrace.  */

static ptid_t
record_btrace_wait (struct target_ops *ops, ptid_t ptid,
		    struct target_waitstatus *status, int options)
{
  struct thread_info *tp, *other;

  DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);

  /* As long as we're not replaying, just forward the request.  */
  if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
    {
      for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
	if (ops->to_wait != NULL)
	  return ops->to_wait (ops, ptid, status, options);

      error (_("Cannot find target for waiting."));
    }

  /* Let's find a thread to move.  */
  tp = record_btrace_find_thread_to_move (ptid);
  if (tp == NULL)
    {
      DEBUG ("wait %s: no thread", target_pid_to_str (ptid));

      status->kind = TARGET_WAITKIND_IGNORE;
      return minus_one_ptid;
    }

  /* We only move a single thread.  We're not able to correlate threads.  */
  *status = record_btrace_step_thread (tp);

  /* Stop all other threads.  */
  if (!non_stop)
    ALL_THREADS (other)
      other->btrace.flags &= ~BTHR_MOVE;

  /* Start record histories anew from the current position.  */
  record_btrace_clear_histories (&tp->btrace);

  /* We moved the replay position but did not update registers.  */
  registers_changed_ptid (tp->ptid);

  return tp->ptid;
}

/* The to_can_execute_reverse method of target record-btrace.  */

static int
record_btrace_can_execute_reverse (struct target_ops *self)
{
  return 1;
}

/* The to_decr_pc_after_break method of target record-btrace.  */

static CORE_ADDR
record_btrace_decr_pc_after_break (struct target_ops *ops,
				   struct gdbarch *gdbarch)
{
  /* When replaying, we do not actually execute the breakpoint instruction
     so there is no need to adjust the PC after hitting a breakpoint.  */
  if (record_btrace_is_replaying (ops))
    return 0;

  return ops->beneath->to_decr_pc_after_break (ops->beneath, gdbarch);
}

/* The to_find_new_threads method of target record-btrace.  */

static void
record_btrace_find_new_threads (struct target_ops *ops)
{
  /* Don't expect new threads if we're replaying.  */
  if (record_btrace_is_replaying (ops))
    return;

  /* Forward the request.  */
  for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
    if (ops->to_find_new_threads != NULL)
      {
	ops->to_find_new_threads (ops);
	break;
      }
}

/* The to_thread_alive method of target record-btrace.  */

static int
record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
{
  /* We don't add or remove threads during replay.  */
  if (record_btrace_is_replaying (ops))
    return find_thread_ptid (ptid) != NULL;

  /* Forward the request.  */
  for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
    if (ops->to_thread_alive != NULL)
      return ops->to_thread_alive (ops, ptid);

  return 0;
}

/* Set the replay branch trace instruction iterator.  If IT is NULL, replay
   is stopped.  */

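/* This is the common worker for the "record goto" commands below.  */
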
static void
record_btrace_set_replay (struct thread_info *tp,
			  const struct btrace_insn_iterator *it)
{
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  if (it == NULL || it->function == NULL)
    record_btrace_stop_replaying (tp);
  else
    {
      if (btinfo->replay == NULL)
	record_btrace_start_replaying (tp);
      else if (btrace_insn_cmp (btinfo->replay, it) == 0)
	return;

      *btinfo->replay = *it;
      registers_changed_ptid (tp->ptid);
    }

  /* Start anew from the new replay position.  */
  record_btrace_clear_histories (btinfo);
}

/* The to_goto_record_begin method of target record-btrace.  */

static void
record_btrace_goto_begin (struct target_ops *self)
{
  struct thread_info *tp;
  struct btrace_insn_iterator begin;

  tp = require_btrace_thread ();

  btrace_insn_begin (&begin, &tp->btrace);
  record_btrace_set_replay (tp, &begin);

  print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
}

/* The to_goto_record_end method of target record-btrace.  */

static void
record_btrace_goto_end (struct target_ops *ops)
{
  struct thread_info *tp;

  tp = require_btrace_thread ();

  record_btrace_set_replay (tp, NULL);

  print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
}

/* The to_goto_record method of target record-btrace.  */

static void
record_btrace_goto (struct target_ops *self, ULONGEST insn)
{
  struct thread_info *tp;
  struct btrace_insn_iterator it;
  unsigned int number;
  int found;

  number = insn;

  /* Check for wrap-arounds.  */
  if (number != insn)
    error (_("Instruction number out of range."));

  tp = require_btrace_thread ();

  found = btrace_find_insn_by_number (&it, &tp->btrace, number);
  if (found == 0)
    error (_("No such instruction."));

  record_btrace_set_replay (tp, &it);

  print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
}

/* Initialize the record-btrace target ops.  */

static void
init_record_btrace_ops (void)
{
  struct target_ops *ops;

  ops = &record_btrace_ops;
  ops->to_shortname = "record-btrace";
  ops->to_longname = "Branch tracing target";
  ops->to_doc = "Collect control-flow trace and provide the execution history.";
  ops->to_open = record_btrace_open;
  ops->to_close = record_btrace_close;
  ops->to_detach = record_detach;
  ops->to_disconnect = record_disconnect;
  ops->to_mourn_inferior = record_mourn_inferior;
  ops->to_kill = record_kill;
  ops->to_create_inferior = find_default_create_inferior;
  ops->to_stop_recording = record_btrace_stop_recording;
  ops->to_info_record = record_btrace_info;
  ops->to_insn_history = record_btrace_insn_history;
  ops->to_insn_history_from = record_btrace_insn_history_from;
  ops->to_insn_history_range = record_btrace_insn_history_range;
  ops->to_call_history = record_btrace_call_history;
  ops->to_call_history_from = record_btrace_call_history_from;
  ops->to_call_history_range = record_btrace_call_history_range;
  ops->to_record_is_replaying = record_btrace_is_replaying;
  ops->to_xfer_partial = record_btrace_xfer_partial;
  ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
  ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
  ops->to_fetch_registers = record_btrace_fetch_registers;
  ops->to_store_registers = record_btrace_store_registers;
  ops->to_prepare_to_store = record_btrace_prepare_to_store;
  ops->to_get_unwinder = &record_btrace_to_get_unwinder;
  ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
  ops->to_resume = record_btrace_resume;
  ops->to_wait = record_btrace_wait;
  ops->to_find_new_threads = record_btrace_find_new_threads;
  ops->to_thread_alive = record_btrace_thread_alive;
  ops->to_goto_record_begin = record_btrace_goto_begin;
  ops->to_goto_record_end = record_btrace_goto_end;
  ops->to_goto_record = record_btrace_goto;
  ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
  ops->to_decr_pc_after_break = record_btrace_decr_pc_after_break;
  ops->to_stratum = record_stratum;
  ops->to_magic = OPS_MAGIC;
}

/* Alias for "target record-btrace".  */

static void
cmd_record_btrace_start (char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  execute_command ("target record-btrace", from_tty);
}

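/* Once the target is open, the recording is inspected with "info record",
   "record instruction-history", and "record function-call-history", and the
   replay position is moved with "record goto".  */
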
void _initialize_record_btrace (void);

/* Initialize btrace commands.  */

void
_initialize_record_btrace (void)
{
  add_cmd ("btrace", class_obscure, cmd_record_btrace_start,
	   _("Start branch trace recording."),
	   &record_cmdlist);
  add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);

  init_record_btrace_ops ();
  add_target (&record_btrace_ops);

  bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
			       xcalloc, xfree);
}