s390-vregs.exp: Avoid compile errors with older GCCs and on 31-bit targets
[binutils-gdb.git] / gdb / record-btrace.c
blob240a8dc8555f59f6b371f4a6e32729816043b9ae
1 /* Branch trace support for GDB, the GNU debugger.
3 Copyright (C) 2013-2015 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
22 #include "defs.h"
23 #include "record.h"
24 #include "gdbthread.h"
25 #include "target.h"
26 #include "gdbcmd.h"
27 #include "disasm.h"
28 #include "observer.h"
29 #include "cli/cli-utils.h"
30 #include "source.h"
31 #include "ui-out.h"
32 #include "symtab.h"
33 #include "filenames.h"
34 #include "regcache.h"
35 #include "frame-unwind.h"
36 #include "hashtab.h"
37 #include "infrun.h"
38 #include "event-loop.h"
39 #include "inf-loop.h"
/* The target_ops of record-btrace.  */
static struct target_ops record_btrace_ops;

/* A new thread observer enabling branch tracing for the new thread.  */
static struct observer *record_btrace_thread_observer;

/* Memory access types used in set/show record btrace replay-memory-access.
   The pointer identity of these strings is significant: the current setting
   is compared against them with '==', not strcmp.  */
static const char replay_memory_access_read_only[] = "read-only";
static const char replay_memory_access_read_write[] = "read-write";
static const char *const replay_memory_access_types[] =
{
  replay_memory_access_read_only,
  replay_memory_access_read_write,
  NULL
};

/* The currently allowed replay memory access type.  */
static const char *replay_memory_access = replay_memory_access_read_only;

/* Command lists for "set/show record btrace".  */
static struct cmd_list_element *set_record_btrace_cmdlist;
static struct cmd_list_element *show_record_btrace_cmdlist;

/* The execution direction of the last resume we got.  See record-full.c.  */
static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;

/* The async event handler for reverse/replay execution.  */
static struct async_event_handler *record_btrace_async_inferior_event_handler;

/* A flag indicating that we are currently generating a core file.  */
static int record_btrace_generating_corefile;

/* The current branch trace configuration.  */
static struct btrace_config record_btrace_conf;

/* Command list for "record btrace".  */
static struct cmd_list_element *record_btrace_cmdlist;

/* Command lists for "set/show record btrace bts".  */
static struct cmd_list_element *set_record_btrace_bts_cmdlist;
static struct cmd_list_element *show_record_btrace_bts_cmdlist;
/* Print a record-btrace debug message if "set record debug" is non-zero.
   Use do ... while (0) to avoid ambiguities when used in if statements.
   Uses the GCC ", ##args" extension so MSG may be used without varargs.  */

#define DEBUG(msg, args...)						\
  do									\
    {									\
      if (record_debug != 0)						\
	fprintf_unfiltered (gdb_stdlog,					\
			    "[record-btrace] " msg "\n", ##args);	\
    }									\
  while (0)
/* Update the branch trace for the current thread and return a pointer to its
   thread_info.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct thread_info *
require_btrace_thread (void)
{
  struct thread_info *tp;

  DEBUG ("require");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  /* Pull in any new trace before checking for emptiness.  */
  btrace_fetch (tp);

  if (btrace_is_empty (tp))
    error (_("No trace."));

  return tp;
}
/* Update the branch trace for the current thread and return a pointer to its
   branch trace information struct.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct btrace_thread_info *
require_btrace (void)
{
  struct thread_info *tp;

  tp = require_btrace_thread ();

  return &tp->btrace;
}
/* Enable branch tracing for one thread.  Warn on errors instead of
   propagating them, since this runs from a new-thread observer where an
   exception must not escape.  */

static void
record_btrace_enable_warn (struct thread_info *tp)
{
  TRY
    {
      btrace_enable (tp, &record_btrace_conf);
    }
  CATCH (error, RETURN_MASK_ERROR)
    {
      warning ("%s", error.message);
    }
  END_CATCH
}
/* Callback function to disable branch tracing for one thread.
   ARG is the thread_info to disable (void * for use as a cleanup).  */

static void
record_btrace_disable_callback (void *arg)
{
  struct thread_info *tp;

  tp = arg;

  btrace_disable (tp);
}
/* Enable automatic tracing of new threads by attaching a new-thread
   observer that calls record_btrace_enable_warn.  */

static void
record_btrace_auto_enable (void)
{
  DEBUG ("attach thread observer");

  record_btrace_thread_observer
    = observer_attach_new_thread (record_btrace_enable_warn);
}
/* Disable automatic tracing of new threads.  Safe to call more than once.  */

static void
record_btrace_auto_disable (void)
{
  /* The observer may have been detached, already.  */
  if (record_btrace_thread_observer == NULL)
    return;

  DEBUG ("detach thread observer");

  observer_detach_new_thread (record_btrace_thread_observer);
  record_btrace_thread_observer = NULL;
}
/* The record-btrace async event handler function.  Forwards to the
   standard inferior event handling.  DATA is unused.  */

static void
record_btrace_handle_async_inferior_event (gdb_client_data data)
{
  inferior_event_handler (INF_REG_EVENT, NULL);
}
/* The to_open method of target record-btrace.

   ARGS is an optional thread number list restricting which threads to
   trace; FROM_TTY is the usual interactive flag (unused here).  */

static void
record_btrace_open (const char *args, int from_tty)
{
  struct cleanup *disable_chain;
  struct thread_info *tp;

  DEBUG ("open");

  record_preopen ();

  if (!target_has_execution)
    error (_("The program is not being run."));

  if (non_stop)
    error (_("Record btrace can't debug inferior in non-stop mode."));

  gdb_assert (record_btrace_thread_observer == NULL);

  /* Enable tracing per thread; on error, the cleanup chain disables
     tracing for all threads enabled so far.  */
  disable_chain = make_cleanup (null_cleanup, NULL);
  ALL_NON_EXITED_THREADS (tp)
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->num))
      {
	btrace_enable (tp, &record_btrace_conf);

	make_cleanup (record_btrace_disable_callback, tp);
      }

  record_btrace_auto_enable ();

  push_target (&record_btrace_ops);

  record_btrace_async_inferior_event_handler
    = create_async_event_handler (record_btrace_handle_async_inferior_event,
				  NULL);
  record_btrace_generating_corefile = 0;

  observer_notify_record_changed (current_inferior (),  1);

  /* Success - keep tracing enabled.  */
  discard_cleanups (disable_chain);
}
/* The to_stop_recording method of target record-btrace.
   Disables tracing for all threads that are currently traced.  */

static void
record_btrace_stop_recording (struct target_ops *self)
{
  struct thread_info *tp;

  DEBUG ("stop recording");

  record_btrace_auto_disable ();

  ALL_NON_EXITED_THREADS (tp)
    if (tp->btrace.target != NULL)
      btrace_disable (tp);
}
/* The to_close method of target record-btrace.  */

static void
record_btrace_close (struct target_ops *self)
{
  struct thread_info *tp;

  if (record_btrace_async_inferior_event_handler != NULL)
    delete_async_event_handler (&record_btrace_async_inferior_event_handler);

  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target.  */
  record_btrace_auto_disable ();

  /* We should have already stopped recording.
     Tear down btrace in case we have not.  */
  ALL_NON_EXITED_THREADS (tp)
    btrace_teardown (tp);
}
/* The to_async method of target record-btrace.
   Marks or clears our async event handler and forwards to the target
   beneath.  */

static void
record_btrace_async (struct target_ops *ops, int enable)
{
  if (enable)
    mark_async_event_handler (record_btrace_async_inferior_event_handler);
  else
    clear_async_event_handler (record_btrace_async_inferior_event_handler);

  ops->beneath->to_async (ops->beneath, enable);
}
/* Adjusts the size and returns a human readable size suffix.

   *SIZE is scaled down in place to the largest power-of-1024 unit that
   divides it exactly; the matching suffix ("GB", "MB", "kB", or "" when
   no unit divides it) is returned.  */

static const char *
record_btrace_adjust_size (unsigned int *size)
{
  static const struct
  {
    unsigned int shift;
    const char *suffix;
  } units[] =
    {
      { 30, "GB" },
      { 20, "MB" },
      { 10, "kB" }
    };
  unsigned int i;

  for (i = 0; i < sizeof (units) / sizeof (units[0]); ++i)
    {
      unsigned int mask = (1u << units[i].shift) - 1;

      /* Only scale when the size is an exact multiple of the unit.  */
      if ((*size & mask) == 0)
	{
	  *size >>= units[i].shift;
	  return units[i].suffix;
	}
    }

  return "";
}
/* Print a BTS configuration.  Only the buffer size is printed, scaled to
   a human-readable unit; a zero size is silently skipped.  */

static void
record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
{
  const char *suffix;
  unsigned int size;

  size = conf->size;
  if (size > 0)
    {
      suffix = record_btrace_adjust_size (&size);
      printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
    }
}
335 /* Print a branch tracing configuration. */
337 static void
338 record_btrace_print_conf (const struct btrace_config *conf)
340 printf_unfiltered (_("Recording format: %s.\n"),
341 btrace_format_string (conf->format));
343 switch (conf->format)
345 case BTRACE_FORMAT_NONE:
346 return;
348 case BTRACE_FORMAT_BTS:
349 record_btrace_print_bts_conf (&conf->bts);
350 return;
353 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
/* The to_info_record method of target record-btrace.
   Prints the trace configuration and statistics (instruction count,
   function-call count, gap count) for the current thread.  */

static void
record_btrace_info (struct target_ops *self)
{
  struct btrace_thread_info *btinfo;
  const struct btrace_config *conf;
  struct thread_info *tp;
  unsigned int insns, calls, gaps;

  DEBUG ("info");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btinfo = &tp->btrace;

  conf = btrace_conf (btinfo);
  if (conf != NULL)
    record_btrace_print_conf (conf);

  btrace_fetch (tp);

  insns = 0;
  calls = 0;
  gaps = 0;

  if (!btrace_is_empty (tp))
    {
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);

      insns = btrace_insn_number (&insn);
      if (insns != 0)
	{
	  /* The last instruction does not really belong to the trace.  */
	  insns -= 1;
	}
      else
	{
	  unsigned int steps;

	  /* Skip gaps at the end: walk backwards until we find a position
	     with a valid instruction number or hit the start.  */
	  do
	    {
	      steps = btrace_insn_prev (&insn, 1);
	      if (steps == 0)
		break;

	      insns = btrace_insn_number (&insn);
	    }
	  while (insns == 0);
	}

      gaps = btinfo->ngaps;
    }

  printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
		       "for thread %d (%s).\n"), insns, calls, gaps,
		     tp->num, target_pid_to_str (tp->ptid));

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress.  At instruction %u.\n"),
		       btrace_insn_number (btinfo->replay));
}
/* Print a decode error for ERRCODE in trace format FORMAT to UIOUT.
   The output has the form "[decode error (<code>): <description>]".
   NOTE(review): is_error is always 1 here; presumably it exists for
   formats whose codes are not all errors - confirm against later
   formats.  */

static void
btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
			    enum btrace_format format)
{
  const char *errstr;
  int is_error;

  errstr = _("unknown");
  is_error = 1;

  switch (format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      switch (errcode)
	{
	default:
	  break;

	case BDE_BTS_OVERFLOW:
	  errstr = _("instruction overflow");
	  break;

	case BDE_BTS_INSN_SIZE:
	  errstr = _("unknown instruction");
	  break;
	}
      break;
    }

  ui_out_text (uiout, _("["));
  if (is_error)
    {
      ui_out_text (uiout, _("decode error ("));
      ui_out_field_int (uiout, "errcode", errcode);
      ui_out_text (uiout, _("): "));
    }
  ui_out_text (uiout, errstr);
  ui_out_text (uiout, _("]\n"));
}
/* Print an unsigned int VAL as ui_out field FLD.  ui_out has no native
   unsigned field type, so format it explicitly.  */

static void
ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
{
  ui_out_field_fmt (uiout, fld, "%u", val);
}
/* Disassemble a section of the recorded instruction trace in
   [BEGIN; END) and print it to UIOUT, one line per instruction,
   prefixed with its index.  Gaps are rendered as decode errors.  */

static void
btrace_insn_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_insn_iterator *begin,
		     const struct btrace_insn_iterator *end, int flags)
{
  struct gdbarch *gdbarch;
  struct btrace_insn_iterator it;

  DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
	 btrace_insn_number (end));

  gdbarch = target_gdbarch ();

  for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
    {
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* A NULL instruction indicates a gap in the trace.  */
      if (insn == NULL)
	{
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  btrace_ui_out_decode_error (uiout, it.function->errcode,
				      conf->format);
	}
      else
	{
	  /* Print the instruction index.  */
	  ui_out_field_uint (uiout, "index", btrace_insn_number (&it));
	  ui_out_text (uiout, "\t");

	  /* Disassembly with '/m' flag may not produce the expected result.
	     See PR gdb/11833.  */
	  gdb_disassembly (gdbarch, uiout, NULL, flags, 1, insn->pc,
			   insn->pc + 1);
	}
    }
}
/* The to_insn_history method of target record-btrace.

   SIZE is the number of instructions to print; its sign selects the
   direction (negative = backwards).  The printed window is remembered so
   repeated commands continue from where the last one stopped.  */

static void
record_btrace_insn_history (struct target_ops *self, int size, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	begin = *replay;
      else
	btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_insn_next (&end, 1);
	  covered += btrace_insn_prev (&begin, context - covered);
	  covered += btrace_insn_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_insn_next (&end, context);
	  covered += btrace_insn_prev (&begin, context - covered);
	}
    }
  else
    {
      /* Continue from the previously shown window.  */
      begin = history->begin;
      end = history->end;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
	     btrace_insn_number (&begin), btrace_insn_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_insn_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_insn_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_insn_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}
/* The to_insn_history_range method of target record-btrace.
   Prints instructions FROM..TO inclusive; an out-of-bounds upper end is
   silently truncated to the end of the trace.  */

static void
record_btrace_insn_history_range (struct target_ops *self,
				  ULONGEST from, ULONGEST to, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "insn history");
  low = from;
  high = to;

  DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);

  /* Check for wrap-arounds: instruction numbers are unsigned int, so a
     ULONGEST argument that does not round-trip is out of range.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_insn_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_insn_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_insn_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_insn_next (&end, 1);
    }

  btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_insn_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
}
/* The to_insn_history_from method of target record-btrace.
   Computes an inclusive [begin; end] window of |SIZE| instructions
   anchored at FROM (extending backwards when SIZE is negative) and
   delegates to record_btrace_insn_history_range.  */

static void
record_btrace_insn_history_from (struct target_ops *self,
				 ULONGEST from, int size, int flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  if (size < 0)
    {
      end = from;

      /* Clamp at the start of the trace instead of wrapping below zero.  */
      if (from < context)
	begin = 0;
      else
	begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
	end = ULONGEST_MAX;
    }

  record_btrace_insn_history_range (self, begin, end, flags);
}
/* Print the instruction number range for a function call history line.
   BFUN must contain at least one instruction; the printed range is
   inclusive on both ends.  */

static void
btrace_call_history_insn_range (struct ui_out *uiout,
				const struct btrace_function *bfun)
{
  unsigned int begin, end, size;

  size = VEC_length (btrace_insn_s, bfun->insn);
  gdb_assert (size > 0);

  begin = bfun->insn_offset;
  end = begin + size - 1;

  ui_out_field_uint (uiout, "insn begin", begin);
  ui_out_text (uiout, ",");
  ui_out_field_uint (uiout, "insn end", end);
}
/* Compute the lowest and highest source line for the instructions in BFUN
   and return them in PBEGIN and PEND.
   Ignore instructions that can't be mapped to BFUN, e.g. instructions that
   result from inlining or macro expansion.
   When no line can be determined, *PBEGIN is INT_MAX and *PEND is INT_MIN,
   i.e. *PEND < *PBEGIN - callers check for that.  */

static void
btrace_compute_src_line_range (const struct btrace_function *bfun,
			       int *pbegin, int *pend)
{
  struct btrace_insn *insn;
  struct symtab *symtab;
  struct symbol *sym;
  unsigned int idx;
  int begin, end;

  begin = INT_MAX;
  end = INT_MIN;

  sym = bfun->sym;
  if (sym == NULL)
    goto out;

  symtab = symbol_symtab (sym);

  for (idx = 0; VEC_iterate (btrace_insn_s, bfun->insn, idx, insn); ++idx)
    {
      struct symtab_and_line sal;

      sal = find_pc_line (insn->pc, 0);
      /* Skip lines from other symtabs (inlining/macros) and unknown lines.  */
      if (sal.symtab != symtab || sal.line == 0)
	continue;

      begin = min (begin, sal.line);
      end = max (end, sal.line);
    }

 out:
  *pbegin = begin;
  *pend = end;
}
/* Print the source line information for a function call history line.
   Prints "file:line" or "file:min,max"; prints nothing if BFUN has no
   symbol or no line range could be computed.  */

static void
btrace_call_history_src_line (struct ui_out *uiout,
			      const struct btrace_function *bfun)
{
  struct symbol *sym;
  int begin, end;

  sym = bfun->sym;
  if (sym == NULL)
    return;

  ui_out_field_string (uiout, "file",
		       symtab_to_filename_for_display (symbol_symtab (sym)));

  btrace_compute_src_line_range (bfun, &begin, &end);
  /* end < begin means no line information was found.  */
  if (end < begin)
    return;

  ui_out_text (uiout, ":");
  ui_out_field_int (uiout, "min line", begin);

  if (end == begin)
    return;

  ui_out_text (uiout, ",");
  ui_out_field_int (uiout, "max line", end);
}
794 /* Get the name of a branch trace function. */
796 static const char *
797 btrace_get_bfun_name (const struct btrace_function *bfun)
799 struct minimal_symbol *msym;
800 struct symbol *sym;
802 if (bfun == NULL)
803 return "??";
805 msym = bfun->msym;
806 sym = bfun->sym;
808 if (sym != NULL)
809 return SYMBOL_PRINT_NAME (sym);
810 else if (msym != NULL)
811 return MSYMBOL_PRINT_NAME (msym);
812 else
813 return "??";
/* Disassemble a section of the recorded function trace in [BEGIN; END)
   and print it to UIOUT, one line per function segment.  FLAGS select
   optional columns (indentation, instruction ranges, source lines).  */

static void
btrace_call_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_call_iterator *begin,
		     const struct btrace_call_iterator *end,
		     enum record_print_flag flags)
{
  struct btrace_call_iterator it;

  DEBUG ("ftrace (0x%x): [%u; %u)", flags, btrace_call_number (begin),
	 btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
    {
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;
      struct symbol *sym;

      bfun = btrace_call_get (&it);
      sym = bfun->sym;
      msym = bfun->msym;

      /* Print the function index.  */
      ui_out_field_uint (uiout, "index", bfun->number);
      ui_out_text (uiout, "\t");

      /* Indicate gaps in the trace.  */
      if (bfun->errcode != 0)
	{
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);

	  continue;
	}

      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
	{
	  /* Indent by the call depth of this segment.  */
	  int level = bfun->level + btinfo->level, i;

	  for (i = 0; i < level; ++i)
	    ui_out_text (uiout, "  ");
	}

      if (sym != NULL)
	ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
      else if (msym != NULL)
	ui_out_field_string (uiout, "function", MSYMBOL_PRINT_NAME (msym));
      else if (!ui_out_is_mi_like_p (uiout))
	ui_out_field_string (uiout, "function", "??");

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
	{
	  ui_out_text (uiout, _("\tinst "));
	  btrace_call_history_insn_range (uiout, bfun);
	}

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
	{
	  ui_out_text (uiout, _("\tat "));
	  btrace_call_history_src_line (uiout, bfun);
	}

      ui_out_text (uiout, "\n");
    }
}
/* The to_call_history method of target record-btrace.

   SIZE is the number of function segments to print; its sign selects the
   direction.  Mirrors record_btrace_insn_history but iterates over
   function calls.  NOTE(review): the ui_out tuple id is "insn history"
   here as well - looks like a copy-paste; confirm MI consumers before
   changing it.  */

static void
record_btrace_call_history (struct target_ops *self, int size, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	{
	  begin.function = replay->function;
	  begin.btinfo = btinfo;
	}
      else
	btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_call_next (&end, 1);
	  covered += btrace_call_prev (&begin, context - covered);
	  covered += btrace_call_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_call_next (&end, context);
	  covered += btrace_call_prev (&begin, context - covered);
	}
    }
  else
    {
      /* Continue from the previously shown window.  */
      begin = history->begin;
      end = history->end;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags, size,
	     btrace_call_number (&begin), btrace_call_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_call_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_call_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_call_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_call_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}
/* The to_call_history_range method of target record-btrace.
   Prints function segments FROM..TO inclusive; an out-of-bounds upper
   end is silently truncated to the end of the trace.  */

static void
record_btrace_call_history_range (struct target_ops *self,
				  ULONGEST from, ULONGEST to, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "func history");
  low = from;
  high = to;

  DEBUG ("call-history (0x%x): [%u; %u)", flags, low, high);

  /* Check for wrap-arounds: call numbers are unsigned int, so a ULONGEST
     argument that does not round-trip is out of range.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_call_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_call_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_call_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_call_next (&end, 1);
    }

  btrace_call_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_call_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
}
/* The to_call_history_from method of target record-btrace.
   Computes an inclusive [begin; end] window of |SIZE| function segments
   anchored at FROM (extending backwards when SIZE is negative) and
   delegates to record_btrace_call_history_range.  */

static void
record_btrace_call_history_from (struct target_ops *self,
				 ULONGEST from, int size, int flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  if (size < 0)
    {
      end = from;

      /* Clamp at the start of the trace instead of wrapping below zero.  */
      if (from < context)
	begin = 0;
      else
	begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
	end = ULONGEST_MAX;
    }

  record_btrace_call_history_range (self, begin, end, flags);
}
/* The to_record_is_replaying method of target record-btrace.
   Returns non-zero if any live thread is currently replaying.  */

static int
record_btrace_is_replaying (struct target_ops *self)
{
  struct thread_info *tp;

  ALL_NON_EXITED_THREADS (tp)
    if (btrace_is_replaying (tp))
      return 1;

  return 0;
}
1080 /* The to_xfer_partial method of target record-btrace. */
1082 static enum target_xfer_status
1083 record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
1084 const char *annex, gdb_byte *readbuf,
1085 const gdb_byte *writebuf, ULONGEST offset,
1086 ULONGEST len, ULONGEST *xfered_len)
1088 struct target_ops *t;
1090 /* Filter out requests that don't make sense during replay. */
1091 if (replay_memory_access == replay_memory_access_read_only
1092 && !record_btrace_generating_corefile
1093 && record_btrace_is_replaying (ops))
1095 switch (object)
1097 case TARGET_OBJECT_MEMORY:
1099 struct target_section *section;
1101 /* We do not allow writing memory in general. */
1102 if (writebuf != NULL)
1104 *xfered_len = len;
1105 return TARGET_XFER_UNAVAILABLE;
1108 /* We allow reading readonly memory. */
1109 section = target_section_by_addr (ops, offset);
1110 if (section != NULL)
1112 /* Check if the section we found is readonly. */
1113 if ((bfd_get_section_flags (section->the_bfd_section->owner,
1114 section->the_bfd_section)
1115 & SEC_READONLY) != 0)
1117 /* Truncate the request to fit into this section. */
1118 len = min (len, section->endaddr - offset);
1119 break;
1123 *xfered_len = len;
1124 return TARGET_XFER_UNAVAILABLE;
1129 /* Forward the request. */
1130 ops = ops->beneath;
1131 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
1132 offset, len, xfered_len);
/* The to_insert_breakpoint method of target record-btrace.  */

static int
record_btrace_insert_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt)
{
  const char *old;
  int ret;

  /* Inserting breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY
    {
      ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Restore the access mode before re-throwing.  */
      replay_memory_access = old;
      throw_exception (except);
    }
  END_CATCH
  replay_memory_access = old;

  return ret;
}
/* The to_remove_breakpoint method of target record-btrace.  */

static int
record_btrace_remove_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt)
{
  const char *old;
  int ret;

  /* Removing breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY
    {
      ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Restore the access mode before re-throwing.  */
      replay_memory_access = old;
      throw_exception (except);
    }
  END_CATCH
  replay_memory_access = old;

  return ret;
}
/* The to_fetch_registers method of target record-btrace.
   While replaying, only the PC register is available - it is supplied
   from the replay position.  Otherwise forwards to the target beneath.
   REGNO is a register number or -1 for all registers.  */

static void
record_btrace_fetch_registers (struct target_ops *ops,
			       struct regcache *regcache, int regno)
{
  struct btrace_insn_iterator *replay;
  struct thread_info *tp;

  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  replay = tp->btrace.replay;
  if (replay != NULL && !record_btrace_generating_corefile)
    {
      const struct btrace_insn *insn;
      struct gdbarch *gdbarch;
      int pcreg;

      gdbarch = get_regcache_arch (regcache);
      pcreg = gdbarch_pc_regnum (gdbarch);
      if (pcreg < 0)
	return;

      /* We can only provide the PC register.  */
      if (regno >= 0 && regno != pcreg)
	return;

      insn = btrace_insn_get (replay);
      gdb_assert (insn != NULL);

      regcache_raw_supply (regcache, regno, &insn->pc);
    }
  else
    {
      struct target_ops *t = ops->beneath;

      t->to_fetch_registers (t, regcache, regno);
    }
}
/* The to_store_registers method of target record-btrace.
   Refuses register writes during replay; otherwise forwards to the
   target beneath.  */

static void
record_btrace_store_registers (struct target_ops *ops,
			       struct regcache *regcache, int regno)
{
  struct target_ops *t;

  if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
    error (_("This record target does not allow writing registers."));

  gdb_assert (may_write_registers != 0);

  t = ops->beneath;
  t->to_store_registers (t, regcache, regno);
}
/* The to_prepare_to_store method of target record-btrace.
   A no-op during replay (stores are refused anyway); otherwise forwards
   to the target beneath.  */

static void
record_btrace_prepare_to_store (struct target_ops *ops,
				struct regcache *regcache)
{
  struct target_ops *t;

  if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
    return;

  t = ops->beneath;
  t->to_prepare_to_store (t, regcache);
}
/* The branch trace frame cache.  Associates a frame with the thread and
   branch trace function segment it was built from.  */

struct btrace_frame_cache
{
  /* The thread.  */
  struct thread_info *tp;

  /* The frame info.  Also the hash key (see bfcache_hash/bfcache_eq).  */
  struct frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;
};

/* A struct btrace_frame_cache hash table indexed by NEXT.  */
static htab_t bfcache;
/* hash_f for htab_create_alloc of bfcache.  Hashes on the frame_info
   pointer only, matching bfcache_eq.  */

static hashval_t
bfcache_hash (const void *arg)
{
  const struct btrace_frame_cache *cache = arg;

  return htab_hash_pointer (cache->frame);
}
/* eq_f for htab_create_alloc of bfcache.  Two entries are equal when
   they refer to the same frame_info.  */

static int
bfcache_eq (const void *arg1, const void *arg2)
{
  const struct btrace_frame_cache *cache1 = arg1;
  const struct btrace_frame_cache *cache2 = arg2;

  return cache1->frame == cache2->frame;
}
/* Create a new btrace frame cache for FRAME, register it in bfcache, and
   return it.  The cache is allocated on the frame obstack, so its
   lifetime is tied to the frame.  FRAME must not already have an entry.  */

static struct btrace_frame_cache *
bfcache_new (struct frame_info *frame)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
  cache->frame = frame;

  slot = htab_find_slot (bfcache, cache, INSERT);
  gdb_assert (*slot == NULL);
  *slot = cache;

  return cache;
}
/* Extract the branch trace function from a branch trace frame.
   Returns NULL if FRAME has no entry in bfcache.  */

static const struct btrace_function *
btrace_get_frame_function (struct frame_info *frame)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  struct btrace_frame_cache pattern;
  void **slot;

  /* Look up by frame pointer only; that is the hash key.  */
  pattern.frame = frame;

  slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
  if (slot == NULL)
    return NULL;

  cache = *slot;
  return cache->bfun;
}
1347 /* Implement stop_reason method for record_btrace_frame_unwind. */
1349 static enum unwind_stop_reason
1350 record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1351 void **this_cache)
1353 const struct btrace_frame_cache *cache;
1354 const struct btrace_function *bfun;
1356 cache = *this_cache;
1357 bfun = cache->bfun;
1358 gdb_assert (bfun != NULL);
1360 if (bfun->up == NULL)
1361 return UNWIND_UNAVAILABLE;
1363 return UNWIND_NO_REASON;
/* Implement this_id method for record_btrace_frame_unwind.  Builds an
   unavailable-stack frame id from the frame's function address and the
   number of the first segment of the traced function.  */

static void
record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
			     struct frame_id *this_id)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  CORE_ADDR code, special;

  cache = *this_cache;

  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  /* Walk back to the first segment of this function so all segments of the
     same function instance produce the same frame id.  */
  while (bfun->segment.prev != NULL)
    bfun = bfun->segment.prev;

  code = get_frame_func (this_frame);
  special = bfun->number;

  /* The stack is not recorded, so the id is built without a stack
     address.  */
  *this_id = frame_id_build_unavailable_stack_special (code, special);

  DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
	 btrace_get_bfun_name (cache->bfun),
	 core_addr_to_string_nz (this_id->code_addr),
	 core_addr_to_string_nz (this_id->special_addr));
}
/* Implement prev_register method for record_btrace_frame_unwind.  Only the
   PC can be unwound from branch trace; all other registers are reported
   unavailable via NOT_AVAILABLE_ERROR.  */

static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
				   void **this_cache,
				   int regnum)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  const struct btrace_insn *insn;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  int pcreg;

  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("Registers are not available in btrace record history"));

  cache = *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  caller = bfun->up;
  if (caller == NULL)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("No caller in btrace record history"));

  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    {
      /* The up link points past a return: the caller resumes at its first
	 recorded instruction.  */
      insn = VEC_index (btrace_insn_s, caller->insn, 0);
      pc = insn->pc;
    }
  else
    {
      /* Otherwise the caller resumes after its last recorded instruction,
	 i.e. just past the call site.  */
      insn = VEC_last (btrace_insn_s, caller->insn);
      pc = insn->pc;

      pc += gdb_insn_length (gdbarch, pc);
    }

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
	 btrace_get_bfun_name (bfun), bfun->level,
	 core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
}
/* Implement sniffer method for record_btrace_frame_unwind.  Claims a frame
   when a btrace function segment can be determined for it; on success,
   registers a btrace frame cache for THIS_FRAME and returns 1.  */

static int
record_btrace_frame_sniffer (const struct frame_unwind *self,
			     struct frame_info *this_frame,
			     void **this_cache)
{
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  struct frame_info *next;

  /* THIS_FRAME does not contain a reference to its thread.  */
  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  bfun = NULL;
  next = get_next_frame (this_frame);
  if (next == NULL)
    {
      /* Innermost frame: use the current replay position, if any.  */
      const struct btrace_insn_iterator *replay;

      replay = tp->btrace.replay;
      if (replay != NULL)
	bfun = replay->function;
    }
  else
    {
      /* Outer frame: use the caller of the next (inner) btrace frame,
	 unless that frame was entered via a tail call (those are handled
	 by the tailcall sniffer below).  */
      const struct btrace_function *callee;

      callee = btrace_get_frame_function (next);
      if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
	bfun = callee->up;
    }

  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tp;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}
/* Implement sniffer method for record_btrace_tailcall_frame_unwind.
   Claims a frame only when the next (inner) btrace frame was entered via a
   tail call.  */

static int
record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
				      struct frame_info *this_frame,
				      void **this_cache)
{
  const struct btrace_function *bfun, *callee;
  struct btrace_frame_cache *cache;
  struct frame_info *next;

  /* A tail call frame is never the innermost frame.  */
  next = get_next_frame (this_frame);
  if (next == NULL)
    return 0;

  callee = btrace_get_frame_function (next);
  if (callee == NULL)
    return 0;

  if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
    return 0;

  bfun = callee->up;
  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = find_thread_ptid (inferior_ptid);
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}
1532 static void
1533 record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1535 struct btrace_frame_cache *cache;
1536 void **slot;
1538 cache = this_cache;
1540 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1541 gdb_assert (slot != NULL);
1543 htab_remove_elt (bfcache, cache);
/* btrace recording does not store previous memory content, nor the content
   of the stack frames.  Any unwinding would return erroneous results as the
   stack contents no longer match the changed PC value restored from
   history.  Therefore this unwinder reports any possibly unwound registers
   as <unavailable>.  */
/* The btrace frame unwinder for normal frames during replay.  */

const struct frame_unwind record_btrace_frame_unwind =
{
  NORMAL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,				/* unwind_data */
  record_btrace_frame_sniffer,
  record_btrace_frame_dealloc_cache
};
/* The btrace frame unwinder for tail call frames during replay.  */

const struct frame_unwind record_btrace_tailcall_frame_unwind =
{
  TAILCALL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,				/* unwind_data */
  record_btrace_tailcall_frame_sniffer,
  record_btrace_frame_dealloc_cache
};
/* Implement the to_get_unwinder method.  Returns the btrace unwinder for
   normal frames.  */

static const struct frame_unwind *
record_btrace_to_get_unwinder (struct target_ops *self)
{
  return &record_btrace_frame_unwind;
}
/* Implement the to_get_tailcall_unwinder method.  Returns the btrace
   unwinder for tail call frames.  */

static const struct frame_unwind *
record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
{
  return &record_btrace_tailcall_frame_unwind;
}
/* Indicate that TP should be resumed according to FLAG.  The actual
   stepping happens later, in record_btrace_wait; here we only record the
   intent in the thread's btrace flags.  */

static void
record_btrace_resume_thread (struct thread_info *tp,
			     enum btrace_thread_flag flag)
{
  struct btrace_thread_info *btinfo;

  DEBUG ("resuming %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flag);

  btinfo = &tp->btrace;

  /* A thread may only have one pending move at a time.  */
  if ((btinfo->flags & BTHR_MOVE) != 0)
    error (_("Thread already moving."));

  /* Fetch the latest branch trace.  */
  btrace_fetch (tp);

  btinfo->flags |= flag;
}
1611 /* Find the thread to resume given a PTID. */
1613 static struct thread_info *
1614 record_btrace_find_resume_thread (ptid_t ptid)
1616 struct thread_info *tp;
1618 /* When asked to resume everything, we pick the current thread. */
1619 if (ptid_equal (minus_one_ptid, ptid) || ptid_is_pid (ptid))
1620 ptid = inferior_ptid;
1622 return find_thread_ptid (ptid);
/* Start replaying a thread.  Returns the new replay iterator (owned by
   TP's btrace info), or NULL if TP has no trace.  Throws on error, undoing
   any partial setup first.  */

static struct btrace_insn_iterator *
record_btrace_start_replaying (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;
  int executing;

  btinfo = &tp->btrace;
  replay = NULL;

  /* We can't start replaying without trace.  */
  if (btinfo->begin == NULL)
    return NULL;

  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet.  We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in to_wait, about to
     move the thread.  Since we need to recompute the stack, we temporarily
     set EXECUTING to false.  */
  executing = is_executing (tp->ptid);
  set_executing (tp->ptid, 0);

  /* GDB stores the current frame_id when stepping in order to detect steps
     into subroutines.
     Since frames are computed differently when we're replaying, we need to
     recompute those stored frames and fix them up so we can still detect
     subroutines after we started replaying.  */
  TRY
    {
      struct frame_info *frame;
      struct frame_id frame_id;
      int upd_step_frame_id, upd_step_stack_frame_id;

      /* The current frame without replaying - computed via normal unwind.  */
      frame = get_current_frame ();
      frame_id = get_frame_id (frame);

      /* Check if we need to update any stepping-related frame id's.  */
      upd_step_frame_id = frame_id_eq (frame_id,
				       tp->control.step_frame_id);
      upd_step_stack_frame_id = frame_id_eq (frame_id,
					     tp->control.step_stack_frame_id);

      /* We start replaying at the end of the branch trace.  This corresponds
	 to the current instruction.  */
      replay = xmalloc (sizeof (*replay));
      btrace_insn_end (replay, btinfo);

      /* Skip gaps at the end of the trace.  */
      while (btrace_insn_get (replay) == NULL)
	{
	  unsigned int steps;

	  steps = btrace_insn_prev (replay, 1);
	  if (steps == 0)
	    error (_("No trace."));
	}

      /* We're not replaying, yet.  */
      gdb_assert (btinfo->replay == NULL);
      btinfo->replay = replay;

      /* Make sure we're not using any stale registers.  */
      registers_changed_ptid (tp->ptid);

      /* The current frame with replaying - computed via btrace unwind.  */
      frame = get_current_frame ();
      frame_id = get_frame_id (frame);

      /* Replace stepping related frames where necessary.  */
      if (upd_step_frame_id)
	tp->control.step_frame_id = frame_id;
      if (upd_step_stack_frame_id)
	tp->control.step_stack_frame_id = frame_id;
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Restore the previous execution state.  */
      set_executing (tp->ptid, executing);

      /* Undo the partial replay setup before re-throwing.  */
      xfree (btinfo->replay);
      btinfo->replay = NULL;

      registers_changed_ptid (tp->ptid);

      throw_exception (except);
    }
  END_CATCH

  /* Restore the previous execution state.  */
  set_executing (tp->ptid, executing);

  return replay;
}
1724 /* Stop replaying a thread. */
1726 static void
1727 record_btrace_stop_replaying (struct thread_info *tp)
1729 struct btrace_thread_info *btinfo;
1731 btinfo = &tp->btrace;
1733 xfree (btinfo->replay);
1734 btinfo->replay = NULL;
1736 /* Make sure we're not leaving any stale registers. */
1737 registers_changed_ptid (tp->ptid);
1740 /* The to_resume method of target record-btrace. */
1742 static void
1743 record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
1744 enum gdb_signal signal)
1746 struct thread_info *tp, *other;
1747 enum btrace_thread_flag flag;
1749 DEBUG ("resume %s: %s", target_pid_to_str (ptid), step ? "step" : "cont");
1751 /* Store the execution direction of the last resume. */
1752 record_btrace_resume_exec_dir = execution_direction;
1754 tp = record_btrace_find_resume_thread (ptid);
1755 if (tp == NULL)
1756 error (_("Cannot find thread to resume."));
1758 /* Stop replaying other threads if the thread to resume is not replaying. */
1759 if (!btrace_is_replaying (tp) && execution_direction != EXEC_REVERSE)
1760 ALL_NON_EXITED_THREADS (other)
1761 record_btrace_stop_replaying (other);
1763 /* As long as we're not replaying, just forward the request. */
1764 if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
1766 ops = ops->beneath;
1767 return ops->to_resume (ops, ptid, step, signal);
1770 /* Compute the btrace thread flag for the requested move. */
1771 if (step == 0)
1772 flag = execution_direction == EXEC_REVERSE ? BTHR_RCONT : BTHR_CONT;
1773 else
1774 flag = execution_direction == EXEC_REVERSE ? BTHR_RSTEP : BTHR_STEP;
1776 /* At the moment, we only move a single thread. We could also move
1777 all threads in parallel by single-stepping each resumed thread
1778 until the first runs into an event.
1779 When we do that, we would want to continue all other threads.
1780 For now, just resume one thread to not confuse to_wait. */
1781 record_btrace_resume_thread (tp, flag);
1783 /* We just indicate the resume intent here. The actual stepping happens in
1784 record_btrace_wait below. */
1786 /* Async support. */
1787 if (target_can_async_p ())
1789 target_async (1);
1790 mark_async_event_handler (record_btrace_async_inferior_event_handler);
/* Find a thread to move.  Prefers the thread for PTID if it has a pending
   move request; otherwise picks any non-exited thread with one.  Returns
   NULL if no thread needs to be moved.  */

static struct thread_info *
record_btrace_find_thread_to_move (ptid_t ptid)
{
  struct thread_info *tp;

  /* First check the parameter thread.  */
  tp = find_thread_ptid (ptid);
  if (tp != NULL && (tp->btrace.flags & BTHR_MOVE) != 0)
    return tp;

  /* Otherwise, find one other thread that has been resumed.  */
  ALL_NON_EXITED_THREADS (tp)
    if ((tp->btrace.flags & BTHR_MOVE) != 0)
      return tp;

  return NULL;
}
/* Return a target_waitstatus indicating that we ran out of history.  */

static struct target_waitstatus
btrace_step_no_history (void)
{
  struct target_waitstatus status;

  /* Only KIND is set; NOTE(review): STATUS.value is left uninitialized -
     presumably ignored for this kind, confirm against consumers.  */
  status.kind = TARGET_WAITKIND_NO_HISTORY;

  return status;
}
/* Return a target_waitstatus indicating that a step finished, i.e. the
   thread stopped with SIGTRAP.  */

static struct target_waitstatus
btrace_step_stopped (void)
{
  struct target_waitstatus status;

  status.kind = TARGET_WAITKIND_STOPPED;
  status.value.sig = GDB_SIGNAL_TRAP;

  return status;
}
1839 /* Clear the record histories. */
1841 static void
1842 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
1844 xfree (btinfo->insn_history);
1845 xfree (btinfo->call_history);
1847 btinfo->insn_history = NULL;
1848 btinfo->call_history = NULL;
/* Step a single thread according to its pending move request and return
   the resulting wait status.  Clears the move request before stepping.  */

static struct target_waitstatus
record_btrace_step_thread (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay, end;
  struct btrace_thread_info *btinfo;
  struct address_space *aspace;
  struct inferior *inf;
  enum btrace_thread_flag flags;
  unsigned int steps;

  /* We can't step without an execution history.  */
  if (btrace_is_empty (tp))
    return btrace_step_no_history ();

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  /* Consume the pending move request.  */
  flags = btinfo->flags & BTHR_MOVE;
  btinfo->flags &= ~BTHR_MOVE;

  DEBUG ("stepping %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flags);

  switch (flags)
    {
    default:
      internal_error (__FILE__, __LINE__, _("invalid stepping type."));

    case BTHR_STEP:
      /* We're done if we're not replaying.  */
      if (replay == NULL)
	return btrace_step_no_history ();

      /* Skip gaps during replay.  */
      do
	{
	  steps = btrace_insn_next (replay, 1);
	  if (steps == 0)
	    {
	      record_btrace_stop_replaying (tp);
	      return btrace_step_no_history ();
	    }
	}
      while (btrace_insn_get (replay) == NULL);

      /* Determine the end of the instruction trace.  */
      btrace_insn_end (&end, btinfo);

      /* We stop replaying if we reached the end of the trace.  */
      if (btrace_insn_cmp (replay, &end) == 0)
	record_btrace_stop_replaying (tp);

      return btrace_step_stopped ();

    case BTHR_RSTEP:
      /* Start replaying if we're not already doing so.  */
      if (replay == NULL)
	replay = record_btrace_start_replaying (tp);

      /* If we can't step any further, we reached the end of the history.
	 Skip gaps during replay.  */
      do
	{
	  steps = btrace_insn_prev (replay, 1);
	  if (steps == 0)
	    return btrace_step_no_history ();
	}
      while (btrace_insn_get (replay) == NULL);

      return btrace_step_stopped ();

    case BTHR_CONT:
      /* We're done if we're not replaying.  */
      if (replay == NULL)
	return btrace_step_no_history ();

      inf = find_inferior_ptid (tp->ptid);
      aspace = inf->aspace;

      /* Determine the end of the instruction trace.  */
      btrace_insn_end (&end, btinfo);

      /* Step forward until we hit a breakpoint or the end of the trace.  */
      for (;;)
	{
	  const struct btrace_insn *insn;

	  /* Skip gaps during replay.  */
	  do
	    {
	      steps = btrace_insn_next (replay, 1);
	      if (steps == 0)
		{
		  record_btrace_stop_replaying (tp);
		  return btrace_step_no_history ();
		}

	      insn = btrace_insn_get (replay);
	    }
	  while (insn == NULL);

	  /* We stop replaying if we reached the end of the trace.  */
	  if (btrace_insn_cmp (replay, &end) == 0)
	    {
	      record_btrace_stop_replaying (tp);
	      return btrace_step_no_history ();
	    }

	  DEBUG ("stepping %d (%s) ... %s", tp->num,
		 target_pid_to_str (tp->ptid),
		 core_addr_to_string_nz (insn->pc));

	  if (record_check_stopped_by_breakpoint (aspace, insn->pc,
						  &btinfo->stop_reason))
	    return btrace_step_stopped ();
	}

    case BTHR_RCONT:
      /* Start replaying if we're not already doing so.  */
      if (replay == NULL)
	replay = record_btrace_start_replaying (tp);

      inf = find_inferior_ptid (tp->ptid);
      aspace = inf->aspace;

      /* Step backward until we hit a breakpoint or run out of history.  */
      for (;;)
	{
	  const struct btrace_insn *insn;

	  /* If we can't step any further, we reached the end of the history.
	     Skip gaps during replay.  */
	  do
	    {
	      steps = btrace_insn_prev (replay, 1);
	      if (steps == 0)
		return btrace_step_no_history ();

	      insn = btrace_insn_get (replay);
	    }
	  while (insn == NULL);

	  DEBUG ("reverse-stepping %d (%s) ... %s", tp->num,
		 target_pid_to_str (tp->ptid),
		 core_addr_to_string_nz (insn->pc));

	  if (record_check_stopped_by_breakpoint (aspace, insn->pc,
						  &btinfo->stop_reason))
	    return btrace_step_stopped ();
	}
    }
}
/* The to_wait method of target record-btrace.  Performs the move recorded
   by record_btrace_resume, or forwards to the target beneath while not
   replaying.  */

static ptid_t
record_btrace_wait (struct target_ops *ops, ptid_t ptid,
		    struct target_waitstatus *status, int options)
{
  struct thread_info *tp, *other;

  DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);

  /* As long as we're not replaying, just forward the request.  */
  if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
    {
      ops = ops->beneath;
      return ops->to_wait (ops, ptid, status, options);
    }

  /* Let's find a thread to move.  */
  tp = record_btrace_find_thread_to_move (ptid);
  if (tp == NULL)
    {
      DEBUG ("wait %s: no thread", target_pid_to_str (ptid));

      status->kind = TARGET_WAITKIND_IGNORE;
      return minus_one_ptid;
    }

  /* We only move a single thread.  We're not able to correlate threads.  */
  *status = record_btrace_step_thread (tp);

  /* Stop all other threads by clearing their pending move requests.  */
  if (!non_stop)
    ALL_NON_EXITED_THREADS (other)
      other->btrace.flags &= ~BTHR_MOVE;

  /* Start record histories anew from the current position.  */
  record_btrace_clear_histories (&tp->btrace);

  /* We moved the replay position but did not update registers.  */
  registers_changed_ptid (tp->ptid);

  return tp->ptid;
}
/* The to_can_execute_reverse method of target record-btrace.  Reverse
   execution is always supported via replaying the recorded trace.  */

static int
record_btrace_can_execute_reverse (struct target_ops *self)
{
  return 1;
}
2056 /* The to_stopped_by_sw_breakpoint method of target record-btrace. */
2058 static int
2059 record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
2061 if (record_btrace_is_replaying (ops))
2063 struct thread_info *tp = inferior_thread ();
2065 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2068 return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
2071 /* The to_supports_stopped_by_sw_breakpoint method of target
2072 record-btrace. */
2074 static int
2075 record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2077 if (record_btrace_is_replaying (ops))
2078 return 1;
2080 return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
/* The to_stopped_by_hw_breakpoint method of target record-btrace.
   (The original header comment said "to_stopped_by_sw_breakpoint";
   this is the hardware-breakpoint variant.)  */

static int
record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
{
  /* While replaying, answer from the recorded stop reason.  */
  if (record_btrace_is_replaying (ops))
    {
      struct thread_info *tp = inferior_thread ();

      return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
    }

  /* Otherwise forward to the target beneath.  */
  return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
}
2098 /* The to_supports_stopped_by_hw_breakpoint method of target
2099 record-btrace. */
2101 static int
2102 record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2104 if (record_btrace_is_replaying (ops))
2105 return 1;
2107 return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
2110 /* The to_update_thread_list method of target record-btrace. */
2112 static void
2113 record_btrace_update_thread_list (struct target_ops *ops)
2115 /* We don't add or remove threads during replay. */
2116 if (record_btrace_is_replaying (ops))
2117 return;
2119 /* Forward the request. */
2120 ops = ops->beneath;
2121 ops->to_update_thread_list (ops);
2124 /* The to_thread_alive method of target record-btrace. */
2126 static int
2127 record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2129 /* We don't add or remove threads during replay. */
2130 if (record_btrace_is_replaying (ops))
2131 return find_thread_ptid (ptid) != NULL;
2133 /* Forward the request. */
2134 ops = ops->beneath;
2135 return ops->to_thread_alive (ops, ptid);
/* Set the replay branch trace instruction iterator.  If IT is NULL, replay
   is stopped.  Either way, the record histories are restarted from the new
   position (unless the position did not change).  */

static void
record_btrace_set_replay (struct thread_info *tp,
			  const struct btrace_insn_iterator *it)
{
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  if (it == NULL || it->function == NULL)
    record_btrace_stop_replaying (tp);
  else
    {
      /* Ensure a replay iterator exists; then move it to IT.  */
      if (btinfo->replay == NULL)
	record_btrace_start_replaying (tp);
      else if (btrace_insn_cmp (btinfo->replay, it) == 0)
	return;		/* Already at IT; nothing to do.  */

      *btinfo->replay = *it;
      registers_changed_ptid (tp->ptid);
    }

  /* Start anew from the new replay position.  */
  record_btrace_clear_histories (btinfo);
}
/* The to_goto_record_begin method of target record-btrace.  Moves the
   replay position to the start of the trace and prints the new frame.  */

static void
record_btrace_goto_begin (struct target_ops *self)
{
  struct thread_info *tp;
  struct btrace_insn_iterator begin;

  tp = require_btrace_thread ();

  btrace_insn_begin (&begin, &tp->btrace);
  record_btrace_set_replay (tp, &begin);

  print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
}
/* The to_goto_record_end method of target record-btrace.  Stops replaying
   (returns to the current position) and prints the frame.  */

static void
record_btrace_goto_end (struct target_ops *ops)
{
  struct thread_info *tp;

  tp = require_btrace_thread ();

  record_btrace_set_replay (tp, NULL);

  print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
}
/* The to_goto_record method of target record-btrace.  Moves the replay
   position to instruction number INSN and prints the new frame.  */

static void
record_btrace_goto (struct target_ops *self, ULONGEST insn)
{
  struct thread_info *tp;
  struct btrace_insn_iterator it;
  unsigned int number;
  int found;

  number = insn;

  /* Check for wrap-arounds: the lookup below takes an unsigned int, so
     reject values that do not round-trip through the narrowing.  */
  if (number != insn)
    error (_("Instruction number out of range."));

  tp = require_btrace_thread ();

  found = btrace_find_insn_by_number (&it, &tp->btrace, number);
  if (found == 0)
    error (_("No such instruction."));

  record_btrace_set_replay (tp, &it);

  print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
}
/* The to_execution_direction target method.  Reports the direction stored
   by the last record_btrace_resume.  */

static enum exec_direction_kind
record_btrace_execution_direction (struct target_ops *self)
{
  return record_btrace_resume_exec_dir;
}
/* The to_prepare_to_generate_core target method.  Sets a flag that relaxes
   the replay restrictions while a core file is being generated.  */

static void
record_btrace_prepare_to_generate_core (struct target_ops *self)
{
  record_btrace_generating_corefile = 1;
}
/* The to_done_generating_core target method.  Clears the flag set by
   record_btrace_prepare_to_generate_core.  */

static void
record_btrace_done_generating_core (struct target_ops *self)
{
  record_btrace_generating_corefile = 0;
}
/* Initialize the record-btrace target ops.  Fills in the static
   record_btrace_ops structure; methods not set here fall through to the
   target beneath.  */

static void
init_record_btrace_ops (void)
{
  struct target_ops *ops;

  ops = &record_btrace_ops;
  ops->to_shortname = "record-btrace";
  ops->to_longname = "Branch tracing target";
  ops->to_doc = "Collect control-flow trace and provide the execution history.";
  ops->to_open = record_btrace_open;
  ops->to_close = record_btrace_close;
  ops->to_async = record_btrace_async;
  /* Shared record methods.  */
  ops->to_detach = record_detach;
  ops->to_disconnect = record_disconnect;
  ops->to_mourn_inferior = record_mourn_inferior;
  ops->to_kill = record_kill;
  /* History browsing.  */
  ops->to_stop_recording = record_btrace_stop_recording;
  ops->to_info_record = record_btrace_info;
  ops->to_insn_history = record_btrace_insn_history;
  ops->to_insn_history_from = record_btrace_insn_history_from;
  ops->to_insn_history_range = record_btrace_insn_history_range;
  ops->to_call_history = record_btrace_call_history;
  ops->to_call_history_from = record_btrace_call_history_from;
  ops->to_call_history_range = record_btrace_call_history_range;
  ops->to_record_is_replaying = record_btrace_is_replaying;
  /* Memory, breakpoints and registers during replay.  */
  ops->to_xfer_partial = record_btrace_xfer_partial;
  ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
  ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
  ops->to_fetch_registers = record_btrace_fetch_registers;
  ops->to_store_registers = record_btrace_store_registers;
  ops->to_prepare_to_store = record_btrace_prepare_to_store;
  ops->to_get_unwinder = &record_btrace_to_get_unwinder;
  ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
  /* Execution control.  */
  ops->to_resume = record_btrace_resume;
  ops->to_wait = record_btrace_wait;
  ops->to_update_thread_list = record_btrace_update_thread_list;
  ops->to_thread_alive = record_btrace_thread_alive;
  ops->to_goto_record_begin = record_btrace_goto_begin;
  ops->to_goto_record_end = record_btrace_goto_end;
  ops->to_goto_record = record_btrace_goto;
  ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
  ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
  ops->to_supports_stopped_by_sw_breakpoint
    = record_btrace_supports_stopped_by_sw_breakpoint;
  ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
  ops->to_supports_stopped_by_hw_breakpoint
    = record_btrace_supports_stopped_by_hw_breakpoint;
  ops->to_execution_direction = record_btrace_execution_direction;
  ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
  ops->to_done_generating_core = record_btrace_done_generating_core;
  ops->to_stratum = record_stratum;
  ops->to_magic = OPS_MAGIC;
}
/* Start recording in BTS format.  Resets the configured format if starting
   the target fails, then re-throws.  */

static void
cmd_record_btrace_bts_start (char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_BTS;

  TRY
    {
      execute_command ("target record-btrace", from_tty);
    }
  CATCH (exception, RETURN_MASK_ALL)
    {
      record_btrace_conf.format = BTRACE_FORMAT_NONE;
      throw_exception (exception);
    }
  END_CATCH
}
/* Alias for "target record".  Currently identical to the BTS variant: it
   selects the BTS format and starts the record-btrace target, resetting
   the format on failure.  */

static void
cmd_record_btrace_start (char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_BTS;

  TRY
    {
      execute_command ("target record-btrace", from_tty);
    }
  CATCH (exception, RETURN_MASK_ALL)
    {
      record_btrace_conf.format = BTRACE_FORMAT_NONE;
      throw_exception (exception);
    }
  END_CATCH
}
/* The "set record btrace" command.  Prefix handler; lists the available
   sub-commands and their current values.  */

static void
cmd_set_record_btrace (char *args, int from_tty)
{
  cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
}
/* The "show record btrace" command.  Prefix handler; lists the available
   sub-commands and their current values.  */

static void
cmd_show_record_btrace (char *args, int from_tty)
{
  cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
}
2365 /* The "show record btrace replay-memory-access" command. */
2367 static void
2368 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2369 struct cmd_list_element *c, const char *value)
2371 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2372 replay_memory_access);
2375 /* The "set record btrace bts" command. */
2377 static void
2378 cmd_set_record_btrace_bts (char *args, int from_tty)
2380 printf_unfiltered (_("\"set record btrace bts\" must be followed "
2381 "by an apporpriate subcommand.\n"));
2382 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
2383 all_commands, gdb_stdout);
/* The "show record btrace bts" command.  Prefix handler; lists the
   available sub-commands and their current values.  */

static void
cmd_show_record_btrace_bts (char *args, int from_tty)
{
  cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
}
void _initialize_record_btrace (void);

/* Initialize btrace commands: registers the "record btrace" command tree,
   the set/show options, the target, and the frame-cache hash table.  */

void
_initialize_record_btrace (void)
{
  /* "record btrace" and its alias "record b".  */
  add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
		  _("Start branch trace recording."), &record_btrace_cmdlist,
		  "record btrace ", 0, &record_cmdlist);
  add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);

  /* "record btrace bts" and its alias "record bts".  */
  add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
	   _("\
Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
The processor stores a from/to record for each branch into a cyclic buffer.\n\
This format may not be available on all processors."),
	   &record_btrace_cmdlist);
  add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);

  /* "set/show record btrace" prefixes.  */
  add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
		  _("Set record options"), &set_record_btrace_cmdlist,
		  "set record btrace ", 0, &set_record_cmdlist);

  add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
		  _("Show record options"), &show_record_btrace_cmdlist,
		  "show record btrace ", 0, &show_record_cmdlist);

  /* "set/show record btrace replay-memory-access".  */
  add_setshow_enum_cmd ("replay-memory-access", no_class,
			replay_memory_access_types, &replay_memory_access, _("\
Set what memory accesses are allowed during replay."), _("\
Show what memory accesses are allowed during replay."),
			   _("Default is READ-ONLY.\n\n\
The btrace record target does not trace data.\n\
The memory therefore corresponds to the live target and not \
to the current replay position.\n\n\
When READ-ONLY, allow accesses to read-only memory during replay.\n\
When READ-WRITE, allow accesses to read-only and read-write memory during \
replay."),
			   NULL, cmd_show_replay_memory_access,
			   &set_record_btrace_cmdlist,
			   &show_record_btrace_cmdlist);

  /* "set/show record btrace bts" prefixes.  */
  add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
		  _("Set record btrace bts options"),
		  &set_record_btrace_bts_cmdlist,
		  "set record btrace bts ", 0, &set_record_btrace_cmdlist);

  add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
		  _("Show record btrace bts options"),
		  &show_record_btrace_bts_cmdlist,
		  "show record btrace bts ", 0, &show_record_btrace_cmdlist);

  /* "set/show record btrace bts buffer-size".  */
  add_setshow_uinteger_cmd ("buffer-size", no_class,
			    &record_btrace_conf.bts.size,
			    _("Set the record/replay bts buffer size."),
			    _("Show the record/replay bts buffer size."), _("\
When starting recording request a trace buffer of this size.  \
The actual buffer size may differ from the requested size.  \
Use \"info record\" to see the actual buffer size.\n\n\
Bigger buffers allow longer recording but also take more time to process \
the recorded execution trace.\n\n\
The trace buffer size may not be changed while recording."), NULL, NULL,
			    &set_record_btrace_bts_cmdlist,
			    &show_record_btrace_bts_cmdlist);

  init_record_btrace_ops ();
  add_target (&record_btrace_ops);

  /* The frame-cache hash table used by the btrace unwinders.  */
  bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
			       xcalloc, xfree);

  /* Default BTS buffer size.  */
  record_btrace_conf.bts.size = 64 * 1024;
}