gdb/infrun.c (binutils-gdb.git)
1 /* Target-struct-independent code to start (run) and stop an inferior
2 process.
4 Copyright (C) 1986-2024 Free Software Foundation, Inc.
6 This file is part of GDB.
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21 #include "cli/cli-cmds.h"
22 #include "displaced-stepping.h"
23 #include "infrun.h"
24 #include <ctype.h>
25 #include "symtab.h"
26 #include "frame.h"
27 #include "inferior.h"
28 #include "breakpoint.h"
29 #include "gdbcore.h"
30 #include "target.h"
31 #include "target-connection.h"
32 #include "gdbthread.h"
33 #include "annotate.h"
34 #include "symfile.h"
35 #include "top.h"
36 #include "ui.h"
37 #include "inf-loop.h"
38 #include "regcache.h"
39 #include "value.h"
40 #include "observable.h"
41 #include "language.h"
42 #include "solib.h"
43 #include "main.h"
44 #include "block.h"
45 #include "mi/mi-common.h"
46 #include "event-top.h"
47 #include "record.h"
48 #include "record-full.h"
49 #include "inline-frame.h"
50 #include "jit.h"
51 #include "tracepoint.h"
52 #include "skip.h"
53 #include "probe.h"
54 #include "objfiles.h"
55 #include "completer.h"
56 #include "target-descriptions.h"
57 #include "target-dcache.h"
58 #include "terminal.h"
59 #include "solist.h"
60 #include "gdbsupport/event-loop.h"
61 #include "thread-fsm.h"
62 #include "gdbsupport/enum-flags.h"
63 #include "progspace-and-thread.h"
64 #include <optional>
65 #include "arch-utils.h"
66 #include "gdbsupport/scope-exit.h"
67 #include "gdbsupport/forward-scope-exit.h"
68 #include "gdbsupport/gdb_select.h"
69 #include <unordered_map>
70 #include "async-event.h"
71 #include "gdbsupport/selftest.h"
72 #include "scoped-mock-context.h"
73 #include "test-target.h"
74 #include "gdbsupport/common-debug.h"
75 #include "gdbsupport/buildargv.h"
76 #include "extension.h"
77 #include "disasm.h"
78 #include "interps.h"
80 /* Prototypes for local functions */
82 static void sig_print_info (enum gdb_signal);
84 static void sig_print_header (void);
86 static void follow_inferior_reset_breakpoints (void);
88 static bool currently_stepping (struct thread_info *tp);
90 static void insert_hp_step_resume_breakpoint_at_frame (const frame_info_ptr &);
92 static void insert_step_resume_breakpoint_at_caller (const frame_info_ptr &);
94 static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);
96 static bool maybe_software_singlestep (struct gdbarch *gdbarch);
98 static void resume (gdb_signal sig);
100 static void wait_for_inferior (inferior *inf);
102 static void restart_threads (struct thread_info *event_thread,
103 inferior *inf = nullptr);
105 static bool start_step_over (void);
107 static bool step_over_info_valid_p (void);
109 static bool schedlock_applies (struct thread_info *tp);
111 /* Asynchronous signal handler registered as event loop source for
112 when we have pending events ready to be passed to the core. */
113 static struct async_event_handler *infrun_async_inferior_event_token;
115 /* Stores whether infrun_async was previously enabled or disabled.
116 Starts off as -1, indicating "never enabled/disabled". */
117 static int infrun_is_async = -1;
118 static CORE_ADDR update_line_range_start (CORE_ADDR pc,
119 struct execution_control_state *ecs);
121 /* See infrun.h. */
123 void
124 infrun_async (int enable)
126 if (infrun_is_async != enable)
128 infrun_is_async = enable;
130 infrun_debug_printf ("enable=%d", enable);
132 if (enable)
133 mark_async_event_handler (infrun_async_inferior_event_token);
134 else
135 clear_async_event_handler (infrun_async_inferior_event_token);
139 /* See infrun.h. */
141 void
142 mark_infrun_async_event_handler (void)
144 mark_async_event_handler (infrun_async_inferior_event_token);
147 /* When set, stop the 'step' command if we enter a function which has
148 no line number information. The normal behavior is that we step
149 over such functions. */
150 bool step_stop_if_no_debug = false;
151 static void
152 show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
153 struct cmd_list_element *c, const char *value)
155 gdb_printf (file, _("Mode of the step operation is %s.\n"), value);
158 /* proceed and normal_stop use this to notify the user when the
159 inferior stopped in a different thread than it had been running in.
160 It can also be used to find for which thread normal_stop last
161 reported a stop. */
162 static thread_info_ref previous_thread;
164 /* See infrun.h. */
166 void
167 update_previous_thread ()
169 if (inferior_ptid == null_ptid)
170 previous_thread = nullptr;
171 else
172 previous_thread = thread_info_ref::new_reference (inferior_thread ());
175 /* See infrun.h. */
177 thread_info *
178 get_previous_thread ()
180 return previous_thread.get ();
183 /* If set (default for legacy reasons), when following a fork, GDB
184 will detach from one of the fork branches, child or parent.
185 Exactly which branch is detached depends on the 'set follow-fork-mode'
186 setting. */
188 static bool detach_fork = true;
190 bool debug_infrun = false;
191 static void
192 show_debug_infrun (struct ui_file *file, int from_tty,
193 struct cmd_list_element *c, const char *value)
195 gdb_printf (file, _("Inferior debugging is %s.\n"), value);
198 /* Support for disabling address space randomization. */
200 bool disable_randomization = true;
202 static void
203 show_disable_randomization (struct ui_file *file, int from_tty,
204 struct cmd_list_element *c, const char *value)
206 if (target_supports_disable_randomization ())
207 gdb_printf (file,
208 _("Disabling randomization of debuggee's "
209 "virtual address space is %s.\n"),
210 value);
211 else
212 gdb_puts (_("Disabling randomization of debuggee's "
213 "virtual address space is unsupported on\n"
214 "this platform.\n"), file);
217 static void
218 set_disable_randomization (const char *args, int from_tty,
219 struct cmd_list_element *c)
221 if (!target_supports_disable_randomization ())
222 error (_("Disabling randomization of debuggee's "
223 "virtual address space is unsupported on\n"
224 "this platform."));
227 /* User interface for non-stop mode. */
229 bool non_stop = false;
230 static bool non_stop_1 = false;
232 static void
233 set_non_stop (const char *args, int from_tty,
234 struct cmd_list_element *c)
236 if (target_has_execution ())
238 non_stop_1 = non_stop;
239 error (_("Cannot change this setting while the inferior is running."));
242 non_stop = non_stop_1;
245 static void
246 show_non_stop (struct ui_file *file, int from_tty,
247 struct cmd_list_element *c, const char *value)
249 gdb_printf (file,
250 _("Controlling the inferior in non-stop mode is %s.\n"),
251 value);
254 /* "Observer mode" is somewhat like a more extreme version of
255 non-stop, in which all GDB operations that might affect the
256 target's execution have been disabled. */
258 static bool observer_mode = false;
259 static bool observer_mode_1 = false;
261 static void
262 set_observer_mode (const char *args, int from_tty,
263 struct cmd_list_element *c)
265 if (target_has_execution ())
267 observer_mode_1 = observer_mode;
268 error (_("Cannot change this setting while the inferior is running."));
271 observer_mode = observer_mode_1;
273 may_write_registers = !observer_mode;
274 may_write_memory = !observer_mode;
275 may_insert_breakpoints = !observer_mode;
276 may_insert_tracepoints = !observer_mode;
277 /* We can insert fast tracepoints in or out of observer mode,
278 but enable them if we're going into this mode. */
279 if (observer_mode)
280 may_insert_fast_tracepoints = true;
281 may_stop = !observer_mode;
282 update_target_permissions ();
284 /* Going *into* observer mode we must force non-stop, then
285 going out we leave it that way. */
286 if (observer_mode)
288 pagination_enabled = false;
289 non_stop = non_stop_1 = true;
292 if (from_tty)
293 gdb_printf (_("Observer mode is now %s.\n"),
294 (observer_mode ? "on" : "off"));
297 static void
298 show_observer_mode (struct ui_file *file, int from_tty,
299 struct cmd_list_element *c, const char *value)
301 gdb_printf (file, _("Observer mode is %s.\n"), value);
304 /* This updates the value of observer mode based on changes in
305 permissions. Note that we are deliberately ignoring the values of
306 may-write-registers and may-write-memory, since the user may have
307 reason to enable these during a session, for instance to turn on a
308 debugging-related global. */
310 void
311 update_observer_mode (void)
313 bool newval = (!may_insert_breakpoints
314 && !may_insert_tracepoints
315 && may_insert_fast_tracepoints
316 && !may_stop
317 && non_stop);
319 /* Let the user know if things change. */
320 if (newval != observer_mode)
321 gdb_printf (_("Observer mode is now %s.\n"),
322 (newval ? "on" : "off"));
324 observer_mode = observer_mode_1 = newval;
327 /* Tables of how to react to signals; the user sets them. */
329 static unsigned char signal_stop[GDB_SIGNAL_LAST];
330 static unsigned char signal_print[GDB_SIGNAL_LAST];
331 static unsigned char signal_program[GDB_SIGNAL_LAST];
333 /* Table of signals that are registered with "catch signal". A
334 non-zero entry indicates that the signal is caught by some "catch
335 signal" command. */
336 static unsigned char signal_catch[GDB_SIGNAL_LAST];
338 /* Table of signals that the target may silently handle.
339 This is automatically determined from the flags above,
340 and simply cached here. */
341 static unsigned char signal_pass[GDB_SIGNAL_LAST];
343 #define SET_SIGS(nsigs,sigs,flags) \
344 do { \
345 int signum = (nsigs); \
346 while (signum-- > 0) \
347 if ((sigs)[signum]) \
348 (flags)[signum] = 1; \
349 } while (0)
351 #define UNSET_SIGS(nsigs,sigs,flags) \
352 do { \
353 int signum = (nsigs); \
354 while (signum-- > 0) \
355 if ((sigs)[signum]) \
356 (flags)[signum] = 0; \
357 } while (0)
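/* Illustrative sketch, not part of the original file: how the two macros
   above are typically applied to the signal tables.  NSIGS/SIGS stand for a
   caller-built array of per-signal flags (e.g. as a "handle"-style command
   would construct); the function name is hypothetical.  */

static void
example_apply_signal_flags (int nsigs, const unsigned char *sigs)
{
  SET_SIGS (nsigs, sigs, signal_stop);	    /* Stop when these signals arrive.  */
  SET_SIGS (nsigs, sigs, signal_print);	    /* Announce them to the user.  */
  UNSET_SIGS (nsigs, sigs, signal_program); /* Don't pass them to the inferior.  */
}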
359 /* Update the target's copy of SIGNAL_PROGRAM. The sole purpose of
360 this function is to avoid exporting `signal_program'. */
362 void
363 update_signals_program_target (void)
365 target_program_signals (signal_program);
368 /* Value to pass to target_resume() to cause all threads to resume. */
370 #define RESUME_ALL minus_one_ptid
372 /* Command list pointer for the "stop" placeholder. */
374 static struct cmd_list_element *stop_command;
376 /* Nonzero if we want to give control to the user when we're notified
377 of shared library events by the dynamic linker. */
378 int stop_on_solib_events;
380 /* Enable or disable optional shared library event breakpoints
381 as appropriate when the above flag is changed. */
383 static void
384 set_stop_on_solib_events (const char *args,
385 int from_tty, struct cmd_list_element *c)
387 update_solib_breakpoints ();
390 static void
391 show_stop_on_solib_events (struct ui_file *file, int from_tty,
392 struct cmd_list_element *c, const char *value)
394 gdb_printf (file, _("Stopping for shared library events is %s.\n"),
395 value);
398 /* True after stop if current stack frame should be printed. */
400 static bool stop_print_frame;
402 /* This is a cached copy of the target/ptid/waitstatus of the last
403 event returned by target_wait().
404 This information is returned by get_last_target_status(). */
405 static process_stratum_target *target_last_proc_target;
406 static ptid_t target_last_wait_ptid;
407 static struct target_waitstatus target_last_waitstatus;
409 void init_thread_stepping_state (struct thread_info *tss);
411 static const char follow_fork_mode_child[] = "child";
412 static const char follow_fork_mode_parent[] = "parent";
414 static const char *const follow_fork_mode_kind_names[] = {
415 follow_fork_mode_child,
416 follow_fork_mode_parent,
417 nullptr
420 static const char *follow_fork_mode_string = follow_fork_mode_parent;
421 static void
422 show_follow_fork_mode_string (struct ui_file *file, int from_tty,
423 struct cmd_list_element *c, const char *value)
425 gdb_printf (file,
426 _("Debugger response to a program "
427 "call of fork or vfork is \"%s\".\n"),
428 value);
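/* Usage note, not part of the original file: follow_fork_mode_string above
   backs the user-level setting, e.g.

     (gdb) set follow-fork-mode child
     (gdb) set detach-on-fork off

   The corresponding add_setshow command registration is assumed to live in
   this file's _initialize_infrun, as is usual for infrun settings.  */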
432 /* Handle changes to the inferior list based on the type of fork,
433 which process is being followed, and whether the other process
434 should be detached. On entry inferior_ptid must be the ptid of
435 the fork parent. At return inferior_ptid is the ptid of the
436 followed inferior. */
438 static bool
439 follow_fork_inferior (bool follow_child, bool detach_fork)
441 INFRUN_SCOPED_DEBUG_ENTER_EXIT;
443 infrun_debug_printf ("follow_child = %d, detach_fork = %d",
444 follow_child, detach_fork);
446 target_waitkind fork_kind = inferior_thread ()->pending_follow.kind ();
447 gdb_assert (fork_kind == TARGET_WAITKIND_FORKED
448 || fork_kind == TARGET_WAITKIND_VFORKED);
449 bool has_vforked = fork_kind == TARGET_WAITKIND_VFORKED;
450 ptid_t parent_ptid = inferior_ptid;
451 ptid_t child_ptid = inferior_thread ()->pending_follow.child_ptid ();
453 if (has_vforked
454 && !non_stop /* Non-stop always resumes both branches. */
455 && current_ui->prompt_state == PROMPT_BLOCKED
456 && !(follow_child || detach_fork || sched_multi))
458 /* The parent stays blocked inside the vfork syscall until the
459 child execs or exits. If we don't let the child run, then
460 the parent stays blocked. If we're telling the parent to run
461 in the foreground, the user will not be able to ctrl-c to get
462 back the terminal, effectively hanging the debug session. */
463 gdb_printf (gdb_stderr, _("\
464 Can not resume the parent process over vfork in the foreground while\n\
465 holding the child stopped. Try \"set detach-on-fork\" or \
466 \"set schedule-multiple\".\n"));
467 return true;
470 inferior *parent_inf = current_inferior ();
471 inferior *child_inf = nullptr;
473 gdb_assert (parent_inf->thread_waiting_for_vfork_done == nullptr);
475 if (!follow_child)
477 /* Detach new forked process? */
478 if (detach_fork)
480 /* Before detaching from the child, remove all breakpoints
481 from it. If we forked, then this has already been taken
482 care of by infrun.c. If we vforked however, any
483 breakpoint inserted in the parent is visible in the
484 child, even those added while stopped in a vfork
485 catchpoint. This will remove the breakpoints from the
486 parent also, but they'll be reinserted below. */
487 if (has_vforked)
489 /* Keep breakpoints list in sync. */
490 remove_breakpoints_inf (current_inferior ());
493 if (print_inferior_events)
495 /* Ensure that we have a process ptid. */
496 ptid_t process_ptid = ptid_t (child_ptid.pid ());
498 target_terminal::ours_for_output ();
499 gdb_printf (_("[Detaching after %s from child %s]\n"),
500 has_vforked ? "vfork" : "fork",
501 target_pid_to_str (process_ptid).c_str ());
504 else
506 /* Add process to GDB's tables. */
507 child_inf = add_inferior (child_ptid.pid ());
509 child_inf->attach_flag = parent_inf->attach_flag;
510 copy_terminal_info (child_inf, parent_inf);
511 child_inf->set_arch (parent_inf->arch ());
512 child_inf->tdesc_info = parent_inf->tdesc_info;
514 child_inf->symfile_flags = SYMFILE_NO_READ;
516 /* If this is a vfork child, then the address-space is
517 shared with the parent. */
518 if (has_vforked)
520 child_inf->pspace = parent_inf->pspace;
521 child_inf->aspace = parent_inf->aspace;
523 exec_on_vfork (child_inf);
525 /* The parent will be frozen until the child is done
526 with the shared region. Keep track of the
527 parent. */
528 child_inf->vfork_parent = parent_inf;
529 child_inf->pending_detach = false;
530 parent_inf->vfork_child = child_inf;
531 parent_inf->pending_detach = false;
533 else
535 child_inf->pspace = new program_space (new_address_space ());
536 child_inf->aspace = child_inf->pspace->aspace;
537 child_inf->removable = true;
538 clone_program_space (child_inf->pspace, parent_inf->pspace);
542 if (has_vforked)
544 /* If we detached from the child, then we have to be careful
545 to not insert breakpoints in the parent until the child
546 is done with the shared memory region. However, if we're
547 staying attached to the child, then we can and should
548 insert breakpoints, so that we can debug it. A
549 subsequent child exec or exit is enough to know when the
550 child stops using the parent's address space. */
551 parent_inf->thread_waiting_for_vfork_done
552 = detach_fork ? inferior_thread () : nullptr;
553 parent_inf->pspace->breakpoints_not_allowed = detach_fork;
555 infrun_debug_printf
556 ("parent_inf->thread_waiting_for_vfork_done == %s",
557 (parent_inf->thread_waiting_for_vfork_done == nullptr
558 ? "nullptr"
559 : (parent_inf->thread_waiting_for_vfork_done
560 ->ptid.to_string ().c_str ())));
563 else
565 /* Follow the child. */
567 if (print_inferior_events)
569 std::string parent_pid = target_pid_to_str (parent_ptid);
570 std::string child_pid = target_pid_to_str (child_ptid);
572 target_terminal::ours_for_output ();
573 gdb_printf (_("[Attaching after %s %s to child %s]\n"),
574 parent_pid.c_str (),
575 has_vforked ? "vfork" : "fork",
576 child_pid.c_str ());
579 /* Add the new inferior first, so that the target_detach below
580 doesn't unpush the target. */
582 child_inf = add_inferior (child_ptid.pid ());
584 child_inf->attach_flag = parent_inf->attach_flag;
585 copy_terminal_info (child_inf, parent_inf);
586 child_inf->set_arch (parent_inf->arch ());
587 child_inf->tdesc_info = parent_inf->tdesc_info;
589 if (has_vforked)
591 /* If this is a vfork child, then the address-space is shared
592 with the parent. */
593 child_inf->aspace = parent_inf->aspace;
594 child_inf->pspace = parent_inf->pspace;
596 exec_on_vfork (child_inf);
598 else if (detach_fork)
600 /* We follow the child and detach from the parent: move the parent's
601 program space to the child. This simplifies some things, like
602 doing "next" over fork() and landing on the expected line in the
603 child (note, that is broken with "set detach-on-fork off").
605 Before assigning brand new spaces for the parent, remove
606 breakpoints from it: because the new pspace won't match
607 currently inserted locations, the normal detach procedure
608 wouldn't remove them, and we would leave them inserted when
609 detaching. */
610 remove_breakpoints_inf (parent_inf);
612 child_inf->aspace = parent_inf->aspace;
613 child_inf->pspace = parent_inf->pspace;
614 parent_inf->pspace = new program_space (new_address_space ());
615 parent_inf->aspace = parent_inf->pspace->aspace;
616 clone_program_space (parent_inf->pspace, child_inf->pspace);
618 /* The parent inferior is still the current one, so keep things
619 in sync. */
620 set_current_program_space (parent_inf->pspace);
622 else
624 child_inf->pspace = new program_space (new_address_space ());
625 child_inf->aspace = child_inf->pspace->aspace;
626 child_inf->removable = true;
627 child_inf->symfile_flags = SYMFILE_NO_READ;
628 clone_program_space (child_inf->pspace, parent_inf->pspace);
632 gdb_assert (current_inferior () == parent_inf);
634 /* If we are setting up an inferior for the child, target_follow_fork is
635 responsible for pushing the appropriate targets on the new inferior's
636 target stack and adding the initial thread (with ptid CHILD_PTID).
638 If we are not setting up an inferior for the child (because following
639 the parent and detach_fork is true), it is responsible for detaching
640 from CHILD_PTID. */
641 target_follow_fork (child_inf, child_ptid, fork_kind, follow_child,
642 detach_fork);
644 gdb::observers::inferior_forked.notify (parent_inf, child_inf, fork_kind);
646 /* target_follow_fork must leave the parent as the current inferior. If we
647 want to follow the child, we make it the current one below. */
648 gdb_assert (current_inferior () == parent_inf);
650 /* If there is a child inferior, target_follow_fork must have created a thread
651 for it. */
652 if (child_inf != nullptr)
653 gdb_assert (!child_inf->thread_list.empty ());
655 /* Clear the parent thread's pending follow field. Do this before calling
656 target_detach, so that the target can differentiate the two following
657 cases:
659 - We continue past a fork with "follow-fork-mode == child" &&
660 "detach-on-fork on", and therefore detach the parent. In that
661 case the target should not detach the fork child.
662 - We run to a fork catchpoint and the user types "detach". In that
663 case, the target should detach the fork child in addition to the
664 parent.
666 The former case will have pending_follow cleared, the latter will have
667 pending_follow set. */
668 thread_info *parent_thread = parent_inf->find_thread (parent_ptid);
669 gdb_assert (parent_thread != nullptr);
670 parent_thread->pending_follow.set_spurious ();
672 /* Detach the parent if needed. */
673 if (follow_child)
675 /* If we're vforking, we want to hold on to the parent until
676 the child exits or execs. At child exec or exit time we
677 can remove the old breakpoints from the parent and detach
678 or resume debugging it. Otherwise, detach the parent now;
679 we'll want to reuse its program/address spaces, but we
680 can't set them to the child before removing breakpoints
681 from the parent, otherwise, the breakpoints module could
682 decide to remove breakpoints from the wrong process (since
683 they'd be assigned to the same address space). */
685 if (has_vforked)
687 gdb_assert (child_inf->vfork_parent == nullptr);
688 gdb_assert (parent_inf->vfork_child == nullptr);
689 child_inf->vfork_parent = parent_inf;
690 child_inf->pending_detach = false;
691 parent_inf->vfork_child = child_inf;
692 parent_inf->pending_detach = detach_fork;
694 else if (detach_fork)
696 if (print_inferior_events)
698 /* Ensure that we have a process ptid. */
699 ptid_t process_ptid = ptid_t (parent_ptid.pid ());
701 target_terminal::ours_for_output ();
702 gdb_printf (_("[Detaching after fork from "
703 "parent %s]\n"),
704 target_pid_to_str (process_ptid).c_str ());
707 target_detach (parent_inf, 0);
711 /* If we ended up creating a new inferior, call post_create_inferior to inform
712 the various subcomponents. */
713 if (child_inf != nullptr)
715 /* If FOLLOW_CHILD, we leave CHILD_INF as the current inferior
716 (do not restore the parent as the current inferior). */
717 std::optional<scoped_restore_current_thread> maybe_restore;
719 if (!follow_child && !sched_multi)
720 maybe_restore.emplace ();
722 switch_to_thread (*child_inf->threads ().begin ());
723 post_create_inferior (0);
726 return false;
729 /* Set the last target status as TP having stopped. */
731 static void
732 set_last_target_status_stopped (thread_info *tp)
734 set_last_target_status (tp->inf->process_target (), tp->ptid,
735 target_waitstatus {}.set_stopped (GDB_SIGNAL_0));
738 /* Tell the target to follow the fork we're stopped at. Returns true
739 if the inferior should be resumed; false, if the target for some
740 reason decided it's best not to resume. */
742 static bool
743 follow_fork ()
745 INFRUN_SCOPED_DEBUG_ENTER_EXIT;
747 bool follow_child = (follow_fork_mode_string == follow_fork_mode_child);
748 bool should_resume = true;
750 /* Copy user stepping state to the new inferior thread. FIXME: the
751 followed fork child thread should have a copy of most of the
752 parent thread structure's run control related fields, not just these.
753 Initialized to avoid "may be used uninitialized" warnings from gcc. */
754 struct breakpoint *step_resume_breakpoint = nullptr;
755 struct breakpoint *exception_resume_breakpoint = nullptr;
756 CORE_ADDR step_range_start = 0;
757 CORE_ADDR step_range_end = 0;
758 int current_line = 0;
759 symtab *current_symtab = nullptr;
760 struct frame_id step_frame_id = { 0 };
762 if (!non_stop)
764 thread_info *cur_thr = inferior_thread ();
766 ptid_t resume_ptid
767 = user_visible_resume_ptid (cur_thr->control.stepping_command);
768 process_stratum_target *resume_target
769 = user_visible_resume_target (resume_ptid);
771 /* Check if there's a thread that we're about to resume, other
772 than the current, with an unfollowed fork/vfork. If so,
773 switch back to it, to tell the target to follow it (in either
774 direction). We'll afterwards refuse to resume, and inform
775 the user what happened. */
776 for (thread_info *tp : all_non_exited_threads (resume_target,
777 resume_ptid))
779 if (tp == cur_thr)
780 continue;
782 /* follow_fork_inferior clears tp->pending_follow, and below
783 we'll need the value after the follow_fork_inferior
784 call. */
785 target_waitkind kind = tp->pending_follow.kind ();
787 if (kind != TARGET_WAITKIND_SPURIOUS)
789 infrun_debug_printf ("need to follow-fork [%s] first",
790 tp->ptid.to_string ().c_str ());
792 switch_to_thread (tp);
794 /* Set up inferior(s) as specified by the caller, and
795 tell the target to do whatever is necessary to follow
796 either parent or child. */
797 if (follow_child)
799 /* The thread that started the execution command
800 won't exist in the child. Abort the command and
801 immediately stop in this thread, in the child,
802 inside fork. */
803 should_resume = false;
805 else
807 /* Following the parent, so let the thread fork its
808 child freely, it won't influence the current
809 execution command. */
810 if (follow_fork_inferior (follow_child, detach_fork))
812 /* Target refused to follow, or there's some
813 other reason we shouldn't resume. */
814 switch_to_thread (cur_thr);
815 set_last_target_status_stopped (cur_thr);
816 return false;
819 /* If we're following a vfork, then we need to leave
820 the just-forked thread as selected, as we need to
821 solo-resume it to collect the VFORK_DONE event.
822 If we're following a fork, however, switch back
823 to the original thread so that we continue stepping
824 it, etc. */
825 if (kind != TARGET_WAITKIND_VFORKED)
827 gdb_assert (kind == TARGET_WAITKIND_FORKED);
828 switch_to_thread (cur_thr);
832 break;
837 thread_info *tp = inferior_thread ();
839 /* If there were any forks/vforks that were caught and are now to be
840 followed, then do so now. */
841 switch (tp->pending_follow.kind ())
843 case TARGET_WAITKIND_FORKED:
844 case TARGET_WAITKIND_VFORKED:
846 ptid_t parent, child;
847 std::unique_ptr<struct thread_fsm> thread_fsm;
849 /* If the user did a next/step, etc, over a fork call,
850 preserve the stepping state in the fork child. */
851 if (follow_child && should_resume)
853 step_resume_breakpoint = clone_momentary_breakpoint
854 (tp->control.step_resume_breakpoint);
855 step_range_start = tp->control.step_range_start;
856 step_range_end = tp->control.step_range_end;
857 current_line = tp->current_line;
858 current_symtab = tp->current_symtab;
859 step_frame_id = tp->control.step_frame_id;
860 exception_resume_breakpoint
861 = clone_momentary_breakpoint (tp->control.exception_resume_breakpoint);
862 thread_fsm = tp->release_thread_fsm ();
864 /* For now, delete the parent's sr breakpoint, otherwise,
865 parent/child sr breakpoints are considered duplicates,
866 and the child version will not be installed. Remove
867 this when the breakpoints module becomes aware of
868 inferiors and address spaces. */
869 delete_step_resume_breakpoint (tp);
870 tp->control.step_range_start = 0;
871 tp->control.step_range_end = 0;
872 tp->control.step_frame_id = null_frame_id;
873 delete_exception_resume_breakpoint (tp);
876 parent = inferior_ptid;
877 child = tp->pending_follow.child_ptid ();
879 /* If handling a vfork, stop all the inferior's threads, they will be
880 restarted when the vfork shared region is complete. */
881 if (tp->pending_follow.kind () == TARGET_WAITKIND_VFORKED
882 && target_is_non_stop_p ())
883 stop_all_threads ("handling vfork", tp->inf);
885 process_stratum_target *parent_targ = tp->inf->process_target ();
886 /* Set up inferior(s) as specified by the caller, and tell the
887 target to do whatever is necessary to follow either parent
888 or child. */
889 if (follow_fork_inferior (follow_child, detach_fork))
891 /* Target refused to follow, or there's some other reason
892 we shouldn't resume. */
893 should_resume = 0;
895 else
897 /* If we followed the child, switch to it... */
898 if (follow_child)
900 tp = parent_targ->find_thread (child);
901 switch_to_thread (tp);
903 /* ... and preserve the stepping state, in case the
904 user was stepping over the fork call. */
905 if (should_resume)
907 tp->control.step_resume_breakpoint
908 = step_resume_breakpoint;
909 tp->control.step_range_start = step_range_start;
910 tp->control.step_range_end = step_range_end;
911 tp->current_line = current_line;
912 tp->current_symtab = current_symtab;
913 tp->control.step_frame_id = step_frame_id;
914 tp->control.exception_resume_breakpoint
915 = exception_resume_breakpoint;
916 tp->set_thread_fsm (std::move (thread_fsm));
918 else
920 /* If we get here, it was because we're trying to
921 resume from a fork catchpoint, but, the user
922 has switched threads away from the thread that
923 forked. In that case, the resume command
924 issued is most likely not applicable to the
925 child, so just warn, and refuse to resume. */
926 warning (_("Not resuming: switched threads "
927 "before following fork child."));
930 /* Reset breakpoints in the child as appropriate. */
931 follow_inferior_reset_breakpoints ();
935 break;
936 case TARGET_WAITKIND_SPURIOUS:
937 /* Nothing to follow. */
938 break;
939 default:
940 internal_error ("Unexpected pending_follow.kind %d\n",
941 tp->pending_follow.kind ());
942 break;
945 if (!should_resume)
946 set_last_target_status_stopped (tp);
947 return should_resume;
950 static void
951 follow_inferior_reset_breakpoints (void)
953 struct thread_info *tp = inferior_thread ();
955 /* Was there a step_resume breakpoint? (There was if the user
956 did a "next" at the fork() call.) If so, explicitly reset its
957 thread number. Cloned step_resume breakpoints are disabled on
958 creation, so enable it here now that it is associated with the
959 correct thread.
961 step_resumes are a form of bp that are made to be per-thread.
962 Since we created the step_resume bp when the parent process
963 was being debugged, and now are switching to the child process,
964 from the breakpoint package's viewpoint, that's a switch of
965 "threads". We must update the bp's notion of which thread
966 it is for, or it'll be ignored when it triggers. */
968 if (tp->control.step_resume_breakpoint)
970 breakpoint_re_set_thread (tp->control.step_resume_breakpoint);
971 tp->control.step_resume_breakpoint->first_loc ().enabled = 1;
974 /* Treat exception_resume breakpoints like step_resume breakpoints. */
975 if (tp->control.exception_resume_breakpoint)
977 breakpoint_re_set_thread (tp->control.exception_resume_breakpoint);
978 tp->control.exception_resume_breakpoint->first_loc ().enabled = 1;
981 /* Reinsert all breakpoints in the child. The user may have set
982 breakpoints after catching the fork, in which case those
983 were never set in the child, but only in the parent. This makes
984 sure the inserted breakpoints match the breakpoint list. */
986 breakpoint_re_set ();
987 insert_breakpoints ();
990 /* The child has exited or execed: resume THREAD, a thread of the parent,
991 if it was meant to be executing. */
993 static void
994 proceed_after_vfork_done (thread_info *thread)
996 if (thread->state == THREAD_RUNNING
997 && !thread->executing ()
998 && !thread->stop_requested
999 && thread->stop_signal () == GDB_SIGNAL_0)
1001 infrun_debug_printf ("resuming vfork parent thread %s",
1002 thread->ptid.to_string ().c_str ());
1004 switch_to_thread (thread);
1005 clear_proceed_status (0);
1006 proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT);
1010 /* Called whenever we notice an exec or exit event, to handle
1011 detaching or resuming a vfork parent. */
1013 static void
1014 handle_vfork_child_exec_or_exit (int exec)
1016 INFRUN_SCOPED_DEBUG_ENTER_EXIT;
1018 struct inferior *inf = current_inferior ();
1020 if (inf->vfork_parent)
1022 inferior *resume_parent = nullptr;
1024 /* This exec or exit marks the end of the shared memory region
1025 between the parent and the child. Break the bonds. */
1026 inferior *vfork_parent = inf->vfork_parent;
1027 inf->vfork_parent->vfork_child = nullptr;
1028 inf->vfork_parent = nullptr;
1030 /* If the user wanted to detach from the parent, now is the
1031 time. */
1032 if (vfork_parent->pending_detach)
1034 struct program_space *pspace;
1036 /* follow-fork child, detach-on-fork on. */
1038 vfork_parent->pending_detach = false;
1040 scoped_restore_current_pspace_and_thread restore_thread;
1042 /* We're letting loose of the parent. */
1043 thread_info *tp = any_live_thread_of_inferior (vfork_parent);
1044 switch_to_thread (tp);
1046 /* We're about to detach from the parent, which implicitly
1047 removes breakpoints from its address space. There's a
1048 catch here: we want to reuse the spaces for the child,
1049 but, parent/child are still sharing the pspace at this
1050 point, although the exec in reality makes the kernel give
1051 the child a fresh set of new pages. The problem here is
1052 that the breakpoints module, being unaware of this, would
1053 likely choose the child process to write to the parent
1054 address space. Swapping the child temporarily away from
1055 the spaces has the desired effect. Yes, this is "sort
1056 of" a hack. */
1058 pspace = inf->pspace;
1059 inf->pspace = nullptr;
1060 address_space_ref_ptr aspace = std::move (inf->aspace);
1062 if (print_inferior_events)
1064 std::string pidstr
1065 = target_pid_to_str (ptid_t (vfork_parent->pid));
1067 target_terminal::ours_for_output ();
1069 if (exec)
1071 gdb_printf (_("[Detaching vfork parent %s "
1072 "after child exec]\n"), pidstr.c_str ());
1074 else
1076 gdb_printf (_("[Detaching vfork parent %s "
1077 "after child exit]\n"), pidstr.c_str ());
1081 target_detach (vfork_parent, 0);
1083 /* Put it back. */
1084 inf->pspace = pspace;
1085 inf->aspace = aspace;
1087 else if (exec)
1089 /* We're staying attached to the parent, so, really give the
1090 child a new address space. */
1091 inf->pspace = new program_space (maybe_new_address_space ());
1092 inf->aspace = inf->pspace->aspace;
1093 inf->removable = true;
1094 set_current_program_space (inf->pspace);
1096 resume_parent = vfork_parent;
1098 else
1100 /* If this is a vfork child exiting, then the pspace and
1101 aspaces were shared with the parent. Since we're
1102 reporting the process exit, we'll be mourning all that is
1103 found in the address space, and switching to null_ptid,
1104 preparing to start a new inferior. But, since we don't
1105 want to clobber the parent's address/program spaces, we
1106 go ahead and create a new one for this exiting
1107 inferior. */
1109 scoped_restore_current_thread restore_thread;
1111 /* Temporarily switch to the vfork parent, to facilitate ptrace
1112 calls done during maybe_new_address_space. */
1113 switch_to_thread (any_live_thread_of_inferior (vfork_parent));
1114 address_space_ref_ptr aspace = maybe_new_address_space ();
1116 /* Switch back to the vfork child inferior. Switch to no-thread
1117 while running clone_program_space, so that clone_program_space
1118 doesn't try to read the selected frame of a dead process. */
1119 switch_to_inferior_no_thread (inf);
1121 inf->pspace = new program_space (std::move (aspace));
1122 inf->aspace = inf->pspace->aspace;
1123 set_current_program_space (inf->pspace);
1124 inf->removable = true;
1125 inf->symfile_flags = SYMFILE_NO_READ;
1126 clone_program_space (inf->pspace, vfork_parent->pspace);
1128 resume_parent = vfork_parent;
1131 gdb_assert (current_program_space == inf->pspace);
1133 if (non_stop && resume_parent != nullptr)
1135 /* If the user wanted the parent to be running, let it go
1136 free now. */
1137 scoped_restore_current_thread restore_thread;
1139 infrun_debug_printf ("resuming vfork parent process %d",
1140 resume_parent->pid);
1142 for (thread_info *thread : resume_parent->threads ())
1143 proceed_after_vfork_done (thread);
1148 /* Handle TARGET_WAITKIND_VFORK_DONE. */
1150 static void
1151 handle_vfork_done (thread_info *event_thread)
1153 INFRUN_SCOPED_DEBUG_ENTER_EXIT;
1155 /* We only care about this event if inferior::thread_waiting_for_vfork_done is
1156 set, that is if we are waiting for a vfork child not under our control
1157 (because we detached it) to exec or exit.
1159 If an inferior has vforked and we are debugging the child, we don't use
1160 the vfork-done event to get notified about the end of the shared address
1161 space window. We rely instead on the child's exec or exit event, and the
1162 inferior::vfork_{parent,child} fields are used instead. See
1163 handle_vfork_child_exec_or_exit for that. */
1164 if (event_thread->inf->thread_waiting_for_vfork_done == nullptr)
1166 infrun_debug_printf ("not waiting for a vfork-done event");
1167 return;
1170 /* We stopped all threads (other than the vforking thread) of the inferior in
1171 follow_fork and kept them stopped until now. It should therefore not be
1172 possible for another thread to have reported a vfork during that window.
1173 If THREAD_WAITING_FOR_VFORK_DONE is set, it has to be the same thread whose
1174 vfork-done we are handling right now. */
1175 gdb_assert (event_thread->inf->thread_waiting_for_vfork_done == event_thread);
1177 event_thread->inf->thread_waiting_for_vfork_done = nullptr;
1178 event_thread->inf->pspace->breakpoints_not_allowed = 0;
1180 /* On non-stop targets, we stopped all the inferior's threads in follow_fork,
1181 resume them now. On all-stop targets, everything that needs to be resumed
1182 will be when we resume the event thread. */
1183 if (target_is_non_stop_p ())
1185 /* restart_threads and start_step_over may change the current thread, make
1186 sure we leave the event thread as the current thread. */
1187 scoped_restore_current_thread restore_thread;
1189 insert_breakpoints ();
1190 start_step_over ();
1192 if (!step_over_info_valid_p ())
1193 restart_threads (event_thread, event_thread->inf);
1197 /* Enum strings for "set|show follow-exec-mode". */
1199 static const char follow_exec_mode_new[] = "new";
1200 static const char follow_exec_mode_same[] = "same";
1201 static const char *const follow_exec_mode_names[] =
1203 follow_exec_mode_new,
1204 follow_exec_mode_same,
1205 nullptr,
1208 static const char *follow_exec_mode_string = follow_exec_mode_same;
1209 static void
1210 show_follow_exec_mode_string (struct ui_file *file, int from_tty,
1211 struct cmd_list_element *c, const char *value)
1213 gdb_printf (file, _("Follow exec mode is \"%s\".\n"), value);
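/* Usage note, not part of the original file: follow_exec_mode_string above
   is driven by "set follow-exec-mode new|same"; "new" keeps the pre-exec
   inferior and its program space around, while "same" (the default here)
   continues in the exec'ing inferior.  */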
1216 /* EXEC_FILE_TARGET is assumed to be non-NULL. */
1218 static void
1219 follow_exec (ptid_t ptid, const char *exec_file_target)
1221 int pid = ptid.pid ();
1222 ptid_t process_ptid;
1224 /* Switch terminal for any messages produced e.g. by
1225 breakpoint_re_set. */
1226 target_terminal::ours_for_output ();
1228 /* This is an exec event that we actually wish to pay attention to.
1229 Refresh our symbol table to the newly exec'd program, remove any
1230 momentary bp's, etc.
1232 If there are breakpoints, they aren't really inserted now,
1233 since the exec() transformed our inferior into a fresh set
1234 of instructions.
1236 We want to preserve symbolic breakpoints on the list, since
1237 we have hopes that they can be reset after the new a.out's
1238 symbol table is read.
1240 However, any "raw" breakpoints must be removed from the list
1241 (e.g., the solib bp's), since their address is probably invalid
1242 now.
1244 And, we DON'T want to call delete_breakpoints() here, since
1245 that may write the bp's "shadow contents" (the instruction
1246 value that was overwritten with a TRAP instruction). Since
1247 we now have a new a.out, those shadow contents aren't valid. */
1249 mark_breakpoints_out (current_program_space);
1251 /* The target reports the exec event to the main thread, even if
1252 some other thread does the exec, and even if the main thread was
1253 stopped or already gone. We may still have non-leader threads of
1254 the process on our list. E.g., on targets that don't have thread
1255 exit events (like remote) and nothing forces an update of the
1256 thread list up to here. When debugging remotely, it's best to
1257 avoid extra traffic, when possible, so avoid syncing the thread
1258 list with the target, and instead go ahead and delete all threads
1259 of the process but the one that reported the event. Note this must
1260 be done before calling update_breakpoints_after_exec, as
1261 otherwise clearing the threads' resources would reference stale
1262 thread breakpoints -- it may have been one of these threads that
1263 stepped across the exec. We could just clear their stepping
1264 states, but as long as we're iterating, might as well delete
1265 them. Deleting them now rather than at the next user-visible
1266 stop provides a nicer sequence of events for user and MI
1267 notifications. */
1268 for (thread_info *th : all_threads_safe ())
1269 if (th->ptid.pid () == pid && th->ptid != ptid)
1270 delete_thread (th);
1272 /* We also need to clear any left over stale state for the
1273 leader/event thread. E.g., if there was any step-resume
1274 breakpoint or similar, it's gone now. We cannot truly
1275 step-to-next statement through an exec(). */
1276 thread_info *th = inferior_thread ();
1277 th->control.step_resume_breakpoint = nullptr;
1278 th->control.exception_resume_breakpoint = nullptr;
1279 th->control.single_step_breakpoints = nullptr;
1280 th->control.step_range_start = 0;
1281 th->control.step_range_end = 0;
1283 /* The user may have had the main thread held stopped in the
1284 previous image (e.g., schedlock on, or non-stop). Release
1285 it now. */
1286 th->stop_requested = 0;
1288 update_breakpoints_after_exec ();
1290 /* What is this a.out's name? */
1291 process_ptid = ptid_t (pid);
1292 gdb_printf (_("%s is executing new program: %s\n"),
1293 target_pid_to_str (process_ptid).c_str (),
1294 exec_file_target);
1296 /* We've followed the inferior through an exec. Therefore, the
1297 inferior has essentially been killed & reborn. */
1299 breakpoint_init_inferior (current_inferior (), inf_execd);
1301 gdb::unique_xmalloc_ptr<char> exec_file_host
1302 = exec_file_find (exec_file_target, nullptr);
1304 /* If we were unable to map the executable target pathname onto a host
1305 pathname, tell the user that. Otherwise GDB's subsequent behavior
1306 is confusing. Maybe it would even be better to stop at this point
1307 so that the user can specify a file manually before continuing. */
1308 if (exec_file_host == nullptr)
1309 warning (_("Could not load symbols for executable %s.\n"
1310 "Do you need \"set sysroot\"?"),
1311 exec_file_target);
1313 /* Reset the shared library package. This ensures that we get a
1314 shlib event when the child reaches "_start", at which point the
1315 dld will have had a chance to initialize the child. */
1316 /* Also, loading a symbol file below may trigger symbol lookups, and
1317 we don't want those to be satisfied by the libraries of the
1318 previous incarnation of this process. */
1319 no_shared_libraries (nullptr, 0);
1321 inferior *execing_inferior = current_inferior ();
1322 inferior *following_inferior;
1324 if (follow_exec_mode_string == follow_exec_mode_new)
1326 /* The user wants to keep the old inferior and program spaces
1327 around. Create a new fresh one, and switch to it. */
1329 /* Do exit processing for the original inferior before setting the new
1330 inferior's pid. Having two inferiors with the same pid would confuse
1331 find_inferior_p(t)id. Transfer the terminal state and info from the
1332 old to the new inferior. */
1333 following_inferior = add_inferior_with_spaces ();
1335 swap_terminal_info (following_inferior, execing_inferior);
1336 exit_inferior (execing_inferior);
1338 following_inferior->pid = pid;
1340 else
1342 /* follow-exec-mode is "same", we continue execution in the execing
1343 inferior. */
1344 following_inferior = execing_inferior;
1346 /* The old description may no longer be fit for the new image.
1347 E.g, a 64-bit process exec'ed a 32-bit process. Clear the
1348 old description; we'll read a new one below. No need to do
1349 this on "follow-exec-mode new", as the old inferior stays
1350 around (its description is later cleared/refetched on
1351 restart). */
1352 target_clear_description ();
1355 target_follow_exec (following_inferior, ptid, exec_file_target);
1357 gdb_assert (current_inferior () == following_inferior);
1358 gdb_assert (current_program_space == following_inferior->pspace);
1360 /* Attempt to open the exec file. SYMFILE_DEFER_BP_RESET is used
1361 because the proper displacement for a PIE (Position Independent
1362 Executable) main symbol file will only be computed by
1363 solib_create_inferior_hook below. breakpoint_re_set would fail
1364 to insert the breakpoints with the zero displacement. */
1365 try_open_exec_file (exec_file_host.get (), following_inferior,
1366 SYMFILE_DEFER_BP_RESET);
1368 /* If the target can specify a description, read it. Must do this
1369 after flipping to the new executable (because the target supplied
1370 description must be compatible with the executable's
1371 architecture, and the old executable may e.g., be 32-bit, while
1372 the new one 64-bit), and before anything involving memory or
1373 registers. */
1374 target_find_description ();
1376 gdb::observers::inferior_execd.notify (execing_inferior, following_inferior);
1378 breakpoint_re_set ();
1380 /* Reinsert all breakpoints. (Those which were symbolic have
1381 been reset to the proper address in the new a.out, thanks
1382 to symbol_file_command...). */
1383 insert_breakpoints ();
1385 /* The next resume of this inferior should bring it to the shlib
1386 startup breakpoints. (If the user had also set bp's on
1387 "main" from the old (parent) process, then they'll auto-
1388 matically get reset there in the new process.). */
1391 /* The chain of threads that need to do a step-over operation to get
1392 past e.g., a breakpoint. What technique is used to step over the
1393 breakpoint/watchpoint does not matter -- all threads end up in the
1394 same queue, to maintain rough temporal order of execution, in order
1395 to avoid starvation, otherwise, we could e.g., find ourselves
1396 constantly stepping the same couple of threads past their breakpoints
1397 over and over, if the single-step finishes fast enough. */
1398 thread_step_over_list global_thread_step_over_list;
1400 /* Bit flags indicating what the thread needs to step over. */
1402 enum step_over_what_flag
1404 /* Step over a breakpoint. */
1405 STEP_OVER_BREAKPOINT = 1,
1407 /* Step past a non-continuable watchpoint, in order to let the
1408 instruction execute so we can evaluate the watchpoint
1409 expression. */
1410 STEP_OVER_WATCHPOINT = 2
1412 DEF_ENUM_FLAGS_TYPE (enum step_over_what_flag, step_over_what);
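/* Illustration, not part of the original file: step_over_what is a flag
   set, so a thread that must get past both a breakpoint and a
   non-continuable watchpoint would carry
   (STEP_OVER_BREAKPOINT | STEP_OVER_WATCHPOINT) in one such value.  */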
1414 /* Info about an instruction that is being stepped over. */
1416 struct step_over_info
1418 /* If we're stepping past a breakpoint, this is the address space
1419 and address of the instruction the breakpoint is set at. We'll
1420 skip inserting all breakpoints here. Valid iff ASPACE is
1421 non-NULL. */
1422 const address_space *aspace = nullptr;
1423 CORE_ADDR address = 0;
1425 /* The instruction being stepped over triggers a nonsteppable
1426 watchpoint. If true, we'll skip inserting watchpoints. */
1427 int nonsteppable_watchpoint_p = 0;
1429 /* The thread's global number. */
1430 int thread = -1;
1433 /* The step-over info of the location that is being stepped over.
1435 Note that with async/breakpoint always-inserted mode, a user might
1436 set a new breakpoint/watchpoint/etc. exactly while a breakpoint is
1437 being stepped over. As setting a new breakpoint inserts all
1438 breakpoints, we need to make sure the breakpoint being stepped over
1439 isn't inserted then. We do that by only clearing the step-over
1440 info when the step-over is actually finished (or aborted).
1442 Presently GDB can only step over one breakpoint at any given time.
1443 Given that threads which can't run code in the same address space as the
1444 breakpoint's can't really miss the breakpoint, GDB could be taught
1445 to step over at most one breakpoint per address space (so this info
1446 could move to the address space object if/when GDB is extended).
1447 The set of breakpoints being stepped over will normally be much
1448 smaller than the set of all breakpoints, so a flag in the
1449 breakpoint location structure would be wasteful. A separate list
1450 also saves complexity and run-time, as otherwise we'd have to go
1451 through all breakpoint locations clearing their flag whenever we
1452 start a new sequence. Similar considerations weigh against storing
1453 this info in the thread object. Plus, not all step overs actually
1454 have breakpoint locations -- e.g., stepping past a single-step
1455 breakpoint, or stepping to complete a non-continuable
1456 watchpoint. */
1457 static struct step_over_info step_over_info;
1459 /* Record the address of the breakpoint/instruction we're currently
1460 stepping over.
1461 N.B. We record the aspace and address now, instead of say just the thread,
1462 because when we need the info later the thread may be running. */
1464 static void
1465 set_step_over_info (const address_space *aspace, CORE_ADDR address,
1466 int nonsteppable_watchpoint_p,
1467 int thread)
1469 step_over_info.aspace = aspace;
1470 step_over_info.address = address;
1471 step_over_info.nonsteppable_watchpoint_p = nonsteppable_watchpoint_p;
1472 step_over_info.thread = thread;
1475 /* Called when we're no longer stepping over a breakpoint / an
1476 instruction, so all breakpoints are free to be (re)inserted. */
1478 static void
1479 clear_step_over_info (void)
1481 infrun_debug_printf ("clearing step over info");
1482 step_over_info.aspace = nullptr;
1483 step_over_info.address = 0;
1484 step_over_info.nonsteppable_watchpoint_p = 0;
1485 step_over_info.thread = -1;
1488 /* See infrun.h. */
1490 int
1491 stepping_past_instruction_at (struct address_space *aspace,
1492 CORE_ADDR address)
1494 return (step_over_info.aspace != nullptr
1495 && breakpoint_address_match (aspace, address,
1496 step_over_info.aspace,
1497 step_over_info.address));
1500 /* See infrun.h. */
1502 int
1503 thread_is_stepping_over_breakpoint (int thread)
1505 return (step_over_info.thread != -1
1506 && thread == step_over_info.thread);
1509 /* See infrun.h. */
1511 int
1512 stepping_past_nonsteppable_watchpoint (void)
1514 return step_over_info.nonsteppable_watchpoint_p;
1517 /* Returns true if step-over info is valid. */
1519 static bool
1520 step_over_info_valid_p (void)
1522 return (step_over_info.aspace != nullptr
1523 || stepping_past_nonsteppable_watchpoint ());
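/* Usage sketch, not part of the original file: breakpoint insertion paths
   are expected to consult the queries above so that the location currently
   being stepped over is skipped, roughly:

     if (stepping_past_instruction_at (loc_aspace, loc_address))
       continue;   // leave it uninserted until the step-over finishes

   where loc_aspace/loc_address are hypothetical placeholders for a
   breakpoint location's address space and address.  */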
1527 /* Displaced stepping. */
1529 /* In non-stop debugging mode, we must take special care to manage
1530 breakpoints properly; in particular, the traditional strategy for
1531 stepping a thread past a breakpoint it has hit is unsuitable.
1532 'Displaced stepping' is a tactic for stepping one thread past a
1533 breakpoint it has hit while ensuring that other threads running
1534 concurrently will hit the breakpoint as they should.
1536 The traditional way to step a thread T off a breakpoint in a
1537 multi-threaded program in all-stop mode is as follows:
1539 a0) Initially, all threads are stopped, and breakpoints are not
1540 inserted.
1541 a1) We single-step T, leaving breakpoints uninserted.
1542 a2) We insert breakpoints, and resume all threads.
1544 In non-stop debugging, however, this strategy is unsuitable: we
1545 don't want to have to stop all threads in the system in order to
1546 continue or step T past a breakpoint. Instead, we use displaced
1547 stepping:
1549 n0) Initially, T is stopped, other threads are running, and
1550 breakpoints are inserted.
1551 n1) We copy the instruction "under" the breakpoint to a separate
1552 location, outside the main code stream, making any adjustments
1553 to the instruction, register, and memory state as directed by
1554 T's architecture.
1555 n2) We single-step T over the instruction at its new location.
1556 n3) We adjust the resulting register and memory state as directed
1557 by T's architecture. This includes resetting T's PC to point
1558 back into the main instruction stream.
1559 n4) We resume T.
1561 This approach depends on the following gdbarch methods:
1563 - gdbarch_max_insn_length and gdbarch_displaced_step_location
1564 indicate where to copy the instruction, and how much space must
1565 be reserved there. We use these in step n1.
1567 - gdbarch_displaced_step_copy_insn copies an instruction to a new
1568 address, and makes any necessary adjustments to the instruction,
1569 register contents, and memory. We use this in step n1.
1571 - gdbarch_displaced_step_fixup adjusts registers and memory after
1572 we have successfully single-stepped the instruction, to yield the
1573 same effect the instruction would have had if we had executed it
1574 at its original address. We use this in step n3.
1576 The gdbarch_displaced_step_copy_insn and
1577 gdbarch_displaced_step_fixup functions must be written so that
1578 copying an instruction with gdbarch_displaced_step_copy_insn,
1579 single-stepping across the copied instruction, and then applying
1580 gdbarch_displaced_step_fixup should have the same effects on the
1581 thread's memory and registers as stepping the instruction in place
1582 would have. Exactly which responsibilities fall to the copy and
1583 which fall to the fixup is up to the author of those functions.
1585 See the comments in gdbarch.sh for details.
1587 Note that displaced stepping and software single-step cannot
1588 currently be used in combination, although with some care I think
1589 they could be made to. Software single-step works by placing
1590 breakpoints on all possible subsequent instructions; if the
1591 displaced instruction is a PC-relative jump, those breakpoints
1592 could fall in very strange places --- on pages that aren't
1593 executable, or at addresses that are not proper instruction
1594 boundaries. (We do generally let other threads run while we wait
1595 to hit the software single-step breakpoint, and they might
1596 encounter such a corrupted instruction.) One way to work around
1597 this would be to have gdbarch_displaced_step_copy_insn fully
1598 simulate the effect of PC-relative instructions (and return NULL)
1599 on architectures that use software single-stepping.
1601 In non-stop mode, we can have independent and simultaneous step
1602 requests, so more than one thread may need to simultaneously step
1603 over a breakpoint. The current implementation assumes there is
1604 only one scratch space per process. In this case, we have to
1605 serialize access to the scratch space. If thread A wants to step
1606 over a breakpoint, but we are currently waiting for some other
1607 thread to complete a displaced step, we leave thread A stopped and
1608 place it in the displaced_step_request_queue. Whenever a displaced
1609 step finishes, we pick the next thread in the queue and start a new
1610 displaced step operation on it. See displaced_step_prepare and
1611 displaced_step_finish for details. */
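/* Condensed sketch of steps n0..n4 above, not part of the original file;
   the helper names are hypothetical stand-ins, not the real gdbarch API:

     copy = scratch_buffer_for (thread);		// n1: pick scratch space
     copy_and_adjust_insn (orig_pc, copy, thread);	// n1: copy the insn out of line
     set_pc (thread, copy);
     single_step (thread);				// n2: step the copied insn
     fixup_regs_and_memory (thread, orig_pc, copy);	// n3: incl. restoring the PC
     resume (thread);					// n4: continue as usual
*/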
1613 /* Return true if THREAD is doing a displaced step. */
1615 static bool
1616 displaced_step_in_progress_thread (thread_info *thread)
1618 gdb_assert (thread != nullptr);
1620 return thread->displaced_step_state.in_progress ();
1623 /* Return true if INF has a thread doing a displaced step. */
1625 static bool
1626 displaced_step_in_progress (inferior *inf)
1628 return inf->displaced_step_state.in_progress_count > 0;
1631 /* Return true if any thread is doing a displaced step. */
1633 static bool
1634 displaced_step_in_progress_any_thread ()
1636 for (inferior *inf : all_non_exited_inferiors ())
1638 if (displaced_step_in_progress (inf))
1639 return true;
1642 return false;
1645 static void
1646 infrun_inferior_exit (struct inferior *inf)
1648 inf->displaced_step_state.reset ();
1649 inf->thread_waiting_for_vfork_done = nullptr;
1652 static void
1653 infrun_inferior_execd (inferior *exec_inf, inferior *follow_inf)
1655 /* If some threads were doing a displaced step in this inferior at the
1656 moment of the exec, they no longer exist. Even if the exec'ing thread
1657 was doing a displaced step, we don't want to do any fixup nor restore
1658 displaced stepping buffer bytes. */
1659 follow_inf->displaced_step_state.reset ();
1661 for (thread_info *thread : follow_inf->threads ())
1662 thread->displaced_step_state.reset ();
1664 /* Since an in-line step is done with everything else stopped, if there was
1665 one in progress at the time of the exec, it must have been the exec'ing
1666 thread. */
1667 clear_step_over_info ();
1669 follow_inf->thread_waiting_for_vfork_done = nullptr;
1672 /* If ON, and the architecture supports it, GDB will use displaced
1673 stepping to step over breakpoints. If OFF, or if the architecture
1674 doesn't support it, GDB will instead use the traditional
1675 hold-and-step approach. If AUTO (which is the default), GDB will
1676 decide which technique to use to step over breakpoints depending on
1677 whether the target works in a non-stop way (see use_displaced_stepping). */
1679 static enum auto_boolean can_use_displaced_stepping = AUTO_BOOLEAN_AUTO;
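/* For illustration, the user-level command controlling this knob looks
   roughly like the following (the "currently ..." part of the output
   depends on whether the target is non-stop; see
   show_can_use_displaced_stepping below):

     (gdb) set displaced-stepping auto
     (gdb) show displaced-stepping
     Debugger's willingness to use displaced stepping to step over
     breakpoints is auto (currently on).  */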
1681 static void
1682 show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
1683 struct cmd_list_element *c,
1684 const char *value)
1686 if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO)
1687 gdb_printf (file,
1688 _("Debugger's willingness to use displaced stepping "
1689 "to step over breakpoints is %s (currently %s).\n"),
1690 value, target_is_non_stop_p () ? "on" : "off");
1691 else
1692 gdb_printf (file,
1693 _("Debugger's willingness to use displaced stepping "
1694 "to step over breakpoints is %s.\n"), value);
1697 /* Return true if the gdbarch implements the required methods to use
1698 displaced stepping. */
1700 static bool
1701 gdbarch_supports_displaced_stepping (gdbarch *arch)
1703 /* Only check for the presence of `prepare`. The gdbarch verification ensures
1704 that if `prepare` is provided, so is `finish`. */
1705 return gdbarch_displaced_step_prepare_p (arch);
1708 /* Return true if displaced stepping can/should be used to step
1709 over breakpoints of thread TP. */
1711 static bool
1712 use_displaced_stepping (thread_info *tp)
1714 /* If the user disabled it explicitly, don't use displaced stepping. */
1715 if (can_use_displaced_stepping == AUTO_BOOLEAN_FALSE)
1716 return false;
1718 /* If "auto", only use displaced stepping if the target operates in a non-stop
1719 way. */
1720 if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO
1721 && !target_is_non_stop_p ())
1722 return false;
1724 gdbarch *gdbarch = get_thread_regcache (tp)->arch ();
1726 /* If the architecture doesn't implement displaced stepping, don't use
1727 it. */
1728 if (!gdbarch_supports_displaced_stepping (gdbarch))
1729 return false;
1731 /* If recording, don't use displaced stepping. */
1732 if (find_record_target () != nullptr)
1733 return false;
1735 /* If displaced stepping failed before for this inferior, don't bother trying
1736 again. */
1737 if (tp->inf->displaced_step_state.failed_before)
1738 return false;
1740 return true;
1743 /* Simple function wrapper around displaced_step_thread_state::reset. */
1745 static void
1746 displaced_step_reset (displaced_step_thread_state *displaced)
1748 displaced->reset ();
1751 /* A cleanup that wraps displaced_step_reset. We use this instead of, say,
1752 SCOPE_EXIT, because it needs to be discardable with "cleanup.release ()". */
1754 using displaced_step_reset_cleanup = FORWARD_SCOPE_EXIT (displaced_step_reset);
1756 /* Prepare to single-step, using displaced stepping.
1758 Note that we cannot use displaced stepping when we have a signal to
1759 deliver. If we have a signal to deliver and an instruction to step
1760 over, then after the step, there will be no indication from the
1761 target whether the thread entered a signal handler or ignored the
1762 signal and stepped over the instruction successfully --- both cases
1763 result in a simple SIGTRAP. In the first case we mustn't do a
1764 fixup, and in the second case we must --- but we can't tell which.
1765 Comments in the code for 'random signals' in handle_inferior_event
1766 explain how we handle this case instead.
1768 Returns DISPLACED_STEP_PREPARE_STATUS_OK if preparing was successful -- this
1769 thread is going to be stepped now; DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE
1770 if displaced stepping this thread got queued; or
1771 DISPLACED_STEP_PREPARE_STATUS_CANT if this instruction can't be displaced
1772 stepped. */
1774 static displaced_step_prepare_status
1775 displaced_step_prepare_throw (thread_info *tp)
1777 regcache *regcache = get_thread_regcache (tp);
1778 struct gdbarch *gdbarch = regcache->arch ();
1779 displaced_step_thread_state &disp_step_thread_state
1780 = tp->displaced_step_state;
1782 /* We should never reach this function if the architecture does not
1783 support displaced stepping. */
1784 gdb_assert (gdbarch_supports_displaced_stepping (gdbarch));
1786 /* Nor if the thread isn't meant to step over a breakpoint. */
1787 gdb_assert (tp->control.trap_expected);
1789 /* Disable range stepping while executing in the scratch pad. We
1790 want a single-step even if executing the displaced instruction in
1791 the scratch buffer lands within the stepping range (e.g., a
1792 jump/branch). */
1793 tp->control.may_range_step = 0;
1795 /* We are about to start a displaced step for this thread. If one is already
1796 in progress, something's wrong. */
1797 gdb_assert (!disp_step_thread_state.in_progress ());
1799 if (tp->inf->displaced_step_state.unavailable)
1801 /* The gdbarch tells us that a prepare attempt is likely to return
1802 unavailable, so don't bother asking. */
1804 displaced_debug_printf ("deferring step of %s",
1805 tp->ptid.to_string ().c_str ());
1807 global_thread_step_over_chain_enqueue (tp);
1808 return DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE;
1811 displaced_debug_printf ("displaced-stepping %s now",
1812 tp->ptid.to_string ().c_str ());
1814 scoped_restore_current_thread restore_thread;
1816 switch_to_thread (tp);
1818 CORE_ADDR original_pc = regcache_read_pc (regcache);
1819 CORE_ADDR displaced_pc;
1821 /* Display the instruction we are going to displaced step. */
1822 if (debug_displaced)
1824 string_file tmp_stream;
1825 int dislen = gdb_print_insn (gdbarch, original_pc, &tmp_stream,
1826 nullptr);
1828 if (dislen > 0)
1830 gdb::byte_vector insn_buf (dislen);
1831 read_memory (original_pc, insn_buf.data (), insn_buf.size ());
1833 std::string insn_bytes = bytes_to_string (insn_buf);
1835 displaced_debug_printf ("original insn %s: %s \t %s",
1836 paddress (gdbarch, original_pc),
1837 insn_bytes.c_str (),
1838 tmp_stream.string ().c_str ());
1840 else
1841 displaced_debug_printf ("original insn %s: invalid length: %d",
1842 paddress (gdbarch, original_pc), dislen);
1845 displaced_step_prepare_status status
1846 = gdbarch_displaced_step_prepare (gdbarch, tp, displaced_pc);
1848 if (status == DISPLACED_STEP_PREPARE_STATUS_CANT)
1850 displaced_debug_printf ("failed to prepare (%s)",
1851 tp->ptid.to_string ().c_str ());
1853 return DISPLACED_STEP_PREPARE_STATUS_CANT;
1855 else if (status == DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE)
1857 /* Not enough displaced stepping resources available, defer this
1858 request by placing it in the queue. */
1860 displaced_debug_printf ("not enough resources available, "
1861 "deferring step of %s",
1862 tp->ptid.to_string ().c_str ());
1864 global_thread_step_over_chain_enqueue (tp);
1866 return DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE;
1869 gdb_assert (status == DISPLACED_STEP_PREPARE_STATUS_OK);
1871 /* Save the information we need to fix things up if the step
1872 succeeds. */
1873 disp_step_thread_state.set (gdbarch);
1875 tp->inf->displaced_step_state.in_progress_count++;
1877 displaced_debug_printf ("prepared successfully thread=%s, "
1878 "original_pc=%s, displaced_pc=%s",
1879 tp->ptid.to_string ().c_str (),
1880 paddress (gdbarch, original_pc),
1881 paddress (gdbarch, displaced_pc));
1883 /* Display the new displaced instruction(s). */
1884 if (debug_displaced)
1886 string_file tmp_stream;
1887 CORE_ADDR addr = displaced_pc;
1889 /* If displaced stepping is going to use h/w single step then we know
1890 that the replacement instruction can only be a single instruction,
1891 in that case set the end address at the next byte.
1893 Otherwise the displaced stepping copy instruction routine could
1894 have generated multiple instructions, and all we know is that they
1895 must fit within the LEN bytes of the buffer. */
1896 CORE_ADDR end
1897 = addr + (gdbarch_displaced_step_hw_singlestep (gdbarch)
1898 ? 1 : gdbarch_displaced_step_buffer_length (gdbarch));
1900 while (addr < end)
1902 int dislen = gdb_print_insn (gdbarch, addr, &tmp_stream, nullptr);
1903 if (dislen <= 0)
1905 displaced_debug_printf
1906 ("replacement insn %s: invalid length: %d",
1907 paddress (gdbarch, addr), dislen);
1908 break;
1911 gdb::byte_vector insn_buf (dislen);
1912 read_memory (addr, insn_buf.data (), insn_buf.size ());
1914 std::string insn_bytes = bytes_to_string (insn_buf);
1915 std::string insn_str = tmp_stream.release ();
1916 displaced_debug_printf ("replacement insn %s: %s \t %s",
1917 paddress (gdbarch, addr),
1918 insn_bytes.c_str (),
1919 insn_str.c_str ());
1920 addr += dislen;
1924 return DISPLACED_STEP_PREPARE_STATUS_OK;
1927 /* Wrapper for displaced_step_prepare_throw that disables further
1928 attempts at displaced stepping if we get a memory error. */
1930 static displaced_step_prepare_status
1931 displaced_step_prepare (thread_info *thread)
1933 displaced_step_prepare_status status
1934 = DISPLACED_STEP_PREPARE_STATUS_CANT;
1938 status = displaced_step_prepare_throw (thread);
1940 catch (const gdb_exception_error &ex)
1942 if (ex.error != MEMORY_ERROR
1943 && ex.error != NOT_SUPPORTED_ERROR)
1944 throw;
1946 infrun_debug_printf ("caught exception, disabling displaced stepping: %s",
1947 ex.what ());
1949 /* Be verbose if "set displaced-stepping" is "on", silent if
1950 "auto". */
1951 if (can_use_displaced_stepping == AUTO_BOOLEAN_TRUE)
1953 warning (_("disabling displaced stepping: %s"),
1954 ex.what ());
1957 /* Disable further displaced stepping attempts. */
1958 thread->inf->displaced_step_state.failed_before = 1;
1961 return status;
1964 /* True if any thread of TARGET that matches RESUME_PTID requires
1965 target_thread_events enabled. This assumes TARGET does not support
1966 target thread options. */
1968 static bool
1969 any_thread_needs_target_thread_events (process_stratum_target *target,
1970 ptid_t resume_ptid)
1972 for (thread_info *tp : all_non_exited_threads (target, resume_ptid))
1973 if (displaced_step_in_progress_thread (tp)
1974 || schedlock_applies (tp)
1975 || tp->thread_fsm () != nullptr)
1976 return true;
1977 return false;
1980 /* Maybe disable thread-{cloned,created,exited} event reporting after
1981 a step-over (either in-line or displaced) finishes. */
1983 static void
1984 update_thread_events_after_step_over (thread_info *event_thread,
1985 const target_waitstatus &event_status)
1987 if (schedlock_applies (event_thread))
1989 /* If scheduler-locking applies, continue reporting
1990 thread-created/thread-cloned events. */
1991 return;
1993 else if (target_supports_set_thread_options (0))
1995 /* We can control per-thread options. Disable events for the
1996 event thread, unless the thread is gone. */
1997 if (event_status.kind () != TARGET_WAITKIND_THREAD_EXITED)
1998 event_thread->set_thread_options (0);
2000 else
2002 /* We can only control the target-wide target_thread_events
2003 setting. Disable it, but only if other threads in the target
2004 don't need it enabled. */
2005 process_stratum_target *target = event_thread->inf->process_target ();
2006 if (!any_thread_needs_target_thread_events (target, minus_one_ptid))
2007 target_thread_events (false);
2011 /* If we displaced stepped an instruction successfully, adjust registers and
2012 memory to yield the same effect the instruction would have had if we had
2013 executed it at its original address, and return
2014 DISPLACED_STEP_FINISH_STATUS_OK. If the instruction didn't complete,
2015 relocate the PC and return DISPLACED_STEP_FINISH_STATUS_NOT_EXECUTED.
2017 If the thread wasn't displaced stepping, return
2018 DISPLACED_STEP_FINISH_STATUS_OK as well. */
2020 static displaced_step_finish_status
2021 displaced_step_finish (thread_info *event_thread,
2022 const target_waitstatus &event_status)
2024 /* Check whether the parent is displaced stepping. */
2025 inferior *parent_inf = event_thread->inf;
2027 /* If this was a fork/vfork/clone, this event indicates that the
2028 displaced stepping of the syscall instruction has been done, so
2029 we perform cleanup for the parent here. Note that this
2030 operation also cleans up the child for vfork, because their pages
2031 are shared. */
2033 /* If this is a fork (child gets its own address space copy) and
2034 some displaced step buffers were in use at the time of the fork,
2035 restore the displaced step buffer bytes in the child process.
2037 Architectures which support displaced stepping and fork events
2038 must supply an implementation of
2039 gdbarch_displaced_step_restore_all_in_ptid. This is not enforced
2040 during gdbarch validation to support architectures which support
2041 displaced stepping but not forks. */
2042 if (event_status.kind () == TARGET_WAITKIND_FORKED)
2044 struct regcache *parent_regcache = get_thread_regcache (event_thread);
2045 struct gdbarch *gdbarch = parent_regcache->arch ();
2047 if (gdbarch_supports_displaced_stepping (gdbarch))
2048 gdbarch_displaced_step_restore_all_in_ptid
2049 (gdbarch, parent_inf, event_status.child_ptid ());
2052 displaced_step_thread_state *displaced = &event_thread->displaced_step_state;
2054 /* Was this thread performing a displaced step? */
2055 if (!displaced->in_progress ())
2056 return DISPLACED_STEP_FINISH_STATUS_OK;
2058 update_thread_events_after_step_over (event_thread, event_status);
2060 gdb_assert (event_thread->inf->displaced_step_state.in_progress_count > 0);
2061 event_thread->inf->displaced_step_state.in_progress_count--;
2063 /* Fixup may need to read memory/registers. Switch to the thread
2064 that we're fixing up. Also, target_stopped_by_watchpoint checks
2065 the current thread, and displaced_step_restore performs ptid-dependent
2066 memory accesses using current_inferior(). */
2067 switch_to_thread (event_thread);
2069 displaced_step_reset_cleanup cleanup (displaced);
2071 /* Do the fixup, and release the resources acquired to do the displaced
2072 step. */
2073 displaced_step_finish_status status
2074 = gdbarch_displaced_step_finish (displaced->get_original_gdbarch (),
2075 event_thread, event_status);
2077 if (event_status.kind () == TARGET_WAITKIND_FORKED
2078 || event_status.kind () == TARGET_WAITKIND_VFORKED
2079 || event_status.kind () == TARGET_WAITKIND_THREAD_CLONED)
2081 /* Since the vfork/fork/clone syscall instruction was executed
2082 in the scratchpad, the child's PC is also within the
2083 scratchpad. Set the child's PC to the parent's PC value,
2084 which has already been fixed up. Note: we use the parent's
2085 aspace here, although we're touching the child, because the
2086 child hasn't been added to the inferior list yet at this
2087 point. */
2089 struct regcache *parent_regcache = get_thread_regcache (event_thread);
2090 struct gdbarch *gdbarch = parent_regcache->arch ();
2091 struct regcache *child_regcache
2092 = get_thread_arch_regcache (parent_inf, event_status.child_ptid (),
2093 gdbarch);
2094 /* Read PC value of parent. */
2095 CORE_ADDR parent_pc = regcache_read_pc (parent_regcache);
2097 displaced_debug_printf ("write child pc from %s to %s",
2098 paddress (gdbarch,
2099 regcache_read_pc (child_regcache)),
2100 paddress (gdbarch, parent_pc));
2102 regcache_write_pc (child_regcache, parent_pc);
2105 return status;
2108 /* Data to be passed around while handling an event. This data is
2109 discarded between events. */
2110 struct execution_control_state
2112 explicit execution_control_state (thread_info *thr = nullptr)
2113 : ptid (thr == nullptr ? null_ptid : thr->ptid),
2114 event_thread (thr)
2118 process_stratum_target *target = nullptr;
2119 ptid_t ptid;
2120 /* The thread that got the event, if this was a thread event; NULL
2121 otherwise. */
2122 struct thread_info *event_thread;
2124 struct target_waitstatus ws;
2125 int stop_func_filled_in = 0;
2126 CORE_ADDR stop_func_alt_start = 0;
2127 CORE_ADDR stop_func_start = 0;
2128 CORE_ADDR stop_func_end = 0;
2129 const char *stop_func_name = nullptr;
2130 int wait_some_more = 0;
2132 /* True if the event thread hit the single-step breakpoint of
2133 another thread. Thus the event doesn't cause a stop, the thread
2134 needs to be single-stepped past the single-step breakpoint before
2135 we can switch back to the original stepping thread. */
2136 int hit_singlestep_breakpoint = 0;
2139 static void keep_going_pass_signal (struct execution_control_state *ecs);
2140 static void prepare_to_wait (struct execution_control_state *ecs);
2141 static bool keep_going_stepped_thread (struct thread_info *tp);
2142 static step_over_what thread_still_needs_step_over (struct thread_info *tp);
2144 /* Are there any pending step-over requests? If so, run all we can
2145 now and return true. Otherwise, return false. */
2147 static bool
2148 start_step_over (void)
2150 INFRUN_SCOPED_DEBUG_ENTER_EXIT;
2152 /* Don't start a new step-over if we already have an in-line
2153 step-over operation ongoing. */
2154 if (step_over_info_valid_p ())
2155 return false;
2157 /* Steal the global thread step over chain. As we try to initiate displaced
2158 steps, threads will be enqueued in the global chain if no buffers are
2159 available. If we iterated on the global chain directly, we might iterate
2160 indefinitely. */
2161 thread_step_over_list threads_to_step
2162 = std::move (global_thread_step_over_list);
2164 infrun_debug_printf ("stealing global queue of threads to step, length = %d",
2165 thread_step_over_chain_length (threads_to_step));
2167 bool started = false;
2169 /* On scope exit (whatever the reason, return or exception), if there are
2170 threads left in the THREADS_TO_STEP chain, put back these threads in the
2171 global list. */
2172 SCOPE_EXIT
2174 if (threads_to_step.empty ())
2175 infrun_debug_printf ("step-over queue now empty");
2176 else
2178 infrun_debug_printf ("putting back %d threads to step in global queue",
2179 thread_step_over_chain_length (threads_to_step));
2181 global_thread_step_over_chain_enqueue_chain
2182 (std::move (threads_to_step));
2186 thread_step_over_list_safe_range range
2187 = make_thread_step_over_list_safe_range (threads_to_step);
2189 for (thread_info *tp : range)
2191 step_over_what step_what;
2192 int must_be_in_line;
2194 gdb_assert (!tp->stop_requested);
2196 if (tp->inf->displaced_step_state.unavailable)
2198 /* The arch told us to not even try preparing another displaced step
2199 for this inferior. Just leave the thread in THREADS_TO_STEP, it
2200 will get moved to the global chain on scope exit. */
2201 continue;
2204 if (tp->inf->thread_waiting_for_vfork_done != nullptr)
2206 /* When we stop all threads, handling a vfork, any thread in the step
2207 over chain remains there. A user could also try to continue a
2208 thread stopped at a breakpoint while another thread is waiting for
2209 a vfork-done event. In any case, we don't want to start a step
2210 over right now. */
2211 continue;
2214 /* Remove thread from the THREADS_TO_STEP chain. If anything goes wrong
2215 while we try to prepare the displaced step, we don't add it back to
2216 the global step over chain. This is to avoid a thread staying in the
2217 step over chain indefinitely if something goes wrong when resuming it.
2218 If the error is intermittent and it still needs a step over, it will
2219 get enqueued again when we try to resume it normally. */
2220 threads_to_step.erase (threads_to_step.iterator_to (*tp));
2222 step_what = thread_still_needs_step_over (tp);
2223 must_be_in_line = ((step_what & STEP_OVER_WATCHPOINT)
2224 || ((step_what & STEP_OVER_BREAKPOINT)
2225 && !use_displaced_stepping (tp)));
2227 /* We currently stop all threads of all processes to step-over
2228 in-line. If we need to start a new in-line step-over, let
2229 any pending displaced steps finish first. */
2230 if (must_be_in_line && displaced_step_in_progress_any_thread ())
2232 global_thread_step_over_chain_enqueue (tp);
2233 continue;
2236 if (tp->control.trap_expected
2237 || tp->resumed ()
2238 || tp->executing ())
2240 internal_error ("[%s] has inconsistent state: "
2241 "trap_expected=%d, resumed=%d, executing=%d\n",
2242 tp->ptid.to_string ().c_str (),
2243 tp->control.trap_expected,
2244 tp->resumed (),
2245 tp->executing ());
2248 infrun_debug_printf ("resuming [%s] for step-over",
2249 tp->ptid.to_string ().c_str ());
2251 /* keep_going_pass_signal skips the step-over if the breakpoint
2252 is no longer inserted. In all-stop, we want to keep looking
2253 for a thread that needs a step-over instead of resuming TP,
2254 because we wouldn't be able to resume anything else until the
2255 target stops again. In non-stop, the resume always resumes
2256 only TP, so it's OK to let the thread resume freely. */
2257 if (!target_is_non_stop_p () && !step_what)
2258 continue;
2260 switch_to_thread (tp);
2261 execution_control_state ecs (tp);
2262 keep_going_pass_signal (&ecs);
2264 if (!ecs.wait_some_more)
2265 error (_("Command aborted."));
2267 /* If the thread's step over could not be initiated because no buffers
2268 were available, it was re-added to the global step over chain. */
2269 if (tp->resumed ())
2271 infrun_debug_printf ("[%s] was resumed.",
2272 tp->ptid.to_string ().c_str ());
2273 gdb_assert (!thread_is_in_step_over_chain (tp));
2275 else
2277 infrun_debug_printf ("[%s] was NOT resumed.",
2278 tp->ptid.to_string ().c_str ());
2279 gdb_assert (thread_is_in_step_over_chain (tp));
2282 /* If we started a new in-line step-over, we're done. */
2283 if (step_over_info_valid_p ())
2285 gdb_assert (tp->control.trap_expected);
2286 started = true;
2287 break;
2290 if (!target_is_non_stop_p ())
2292 /* On all-stop, shouldn't have resumed unless we needed a
2293 step over. */
2294 gdb_assert (tp->control.trap_expected
2295 || tp->step_after_step_resume_breakpoint);
2297 /* With remote targets (at least), in all-stop, we can't
2298 issue any further remote commands until the program stops
2299 again. */
2300 started = true;
2301 break;
2304 /* Either the thread no longer needed a step-over, or a new
2305 displaced stepping sequence started. Even in the latter
2306 case, continue looking. Maybe we can also start another
2307 displaced step on a thread of another process. */
2310 return started;
2313 /* Update global variables holding ptids to hold NEW_PTID if they were
2314 holding OLD_PTID. */
2315 static void
2316 infrun_thread_ptid_changed (process_stratum_target *target,
2317 ptid_t old_ptid, ptid_t new_ptid)
2319 if (inferior_ptid == old_ptid
2320 && current_inferior ()->process_target () == target)
2321 inferior_ptid = new_ptid;
2326 static const char schedlock_off[] = "off";
2327 static const char schedlock_on[] = "on";
2328 static const char schedlock_step[] = "step";
2329 static const char schedlock_replay[] = "replay";
2330 static const char *const scheduler_enums[] = {
2331 schedlock_off,
2332 schedlock_on,
2333 schedlock_step,
2334 schedlock_replay,
2335 nullptr
2337 static const char *scheduler_mode = schedlock_replay;
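/* For illustration, a typical interaction with this setting (via the
   "set/show scheduler-locking" commands) looks roughly like:

     (gdb) set scheduler-locking step
     (gdb) show scheduler-locking
     Mode for locking scheduler during execution is "step".  */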
2338 static void
2339 show_scheduler_mode (struct ui_file *file, int from_tty,
2340 struct cmd_list_element *c, const char *value)
2342 gdb_printf (file,
2343 _("Mode for locking scheduler "
2344 "during execution is \"%s\".\n"),
2345 value);
2348 static void
2349 set_schedlock_func (const char *args, int from_tty, struct cmd_list_element *c)
2351 if (!target_can_lock_scheduler ())
2353 scheduler_mode = schedlock_off;
2354 error (_("Target '%s' cannot support this command."),
2355 target_shortname ());
2359 /* True if execution commands resume all threads of all processes by
2360 default; otherwise, resume only threads of the current inferior
2361 process. */
2362 bool sched_multi = false;
2364 /* Try to set up for software single stepping. Return true if target_resume()
2365 should use hardware single step.
2367 GDBARCH the current gdbarch. */
2369 static bool
2370 maybe_software_singlestep (struct gdbarch *gdbarch)
2372 bool hw_step = true;
2374 if (execution_direction == EXEC_FORWARD
2375 && gdbarch_software_single_step_p (gdbarch))
2376 hw_step = !insert_single_step_breakpoints (gdbarch);
2378 return hw_step;
2381 /* See infrun.h. */
2383 ptid_t
2384 user_visible_resume_ptid (int step)
2386 ptid_t resume_ptid;
2388 if (non_stop)
2390 /* With non-stop mode on, threads are always handled
2391 individually. */
2392 resume_ptid = inferior_ptid;
2394 else if ((scheduler_mode == schedlock_on)
2395 || (scheduler_mode == schedlock_step && step))
2397 /* User-settable 'scheduler' mode requires solo thread
2398 resume. */
2399 resume_ptid = inferior_ptid;
2401 else if ((scheduler_mode == schedlock_replay)
2402 && target_record_will_replay (minus_one_ptid, execution_direction))
2404 /* User-settable 'scheduler' mode requires solo thread resume in replay
2405 mode. */
2406 resume_ptid = inferior_ptid;
2408 else if (inferior_ptid != null_ptid
2409 && inferior_thread ()->control.in_cond_eval)
2411 /* The inferior thread is evaluating a BP condition. Other threads
2412 might be stopped or running and we do not want to change their
2413 state, thus, resume only the current thread. */
2414 resume_ptid = inferior_ptid;
2416 else if (!sched_multi && target_supports_multi_process ())
2418 /* Resume all threads of the current process (and none of other
2419 processes). */
2420 resume_ptid = ptid_t (inferior_ptid.pid ());
2422 else
2424 /* Resume all threads of all processes. */
2425 resume_ptid = RESUME_ALL;
2428 return resume_ptid;
2431 /* See infrun.h. */
2433 process_stratum_target *
2434 user_visible_resume_target (ptid_t resume_ptid)
2436 return (resume_ptid == minus_one_ptid && sched_multi
2437 ? nullptr
2438 : current_inferior ()->process_target ());
2441 /* Find a thread from the inferiors that we'll resume that is waiting
2442 for a vfork-done event. */
2444 static thread_info *
2445 find_thread_waiting_for_vfork_done ()
2447 gdb_assert (!target_is_non_stop_p ());
2449 if (sched_multi)
2451 for (inferior *inf : all_non_exited_inferiors ())
2452 if (inf->thread_waiting_for_vfork_done != nullptr)
2453 return inf->thread_waiting_for_vfork_done;
2455 else
2457 inferior *cur_inf = current_inferior ();
2458 if (cur_inf->thread_waiting_for_vfork_done != nullptr)
2459 return cur_inf->thread_waiting_for_vfork_done;
2461 return nullptr;
2464 /* Return a ptid representing the set of threads that we will resume,
2465 in the perspective of the target, assuming run control handling
2466 does not require leaving some threads stopped (e.g., stepping past
2467 breakpoint). USER_STEP indicates whether we're about to start the
2468 target for a stepping command. */
2470 static ptid_t
2471 internal_resume_ptid (int user_step)
2473 /* In non-stop, we always control threads individually. Note that
2474 the target may always work in non-stop mode even with "set
2475 non-stop off", in which case user_visible_resume_ptid could
2476 return a wildcard ptid. */
2477 if (target_is_non_stop_p ())
2478 return inferior_ptid;
2480 /* The rest of the function assumes non-stop==off and
2481 target-non-stop==off.
2483 If a thread is waiting for a vfork-done event, it means breakpoints are out
2484 for this inferior (well, program space in fact). We don't want to resume
2485 any thread other than the one waiting for vfork done, otherwise these other
2486 threads could miss breakpoints. So if a thread in the resumption set is
2487 waiting for a vfork-done event, resume only that thread.
2489 The resumption set width depends on whether schedule-multiple is on or off.
2491 Note that if the target_resume interface was more flexible, we could be
2492 smarter here when schedule-multiple is on. For example, imagine 3
2493 inferiors with 2 threads each (1.1, 1.2, 2.1, 2.2, 3.1 and 3.2). Threads
2494 2.1 and 3.2 are both waiting for a vfork-done event. Then we could ask the
2495 target(s) to resume:
2497 - All threads of inferior 1
2498 - Thread 2.1
2499 - Thread 3.2
2501 Since we don't have that flexibility (we can only pass one ptid), just
2502 resume the first thread waiting for a vfork-done event we find (e.g. thread
2503 2.1). */
2504 thread_info *thr = find_thread_waiting_for_vfork_done ();
2505 if (thr != nullptr)
2507 /* If we have a thread that is waiting for a vfork-done event,
2508 then we should have switched to it earlier. Calling
2509 target_resume with thread scope is only possible when the
2510 current thread matches the thread scope. */
2511 gdb_assert (thr->ptid == inferior_ptid);
2512 gdb_assert (thr->inf->process_target ()
2513 == inferior_thread ()->inf->process_target ());
2514 return thr->ptid;
2517 return user_visible_resume_ptid (user_step);
2520 /* Wrapper for target_resume, that handles infrun-specific
2521 bookkeeping. */
2523 static void
2524 do_target_resume (ptid_t resume_ptid, bool step, enum gdb_signal sig)
2526 struct thread_info *tp = inferior_thread ();
2528 gdb_assert (!tp->stop_requested);
2530 /* Install inferior's terminal modes. */
2531 target_terminal::inferior ();
2533 /* Avoid confusing the next resume, if the next stop/resume
2534 happens to apply to another thread. */
2535 tp->set_stop_signal (GDB_SIGNAL_0);
2537 /* Advise target which signals may be handled silently.
2539 If we have removed breakpoints because we are stepping over one
2540 in-line (in any thread), we need to receive all signals to avoid
2541 accidentally skipping a breakpoint during execution of a signal
2542 handler.
2544 Likewise if we're displaced stepping, otherwise a trap for a
2545 breakpoint in a signal handler might be confused with the
2546 displaced step finishing. We don't make displaced_step_finish
2547 distinguish the cases instead, because:
2549 - a backtrace while stopped in the signal handler would show the
2550 scratch pad as frame older than the signal handler, instead of
2551 the real mainline code.
2553 - when the thread is later resumed, the signal handler would
2554 return to the scratch pad area, which would no longer be
2555 valid. */
2556 if (step_over_info_valid_p ()
2557 || displaced_step_in_progress (tp->inf))
2558 target_pass_signals ({});
2559 else
2560 target_pass_signals (signal_pass);
2562 /* Request that the target report thread-{created,cloned,exited}
2563 events in the following situations:
2565 - If we are performing an in-line step-over-breakpoint, then we
2566 will remove a breakpoint from the target and only run the
2567 current thread. We don't want any new thread (spawned by the
2568 step) to start running, as it might miss the breakpoint. We
2569 need to clear the step-over state if the stepped thread exits,
2570 so we also enable thread-exit events.
2572 - If we are stepping over a breakpoint out of line (displaced
2573 stepping) then we won't remove a breakpoint from the target,
2574 but, if the step spawns a new clone thread, then we will need
2575 to fixup the $pc address in the clone child too, so we need it
2576 to start stopped. We need to release the displaced stepping
2577 buffer if the stepped thread exits, so we also enable
2578 thread-exit events.
2580 - If scheduler-locking applies, threads that the current thread
2581 spawns should remain halted. It's not strictly necessary to
2582 enable thread-exit events in this case, but it doesn't hurt.
2584 if (step_over_info_valid_p ()
2585 || displaced_step_in_progress_thread (tp)
2586 || schedlock_applies (tp))
2588 gdb_thread_options options
2589 = GDB_THREAD_OPTION_CLONE | GDB_THREAD_OPTION_EXIT;
2590 if (target_supports_set_thread_options (options))
2591 tp->set_thread_options (options);
2592 else
2593 target_thread_events (true);
2595 else if (tp->thread_fsm () != nullptr)
2597 gdb_thread_options options = GDB_THREAD_OPTION_EXIT;
2598 if (target_supports_set_thread_options (options))
2599 tp->set_thread_options (options);
2600 else
2601 target_thread_events (true);
2603 else
2605 if (target_supports_set_thread_options (0))
2606 tp->set_thread_options (0);
2607 else
2609 process_stratum_target *resume_target = tp->inf->process_target ();
2610 if (!any_thread_needs_target_thread_events (resume_target,
2611 resume_ptid))
2612 target_thread_events (false);
2616 /* If we're resuming more than one thread simultaneously, then any
2617 thread other than the leader is being set to run free. Clear any
2618 previous thread option for those threads. */
2619 if (resume_ptid != inferior_ptid && target_supports_set_thread_options (0))
2621 process_stratum_target *resume_target = tp->inf->process_target ();
2622 for (thread_info *thr_iter : all_non_exited_threads (resume_target,
2623 resume_ptid))
2624 if (thr_iter != tp)
2625 thr_iter->set_thread_options (0);
2628 infrun_debug_printf ("resume_ptid=%s, step=%d, sig=%s",
2629 resume_ptid.to_string ().c_str (),
2630 step, gdb_signal_to_symbol_string (sig));
2632 target_resume (resume_ptid, step, sig);
2635 /* Resume the inferior. SIG is the signal to give the inferior
2636 (GDB_SIGNAL_0 for none). Note: don't call this directly; instead
2637 call 'resume', which handles exceptions. */
2639 static void
2640 resume_1 (enum gdb_signal sig)
2642 struct thread_info *tp = inferior_thread ();
2643 regcache *regcache = get_thread_regcache (tp);
2644 struct gdbarch *gdbarch = regcache->arch ();
2645 ptid_t resume_ptid;
2646 /* This represents the user's step vs continue request. When
2647 deciding whether "set scheduler-locking step" applies, it's the
2648 user's intention that counts. */
2649 const int user_step = tp->control.stepping_command;
2650 /* This represents what we'll actually request the target to do.
2651 This can decay from a step to a continue, if e.g., we need to
2652 implement single-stepping with breakpoints (software
2653 single-step). */
2654 bool step;
2656 gdb_assert (!tp->stop_requested);
2657 gdb_assert (!thread_is_in_step_over_chain (tp));
2659 if (tp->has_pending_waitstatus ())
2661 infrun_debug_printf
2662 ("thread %s has pending wait "
2663 "status %s (currently_stepping=%d).",
2664 tp->ptid.to_string ().c_str (),
2665 tp->pending_waitstatus ().to_string ().c_str (),
2666 currently_stepping (tp));
2668 tp->inf->process_target ()->threads_executing = true;
2669 tp->set_resumed (true);
2671 /* FIXME: What should we do if we are supposed to resume this
2672 thread with a signal? Maybe we should maintain a queue of
2673 pending signals to deliver. */
2674 if (sig != GDB_SIGNAL_0)
2676 warning (_("Couldn't deliver signal %s to %s."),
2677 gdb_signal_to_name (sig),
2678 tp->ptid.to_string ().c_str ());
2681 tp->set_stop_signal (GDB_SIGNAL_0);
2683 if (target_can_async_p ())
2685 target_async (true);
2686 /* Tell the event loop we have an event to process. */
2687 mark_async_event_handler (infrun_async_inferior_event_token);
2689 return;
2692 tp->stepped_breakpoint = 0;
2694 /* Depends on stepped_breakpoint. */
2695 step = currently_stepping (tp);
2697 if (current_inferior ()->thread_waiting_for_vfork_done != nullptr)
2699 /* Don't try to single-step a vfork parent that is waiting for
2700 the child to get out of the shared memory region (by exec'ing
2701 or exiting). This is particularly important on software
2702 single-step archs, as the child process would trip on the
2703 software single step breakpoint inserted for the parent
2704 process. Since the parent will not actually execute any
2705 instruction until the child is out of the shared region (such
2706 are vfork's semantics), it is safe to simply continue it.
2707 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
2708 the parent, and tell it to `keep_going', which automatically
2709 re-sets it stepping. */
2710 infrun_debug_printf ("resume : clear step");
2711 step = false;
2714 CORE_ADDR pc = regcache_read_pc (regcache);
2716 infrun_debug_printf ("step=%d, signal=%s, trap_expected=%d, "
2717 "current thread [%s] at %s",
2718 step, gdb_signal_to_symbol_string (sig),
2719 tp->control.trap_expected,
2720 inferior_ptid.to_string ().c_str (),
2721 paddress (gdbarch, pc));
2723 const address_space *aspace = tp->inf->aspace.get ();
2725 /* Normally, by the time we reach `resume', the breakpoints are either
2726 removed or inserted, as appropriate. The exception is if we're sitting
2727 at a permanent breakpoint; we need to step over it, but permanent
2728 breakpoints can't be removed. So we have to test for it here. */
2729 if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
2731 if (sig != GDB_SIGNAL_0)
2733 /* We have a signal to pass to the inferior. The resume
2734 may, or may not take us to the signal handler. If this
2735 is a step, we'll need to stop in the signal handler, if
2736 there's one, (if the target supports stepping into
2737 handlers), or in the next mainline instruction, if
2738 there's no handler. If this is a continue, we need to be
2739 sure to run the handler with all breakpoints inserted.
2740 In all cases, set a breakpoint at the current address
2741 (where the handler returns to), and once that breakpoint
2742 is hit, resume skipping the permanent breakpoint. If
2743 that breakpoint isn't hit, then we've stepped into the
2744 signal handler (or hit some other event). We'll delete
2745 the step-resume breakpoint then. */
2747 infrun_debug_printf ("resume: skipping permanent breakpoint, "
2748 "deliver signal first");
2750 clear_step_over_info ();
2751 tp->control.trap_expected = 0;
2753 if (tp->control.step_resume_breakpoint == nullptr)
2755 /* Set a "high-priority" step-resume, as we don't want
2756 user breakpoints at PC to trigger (again) when this
2757 hits. */
2758 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
2759 gdb_assert (tp->control.step_resume_breakpoint->first_loc ()
2760 .permanent);
2762 tp->step_after_step_resume_breakpoint = step;
2765 insert_breakpoints ();
2767 else
2769 /* There's no signal to pass, we can go ahead and skip the
2770 permanent breakpoint manually. */
2771 infrun_debug_printf ("skipping permanent breakpoint");
2772 gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
2773 /* Update pc to reflect the new address from which we will
2774 execute instructions. */
2775 pc = regcache_read_pc (regcache);
2777 if (step)
2779 /* We've already advanced the PC, so the stepping part
2780 is done. Now we need to arrange for a trap to be
2781 reported to handle_inferior_event. Set a breakpoint
2782 at the current PC, and run to it. Don't update
2783 prev_pc, because if we end in
2784 switch_back_to_stepped_thread, we want the "expected
2785 thread advanced also" branch to be taken. IOW, we
2786 don't want this thread to step further from PC
2787 (overstep). */
2788 gdb_assert (!step_over_info_valid_p ());
2789 insert_single_step_breakpoint (gdbarch, aspace, pc);
2790 insert_breakpoints ();
2792 resume_ptid = internal_resume_ptid (user_step);
2793 do_target_resume (resume_ptid, false, GDB_SIGNAL_0);
2794 tp->set_resumed (true);
2795 return;
2800 /* If we have a breakpoint to step over, make sure to do a single
2801 step only. Same if we have software watchpoints. */
2802 if (tp->control.trap_expected || bpstat_should_step ())
2803 tp->control.may_range_step = 0;
2805 /* If displaced stepping is enabled, step over breakpoints by executing a
2806 copy of the instruction at a different address.
2808 We can't use displaced stepping when we have a signal to deliver;
2809 the comments for displaced_step_prepare explain why. The
2810 comments in the handle_inferior event for dealing with 'random
2811 signals' explain what we do instead.
2813 We can't use displaced stepping when we are waiting for a vfork_done
2814 event; displaced stepping breaks the vfork child in the same way a
2815 software single-step breakpoint would. */
2816 if (tp->control.trap_expected
2817 && use_displaced_stepping (tp)
2818 && !step_over_info_valid_p ()
2819 && sig == GDB_SIGNAL_0
2820 && current_inferior ()->thread_waiting_for_vfork_done == nullptr)
2822 displaced_step_prepare_status prepare_status
2823 = displaced_step_prepare (tp);
2825 if (prepare_status == DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE)
2827 infrun_debug_printf ("Got placed in step-over queue");
2829 tp->control.trap_expected = 0;
2830 return;
2832 else if (prepare_status == DISPLACED_STEP_PREPARE_STATUS_CANT)
2834 /* Fall back to stepping over the breakpoint in-line. */
2836 if (target_is_non_stop_p ())
2837 stop_all_threads ("displaced stepping falling back on inline stepping");
2839 set_step_over_info (aspace, regcache_read_pc (regcache), 0,
2840 tp->global_num);
2842 step = maybe_software_singlestep (gdbarch);
2844 insert_breakpoints ();
2846 else if (prepare_status == DISPLACED_STEP_PREPARE_STATUS_OK)
2848 /* Update pc to reflect the new address from which we will
2849 execute instructions due to displaced stepping. */
2850 pc = regcache_read_pc (get_thread_regcache (tp));
2852 step = gdbarch_displaced_step_hw_singlestep (gdbarch);
2854 else
2855 gdb_assert_not_reached ("Invalid displaced_step_prepare_status "
2856 "value.");
2859 /* Do we need to do it the hard way, w/temp breakpoints? */
2860 else if (step)
2861 step = maybe_software_singlestep (gdbarch);
2863 /* Currently, our software single-step implementation leads to different
2864 results than hardware single-stepping in one situation: when stepping
2865 into delivering a signal which has an associated signal handler,
2866 hardware single-step will stop at the first instruction of the handler,
2867 while software single-step will simply skip execution of the handler.
2869 For now, this difference in behavior is accepted since there is no
2870 easy way to actually implement single-stepping into a signal handler
2871 without kernel support.
2873 However, there is one scenario where this difference leads to follow-on
2874 problems: if we're stepping off a breakpoint by removing all breakpoints
2875 and then single-stepping. In this case, the software single-step
2876 behavior means that even if there is a *breakpoint* in the signal
2877 handler, GDB still would not stop.
2879 Fortunately, we can at least fix this particular issue. We detect
2880 here the case where we are about to deliver a signal while software
2881 single-stepping with breakpoints removed. In this situation, we
2882 revert the decisions to remove all breakpoints and insert single-
2883 step breakpoints, and instead we install a step-resume breakpoint
2884 at the current address, deliver the signal without stepping, and
2885 once we arrive back at the step-resume breakpoint, actually step
2886 over the breakpoint we originally wanted to step over. */
2887 if (thread_has_single_step_breakpoints_set (tp)
2888 && sig != GDB_SIGNAL_0
2889 && step_over_info_valid_p ())
2891 /* If we have nested signals or a pending signal is delivered
2892 immediately after a handler returns, we might already have
2893 a step-resume breakpoint set on the earlier handler. We cannot
2894 set another step-resume breakpoint; just continue on until the
2895 original breakpoint is hit. */
2896 if (tp->control.step_resume_breakpoint == nullptr)
2898 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
2899 tp->step_after_step_resume_breakpoint = 1;
2902 delete_single_step_breakpoints (tp);
2904 clear_step_over_info ();
2905 tp->control.trap_expected = 0;
2907 insert_breakpoints ();
2910 /* If STEP is set, it's a request to use hardware stepping
2911 facilities. But in that case, we should never
2912 use singlestep breakpoint. */
2913 gdb_assert (!(thread_has_single_step_breakpoints_set (tp) && step));
2915 /* Decide the set of threads to ask the target to resume. */
2916 if (tp->control.trap_expected)
2918 /* We're allowing a thread to run past a breakpoint it has
2919 hit, either by single-stepping the thread with the breakpoint
2920 removed, or by displaced stepping, with the breakpoint inserted.
2921 In the former case, we need to single-step only this thread,
2922 and keep others stopped, as they can miss this breakpoint if
2923 allowed to run. That's not really a problem for displaced
2924 stepping, but, we still keep other threads stopped, in case
2925 another thread is also stopped for a breakpoint waiting for
2926 its turn in the displaced stepping queue. */
2927 resume_ptid = inferior_ptid;
2929 else
2930 resume_ptid = internal_resume_ptid (user_step);
2932 if (execution_direction != EXEC_REVERSE
2933 && step && breakpoint_inserted_here_p (aspace, pc))
2935 /* There are two cases where we currently need to step a
2936 breakpoint instruction when we have a signal to deliver:
2938 - See handle_signal_stop where we handle random signals that
2939 could take us out of the stepping range. Normally, in
2940 that case we end up continuing (instead of stepping) over the
2941 signal handler with a breakpoint at PC, but there are cases
2942 where we should _always_ single-step, even if we have a
2943 step-resume breakpoint, like when a software watchpoint is
2944 set. Assuming single-stepping and delivering a signal at the
2945 same time would take us to the signal handler, then we could
2946 have removed the breakpoint at PC to step over it. However,
2947 some hardware step targets (e.g., Mac OS) can't step
2948 into signal handlers, and for those, we need to leave the
2949 breakpoint at PC inserted, as otherwise if the handler
2950 recurses and executes PC again, it'll miss the breakpoint.
2951 So we leave the breakpoint inserted anyway, but we need to
2952 record that we tried to step a breakpoint instruction, so
2953 that adjust_pc_after_break doesn't end up confused.
2955 - In non-stop if we insert a breakpoint (e.g., a step-resume)
2956 in one thread after another thread that was stepping had been
2957 momentarily paused for a step-over. When we re-resume the
2958 stepping thread, it may be resumed from that address with a
2959 breakpoint that hasn't trapped yet. Seen with
2960 gdb.threads/non-stop-fair-events.exp, on targets that don't
2961 do displaced stepping. */
2963 infrun_debug_printf ("resume: [%s] stepped breakpoint",
2964 tp->ptid.to_string ().c_str ());
2966 tp->stepped_breakpoint = 1;
2968 /* Most targets can step a breakpoint instruction, thus
2969 executing it normally. But if this one cannot, just
2970 continue and we will hit it anyway. */
2971 if (gdbarch_cannot_step_breakpoint (gdbarch))
2972 step = false;
2975 if (tp->control.may_range_step)
2977 /* If we're resuming a thread with the PC out of the step
2978 range, then we're doing some nested/finer run control
2979 operation, like stepping the thread out of the dynamic
2980 linker or the displaced stepping scratch pad. We
2981 shouldn't have allowed a range step then. */
2982 gdb_assert (pc_in_thread_step_range (pc, tp));
2985 do_target_resume (resume_ptid, step, sig);
2986 tp->set_resumed (true);
2989 /* Resume the inferior. SIG is the signal to give the inferior
2990 (GDB_SIGNAL_0 for none). This is a wrapper around 'resume_1' that
2991 rolls back state on error. */
2993 static void
2994 resume (gdb_signal sig)
2998 resume_1 (sig);
3000 catch (const gdb_exception &ex)
3002 /* If resuming is being aborted for any reason, delete any
3003 single-step breakpoint resume_1 may have created, to avoid
3004 confusing the following resumption, and to avoid leaving
3005 single-step breakpoints perturbing other threads, in case
3006 we're running in non-stop mode. */
3007 if (inferior_ptid != null_ptid)
3008 delete_single_step_breakpoints (inferior_thread ());
3009 throw;
3014 /* Proceeding. */
3016 /* See infrun.h. */
3018 /* Counter that tracks number of user visible stops. This can be used
3019 to tell whether a command has proceeded the inferior past the
3020 current location. This allows e.g., inferior function calls in
3021 breakpoint commands to not interrupt the command list. When the
3022 call finishes successfully, the inferior is standing at the same
3023 breakpoint as if nothing happened (and so we don't call
3024 normal_stop). */
3025 static ULONGEST current_stop_id;
3027 /* See infrun.h. */
3029 ULONGEST
3030 get_stop_id (void)
3032 return current_stop_id;
3035 /* Called when we report a user visible stop. */
3037 static void
3038 new_stop_id (void)
3040 current_stop_id++;
3043 /* Clear out all variables saying what to do when inferior is continued.
3044 First do this, then set the ones you want, then call `proceed'. */
3046 static void
3047 clear_proceed_status_thread (struct thread_info *tp)
3049 infrun_debug_printf ("%s", tp->ptid.to_string ().c_str ());
3051 /* If we're starting a new sequence, then the previous finished
3052 single-step is no longer relevant. */
3053 if (tp->has_pending_waitstatus ())
3055 if (tp->stop_reason () == TARGET_STOPPED_BY_SINGLE_STEP)
3057 infrun_debug_printf ("pending event of %s was a finished step. "
3058 "Discarding.",
3059 tp->ptid.to_string ().c_str ());
3061 tp->clear_pending_waitstatus ();
3062 tp->set_stop_reason (TARGET_STOPPED_BY_NO_REASON);
3064 else
3066 infrun_debug_printf
3067 ("thread %s has pending wait status %s (currently_stepping=%d).",
3068 tp->ptid.to_string ().c_str (),
3069 tp->pending_waitstatus ().to_string ().c_str (),
3070 currently_stepping (tp));
3074 /* If this signal should not be seen by the program, give it zero.
3075 Used for debugging signals. */
3076 if (!signal_pass_state (tp->stop_signal ()))
3077 tp->set_stop_signal (GDB_SIGNAL_0);
3079 tp->release_thread_fsm ();
3081 tp->control.trap_expected = 0;
3082 tp->control.step_range_start = 0;
3083 tp->control.step_range_end = 0;
3084 tp->control.may_range_step = 0;
3085 tp->control.step_frame_id = null_frame_id;
3086 tp->control.step_stack_frame_id = null_frame_id;
3087 tp->control.step_over_calls = STEP_OVER_UNDEBUGGABLE;
3088 tp->control.step_start_function = nullptr;
3089 tp->stop_requested = 0;
3091 tp->control.stop_step = 0;
3093 tp->control.proceed_to_finish = 0;
3095 tp->control.stepping_command = 0;
3097 /* Discard any remaining commands or status from previous stop. */
3098 bpstat_clear (&tp->control.stop_bpstat);
3101 /* Notify the current interpreter and observers that the target is about to
3102 proceed. */
3104 static void
3105 notify_about_to_proceed ()
3107 top_level_interpreter ()->on_about_to_proceed ();
3108 gdb::observers::about_to_proceed.notify ();
3111 void
3112 clear_proceed_status (int step)
3114 /* With scheduler-locking replay, stop replaying other threads if we're
3115 not replaying the user-visible resume ptid.
3117 This is a convenience feature to not require the user to explicitly
3118 stop replaying the other threads. We're assuming that the user's
3119 intent is to resume tracing the recorded process. */
3120 if (!non_stop && scheduler_mode == schedlock_replay
3121 && target_record_is_replaying (minus_one_ptid)
3122 && !target_record_will_replay (user_visible_resume_ptid (step),
3123 execution_direction))
3124 target_record_stop_replaying ();
3126 if (!non_stop && inferior_ptid != null_ptid)
3128 ptid_t resume_ptid = user_visible_resume_ptid (step);
3129 process_stratum_target *resume_target
3130 = user_visible_resume_target (resume_ptid);
3132 /* In all-stop mode, delete the per-thread status of all threads
3133 we're about to resume, implicitly and explicitly. */
3134 for (thread_info *tp : all_non_exited_threads (resume_target, resume_ptid))
3135 clear_proceed_status_thread (tp);
3138 if (inferior_ptid != null_ptid)
3140 struct inferior *inferior;
3142 if (non_stop)
3144 /* If in non-stop mode, only delete the per-thread status of
3145 the current thread. */
3146 clear_proceed_status_thread (inferior_thread ());
3149 inferior = current_inferior ();
3150 inferior->control.stop_soon = NO_STOP_QUIETLY;
3153 notify_about_to_proceed ();
3156 /* Returns true if TP is still stopped at a breakpoint that needs
3157 stepping-over in order to make progress. If the breakpoint is gone
3158 meanwhile, we can skip the whole step-over dance. */
3160 static bool
3161 thread_still_needs_step_over_bp (struct thread_info *tp)
3163 if (tp->stepping_over_breakpoint)
3165 struct regcache *regcache = get_thread_regcache (tp);
3167 if (breakpoint_here_p (tp->inf->aspace.get (),
3168 regcache_read_pc (regcache))
3169 == ordinary_breakpoint_here)
3170 return true;
3172 tp->stepping_over_breakpoint = 0;
3175 return false;
3178 /* Check whether thread TP still needs to start a step-over in order
3179 to make progress when resumed. Returns a bitwise or of enum
3180 step_over_what bits, indicating what needs to be stepped over. */
3182 static step_over_what
3183 thread_still_needs_step_over (struct thread_info *tp)
3185 step_over_what what = 0;
3187 if (thread_still_needs_step_over_bp (tp))
3188 what |= STEP_OVER_BREAKPOINT;
3190 if (tp->stepping_over_watchpoint
3191 && !target_have_steppable_watchpoint ())
3192 what |= STEP_OVER_WATCHPOINT;
3194 return what;
3197 /* Returns true if scheduler locking applies to thread TP, taking into
3198 account whether TP is executing a step/next-like command. */
3200 static bool
3201 schedlock_applies (struct thread_info *tp)
3203 return (scheduler_mode == schedlock_on
3204 || (scheduler_mode == schedlock_step
3205 && tp->control.stepping_command)
3206 || (scheduler_mode == schedlock_replay
3207 && target_record_will_replay (minus_one_ptid,
3208 execution_direction)));
3211 /* When FORCE_P is false, set process_stratum_target::COMMIT_RESUMED_STATE
3212 in all target stacks that have threads executing and don't have threads
3213 with pending events.
3215 When FORCE_P is true, set process_stratum_target::COMMIT_RESUMED_STATE
3216 in all target stacks that have threads executing regardless of whether
3217 there are pending events or not.
3219 Passing FORCE_P as false makes sense when GDB is going to wait for
3220 events from all threads and will therefore spot the pending events.
3221 However, if GDB is only going to wait for events from select threads
3222 (i.e. when performing an inferior call) then a pending event on some
3223 other thread will not be spotted, and if we fail to commit the resume
3224 state for the thread performing the inferior call, then the inferior
3225 call will never complete (or even start). */
3227 static void
3228 maybe_set_commit_resumed_all_targets (bool force_p)
3230 scoped_restore_current_thread restore_thread;
3232 for (inferior *inf : all_non_exited_inferiors ())
3234 process_stratum_target *proc_target = inf->process_target ();
3236 if (proc_target->commit_resumed_state)
3238 /* We already set this in a previous iteration, via another
3239 inferior sharing the process_stratum target. */
3240 continue;
3243 /* If the target has no resumed threads, it would be useless to
3244 ask it to commit the resumed threads. */
3245 if (!proc_target->threads_executing)
3247 infrun_debug_printf ("not requesting commit-resumed for target "
3248 "%s, no resumed threads",
3249 proc_target->shortname ());
3250 continue;
3253 /* As an optimization, if a thread from this target has some
3254 status to report, handle it before requiring the target to
3255 commit its resumed threads: handling the status might lead to
3256 resuming more threads. */
3257 if (!force_p && proc_target->has_resumed_with_pending_wait_status ())
3259 infrun_debug_printf ("not requesting commit-resumed for target %s, a"
3260 " thread has a pending waitstatus",
3261 proc_target->shortname ());
3262 continue;
3265 switch_to_inferior_no_thread (inf);
3267 if (!force_p && target_has_pending_events ())
3269 infrun_debug_printf ("not requesting commit-resumed for target %s, "
3270 "target has pending events",
3271 proc_target->shortname ());
3272 continue;
3275 infrun_debug_printf ("enabling commit-resumed for target %s",
3276 proc_target->shortname ());
3278 proc_target->commit_resumed_state = true;
3282 /* See infrun.h. */
3284 void
3285 maybe_call_commit_resumed_all_targets ()
3287 scoped_restore_current_thread restore_thread;
3289 for (inferior *inf : all_non_exited_inferiors ())
3291 process_stratum_target *proc_target = inf->process_target ();
3293 if (!proc_target->commit_resumed_state)
3294 continue;
3296 switch_to_inferior_no_thread (inf);
3298 infrun_debug_printf ("calling commit_resumed for target %s",
3299 proc_target->shortname ());
3301 target_commit_resumed ();
3305 /* To track nesting of scoped_disable_commit_resumed objects, ensuring
3306 that only the outermost one attempts to re-enable
3307 commit-resumed. */
3308 static bool enable_commit_resumed = true;
3310 /* See infrun.h. */
3312 scoped_disable_commit_resumed::scoped_disable_commit_resumed
3313 (const char *reason)
3314 : m_reason (reason),
3315 m_prev_enable_commit_resumed (enable_commit_resumed)
3317 infrun_debug_printf ("reason=%s", m_reason);
3319 enable_commit_resumed = false;
3321 for (inferior *inf : all_non_exited_inferiors ())
3323 process_stratum_target *proc_target = inf->process_target ();
3325 if (m_prev_enable_commit_resumed)
3327 /* This is the outermost instance: force all
3328 COMMIT_RESUMED_STATE to false. */
3329 proc_target->commit_resumed_state = false;
3331 else
3333 /* This is not the outermost instance, we expect
3334 COMMIT_RESUMED_STATE to have been cleared by the
3335 outermost instance. */
3336 gdb_assert (!proc_target->commit_resumed_state);
3341 /* See infrun.h. */
3343 void
3344 scoped_disable_commit_resumed::reset ()
3346 if (m_reset)
3347 return;
3348 m_reset = true;
3350 infrun_debug_printf ("reason=%s", m_reason);
3352 gdb_assert (!enable_commit_resumed);
3354 enable_commit_resumed = m_prev_enable_commit_resumed;
3356 if (m_prev_enable_commit_resumed)
3358 /* This is the outermost instance, re-enable
3359 COMMIT_RESUMED_STATE on the targets where it's possible. */
3360 maybe_set_commit_resumed_all_targets (false);
3362 else
3364 /* This is not the outermost instance, we expect
3365 COMMIT_RESUMED_STATE to still be false. */
3366 for (inferior *inf : all_non_exited_inferiors ())
3368 process_stratum_target *proc_target = inf->process_target ();
3369 gdb_assert (!proc_target->commit_resumed_state);
3374 /* See infrun.h. */
3376 scoped_disable_commit_resumed::~scoped_disable_commit_resumed ()
3378 reset ();
3381 /* See infrun.h. */
3383 void
3384 scoped_disable_commit_resumed::reset_and_commit ()
3386 reset ();
3387 maybe_call_commit_resumed_all_targets ();
3390 /* See infrun.h. */
3392 scoped_enable_commit_resumed::scoped_enable_commit_resumed
3393 (const char *reason, bool force_p)
3394 : m_reason (reason),
3395 m_prev_enable_commit_resumed (enable_commit_resumed)
3397 infrun_debug_printf ("reason=%s", m_reason);
3399 if (!enable_commit_resumed)
3401 enable_commit_resumed = true;
3403 /* Re-enable COMMIT_RESUMED_STATE on the targets where it's
3404 possible. */
3405 maybe_set_commit_resumed_all_targets (force_p);
3407 maybe_call_commit_resumed_all_targets ();
3411 /* See infrun.h. */
3413 scoped_enable_commit_resumed::~scoped_enable_commit_resumed ()
3415 infrun_debug_printf ("reason=%s", m_reason);
3417 gdb_assert (enable_commit_resumed);
3419 enable_commit_resumed = m_prev_enable_commit_resumed;
3421 if (!enable_commit_resumed)
3423 /* Force all COMMIT_RESUMED_STATE back to false. */
3424 for (inferior *inf : all_non_exited_inferiors ())
3426 process_stratum_target *proc_target = inf->process_target ();
3427 proc_target->commit_resumed_state = false;
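/* A minimal usage sketch of the RAII pair above (hypothetical caller,
   mirroring what proceed does further down in this file):

     {
       scoped_disable_commit_resumed disable ("proceeding");

       ... mark threads resumed / start step-overs ...

       disable.reset_and_commit ();
     }

   If an exception escapes before reset_and_commit, the destructor still
   runs reset, restoring the previous ENABLE_COMMIT_RESUMED state without
   committing.  */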
3432 /* Check that all the targets we're about to resume are in non-stop
3433 mode. Ideally, we'd only care whether all targets support
3434 target-async, but we're not there yet. E.g., stop_all_threads
3435 doesn't know how to handle all-stop targets. Also, the remote
3436 protocol in all-stop mode is synchronous, irrespective of
3437 target-async, which means that things like a breakpoint re-set
3438 triggered by one target would try to read memory from all targets
3439 and fail. */
3441 static void
3442 check_multi_target_resumption (process_stratum_target *resume_target)
3444 if (!non_stop && resume_target == nullptr)
3446 scoped_restore_current_thread restore_thread;
3448 /* This is used to track whether we're resuming more than one
3449 target. */
3450 process_stratum_target *first_connection = nullptr;
3452 /* The first inferior we see with a target that does not work in
3453 always-non-stop mode. */
3454 inferior *first_not_non_stop = nullptr;
3456 for (inferior *inf : all_non_exited_inferiors ())
3458 switch_to_inferior_no_thread (inf);
3460 if (!target_has_execution ())
3461 continue;
3463 process_stratum_target *proc_target
3464 = current_inferior ()->process_target ();
3466 if (!target_is_non_stop_p ())
3467 first_not_non_stop = inf;
3469 if (first_connection == nullptr)
3470 first_connection = proc_target;
3471 else if (first_connection != proc_target
3472 && first_not_non_stop != nullptr)
3474 switch_to_inferior_no_thread (first_not_non_stop);
3476 proc_target = current_inferior ()->process_target ();
3478 error (_("Connection %d (%s) does not support "
3479 "multi-target resumption."),
3480 proc_target->connection_number,
3481 make_target_connection_string (proc_target).c_str ());
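/* Illustrative scenario (hypothetical): two separate remote connections,
   one of which only works in all-stop mode.  A plain "continue" with a
   wildcard resume target then stops at the error above, reporting the
   connection number and connection string of the all-stop target, rather
   than running into the problems described in the comment above.  */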
3487 /* Helper function for `proceed`. Check if thread TP is suitable for
3488 resuming, and, if it is, switch to the thread and call
3489 `keep_going_pass_signal`. If TP is not suitable for resuming then this
3490 function will just return without switching threads. */
3492 static void
3493 proceed_resume_thread_checked (thread_info *tp)
3495 if (!tp->inf->has_execution ())
3497 infrun_debug_printf ("[%s] target has no execution",
3498 tp->ptid.to_string ().c_str ());
3499 return;
3502 if (tp->resumed ())
3504 infrun_debug_printf ("[%s] resumed",
3505 tp->ptid.to_string ().c_str ());
3506 gdb_assert (tp->executing () || tp->has_pending_waitstatus ());
3507 return;
3510 if (thread_is_in_step_over_chain (tp))
3512 infrun_debug_printf ("[%s] needs step-over",
3513 tp->ptid.to_string ().c_str ());
3514 return;
3517 /* When handling a vfork GDB removes all breakpoints from the program
3518 space in which the vfork is being handled. If we are following the
3519 parent then GDB will set the thread_waiting_for_vfork_done member of
3520 the parent inferior. In this case we should take care to only resume
3521 the vfork parent thread; the kernel will hold this thread suspended
3522 until the vfork child has exited or execd, at which point the parent
3523 will be resumed and a VFORK_DONE event sent to GDB. */
3524 if (tp->inf->thread_waiting_for_vfork_done != nullptr)
3526 if (target_is_non_stop_p ())
3528 /* For non-stop targets, regardless of whether GDB is using
3529 all-stop or non-stop mode, threads are controlled
3530 individually.
3532 When a thread is handling a vfork, breakpoints are removed
3533 from the inferior (well, program space in fact), so it is
3534 critical that we don't try to resume any thread other than the
3535 vfork parent. */
3536 if (tp != tp->inf->thread_waiting_for_vfork_done)
3538 infrun_debug_printf ("[%s] thread %s of this inferior is "
3539 "waiting for vfork-done",
3540 tp->ptid.to_string ().c_str (),
3541 tp->inf->thread_waiting_for_vfork_done
3542 ->ptid.to_string ().c_str ());
3543 return;
3546 else
3548 /* For all-stop targets, when we attempt to resume the inferior,
3549 we will only resume the vfork parent thread; this is handled
3550 in internal_resume_ptid.
3552 Additionally, we will always be called with the vfork parent
3553 thread as the current thread (TP) thanks to follow_fork; as
3554 such the following assertion should hold.
3556 Beyond this there is nothing more that needs to be done
3557 here. */
3558 gdb_assert (tp == tp->inf->thread_waiting_for_vfork_done);
3562 /* When handling a vfork GDB removes all breakpoints from the program
3563 space in which the vfork is being handled. If we are following the
3564 child then GDB will set vfork_child member of the vfork parent
3565 inferior. Once the child has either exited or execd then GDB will
3566 detach from the parent process. Until that point GDB should not
3567 resume any thread in the parent process. */
3568 if (tp->inf->vfork_child != nullptr)
3570 infrun_debug_printf ("[%s] thread is part of a vfork parent, child is %d",
3571 tp->ptid.to_string ().c_str (),
3572 tp->inf->vfork_child->pid);
3573 return;
3576 infrun_debug_printf ("resuming %s",
3577 tp->ptid.to_string ().c_str ());
3579 execution_control_state ecs (tp);
3580 switch_to_thread (tp);
3581 keep_going_pass_signal (&ecs);
3582 if (!ecs.wait_some_more)
3583 error (_("Command aborted."));
3586 /* Basic routine for continuing the program in various fashions.
3588 ADDR is the address to resume at, or -1 for resume where stopped.
3589 SIGGNAL is the signal to give it, or GDB_SIGNAL_0 for none,
3590 or GDB_SIGNAL_DEFAULT to act according to how it stopped.
3592 You should call clear_proceed_status before calling proceed. */
3594 void
3595 proceed (CORE_ADDR addr, enum gdb_signal siggnal)
3597 INFRUN_SCOPED_DEBUG_ENTER_EXIT;
3599 struct gdbarch *gdbarch;
3600 CORE_ADDR pc;
3602 /* If we're stopped at a fork/vfork, switch to either the parent or child
3603 thread as defined by the "set follow-fork-mode" command, or, if both
3604 the parent and child are controlled by GDB, and schedule-multiple is
3605 on, follow the child. If none of the above apply then we just proceed
3606 resuming the current thread. */
3607 if (!follow_fork ())
3609 /* The target for some reason decided not to resume. */
3610 normal_stop ();
3611 if (target_can_async_p ())
3612 inferior_event_handler (INF_EXEC_COMPLETE);
3613 return;
3616 /* We'll update this if & when we switch to a new thread. */
3617 update_previous_thread ();
3619 thread_info *cur_thr = inferior_thread ();
3620 infrun_debug_printf ("cur_thr = %s", cur_thr->ptid.to_string ().c_str ());
3622 regcache *regcache = get_thread_regcache (cur_thr);
3623 gdbarch = regcache->arch ();
3624 pc = regcache_read_pc_protected (regcache);
3626 /* Fill in with reasonable starting values. */
3627 init_thread_stepping_state (cur_thr);
3629 gdb_assert (!thread_is_in_step_over_chain (cur_thr));
3631 ptid_t resume_ptid
3632 = user_visible_resume_ptid (cur_thr->control.stepping_command);
3633 process_stratum_target *resume_target
3634 = user_visible_resume_target (resume_ptid);
3636 check_multi_target_resumption (resume_target);
3638 if (addr == (CORE_ADDR) -1)
3640 const address_space *aspace = cur_thr->inf->aspace.get ();
3642 if (cur_thr->stop_pc_p ()
3643 && pc == cur_thr->stop_pc ()
3644 && breakpoint_here_p (aspace, pc) == ordinary_breakpoint_here
3645 && execution_direction != EXEC_REVERSE)
3646 /* There is a breakpoint at the address we will resume at;
3647 step one instruction before inserting breakpoints so that
3648 we do not stop right away (and report a second hit at this
3649 breakpoint).
3651 Note, we don't do this in reverse, because we won't
3652 actually be executing the breakpoint insn anyway.
3653 We'll be (un-)executing the previous instruction. */
3654 cur_thr->stepping_over_breakpoint = 1;
3655 else if (gdbarch_single_step_through_delay_p (gdbarch)
3656 && gdbarch_single_step_through_delay (gdbarch,
3657 get_current_frame ()))
3658 /* We stepped onto an instruction that needs to be stepped
3659 again before re-inserting the breakpoint, do so. */
3660 cur_thr->stepping_over_breakpoint = 1;
3662 else
3664 regcache_write_pc (regcache, addr);
3667 if (siggnal != GDB_SIGNAL_DEFAULT)
3668 cur_thr->set_stop_signal (siggnal);
3670 /* If an exception is thrown from this point on, make sure to
3671 propagate GDB's knowledge of the executing state to the
3672 frontend/user running state. */
3673 scoped_finish_thread_state finish_state (resume_target, resume_ptid);
3675 /* Even if RESUME_PTID is a wildcard, and we end up resuming fewer
3676 threads (e.g., we might need to set threads stepping over
3677 breakpoints first), from the user/frontend's point of view, all
3678 threads in RESUME_PTID are now running. Unless we're calling an
3679 inferior function, as in that case we pretend the inferior
3680 doesn't run at all. */
3681 if (!cur_thr->control.in_infcall)
3682 set_running (resume_target, resume_ptid, true);
3684 infrun_debug_printf ("addr=%s, signal=%s, resume_ptid=%s",
3685 paddress (gdbarch, addr),
3686 gdb_signal_to_symbol_string (siggnal),
3687 resume_ptid.to_string ().c_str ());
3689 annotate_starting ();
3691 /* Make sure that output from GDB appears before output from the
3692 inferior. */
3693 gdb_flush (gdb_stdout);
3695 /* Since we've marked the inferior running, give it the terminal. A
3696 QUIT/Ctrl-C from here on is forwarded to the target (which can
3697 still detect attempts to unblock a stuck connection with repeated
3698 Ctrl-C from within target_pass_ctrlc). */
3699 target_terminal::inferior ();
3701 /* In a multi-threaded task we may select another thread and
3702 then continue or step.
3704 But if a thread that we're resuming had stopped at a breakpoint,
3705 it will immediately cause another breakpoint stop without any
3706 execution (i.e. it will report a breakpoint hit incorrectly). So
3707 we must step over it first.
3709 Look for threads other than the current (TP) that reported a
3710 breakpoint hit and haven't been resumed yet since. */
3712 /* If scheduler locking applies, we can avoid iterating over all
3713 threads. */
3714 if (!non_stop && !schedlock_applies (cur_thr))
3716 for (thread_info *tp : all_non_exited_threads (resume_target,
3717 resume_ptid))
3719 switch_to_thread_no_regs (tp);
3721 /* Ignore the current thread here. It's handled
3722 afterwards. */
3723 if (tp == cur_thr)
3724 continue;
3726 if (!thread_still_needs_step_over (tp))
3727 continue;
3729 gdb_assert (!thread_is_in_step_over_chain (tp));
3731 infrun_debug_printf ("need to step-over [%s] first",
3732 tp->ptid.to_string ().c_str ());
3734 global_thread_step_over_chain_enqueue (tp);
3737 switch_to_thread (cur_thr);
3740 /* Enqueue the current thread last, so that we move all other
3741 threads over their breakpoints first. */
3742 if (cur_thr->stepping_over_breakpoint)
3743 global_thread_step_over_chain_enqueue (cur_thr);
3745 /* If the thread isn't started, we'll still need to set its prev_pc,
3746 so that switch_back_to_stepped_thread knows the thread hasn't
3747 advanced. Must do this before resuming any thread, as in
3748 all-stop/remote, once we resume we can't send any other packet
3749 until the target stops again. */
3750 cur_thr->prev_pc = regcache_read_pc_protected (regcache);
3753 scoped_disable_commit_resumed disable_commit_resumed ("proceeding");
3754 bool step_over_started = start_step_over ();
3756 if (step_over_info_valid_p ())
3758 /* Either this thread started a new in-line step over, or some
3759 other thread was already doing one. In either case, don't
3760 resume anything else until the step-over is finished. */
3762 else if (step_over_started && !target_is_non_stop_p ())
3764 /* A new displaced stepping sequence was started. In all-stop,
3765 we can't talk to the target anymore until it next stops. */
3767 else if (!non_stop && target_is_non_stop_p ())
3769 INFRUN_SCOPED_DEBUG_START_END
3770 ("resuming threads, all-stop-on-top-of-non-stop");
3772 /* In all-stop, but the target is always in non-stop mode.
3773 Start all other threads that are implicitly resumed too. */
3774 for (thread_info *tp : all_non_exited_threads (resume_target,
3775 resume_ptid))
3777 switch_to_thread_no_regs (tp);
3778 proceed_resume_thread_checked (tp);
3781 else
3782 proceed_resume_thread_checked (cur_thr);
3784 disable_commit_resumed.reset_and_commit ();
3787 finish_state.release ();
3789 /* If we've switched threads above, switch back to the previously
3790 current thread. We don't want the user to see a different
3791 selected thread. */
3792 switch_to_thread (cur_thr);
3794 /* Tell the event loop to wait for it to stop. If the target
3795 supports asynchronous execution, it'll do this from within
3796 target_resume. */
3797 if (!target_can_async_p ())
3798 mark_async_event_handler (infrun_async_inferior_event_token);
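/* A minimal caller sketch (hypothetical), following the contract stated in
   the comment above proceed: clear the per-thread proceed state first, then
   resume from wherever the thread stopped, with its default signal:

     clear_proceed_status (0);
     proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT);  */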
3802 /* Start remote-debugging of a machine over a serial link. */
3804 void
3805 start_remote (int from_tty)
3807 inferior *inf = current_inferior ();
3808 inf->control.stop_soon = STOP_QUIETLY_REMOTE;
3810 /* Always go on waiting for the target, regardless of the mode. */
3811 /* FIXME: cagney/1999-09-23: At present it isn't possible to
3812 indicate to wait_for_inferior that a target should timeout if
3813 nothing is returned (instead of just blocking). Because of this,
3814 targets expecting an immediate response need to, internally, set
3815 things up so that the target_wait() is forced to eventually
3816 timeout. */
3817 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
3818 differentiate to its caller what the state of the target is after
3819 the initial open has been performed. Here we're assuming that
3820 the target has stopped. It should be possible to eventually have
3821 target_open() return to the caller an indication that the target
3822 is currently running and GDB state should be set to the same as
3823 for an async run. */
3824 wait_for_inferior (inf);
3826 /* Now that the inferior has stopped, do any bookkeeping like
3827 loading shared libraries. We want to do this before normal_stop,
3828 so that the displayed frame is up to date. */
3829 post_create_inferior (from_tty);
3831 normal_stop ();
3834 /* Initialize static vars when a new inferior begins. */
3836 void
3837 init_wait_for_inferior (void)
3839 /* These are meaningless until the first time through wait_for_inferior. */
3841 breakpoint_init_inferior (current_inferior (), inf_starting);
3843 clear_proceed_status (0);
3845 nullify_last_target_wait_ptid ();
3847 update_previous_thread ();
3852 static void handle_inferior_event (struct execution_control_state *ecs);
3854 static void handle_step_into_function (struct gdbarch *gdbarch,
3855 struct execution_control_state *ecs);
3856 static void handle_step_into_function_backward (struct gdbarch *gdbarch,
3857 struct execution_control_state *ecs);
3858 static void handle_signal_stop (struct execution_control_state *ecs);
3859 static void check_exception_resume (struct execution_control_state *,
3860 const frame_info_ptr &);
3862 static void end_stepping_range (struct execution_control_state *ecs);
3863 static void stop_waiting (struct execution_control_state *ecs);
3864 static void keep_going (struct execution_control_state *ecs);
3865 static void process_event_stop_test (struct execution_control_state *ecs);
3866 static bool switch_back_to_stepped_thread (struct execution_control_state *ecs);
3868 /* This function is attached as a "thread_stop_requested" observer.
3869 Cleanup local state that assumed the PTID was to be resumed, and
3870 report the stop to the frontend. */
3872 static void
3873 infrun_thread_stop_requested (ptid_t ptid)
3875 process_stratum_target *curr_target = current_inferior ()->process_target ();
3877 /* PTID was requested to stop. If the thread was already stopped,
3878 but the user/frontend doesn't know about that yet (e.g., the
3879 thread had been temporarily paused for some step-over), set up
3880 for reporting the stop now. */
3881 for (thread_info *tp : all_threads (curr_target, ptid))
3883 if (tp->state != THREAD_RUNNING)
3884 continue;
3885 if (tp->executing ())
3886 continue;
3888 /* Remove matching threads from the step-over queue, so
3889 start_step_over doesn't try to resume them
3890 automatically. */
3891 if (thread_is_in_step_over_chain (tp))
3892 global_thread_step_over_chain_remove (tp);
3894 /* If the thread is stopped, but the user/frontend doesn't
3895 know about that yet, queue a pending event, as if the
3896 thread had just stopped now. Unless the thread already had
3897 a pending event. */
3898 if (!tp->has_pending_waitstatus ())
3900 target_waitstatus ws;
3901 ws.set_stopped (GDB_SIGNAL_0);
3902 tp->set_pending_waitstatus (ws);
3905 /* Clear the inline-frame state, since we're re-processing the
3906 stop. */
3907 clear_inline_frame_state (tp);
3909 /* If this thread was paused because some other thread was
3910 doing an inline-step over, let that finish first. Once
3911 that happens, we'll restart all threads and consume pending
3912 stop events then. */
3913 if (step_over_info_valid_p ())
3914 continue;
3916 /* Otherwise we can process the (new) pending event now. Set
3917 it so this pending event is considered by
3918 do_target_wait. */
3919 tp->set_resumed (true);
3923 /* Delete the step resume, single-step and longjmp/exception resume
3924 breakpoints of TP. */
3926 static void
3927 delete_thread_infrun_breakpoints (struct thread_info *tp)
3929 delete_step_resume_breakpoint (tp);
3930 delete_exception_resume_breakpoint (tp);
3931 delete_single_step_breakpoints (tp);
3934 /* If the target still has execution, call FUNC for each thread that
3935 just stopped. In all-stop, that's all the non-exited threads; in
3936 non-stop, that's the current thread, only. */
3938 typedef void (*for_each_just_stopped_thread_callback_func)
3939 (struct thread_info *tp);
3941 static void
3942 for_each_just_stopped_thread (for_each_just_stopped_thread_callback_func func)
3944 if (!target_has_execution () || inferior_ptid == null_ptid)
3945 return;
3947 if (target_is_non_stop_p ())
3949 /* If in non-stop mode, only the current thread stopped. */
3950 func (inferior_thread ());
3952 else
3954 /* In all-stop mode, all threads have stopped. */
3955 for (thread_info *tp : all_non_exited_threads ())
3956 func (tp);
3960 /* Delete the step resume and longjmp/exception resume breakpoints of
3961 the threads that just stopped. */
3963 static void
3964 delete_just_stopped_threads_infrun_breakpoints (void)
3966 for_each_just_stopped_thread (delete_thread_infrun_breakpoints);
3969 /* Delete the single-step breakpoints of the threads that just
3970 stopped. */
3972 static void
3973 delete_just_stopped_threads_single_step_breakpoints (void)
3975 for_each_just_stopped_thread (delete_single_step_breakpoints);
3978 /* See infrun.h. */
3980 void
3981 print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
3982 const struct target_waitstatus &ws)
3984 infrun_debug_printf ("target_wait (%s [%s], status) =",
3985 waiton_ptid.to_string ().c_str (),
3986 target_pid_to_str (waiton_ptid).c_str ());
3987 infrun_debug_printf (" %s [%s],",
3988 result_ptid.to_string ().c_str (),
3989 target_pid_to_str (result_ptid).c_str ());
3990 infrun_debug_printf (" %s", ws.to_string ().c_str ());
3993 /* Select a thread at random, out of those which are resumed and have
3994 had events. */
3996 static struct thread_info *
3997 random_pending_event_thread (inferior *inf, ptid_t waiton_ptid)
3999 process_stratum_target *proc_target = inf->process_target ();
4000 thread_info *thread
4001 = proc_target->random_resumed_with_pending_wait_status (inf, waiton_ptid);
4003 if (thread == nullptr)
4005 infrun_debug_printf ("None found.");
4006 return nullptr;
4009 infrun_debug_printf ("Found %s.", thread->ptid.to_string ().c_str ());
4010 gdb_assert (thread->resumed ());
4011 gdb_assert (thread->has_pending_waitstatus ());
4013 return thread;
4016 /* Wrapper for target_wait that first checks whether threads have
4017 pending statuses to report before actually asking the target for
4018 more events. INF is the inferior we're using to call target_wait
4019 on. */
4021 static ptid_t
4022 do_target_wait_1 (inferior *inf, ptid_t ptid,
4023 target_waitstatus *status, target_wait_flags options)
4025 struct thread_info *tp;
4027 /* We know that we are looking for an event in the target of inferior
4028 INF, but we don't know which thread the event might come from. As
4029 such we want to make sure that INFERIOR_PTID is reset so that none of
4030 the wait code relies on it - doing so is always a mistake. */
4031 switch_to_inferior_no_thread (inf);
4033 /* First check if there is a resumed thread with a wait status
4034 pending. */
4035 if (ptid == minus_one_ptid || ptid.is_pid ())
4037 tp = random_pending_event_thread (inf, ptid);
4039 else
4041 infrun_debug_printf ("Waiting for specific thread %s.",
4042 ptid.to_string ().c_str ());
4044 /* We have a specific thread to check. */
4045 tp = inf->find_thread (ptid);
4046 gdb_assert (tp != nullptr);
4047 if (!tp->has_pending_waitstatus ())
4048 tp = nullptr;
4051 if (tp != nullptr
4052 && (tp->stop_reason () == TARGET_STOPPED_BY_SW_BREAKPOINT
4053 || tp->stop_reason () == TARGET_STOPPED_BY_HW_BREAKPOINT))
4055 struct regcache *regcache = get_thread_regcache (tp);
4056 struct gdbarch *gdbarch = regcache->arch ();
4057 CORE_ADDR pc;
4058 int discard = 0;
4060 pc = regcache_read_pc (regcache);
4062 if (pc != tp->stop_pc ())
4064 infrun_debug_printf ("PC of %s changed. was=%s, now=%s",
4065 tp->ptid.to_string ().c_str (),
4066 paddress (gdbarch, tp->stop_pc ()),
4067 paddress (gdbarch, pc));
4068 discard = 1;
4070 else if (!breakpoint_inserted_here_p (tp->inf->aspace.get (), pc))
4072 infrun_debug_printf ("previous breakpoint of %s, at %s gone",
4073 tp->ptid.to_string ().c_str (),
4074 paddress (gdbarch, pc));
4076 discard = 1;
4079 if (discard)
4081 infrun_debug_printf ("pending event of %s cancelled.",
4082 tp->ptid.to_string ().c_str ());
4084 tp->clear_pending_waitstatus ();
4085 target_waitstatus ws;
4086 ws.set_spurious ();
4087 tp->set_pending_waitstatus (ws);
4088 tp->set_stop_reason (TARGET_STOPPED_BY_NO_REASON);
4092 if (tp != nullptr)
4094 infrun_debug_printf ("Using pending wait status %s for %s.",
4095 tp->pending_waitstatus ().to_string ().c_str (),
4096 tp->ptid.to_string ().c_str ());
4098 /* Now that we've selected our final event LWP, un-adjust its PC
4099 if it was a software breakpoint (and the target doesn't
4100 always adjust the PC itself). */
4101 if (tp->stop_reason () == TARGET_STOPPED_BY_SW_BREAKPOINT
4102 && !target_supports_stopped_by_sw_breakpoint ())
4104 struct regcache *regcache;
4105 struct gdbarch *gdbarch;
4106 int decr_pc;
4108 regcache = get_thread_regcache (tp);
4109 gdbarch = regcache->arch ();
4111 decr_pc = gdbarch_decr_pc_after_break (gdbarch);
4112 if (decr_pc != 0)
4114 CORE_ADDR pc;
4116 pc = regcache_read_pc (regcache);
4117 regcache_write_pc (regcache, pc + decr_pc);
4121 tp->set_stop_reason (TARGET_STOPPED_BY_NO_REASON);
4122 *status = tp->pending_waitstatus ();
4123 tp->clear_pending_waitstatus ();
4125 /* Wake up the event loop again, until all pending events are
4126 processed. */
4127 if (target_is_async_p ())
4128 mark_async_event_handler (infrun_async_inferior_event_token);
4129 return tp->ptid;
4132 /* But if we don't find one, we'll have to wait. */
4134 /* We can't ask a non-async target to do a non-blocking wait, so this will be
4135 a blocking wait. */
4136 if (!target_can_async_p ())
4137 options &= ~TARGET_WNOHANG;
4139 return target_wait (ptid, status, options);
4142 /* Wrapper for target_wait that first checks whether threads have
4143 pending statuses to report before actually asking the target for
4144 more events. Polls for events from all inferiors/targets. */
4146 static bool
4147 do_target_wait (ptid_t wait_ptid, execution_control_state *ecs,
4148 target_wait_flags options)
4150 int num_inferiors = 0;
4151 int random_selector;
4153 /* For fairness, we pick the first inferior/target to poll at random
4154 out of all inferiors that may report events, and then continue
4155 polling the rest of the inferior list starting from that one in a
4156 circular fashion until the whole list is polled once. */
4158 ptid_t wait_ptid_pid {wait_ptid.pid ()};
4159 auto inferior_matches = [&wait_ptid_pid] (inferior *inf)
4161 return (inf->process_target () != nullptr
4162 && ptid_t (inf->pid).matches (wait_ptid_pid));
4165 /* First see how many matching inferiors we have. */
4166 for (inferior *inf : all_inferiors ())
4167 if (inferior_matches (inf))
4168 num_inferiors++;
4170 if (num_inferiors == 0)
4172 ecs->ws.set_ignore ();
4173 return false;
4176 /* Now randomly pick an inferior out of those that matched. */
4177 random_selector = (int)
4178 ((num_inferiors * (double) rand ()) / (RAND_MAX + 1.0));
4180 if (num_inferiors > 1)
4181 infrun_debug_printf ("Found %d inferiors, starting at #%d",
4182 num_inferiors, random_selector);
4184 /* Select the Nth inferior that matched. */
4186 inferior *selected = nullptr;
4188 for (inferior *inf : all_inferiors ())
4189 if (inferior_matches (inf))
4190 if (random_selector-- == 0)
4192 selected = inf;
4193 break;
4196 /* Now poll for events out of each of the matching inferior's
4197 targets, starting from the selected one. */
4199 auto do_wait = [&] (inferior *inf)
4201 ecs->ptid = do_target_wait_1 (inf, wait_ptid, &ecs->ws, options);
4202 ecs->target = inf->process_target ();
4203 return (ecs->ws.kind () != TARGET_WAITKIND_IGNORE);
4206 /* Needed in 'all-stop + target-non-stop' mode, because we end up
4207 here spuriously after the target is all stopped and we've already
4208 reported the stop to the user, polling for events. */
4209 scoped_restore_current_thread restore_thread;
4211 intrusive_list_iterator<inferior> start
4212 = inferior_list.iterator_to (*selected);
4214 for (intrusive_list_iterator<inferior> it = start;
4215 it != inferior_list.end ();
4216 ++it)
4218 inferior *inf = &*it;
4220 if (inferior_matches (inf) && do_wait (inf))
4221 return true;
4224 for (intrusive_list_iterator<inferior> it = inferior_list.begin ();
4225 it != start;
4226 ++it)
4228 inferior *inf = &*it;
4230 if (inferior_matches (inf) && do_wait (inf))
4231 return true;
4234 ecs->ws.set_ignore ();
4235 return false;
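/* A worked example of the fairness scheme above (hypothetical numbers):
   with three matching inferiors 1, 2 and 3 and RANDOM_SELECTOR picking
   inferior 2, the polling order is 2, 3, then wrapping around to 1.  The
   first inferior whose target reports an event wins, so an inferior that
   happens to be first in the inferior list cannot starve the others.  */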
4238 /* An event reported by wait_one. */
4240 struct wait_one_event
4242 /* The target the event came out of. */
4243 process_stratum_target *target;
4245 /* The PTID the event was for. */
4246 ptid_t ptid;
4248 /* The waitstatus. */
4249 target_waitstatus ws;
4252 static bool handle_one (const wait_one_event &event);
4253 static int finish_step_over (struct execution_control_state *ecs);
4255 /* Prepare and stabilize the inferior for detaching it. E.g.,
4256 detaching while a thread is displaced stepping is a recipe for
4257 crashing it, as nothing would readjust the PC out of the scratch
4258 pad. */
4260 void
4261 prepare_for_detach (void)
4263 struct inferior *inf = current_inferior ();
4264 ptid_t pid_ptid = ptid_t (inf->pid);
4265 scoped_restore_current_thread restore_thread;
4267 scoped_restore restore_detaching = make_scoped_restore (&inf->detaching, true);
4269 /* Remove all threads of INF from the global step-over chain. We
4270 want to stop any ongoing step-over, not start any new one. */
4271 thread_step_over_list_safe_range range
4272 = make_thread_step_over_list_safe_range (global_thread_step_over_list);
4274 for (thread_info *tp : range)
4275 if (tp->inf == inf)
4277 infrun_debug_printf ("removing thread %s from global step over chain",
4278 tp->ptid.to_string ().c_str ());
4279 global_thread_step_over_chain_remove (tp);
4282 /* If we were already in the middle of an inline step-over, and the
4283 thread stepping belongs to the inferior we're detaching, we need
4284 to restart the threads of other inferiors. */
4285 if (step_over_info.thread != -1)
4287 infrun_debug_printf ("inline step-over in progress while detaching");
4289 thread_info *thr = find_thread_global_id (step_over_info.thread);
4290 if (thr->inf == inf)
4292 /* Since we removed threads of INF from the step-over chain,
4293 we know this won't start a step-over for INF. */
4294 clear_step_over_info ();
4296 if (target_is_non_stop_p ())
4298 /* Start a new step-over in another thread if there's
4299 one that needs it. */
4300 start_step_over ();
4302 /* Restart all other threads (except the
4303 previously-stepping thread, since that one is still
4304 running). */
4305 if (!step_over_info_valid_p ())
4306 restart_threads (thr);
4311 if (displaced_step_in_progress (inf))
4313 infrun_debug_printf ("displaced-stepping in progress while detaching");
4315 /* Stop threads currently displaced stepping, aborting it. */
4317 for (thread_info *thr : inf->non_exited_threads ())
4319 if (thr->displaced_step_state.in_progress ())
4321 if (thr->executing ())
4323 if (!thr->stop_requested)
4325 target_stop (thr->ptid);
4326 thr->stop_requested = true;
4329 else
4330 thr->set_resumed (false);
4334 while (displaced_step_in_progress (inf))
4336 wait_one_event event;
4338 event.target = inf->process_target ();
4339 event.ptid = do_target_wait_1 (inf, pid_ptid, &event.ws, 0);
4341 if (debug_infrun)
4342 print_target_wait_results (pid_ptid, event.ptid, event.ws);
4344 handle_one (event);
4347 /* It's OK to leave some of the threads of INF stopped, since
4348 they'll be detached shortly. */
4352 /* If all-stop, but there exists a non-stop target, stop all threads
4353 now that we're presenting the stop to the user. */
4355 static void
4356 stop_all_threads_if_all_stop_mode ()
4358 if (!non_stop && exists_non_stop_target ())
4359 stop_all_threads ("presenting stop to user in all-stop");
4362 /* Wait for control to return from inferior to debugger.
4364 If the inferior gets a signal, we may decide to start it up again
4365 instead of returning. That is why there is a loop in this function.
4366 When this function actually returns it means the inferior
4367 should be left stopped and GDB should read more commands. */
4369 static void
4370 wait_for_inferior (inferior *inf)
4372 infrun_debug_printf ("wait_for_inferior ()");
4374 SCOPE_EXIT { delete_just_stopped_threads_infrun_breakpoints (); };
4376 /* If an error happens while handling the event, propagate GDB's
4377 knowledge of the executing state to the frontend/user running
4378 state. */
4379 scoped_finish_thread_state finish_state
4380 (inf->process_target (), minus_one_ptid);
4382 while (1)
4384 execution_control_state ecs;
4386 overlay_cache_invalid = 1;
4388 /* Flush target cache before starting to handle each event.
4389 Target was running and cache could be stale. This is just a
4390 heuristic. Running threads may modify target memory, but we
4391 don't get any event. */
4392 target_dcache_invalidate (current_program_space->aspace);
4394 ecs.ptid = do_target_wait_1 (inf, minus_one_ptid, &ecs.ws, 0);
4395 ecs.target = inf->process_target ();
4397 if (debug_infrun)
4398 print_target_wait_results (minus_one_ptid, ecs.ptid, ecs.ws);
4400 /* Now figure out what to do with the result of the wait.  */
4401 handle_inferior_event (&ecs);
4403 if (!ecs.wait_some_more)
4404 break;
4407 stop_all_threads_if_all_stop_mode ();
4409 /* No error, don't finish the state yet. */
4410 finish_state.release ();
4413 /* Cleanup that reinstalls the readline callback handler, if the
4414 target is running in the background. If while handling the target
4415 event something triggered a secondary prompt, like e.g., a
4416 pagination prompt, we'll have removed the callback handler (see
4417 gdb_readline_wrapper_line). Need to do this as we go back to the
4418 event loop, ready to process further input. Note this has no
4419 effect if the handler hasn't actually been removed, because calling
4420 rl_callback_handler_install resets the line buffer, thus losing
4421 input. */
4423 static void
4424 reinstall_readline_callback_handler_cleanup ()
4426 struct ui *ui = current_ui;
4428 if (!ui->async)
4430 /* We're not going back to the top level event loop yet. Don't
4431 install the readline callback, as it'd prep the terminal,
4432 readline-style (raw, noecho) (e.g., --batch). We'll install
4433 it the next time the prompt is displayed, when we're ready
4434 for input. */
4435 return;
4438 if (ui->command_editing && ui->prompt_state != PROMPT_BLOCKED)
4439 gdb_rl_callback_handler_reinstall ();
4442 /* Clean up the FSMs of threads that are now stopped. In non-stop,
4443 that's just the event thread. In all-stop, that's all threads. In
4444 all-stop, threads that had a pending exit no longer have a reason
4445 to be around, as their FSMs/commands are canceled, so we delete
4446 them. This avoids "info threads" listing such threads as if they
4447 were alive (and failing to read their registers), the user being
4448 able to select and resume them (and that failing), etc. */
4450 static void
4451 clean_up_just_stopped_threads_fsms (struct execution_control_state *ecs)
4453 /* The first clean_up call below assumes the event thread is the current
4454 one. */
4455 if (ecs->event_thread != nullptr)
4456 gdb_assert (ecs->event_thread == inferior_thread ());
4458 if (ecs->event_thread != nullptr
4459 && ecs->event_thread->thread_fsm () != nullptr)
4460 ecs->event_thread->thread_fsm ()->clean_up (ecs->event_thread);
4462 if (!non_stop)
4464 scoped_restore_current_thread restore_thread;
4466 for (thread_info *thr : all_threads_safe ())
4468 if (thr->state == THREAD_EXITED)
4469 continue;
4471 if (thr == ecs->event_thread)
4472 continue;
4474 if (thr->thread_fsm () != nullptr)
4476 switch_to_thread (thr);
4477 thr->thread_fsm ()->clean_up (thr);
4480 /* As we are cancelling the command/FSM of this thread,
4481 whatever was the reason we needed to report a thread
4482 exited event to the user, that reason is gone. Delete
4483 the thread, so that the user doesn't see it in the thread
4484 list, the next proceed doesn't try to resume it, etc. */
4485 if (thr->has_pending_waitstatus ()
4486 && (thr->pending_waitstatus ().kind ()
4487 == TARGET_WAITKIND_THREAD_EXITED))
4488 delete_thread (thr);
4493 /* Helper for all_uis_check_sync_execution_done that works on the
4494 current UI. */
4496 static void
4497 check_curr_ui_sync_execution_done (void)
4499 struct ui *ui = current_ui;
4501 if (ui->prompt_state == PROMPT_NEEDED
4502 && ui->async
4503 && !gdb_in_secondary_prompt_p (ui))
4505 target_terminal::ours ();
4506 top_level_interpreter ()->on_sync_execution_done ();
4507 ui->register_file_handler ();
4511 /* See infrun.h. */
4513 void
4514 all_uis_check_sync_execution_done (void)
4516 SWITCH_THRU_ALL_UIS ()
4518 check_curr_ui_sync_execution_done ();
4522 /* See infrun.h. */
4524 void
4525 all_uis_on_sync_execution_starting (void)
4527 SWITCH_THRU_ALL_UIS ()
4529 if (current_ui->prompt_state == PROMPT_NEEDED)
4530 async_disable_stdin ();
4534 /* A quit_handler callback installed while we're handling inferior
4535 events. */
4537 static void
4538 infrun_quit_handler ()
4540 if (target_terminal::is_ours ())
4542 /* Do nothing.
4544 default_quit_handler would throw a quit in this case, but if
4545 we're handling an event while we have the terminal, it means
4546 the target is running a background execution command, and
4547 thus when users press Ctrl-C, they're wanting to interrupt
4548 whatever command they were executing in the command line.
4549 E.g.:
4551 (gdb) c&
4552 (gdb) foo bar whatever<ctrl-c>
4554 That Ctrl-C should clear the input line, not interrupt event
4555 handling if it happens that the user types Ctrl-C at just the
4556 "wrong" time!
4558 It's as-if background event handling was handled by a
4559 separate background thread.
4561 To be clear, the Ctrl-C is not lost -- it will be processed
4562 by the next QUIT call once we're out of fetch_inferior_event
4563 again. */
4565 else
4567 if (check_quit_flag ())
4568 target_pass_ctrlc ();
4572 /* Asynchronous version of wait_for_inferior. It is called by the
4573 event loop whenever a change of state is detected on the file
4574 descriptor corresponding to the target. It can be called more than
4575 once to complete a single execution command.  If it is the last time
4577 that this function is called for a single execution command, then
4578 report to the user that the inferior has stopped, and do the
4579 necessary cleanups. */
4581 void
4582 fetch_inferior_event ()
4584 INFRUN_SCOPED_DEBUG_ENTER_EXIT;
4586 execution_control_state ecs;
4587 int cmd_done = 0;
4589 /* Events are always processed with the main UI as current UI. This
4590 way, warnings, debug output, etc. are always consistently sent to
4591 the main console. */
4592 scoped_restore save_ui = make_scoped_restore (&current_ui, main_ui);
4594 /* Temporarily disable pagination. Otherwise, the user would be
4595 given an option to press 'q' to quit, which would cause an early
4596 exit and could leave GDB in a half-baked state. */
4597 scoped_restore save_pagination
4598 = make_scoped_restore (&pagination_enabled, false);
4600 /* Install a quit handler that does nothing if we have the terminal
4601 (meaning the target is running a background execution command),
4602 so that Ctrl-C never interrupts GDB before the event is fully
4603 handled. */
4604 scoped_restore restore_quit_handler
4605 = make_scoped_restore (&quit_handler, infrun_quit_handler);
4607 /* Make sure a SIGINT does not interrupt an extension language while
4608 we're handling an event. That could interrupt a Python unwinder
4609 or a Python observer or some such. A Ctrl-C should either be
4610 forwarded to the inferior if the inferior has the terminal, or,
4611 if GDB has the terminal, should interrupt the command the user is
4612 typing in the CLI. */
4613 scoped_disable_cooperative_sigint_handling restore_coop_sigint;
4615 /* End up with readline processing input, if necessary. */
4617 SCOPE_EXIT { reinstall_readline_callback_handler_cleanup (); };
4619 /* We're handling a live event, so make sure we're doing live
4620 debugging. If we're looking at traceframes while the target is
4621 running, we're going to need to get back to that mode after
4622 handling the event. */
4623 std::optional<scoped_restore_current_traceframe> maybe_restore_traceframe;
4624 if (non_stop)
4626 maybe_restore_traceframe.emplace ();
4627 set_current_traceframe (-1);
4630 /* The user/frontend should not notice a thread switch due to
4631 internal events. Make sure we revert to the user selected
4632 thread and frame after handling the event and running any
4633 breakpoint commands. */
4634 scoped_restore_current_thread restore_thread;
4636 overlay_cache_invalid = 1;
4637 /* Flush target cache before starting to handle each event. Target
4638 was running and cache could be stale. This is just a heuristic.
4639 Running threads may modify target memory, but we don't get any
4640 event. */
4641 target_dcache_invalidate (current_program_space->aspace);
4643 scoped_restore save_exec_dir
4644 = make_scoped_restore (&execution_direction,
4645 target_execution_direction ());
4647 /* Allow targets to pause their resumed threads while we handle
4648 the event. */
4649 scoped_disable_commit_resumed disable_commit_resumed ("handling event");
4651 /* Is the current thread performing an inferior function call as part
4652 of a breakpoint condition evaluation? */
4653 bool in_cond_eval = (inferior_ptid != null_ptid
4654 && inferior_thread ()->control.in_cond_eval);
4656 /* If the thread is in the middle of the condition evaluation, wait for
4657 an event from the current thread. Otherwise, wait for an event from
4658 any thread. */
4659 ptid_t waiton_ptid = in_cond_eval ? inferior_ptid : minus_one_ptid;
4661 if (!do_target_wait (waiton_ptid, &ecs, TARGET_WNOHANG))
4663 infrun_debug_printf ("do_target_wait returned no event");
4664 disable_commit_resumed.reset_and_commit ();
4665 return;
4668 gdb_assert (ecs.ws.kind () != TARGET_WAITKIND_IGNORE);
4670 /* Switch to the inferior that generated the event, so we can do
4671 target calls.  If the event was not associated to a ptid, just switch
4672 to the event's target, with no thread selected.  */
4672 if (ecs.ptid != null_ptid
4673 && ecs.ptid != minus_one_ptid)
4674 switch_to_inferior_no_thread (find_inferior_ptid (ecs.target, ecs.ptid));
4675 else
4676 switch_to_target_no_thread (ecs.target);
4678 if (debug_infrun)
4679 print_target_wait_results (minus_one_ptid, ecs.ptid, ecs.ws);
4681 /* If an error happens while handling the event, propagate GDB's
4682 knowledge of the executing state to the frontend/user running
4683 state. */
4684 ptid_t finish_ptid = !target_is_non_stop_p () ? minus_one_ptid : ecs.ptid;
4685 scoped_finish_thread_state finish_state (ecs.target, finish_ptid);
4687 /* Get executed before scoped_restore_current_thread above to apply
4688 still for the thread which has thrown the exception. */
4689 auto defer_bpstat_clear
4690 = make_scope_exit (bpstat_clear_actions);
4691 auto defer_delete_threads
4692 = make_scope_exit (delete_just_stopped_threads_infrun_breakpoints);
4694 int stop_id = get_stop_id ();
4696 /* Now figure out what to do with the result of the wait.  */
4697 handle_inferior_event (&ecs);
4699 if (!ecs.wait_some_more)
4701 struct inferior *inf = find_inferior_ptid (ecs.target, ecs.ptid);
4702 bool should_stop = true;
4703 struct thread_info *thr = ecs.event_thread;
4705 delete_just_stopped_threads_infrun_breakpoints ();
4707 if (thr != nullptr && thr->thread_fsm () != nullptr)
4708 should_stop = thr->thread_fsm ()->should_stop (thr);
4710 if (!should_stop)
4712 keep_going (&ecs);
4714 else
4716 bool should_notify_stop = true;
4717 bool proceeded = false;
4719 /* If the thread that stopped just completed an inferior
4720 function call as part of a condition evaluation, then we
4721 don't want to stop all the other threads. */
4722 if (ecs.event_thread == nullptr
4723 || !ecs.event_thread->control.in_cond_eval)
4724 stop_all_threads_if_all_stop_mode ();
4726 clean_up_just_stopped_threads_fsms (&ecs);
4728 if (stop_id != get_stop_id ())
4730 /* If the stop-id has changed then a stop has already been
4731 presented to the user in handle_inferior_event; this is
4732 likely a failed inferior call. As the stop has already
4733 been announced then we should not notify again.
4735 Also, if the prompt state is not PROMPT_NEEDED then GDB
4736 will not be ready for user input after this function. */
4737 should_notify_stop = false;
4738 gdb_assert (current_ui->prompt_state == PROMPT_NEEDED);
4740 else if (thr != nullptr && thr->thread_fsm () != nullptr)
4741 should_notify_stop
4742 = thr->thread_fsm ()->should_notify_stop ();
4744 if (should_notify_stop)
4746 /* We may not find an inferior if this was a process exit. */
4747 if (inf == nullptr || inf->control.stop_soon == NO_STOP_QUIETLY)
4748 proceeded = normal_stop ();
4751 if (!proceeded && !in_cond_eval)
4753 inferior_event_handler (INF_EXEC_COMPLETE);
4754 cmd_done = 1;
4757 /* If we got a TARGET_WAITKIND_NO_RESUMED event, then the
4758 previously selected thread is gone. We have two
4759 choices - switch to no thread selected, or restore the
4760 previously selected thread (now exited). We chose the
4761 latter, just because that's what GDB used to do.  After
4762 this, "info threads" says "The current thread <Thread
4763 ID 2> has terminated." instead of "No thread
4764 selected.". */
4765 if (!non_stop
4766 && cmd_done
4767 && ecs.ws.kind () != TARGET_WAITKIND_NO_RESUMED)
4768 restore_thread.dont_restore ();
4772 defer_delete_threads.release ();
4773 defer_bpstat_clear.release ();
4775 /* No error, don't finish the thread states yet. */
4776 finish_state.release ();
4778 disable_commit_resumed.reset_and_commit ();
4780 /* This scope is used to ensure that readline callbacks are
4781 reinstalled here. */
4784 /* Handling this event might have caused some inferiors to become prunable.
4785 For example, the exit of an inferior that was automatically added. Try
4786 to get rid of them. Keeping those around slows down things linearly.
4788 Note that this never removes the current inferior. Therefore, call this
4789 after RESTORE_THREAD went out of scope, in case the event inferior (which was
4790 temporarily made the current inferior) is meant to be deleted.
4792 Call this before all_uis_check_sync_execution_done, so that notifications about
4793 removed inferiors appear before the prompt. */
4794 prune_inferiors ();
4796 /* If a UI was in sync execution mode, and now isn't, restore its
4797 prompt (a synchronous execution command has finished, and we're
4798 ready for input). */
4799 all_uis_check_sync_execution_done ();
4801 if (cmd_done
4802 && exec_done_display_p
4803 && (inferior_ptid == null_ptid
4804 || inferior_thread ()->state != THREAD_RUNNING))
4805 gdb_printf (_("completed.\n"));
4808 /* See infrun.h. */
4810 void
4811 set_step_info (thread_info *tp, const frame_info_ptr &frame,
4812 struct symtab_and_line sal)
4814 /* This can be removed once this function no longer implicitly relies on the
4815 inferior_ptid value. */
4816 gdb_assert (inferior_ptid == tp->ptid);
4818 tp->control.step_frame_id = get_frame_id (frame);
4819 tp->control.step_stack_frame_id = get_stack_frame_id (frame);
4821 tp->current_symtab = sal.symtab;
4822 tp->current_line = sal.line;
4824 infrun_debug_printf
4825 ("symtab = %s, line = %d, step_frame_id = %s, step_stack_frame_id = %s",
4826 tp->current_symtab != nullptr ? tp->current_symtab->filename : "<null>",
4827 tp->current_line,
4828 tp->control.step_frame_id.to_string ().c_str (),
4829 tp->control.step_stack_frame_id.to_string ().c_str ());
4832 /* Clear context switchable stepping state. */
4834 void
4835 init_thread_stepping_state (struct thread_info *tss)
4837 tss->stepped_breakpoint = 0;
4838 tss->stepping_over_breakpoint = 0;
4839 tss->stepping_over_watchpoint = 0;
4840 tss->step_after_step_resume_breakpoint = 0;
4843 /* See infrun.h. */
4845 void
4846 set_last_target_status (process_stratum_target *target, ptid_t ptid,
4847 const target_waitstatus &status)
4849 target_last_proc_target = target;
4850 target_last_wait_ptid = ptid;
4851 target_last_waitstatus = status;
4854 /* See infrun.h. */
4856 void
4857 get_last_target_status (process_stratum_target **target, ptid_t *ptid,
4858 target_waitstatus *status)
4860 if (target != nullptr)
4861 *target = target_last_proc_target;
4862 if (ptid != nullptr)
4863 *ptid = target_last_wait_ptid;
4864 if (status != nullptr)
4865 *status = target_last_waitstatus;
4868 /* See infrun.h. */
4870 void
4871 nullify_last_target_wait_ptid (void)
4873 target_last_proc_target = nullptr;
4874 target_last_wait_ptid = minus_one_ptid;
4875 target_last_waitstatus = {};
4878 /* Switch thread contexts. */
4880 static void
4881 context_switch (execution_control_state *ecs)
4883 if (ecs->ptid != inferior_ptid
4884 && (inferior_ptid == null_ptid
4885 || ecs->event_thread != inferior_thread ()))
4887 infrun_debug_printf ("Switching context from %s to %s",
4888 inferior_ptid.to_string ().c_str (),
4889 ecs->ptid.to_string ().c_str ());
4892 switch_to_thread (ecs->event_thread);
4895 /* If the target can't tell whether we've hit breakpoints
4896 (target_supports_stopped_by_sw_breakpoint), and we got a SIGTRAP,
4897 check whether that could have been caused by a breakpoint. If so,
4898 adjust the PC, per gdbarch_decr_pc_after_break. */
4900 static void
4901 adjust_pc_after_break (struct thread_info *thread,
4902 const target_waitstatus &ws)
4904 struct regcache *regcache;
4905 struct gdbarch *gdbarch;
4906 CORE_ADDR breakpoint_pc, decr_pc;
4908 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
4909 we aren't, just return.
4911 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
4912 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
4913 implemented by software breakpoints should be handled through the normal
4914 breakpoint layer.
4916 NOTE drow/2004-01-31: On some targets, breakpoints may generate
4917 different signals (SIGILL or SIGEMT for instance), but it is less
4918 clear where the PC is pointing afterwards. It may not match
4919 gdbarch_decr_pc_after_break.  I don't know of any specific target that
4920 generates these signals at breakpoints (the code has been in GDB since at
4921 least 1992) so I cannot guess how to handle them here.
4923 In earlier versions of GDB, a target with
4924 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
4925 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
4926 target with both of these set in GDB history, and it seems unlikely to be
4927 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
4929 if (ws.kind () != TARGET_WAITKIND_STOPPED)
4930 return;
4932 if (ws.sig () != GDB_SIGNAL_TRAP)
4933 return;
4935 /* In reverse execution, when a breakpoint is hit, the instruction
4936 under it has already been de-executed. The reported PC always
4937 points at the breakpoint address, so adjusting it further would
4938 be wrong. E.g., consider this case on a decr_pc_after_break == 1
4939 architecture:
4941 B1 0x08000000 : INSN1
4942 B2 0x08000001 : INSN2
4943 0x08000002 : INSN3
4944 PC -> 0x08000003 : INSN4
4946 Say you're stopped at 0x08000003 as above. Reverse continuing
4947 from that point should hit B2 as below. Reading the PC when the
4948 SIGTRAP is reported should read 0x08000001 and INSN2 should have
4949 been de-executed already.
4951 B1 0x08000000 : INSN1
4952 B2 PC -> 0x08000001 : INSN2
4953 0x08000002 : INSN3
4954 0x08000003 : INSN4
4956 We can't apply the same logic as for forward execution, because
4957 we would wrongly adjust the PC to 0x08000000, since there's a
4958 breakpoint at PC - 1. We'd then report a hit on B1, although
4959 INSN1 hadn't been de-executed yet. Doing nothing is the correct
4960 behaviour. */
4961 if (execution_direction == EXEC_REVERSE)
4962 return;
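/* By contrast, in forward execution on the decr_pc_after_break == 1
   architecture sketched above, hitting B2 (a software breakpoint planted
   at 0x08000001) reports PC == 0x08000002.  The code below computes
   breakpoint_pc = PC - 1 = 0x08000001, finds a breakpoint inserted there,
   and rewinds the PC so the stop is attributed to B2 (addresses reused
   from the illustrative example above).  */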
4964 /* If the target can tell whether the thread hit a SW breakpoint,
4965 trust it. Targets that can tell also adjust the PC
4966 themselves. */
4967 if (target_supports_stopped_by_sw_breakpoint ())
4968 return;
4970 /* Note that relying on whether a breakpoint is planted in memory to
4971 determine this can fail.  E.g., the breakpoint could have been
4972 removed since. Or the thread could have been told to step an
4973 instruction the size of a breakpoint instruction, and only
4974 _after_ was a breakpoint inserted at its address. */
4976 /* If this target does not decrement the PC after breakpoints, then
4977 we have nothing to do. */
4978 regcache = get_thread_regcache (thread);
4979 gdbarch = regcache->arch ();
4981 decr_pc = gdbarch_decr_pc_after_break (gdbarch);
4982 if (decr_pc == 0)
4983 return;
4985 const address_space *aspace = thread->inf->aspace.get ();
4987 /* Find the location where (if we've hit a breakpoint) the
4988 breakpoint would be. */
4989 breakpoint_pc = regcache_read_pc (regcache) - decr_pc;
4991 /* If the target can't tell whether a software breakpoint triggered,
4992 fallback to figuring it out based on breakpoints we think were
4993 inserted in the target, and on whether the thread was stepped or
4994 continued. */
4996 /* Check whether there actually is a software breakpoint inserted at
4997 that location.
4999 If in non-stop mode, a race condition is possible where we've
5000 removed a breakpoint, but stop events for that breakpoint were
5001 already queued and arrive later. To suppress those spurious
5002 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
5003 and retire them after a number of stop events are reported. Note
5004 this is a heuristic and can thus get confused.  The real fix is
5005 to get the "stopped by SW BP and needs adjustment" info out of
5006 the target/kernel (and thus never reach here; see above). */
5007 if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
5008 || (target_is_non_stop_p ()
5009 && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
5011 std::optional<scoped_restore_tmpl<int>> restore_operation_disable;
5013 if (record_full_is_used ())
5014 restore_operation_disable.emplace
5015 (record_full_gdb_operation_disable_set ());
5017 /* When using hardware single-step, a SIGTRAP is reported for both
5018 a completed single-step and a software breakpoint. Need to
5019 differentiate between the two, as the latter needs adjusting
5020 but the former does not.
5022 The SIGTRAP can be due to a completed hardware single-step only if
5023 - we didn't insert software single-step breakpoints
5024 - this thread is currently being stepped
5026 If either of these conditions does not hold, we must have stopped due
5027 to hitting a software breakpoint, and have to back up to the
5028 breakpoint address.
5030 As a special case, we could have hardware single-stepped a
5031 software breakpoint. In this case (prev_pc == breakpoint_pc),
5032 we also need to back up to the breakpoint address. */
5034 if (thread_has_single_step_breakpoints_set (thread)
5035 || !currently_stepping (thread)
5036 || (thread->stepped_breakpoint
5037 && thread->prev_pc == breakpoint_pc))
5038 regcache_write_pc (regcache, breakpoint_pc);
5042 static bool
5043 stepped_in_from (const frame_info_ptr &initial_frame, frame_id step_frame_id)
5045 frame_info_ptr frame = initial_frame;
5047 for (frame = get_prev_frame (frame);
5048 frame != nullptr;
5049 frame = get_prev_frame (frame))
5051 if (get_frame_id (frame) == step_frame_id)
5052 return true;
5054 if (get_frame_type (frame) != INLINE_FRAME)
5055 break;
5058 return false;
5061 /* Look for an inline frame that is marked for skip.
5062 If PREV_FRAME is TRUE start at the previous frame,
5063 otherwise start at the current frame. Stop at the
5064 first non-inline frame, or at the frame where the
5065 step started. */
5067 static bool
5068 inline_frame_is_marked_for_skip (bool prev_frame, struct thread_info *tp)
5070 frame_info_ptr frame = get_current_frame ();
5072 if (prev_frame)
5073 frame = get_prev_frame (frame);
5075 for (; frame != nullptr; frame = get_prev_frame (frame))
5077 const char *fn = nullptr;
5078 symtab_and_line sal;
5079 struct symbol *sym;
5081 if (get_frame_id (frame) == tp->control.step_frame_id)
5082 break;
5083 if (get_frame_type (frame) != INLINE_FRAME)
5084 break;
5086 sal = find_frame_sal (frame);
5087 sym = get_frame_function (frame);
5089 if (sym != nullptr)
5090 fn = sym->print_name ();
5092 if (sal.line != 0
5093 && function_name_is_marked_for_skip (fn, sal))
5094 return true;
5097 return false;
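/* For example, after something like "skip function inlined_helper" (a
   hypothetical function name), function_name_is_marked_for_skip returns
   true for the corresponding inline frame, so a step that lands in that
   inlined code keeps going instead of stopping there.  */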
5100 /* If the event thread has the stop requested flag set, pretend it
5101 stopped for a GDB_SIGNAL_0 (i.e., as if it stopped due to
5102 target_stop). */
5104 static bool
5105 handle_stop_requested (struct execution_control_state *ecs)
5107 if (ecs->event_thread->stop_requested)
5109 ecs->ws.set_stopped (GDB_SIGNAL_0);
5110 handle_signal_stop (ecs);
5111 return true;
5113 return false;
5116 /* Auxiliary function that handles syscall entry/return events.
5117 It returns true if the inferior should keep going (and GDB
5118 should ignore the event), or false if the event deserves to be
5119 processed. */
5121 static bool
5122 handle_syscall_event (struct execution_control_state *ecs)
5124 struct regcache *regcache;
5125 int syscall_number;
5127 context_switch (ecs);
5129 regcache = get_thread_regcache (ecs->event_thread);
5130 syscall_number = ecs->ws.syscall_number ();
5131 ecs->event_thread->set_stop_pc (regcache_read_pc (regcache));
5133 if (catch_syscall_enabled ()
5134 && catching_syscall_number (syscall_number))
5136 infrun_debug_printf ("syscall number=%d", syscall_number);
5138 ecs->event_thread->control.stop_bpstat
5139 = bpstat_stop_status_nowatch (ecs->event_thread->inf->aspace.get (),
5140 ecs->event_thread->stop_pc (),
5141 ecs->event_thread, ecs->ws);
5143 if (handle_stop_requested (ecs))
5144 return false;
5146 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
5148 /* Catchpoint hit. */
5149 return false;
5153 if (handle_stop_requested (ecs))
5154 return false;
5156 /* If no catchpoint triggered for this, then keep going. */
5157 keep_going (ecs);
5159 return true;
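/* Illustrative usage (an aside, not part of infrun.c): the catchpoint
   check above is what makes "catch syscall" stop only for the syscalls
   the user asked about, e.g.:

     (gdb) catch syscall write
     (gdb) continue

   Entry and return events for other syscalls reach this function too,
   but bpstat_causes_stop is false for them, so keep_going resumes the
   inferior without reporting a stop.  */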
5162 /* Lazily fill in the execution_control_state's stop_func_* fields. */
5164 static void
5165 fill_in_stop_func (struct gdbarch *gdbarch,
5166 struct execution_control_state *ecs)
5168 if (!ecs->stop_func_filled_in)
5170 const block *block;
5171 const general_symbol_info *gsi;
5173 /* Don't care about return value; stop_func_start and stop_func_name
5174 will both be 0 if it doesn't work. */
5175 find_pc_partial_function_sym (ecs->event_thread->stop_pc (),
5176 &gsi,
5177 &ecs->stop_func_start,
5178 &ecs->stop_func_end,
5179 &block);
5180 ecs->stop_func_name = gsi == nullptr ? nullptr : gsi->print_name ();
5182 /* The call to find_pc_partial_function_sym, above, will set
5183 stop_func_start and stop_func_end to the start and end
5184 of the range containing the stop pc. If this range
5185 contains the entry pc for the block (which is always the
5186 case for contiguous blocks), advance stop_func_start past
5187 the function's start offset and entrypoint. Note that
5188 stop_func_start is NOT advanced when in a range of a
5189 non-contiguous block that does not contain the entry pc. */
5190 if (block != nullptr
5191 && ecs->stop_func_start <= block->entry_pc ()
5192 && block->entry_pc () < ecs->stop_func_end)
5194 ecs->stop_func_start
5195 += gdbarch_deprecated_function_start_offset (gdbarch);
5197 /* PowerPC functions have a Local Entry Point (LEP) and a Global
5198 Entry Point (GEP). There is only one Entry Point (GEP = LEP) for
5199 other architectures. */
5200 ecs->stop_func_alt_start = ecs->stop_func_start;
5202 if (gdbarch_skip_entrypoint_p (gdbarch))
5203 ecs->stop_func_start
5204 = gdbarch_skip_entrypoint (gdbarch, ecs->stop_func_start);
5207 ecs->stop_func_filled_in = 1;
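/* Illustrative example (an aside, not part of infrun.c; the offsets
   are typical, not guaranteed): on powerpc64le ELFv2, a function may
   have a global entry point (GEP) that sets up the TOC pointer,
   followed a couple of instructions later by a local entry point
   (LEP).  For such a function, stop_func_alt_start keeps the GEP
   address while gdbarch_skip_entrypoint advances stop_func_start to
   the LEP, the address that local calls actually reach.  */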
5212 /* Return the STOP_SOON field of the inferior pointed at by ECS. */
5214 static enum stop_kind
5215 get_inferior_stop_soon (execution_control_state *ecs)
5217 struct inferior *inf = find_inferior_ptid (ecs->target, ecs->ptid);
5219 gdb_assert (inf != nullptr);
5220 return inf->control.stop_soon;
5223 /* Poll for one event out of the current target. Store the resulting
5224 waitstatus in WS, and return the event ptid. Does not block. */
5226 static ptid_t
5227 poll_one_curr_target (struct target_waitstatus *ws)
5229 ptid_t event_ptid;
5231 overlay_cache_invalid = 1;
5233 /* Flush target cache before starting to handle each event.
5234 Target was running and cache could be stale. This is just a
5235 heuristic. Running threads may modify target memory, but we
5236 don't get any event. */
5237 target_dcache_invalidate (current_program_space->aspace);
5239 event_ptid = target_wait (minus_one_ptid, ws, TARGET_WNOHANG);
5241 if (debug_infrun)
5242 print_target_wait_results (minus_one_ptid, event_ptid, *ws);
5244 return event_ptid;
5247 /* Wait for one event out of any target. */
5249 static wait_one_event
5250 wait_one ()
5252 while (1)
5254 for (inferior *inf : all_inferiors ())
5256 process_stratum_target *target = inf->process_target ();
5257 if (target == nullptr
5258 || !target->is_async_p ()
5259 || !target->threads_executing)
5260 continue;
5262 switch_to_inferior_no_thread (inf);
5264 wait_one_event event;
5265 event.target = target;
5266 event.ptid = poll_one_curr_target (&event.ws);
5268 if (event.ws.kind () == TARGET_WAITKIND_NO_RESUMED)
5270 /* If nothing is resumed, remove the target from the
5271 event loop. */
5272 target_async (false);
5274 else if (event.ws.kind () != TARGET_WAITKIND_IGNORE)
5275 return event;
5278 /* Block waiting for some event. */
5280 fd_set readfds;
5281 int nfds = 0;
5283 FD_ZERO (&readfds);
5285 for (inferior *inf : all_inferiors ())
5287 process_stratum_target *target = inf->process_target ();
5288 if (target == nullptr
5289 || !target->is_async_p ()
5290 || !target->threads_executing)
5291 continue;
5293 int fd = target->async_wait_fd ();
5294 FD_SET (fd, &readfds);
5295 if (nfds <= fd)
5296 nfds = fd + 1;
5299 if (nfds == 0)
5301 /* No waitable targets left. All must be stopped. */
5302 infrun_debug_printf ("no waitable targets left");
5304 target_waitstatus ws;
5305 ws.set_no_resumed ();
5306 return {nullptr, minus_one_ptid, std::move (ws)};
5309 QUIT;
5311 int numfds = interruptible_select (nfds, &readfds, 0, nullptr, 0);
5312 if (numfds < 0)
5314 if (errno == EINTR)
5315 continue;
5316 else
5317 perror_with_name ("interruptible_select");
5322 /* Save the thread's event and stop reason to process it later. */
5324 static void
5325 save_waitstatus (struct thread_info *tp, const target_waitstatus &ws)
5327 infrun_debug_printf ("saving status %s for %s",
5328 ws.to_string ().c_str (),
5329 tp->ptid.to_string ().c_str ());
5331 /* Record for later. */
5332 tp->set_pending_waitstatus (ws);
5334 if (ws.kind () == TARGET_WAITKIND_STOPPED
5335 && ws.sig () == GDB_SIGNAL_TRAP)
5337 struct regcache *regcache = get_thread_regcache (tp);
5338 const address_space *aspace = tp->inf->aspace.get ();
5339 CORE_ADDR pc = regcache_read_pc (regcache);
5341 adjust_pc_after_break (tp, tp->pending_waitstatus ());
5343 scoped_restore_current_thread restore_thread;
5344 switch_to_thread (tp);
5346 if (target_stopped_by_watchpoint ())
5347 tp->set_stop_reason (TARGET_STOPPED_BY_WATCHPOINT);
5348 else if (target_supports_stopped_by_sw_breakpoint ()
5349 && target_stopped_by_sw_breakpoint ())
5350 tp->set_stop_reason (TARGET_STOPPED_BY_SW_BREAKPOINT);
5351 else if (target_supports_stopped_by_hw_breakpoint ()
5352 && target_stopped_by_hw_breakpoint ())
5353 tp->set_stop_reason (TARGET_STOPPED_BY_HW_BREAKPOINT);
5354 else if (!target_supports_stopped_by_hw_breakpoint ()
5355 && hardware_breakpoint_inserted_here_p (aspace, pc))
5356 tp->set_stop_reason (TARGET_STOPPED_BY_HW_BREAKPOINT);
5357 else if (!target_supports_stopped_by_sw_breakpoint ()
5358 && software_breakpoint_inserted_here_p (aspace, pc))
5359 tp->set_stop_reason (TARGET_STOPPED_BY_SW_BREAKPOINT);
5360 else if (!thread_has_single_step_breakpoints_set (tp)
5361 && currently_stepping (tp))
5362 tp->set_stop_reason (TARGET_STOPPED_BY_SINGLE_STEP);
5366 /* Mark the non-executing threads accordingly. In all-stop, all
5367 threads of all processes are stopped when we get any event
5368 reported. In non-stop mode, only the event thread stops. */
5370 static void
5371 mark_non_executing_threads (process_stratum_target *target,
5372 ptid_t event_ptid,
5373 const target_waitstatus &ws)
5375 ptid_t mark_ptid;
5377 if (!target_is_non_stop_p ())
5378 mark_ptid = minus_one_ptid;
5379 else if (ws.kind () == TARGET_WAITKIND_SIGNALLED
5380 || ws.kind () == TARGET_WAITKIND_EXITED)
5382 /* If we're handling a process exit in non-stop mode, even
5383 though threads haven't been deleted yet, one would think
5384 that there is nothing to do, as threads of the dead process
5385 will be soon deleted, and threads of any other process were
5386 left running. However, on some targets, threads survive a
5387 process exit event. E.g., for the "checkpoint" command,
5388 when the current checkpoint/fork exits, linux-fork.c
5389 automatically switches to another fork from within
5390 target_mourn_inferior, by associating the same
5391 inferior/thread to another fork. We haven't mourned yet at
5392 this point, but we must mark any threads left in the
5393 process as not-executing so that finish_thread_state marks
5394 them stopped (from the user's perspective) if/when we present
5395 the stop to the user. */
5396 mark_ptid = ptid_t (event_ptid.pid ());
5398 else
5399 mark_ptid = event_ptid;
5401 set_executing (target, mark_ptid, false);
5403 /* Likewise the resumed flag. */
5404 set_resumed (target, mark_ptid, false);
5407 /* Handle one event after stopping threads. If the eventing thread
5408 reports back any interesting event, we leave it pending. If the
5409 eventing thread was in the middle of a displaced step, we
5410 cancel/finish it, and unless the thread's inferior is being
5411 detached, put the thread back in the step-over chain. Returns true
5412 if there are no resumed threads left in the target (thus there's no
5413 point in waiting further), false otherwise. */
5415 static bool
5416 handle_one (const wait_one_event &event)
5418 infrun_debug_printf
5419 ("%s %s", event.ws.to_string ().c_str (),
5420 event.ptid.to_string ().c_str ());
5422 if (event.ws.kind () == TARGET_WAITKIND_NO_RESUMED)
5424 /* All resumed threads exited. */
5425 return true;
5427 else if (event.ws.kind () == TARGET_WAITKIND_THREAD_EXITED
5428 || event.ws.kind () == TARGET_WAITKIND_EXITED
5429 || event.ws.kind () == TARGET_WAITKIND_SIGNALLED)
5431 /* One thread/process exited/signalled. */
5433 thread_info *t = nullptr;
5435 /* The target may have reported just a pid. If so, try
5436 the first non-exited thread. */
5437 if (event.ptid.is_pid ())
5439 int pid = event.ptid.pid ();
5440 inferior *inf = find_inferior_pid (event.target, pid);
5441 for (thread_info *tp : inf->non_exited_threads ())
5443 t = tp;
5444 break;
5447 /* If there is no available thread, the event would
5448 have to be appended to a per-inferior event list,
5449 which does not exist (and if it did, we'd have
5450 to adjust the run control commands to be able to
5451 resume such an inferior). We assert here instead
5452 of going into an infinite loop. */
5453 gdb_assert (t != nullptr);
5455 infrun_debug_printf
5456 ("using %s", t->ptid.to_string ().c_str ());
5458 else
5460 t = event.target->find_thread (event.ptid);
5461 /* Check if this is the first time we see this thread.
5462 Don't bother adding if it individually exited. */
5463 if (t == nullptr
5464 && event.ws.kind () != TARGET_WAITKIND_THREAD_EXITED)
5465 t = add_thread (event.target, event.ptid);
5468 if (t != nullptr)
5470 /* Set the threads as non-executing to avoid
5471 another stop attempt on them. */
5472 switch_to_thread_no_regs (t);
5473 mark_non_executing_threads (event.target, event.ptid,
5474 event.ws);
5475 save_waitstatus (t, event.ws);
5476 t->stop_requested = false;
5478 if (event.ws.kind () == TARGET_WAITKIND_THREAD_EXITED)
5480 if (displaced_step_finish (t, event.ws)
5481 != DISPLACED_STEP_FINISH_STATUS_OK)
5483 gdb_assert_not_reached ("displaced_step_finish on "
5484 "exited thread failed");
5489 else
5491 thread_info *t = event.target->find_thread (event.ptid);
5492 if (t == nullptr)
5493 t = add_thread (event.target, event.ptid);
5495 t->stop_requested = 0;
5496 t->set_executing (false);
5497 t->set_resumed (false);
5498 t->control.may_range_step = 0;
5500 /* This may be the first time we see the inferior report
5501 a stop. */
5502 if (t->inf->needs_setup)
5504 switch_to_thread_no_regs (t);
5505 setup_inferior (0);
5508 if (event.ws.kind () == TARGET_WAITKIND_STOPPED
5509 && event.ws.sig () == GDB_SIGNAL_0)
5511 /* We caught the event that we intended to catch, so
5512 there's no event to save as pending. */
5514 if (displaced_step_finish (t, event.ws)
5515 == DISPLACED_STEP_FINISH_STATUS_NOT_EXECUTED)
5517 /* Add it back to the step-over queue. */
5518 infrun_debug_printf
5519 ("displaced-step of %s canceled",
5520 t->ptid.to_string ().c_str ());
5522 t->control.trap_expected = 0;
5523 if (!t->inf->detaching)
5524 global_thread_step_over_chain_enqueue (t);
5527 else
5529 struct regcache *regcache;
5531 infrun_debug_printf
5532 ("target_wait %s, saving status for %s",
5533 event.ws.to_string ().c_str (),
5534 t->ptid.to_string ().c_str ());
5536 /* Record for later. */
5537 save_waitstatus (t, event.ws);
5539 if (displaced_step_finish (t, event.ws)
5540 == DISPLACED_STEP_FINISH_STATUS_NOT_EXECUTED)
5542 /* Add it back to the step-over queue. */
5543 t->control.trap_expected = 0;
5544 if (!t->inf->detaching)
5545 global_thread_step_over_chain_enqueue (t);
5548 regcache = get_thread_regcache (t);
5549 t->set_stop_pc (regcache_read_pc (regcache));
5551 infrun_debug_printf ("saved stop_pc=%s for %s "
5552 "(currently_stepping=%d)",
5553 paddress (current_inferior ()->arch (),
5554 t->stop_pc ()),
5555 t->ptid.to_string ().c_str (),
5556 currently_stepping (t));
5560 return false;
5563 /* Helper for stop_all_threads. wait_one waits for events until it
5564 sees a TARGET_WAITKIND_NO_RESUMED event. When it sees one, it
5565 disables target_async for the target to stop waiting for events
5566 from it. TARGET_WAITKIND_NO_RESUMED can be delayed, though.
5567 Consider, for example, debugging against gdbserver:
5569 #1 - Threads 1-5 are running, and thread 1 hits a breakpoint.
5571 #2 - gdb processes the breakpoint hit for thread 1, stops all
5572 threads, and steps thread 1 over the breakpoint. While
5573 stopping threads, some other threads reported interesting
5574 events, which were left pending in the thread's objects
5575 (infrun's queue).
5577 #3 - Thread 1 exits (it stepped an exit syscall), and gdbserver
5578 reports the thread exit for thread 1. The event ends up in
5579 remote's stop reply queue.
5581 #4 - That was the last resumed thread, so gdbserver reports
5582 no-resumed, and that event also ends up in remote's stop
5583 reply queue, queued after the thread exit from #3.
5585 #5 - gdb processes the thread exit event, which finishes the
5586 step-over, and so gdb restarts all threads (threads with
5587 pending events are left marked resumed, but aren't set
5588 executing). The no-resumed event is still left pending in
5589 the remote stop reply queue.
5591 #6 - Since there are now resumed threads with pending breakpoint
5592 hits, gdb picks one at random to process next.
5594 #7 - gdb picks the breakpoint hit for thread 2 this time, and that
5595 breakpoint also needs to be stepped over, so gdb stops all
5596 threads again.
5598 #8 - stop_all_threads counts the number of expected stops and calls
5599 wait_one once for each.
5601 #9 - The first wait_one call collects the no-resumed event from #4
5602 above.
5604 #10 - Seeing the no-resumed event, wait_one disables target async
5605 for the remote target, to stop waiting for events from it.
5606 wait_one from here on always returns no-resumed directly
5607 without reaching the target.
5609 #11 - stop_all_threads still hasn't seen all the stops it expects,
5610 so it does another pass.
5612 #12 - Since the remote target is not async (disabled in #10),
5613 wait_one doesn't wait on it, so it won't see the expected
5614 stops, and instead returns no-resumed directly.
5616 #13 - stop_all_threads still hasn't seen all the stops, so it
5617 does another pass. goto #12, looping forever.
5619 To handle this, we explicitly (re-)enable target async on all
5620 targets that can async every time stop_all_threads goes to wait
5621 for the expected stops. */
5623 static void
5624 reenable_target_async ()
5626 for (inferior *inf : all_inferiors ())
5628 process_stratum_target *target = inf->process_target ();
5629 if (target != nullptr
5630 && target->threads_executing
5631 && target->can_async_p ()
5632 && !target->is_async_p ())
5634 switch_to_inferior_no_thread (inf);
5635 target_async (true);
5640 /* See infrun.h. */
5642 void
5643 stop_all_threads (const char *reason, inferior *inf)
5645 /* We may need multiple passes to discover all threads. */
5646 int pass;
5647 int iterations = 0;
5649 gdb_assert (exists_non_stop_target ());
5651 INFRUN_SCOPED_DEBUG_START_END ("reason=%s, inf=%d", reason,
5652 inf != nullptr ? inf->num : -1);
5654 infrun_debug_show_threads ("non-exited threads",
5655 all_non_exited_threads ());
5657 scoped_restore_current_thread restore_thread;
5659 /* Enable thread events on relevant targets. */
5660 for (auto *target : all_non_exited_process_targets ())
5662 if (inf != nullptr && inf->process_target () != target)
5663 continue;
5665 switch_to_target_no_thread (target);
5666 target_thread_events (true);
5669 SCOPE_EXIT
5671 /* Disable thread events on relevant targets. */
5672 for (auto *target : all_non_exited_process_targets ())
5674 if (inf != nullptr && inf->process_target () != target)
5675 continue;
5677 switch_to_target_no_thread (target);
5678 target_thread_events (false);
5681 /* Use debug_prefixed_printf directly to get a meaningful function
5682 name. */
5683 if (debug_infrun)
5684 debug_prefixed_printf ("infrun", "stop_all_threads", "done");
5687 /* Request threads to stop, and then wait for the stops. Because
5688 threads we already know about can spawn more threads while we're
5689 trying to stop them, and we only learn about new threads when we
5690 update the thread list, do this in a loop, and keep iterating
5691 until two passes find no threads that need to be stopped. */
5692 for (pass = 0; pass < 2; pass++, iterations++)
5694 infrun_debug_printf ("pass=%d, iterations=%d", pass, iterations);
5695 while (1)
5697 int waits_needed = 0;
5699 for (auto *target : all_non_exited_process_targets ())
5701 if (inf != nullptr && inf->process_target () != target)
5702 continue;
5704 switch_to_target_no_thread (target);
5705 update_thread_list ();
5708 /* Go through all threads looking for threads that we need
5709 to tell the target to stop. */
5710 for (thread_info *t : all_non_exited_threads ())
5712 if (inf != nullptr && t->inf != inf)
5713 continue;
5715 /* For a single-target setting with an all-stop target,
5716 we would not even arrive here. For a multi-target
5717 setting, until GDB is able to handle a mixture of
5718 all-stop and non-stop targets, simply skip all-stop
5719 targets' threads. This should be fine due to the
5720 protection of 'check_multi_target_resumption'. */
5722 switch_to_thread_no_regs (t);
5723 if (!target_is_non_stop_p ())
5724 continue;
5726 if (t->executing ())
5728 /* If already stopping, don't request a stop again.
5729 We just haven't seen the notification yet. */
5730 if (!t->stop_requested)
5732 infrun_debug_printf (" %s executing, need stop",
5733 t->ptid.to_string ().c_str ());
5734 target_stop (t->ptid);
5735 t->stop_requested = 1;
5737 else
5739 infrun_debug_printf (" %s executing, already stopping",
5740 t->ptid.to_string ().c_str ());
5743 if (t->stop_requested)
5744 waits_needed++;
5746 else
5748 infrun_debug_printf (" %s not executing",
5749 t->ptid.to_string ().c_str ());
5751 /* The thread may not be executing, but may still be
5752 resumed with a pending status to process. */
5753 t->set_resumed (false);
5757 if (waits_needed == 0)
5758 break;
5760 /* If we find new threads on the second iteration, start
5761 over. We want to see two iterations in a row with all
5762 threads stopped. */
5763 if (pass > 0)
5764 pass = -1;
5766 reenable_target_async ();
5768 for (int i = 0; i < waits_needed; i++)
5770 wait_one_event event = wait_one ();
5771 if (handle_one (event))
5772 break;
5778 /* Handle a TARGET_WAITKIND_NO_RESUMED event. Return true if we
5779 handled the event and should continue waiting. Return false if we
5780 should stop and report the event to the user. */
5782 static bool
5783 handle_no_resumed (struct execution_control_state *ecs)
5785 if (target_can_async_p ())
5787 bool any_sync = false;
5789 for (ui *ui : all_uis ())
5791 if (ui->prompt_state == PROMPT_BLOCKED)
5793 any_sync = true;
5794 break;
5797 if (!any_sync)
5799 /* There were no unwaited-for children left in the target, but
5800 we're not synchronously waiting for events either. Just
5801 ignore. */
5803 infrun_debug_printf ("TARGET_WAITKIND_NO_RESUMED (ignoring: bg)");
5804 prepare_to_wait (ecs);
5805 return true;
5809 /* Otherwise, if we were running a synchronous execution command, we
5810 may need to cancel it and give the user back the terminal.
5812 In non-stop mode, the target can't tell whether we've already
5813 consumed previous stop events, so it can end up sending us a
5814 no-resumed event like so:
5816 #0 - thread 1 is left stopped
5818 #1 - thread 2 is resumed and hits breakpoint
5819 -> TARGET_WAITKIND_STOPPED
5821 #2 - thread 3 is resumed and exits
5822 this is the last resumed thread, so
5823 -> TARGET_WAITKIND_NO_RESUMED
5825 #3 - gdb processes stop for thread 2 and decides to re-resume
5828 #4 - gdb processes the TARGET_WAITKIND_NO_RESUMED event.
5829 thread 2 is now resumed, so the event should be ignored.
5831 IOW, if the stop for thread 2 doesn't end a foreground command,
5832 then we need to ignore the following TARGET_WAITKIND_NO_RESUMED
5833 event. But it could be that the event meant that thread 2 itself
5834 (or whatever other thread was the last resumed thread) exited.
5836 To address this we refresh the thread list and check whether we
5837 have resumed threads _now_. In the example above, this removes
5838 thread 3 from the thread list. If thread 2 was re-resumed, we
5839 ignore this event. If we find no thread resumed, then we cancel
5840 the synchronous command and show "no unwaited-for children left" to the
5841 user. */
5843 inferior *curr_inf = current_inferior ();
5845 scoped_restore_current_thread restore_thread;
5846 update_thread_list ();
5848 /* If:
5850 - the current target has no thread executing, and
5851 - the current inferior is native, and
5852 - the current inferior is the one which has the terminal, and
5853 - we did nothing,
5855 then a Ctrl-C from this point on would remain stuck in the
5856 kernel, until a thread resumes and dequeues it. That would
5857 result in the GDB CLI not reacting to Ctrl-C, and being unable to
5858 interrupt the program. To address this, if the current inferior
5859 no longer has any thread executing, we give the terminal to some
5860 other inferior that has at least one thread executing. */
5861 bool swap_terminal = true;
5863 /* Whether to ignore this TARGET_WAITKIND_NO_RESUMED event, or
5864 whether to report it to the user. */
5865 bool ignore_event = false;
5867 for (thread_info *thread : all_non_exited_threads ())
5869 if (swap_terminal && thread->executing ())
5871 if (thread->inf != curr_inf)
5873 target_terminal::ours ();
5875 switch_to_thread (thread);
5876 target_terminal::inferior ();
5878 swap_terminal = false;
5881 if (!ignore_event && thread->resumed ())
5883 /* Either there were no unwaited-for children left in the
5884 target at some point, but there are now, or some target
5885 other than the eventing one has unwaited-for children
5886 left. Just ignore. */
5887 infrun_debug_printf ("TARGET_WAITKIND_NO_RESUMED "
5888 "(ignoring: found resumed)");
5890 ignore_event = true;
5893 if (ignore_event && !swap_terminal)
5894 break;
5897 if (ignore_event)
5899 switch_to_inferior_no_thread (curr_inf);
5900 prepare_to_wait (ecs);
5901 return true;
5904 /* Go ahead and report the event. */
5905 return false;
5908 /* Handle a TARGET_WAITKIND_THREAD_EXITED event. Return true if we
5909 handled the event and should continue waiting. Return false if we
5910 should stop and report the event to the user. */
5912 static bool
5913 handle_thread_exited (execution_control_state *ecs)
5915 context_switch (ecs);
5917 /* Clear these so we don't re-start the thread stepping over a
5918 breakpoint/watchpoint. */
5919 ecs->event_thread->stepping_over_breakpoint = 0;
5920 ecs->event_thread->stepping_over_watchpoint = 0;
5922 /* If the thread had an FSM, then abort the command. But only after
5923 finishing the step over, as in non-stop mode, aborting this
5924 thread's command should not interfere with other threads. We
5925 must check this before finish_step_over, however, which may
5926 update the thread list and delete the event thread. */
5927 bool abort_cmd = (ecs->event_thread->thread_fsm () != nullptr);
5929 /* Mark the thread exited right now, because finish_step_over may
5930 update the thread list and that may delete the thread silently
5931 (depending on target), while we always want to emit the "[Thread
5932 ... exited]" notification. Don't actually delete the thread yet,
5933 because we need to pass its pointer down to finish_step_over. */
5934 set_thread_exited (ecs->event_thread);
5936 /* Maybe the thread was doing a step-over, if so release
5937 resources and start any further pending step-overs.
5939 If we are on a non-stop target and the thread was doing an
5940 in-line step, this also restarts the other threads. */
5941 int ret = finish_step_over (ecs);
5943 /* finish_step_over returns true if it moves ecs' wait status
5944 back into the thread, so that we go handle another pending
5945 event before this one. But we know it never does that if
5946 the event thread has exited. */
5947 gdb_assert (ret == 0);
5949 if (abort_cmd)
5951 /* We're stopping for the thread exit event. Switch to the
5952 event thread again, as finish_step_over may have switched
5953 threads. */
5954 switch_to_thread (ecs->event_thread);
5955 ecs->event_thread = nullptr;
5956 return false;
5959 /* If finish_step_over started a new in-line step-over, don't
5960 try to restart anything else. */
5961 if (step_over_info_valid_p ())
5963 delete_thread (ecs->event_thread);
5964 return true;
5967 /* Maybe we are on an all-stop target and we got this event
5968 while doing a step-like command on another thread. If so,
5969 go back to doing that. If this thread was stepping,
5970 switch_back_to_stepped_thread will consider that the thread
5971 was interrupted mid-step and will try to keep stepping it. We
5972 don't want that; the thread is gone. So clear the proceed
5973 status so it doesn't do that. */
5974 clear_proceed_status_thread (ecs->event_thread);
5975 if (switch_back_to_stepped_thread (ecs))
5977 delete_thread (ecs->event_thread);
5978 return true;
5981 inferior *inf = ecs->event_thread->inf;
5982 bool slock_applies = schedlock_applies (ecs->event_thread);
5984 delete_thread (ecs->event_thread);
5985 ecs->event_thread = nullptr;
5987 /* Continue handling the event as if we had gotten a
5988 TARGET_WAITKIND_NO_RESUMED. */
5989 auto handle_as_no_resumed = [ecs] ()
5991 /* handle_no_resumed doesn't really look at the event kind, but
5992 normal_stop does. */
5993 ecs->ws.set_no_resumed ();
5994 ecs->event_thread = nullptr;
5995 ecs->ptid = minus_one_ptid;
5997 /* Re-record the last target status. */
5998 set_last_target_status (ecs->target, ecs->ptid, ecs->ws);
6000 return handle_no_resumed (ecs);
6003 /* If we are on an all-stop target, the target has stopped all
6004 threads to report the event. We don't actually want to
6005 stop, so restart the threads. */
6006 if (!target_is_non_stop_p ())
6008 if (slock_applies)
6010 /* Since the target is !non-stop, everything is stopped
6011 at this point, and we can't assume we'll get further
6012 events until we resume the target again. Handle this
6013 event as if it were a TARGET_WAITKIND_NO_RESUMED. Note
6014 this refreshes the thread list and checks whether there
6015 are other resumed threads before deciding whether to
6016 print "no-unwaited-for left". This is important because
6017 the user could have done:
6019 (gdb) set scheduler-locking on
6020 (gdb) thread 1
6021 (gdb) c&
6022 (gdb) thread 2
6023 (gdb) c
6025 ... and only one of the threads exited. */
6026 return handle_as_no_resumed ();
6028 else
6030 /* Switch to the first non-exited thread we can find, and
6031 resume. */
6032 auto range = inf->non_exited_threads ();
6033 if (range.begin () == range.end ())
6035 /* Looks like the target reported a
6036 TARGET_WAITKIND_THREAD_EXITED for its last known
6037 thread. */
6038 return handle_as_no_resumed ();
6040 thread_info *non_exited_thread = *range.begin ();
6041 switch_to_thread (non_exited_thread);
6042 insert_breakpoints ();
6043 resume (GDB_SIGNAL_0);
6047 prepare_to_wait (ecs);
6048 return true;
6051 /* Given an execution control state that has been freshly filled in by
6052 an event from the inferior, figure out what it means and take
6053 appropriate action.
6055 The alternatives are:
6057 1) stop_waiting and return; to really stop and return to the
6058 debugger.
6060 2) keep_going and return; to wait for the next event (set
6061 ecs->event_thread->stepping_over_breakpoint to 1 to single step
6062 once). */
6064 static void
6065 handle_inferior_event (struct execution_control_state *ecs)
6067 /* Make sure that all temporary struct value objects that were
6068 created during the handling of the event get deleted at the
6069 end. */
6070 scoped_value_mark free_values;
6072 infrun_debug_printf ("%s", ecs->ws.to_string ().c_str ());
6074 if (ecs->ws.kind () == TARGET_WAITKIND_IGNORE)
6076 /* We had an event in the inferior, but we are not interested in
6077 handling it at this level. The lower layers have already
6078 done what needs to be done, if anything.
6080 One of the possible circumstances for this is when the
6081 inferior produces output for the console. The inferior has
6082 not stopped, and we are ignoring the event. Another possible
6083 circumstance is any event which the lower level knows will be
6084 reported multiple times without an intervening resume. */
6085 prepare_to_wait (ecs);
6086 return;
6089 if (ecs->ws.kind () == TARGET_WAITKIND_NO_RESUMED
6090 && handle_no_resumed (ecs))
6091 return;
6093 /* Cache the last target/ptid/waitstatus. */
6094 set_last_target_status (ecs->target, ecs->ptid, ecs->ws);
6096 /* Always clear state belonging to the previous time we stopped. */
6097 stop_stack_dummy = STOP_NONE;
6099 if (ecs->ws.kind () == TARGET_WAITKIND_NO_RESUMED)
6101 /* No unwaited-for children left. IOW, all resumed children
6102 have exited. */
6103 stop_waiting (ecs);
6104 return;
6107 if (ecs->ws.kind () != TARGET_WAITKIND_EXITED
6108 && ecs->ws.kind () != TARGET_WAITKIND_SIGNALLED)
6110 ecs->event_thread = ecs->target->find_thread (ecs->ptid);
6111 /* If it's a new thread, add it to the thread database. */
6112 if (ecs->event_thread == nullptr)
6113 ecs->event_thread = add_thread (ecs->target, ecs->ptid);
6115 /* Disable range stepping. If the next step request could use a
6116 range, this will end up re-enabled then. */
6117 ecs->event_thread->control.may_range_step = 0;
6120 /* Dependent on valid ECS->EVENT_THREAD. */
6121 adjust_pc_after_break (ecs->event_thread, ecs->ws);
6123 /* Dependent on the current PC value modified by adjust_pc_after_break. */
6124 reinit_frame_cache ();
6126 breakpoint_retire_moribund ();
6128 /* First, distinguish signals caused by the debugger from signals
6129 that have to do with the program's own actions. Note that
6130 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
6131 on the operating system version. Here we detect when a SIGILL or
6132 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
6133 something similar for SIGSEGV, since a SIGSEGV will be generated
6134 when we're trying to execute a breakpoint instruction on a
6135 non-executable stack. This happens for call dummy breakpoints
6136 for architectures like SPARC that place call dummies on the
6137 stack. */
6138 if (ecs->ws.kind () == TARGET_WAITKIND_STOPPED
6139 && (ecs->ws.sig () == GDB_SIGNAL_ILL
6140 || ecs->ws.sig () == GDB_SIGNAL_SEGV
6141 || ecs->ws.sig () == GDB_SIGNAL_EMT))
6143 struct regcache *regcache = get_thread_regcache (ecs->event_thread);
6145 if (breakpoint_inserted_here_p (ecs->event_thread->inf->aspace.get (),
6146 regcache_read_pc (regcache)))
6148 infrun_debug_printf ("Treating signal as SIGTRAP");
6149 ecs->ws.set_stopped (GDB_SIGNAL_TRAP);
6153 mark_non_executing_threads (ecs->target, ecs->ptid, ecs->ws);
6155 switch (ecs->ws.kind ())
6157 case TARGET_WAITKIND_LOADED:
6159 context_switch (ecs);
6160 /* Ignore gracefully during startup of the inferior, as it might
6161 be the shell which has just loaded some objects; otherwise,
6162 add the symbols for the newly loaded objects. Also ignore at
6163 the beginning of an attach or remote session; we will query
6164 the full list of libraries once the connection is
6165 established. */
6167 stop_kind stop_soon = get_inferior_stop_soon (ecs);
6168 if (stop_soon == NO_STOP_QUIETLY)
6170 struct regcache *regcache;
6172 regcache = get_thread_regcache (ecs->event_thread);
6174 handle_solib_event ();
6176 ecs->event_thread->set_stop_pc (regcache_read_pc (regcache));
6177 address_space *aspace = ecs->event_thread->inf->aspace.get ();
6178 ecs->event_thread->control.stop_bpstat
6179 = bpstat_stop_status_nowatch (aspace,
6180 ecs->event_thread->stop_pc (),
6181 ecs->event_thread, ecs->ws);
6183 if (handle_stop_requested (ecs))
6184 return;
6186 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
6188 /* A catchpoint triggered. */
6189 process_event_stop_test (ecs);
6190 return;
6193 /* If requested, stop when the dynamic linker notifies
6194 gdb of events. This allows the user to get control
6195 and place breakpoints in initializer routines for
6196 dynamically loaded objects (among other things). */
6197 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
6198 if (stop_on_solib_events)
6200 /* Make sure we print "Stopped due to solib-event" in
6201 normal_stop. */
6202 stop_print_frame = true;
6204 stop_waiting (ecs);
6205 return;
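/* Illustrative usage (an aside, not part of infrun.c): the stop above
   is what the user gets after:

     (gdb) set stop-on-solib-events 1
     (gdb) run

   Each time the dynamic linker maps or unmaps shared libraries, GDB
   stops and reports the shared library event instead of silently
   resuming below.  */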
6209 /* If we are skipping through a shell, or through shared library
6210 loading that we aren't interested in, resume the program. If
6211 we're running the program normally, also resume. */
6212 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
6214 /* Loading of shared libraries might have changed breakpoint
6215 addresses. Make sure new breakpoints are inserted. */
6216 if (stop_soon == NO_STOP_QUIETLY)
6217 insert_breakpoints ();
6218 resume (GDB_SIGNAL_0);
6219 prepare_to_wait (ecs);
6220 return;
6223 /* But stop if we're attaching or setting up a remote
6224 connection. */
6225 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
6226 || stop_soon == STOP_QUIETLY_REMOTE)
6228 infrun_debug_printf ("quietly stopped");
6229 stop_waiting (ecs);
6230 return;
6233 internal_error (_("unhandled stop_soon: %d"), (int) stop_soon);
6236 case TARGET_WAITKIND_SPURIOUS:
6237 if (handle_stop_requested (ecs))
6238 return;
6239 context_switch (ecs);
6240 resume (GDB_SIGNAL_0);
6241 prepare_to_wait (ecs);
6242 return;
6244 case TARGET_WAITKIND_THREAD_CREATED:
6245 if (handle_stop_requested (ecs))
6246 return;
6247 context_switch (ecs);
6248 if (!switch_back_to_stepped_thread (ecs))
6249 keep_going (ecs);
6250 return;
6252 case TARGET_WAITKIND_THREAD_EXITED:
6253 if (handle_thread_exited (ecs))
6254 return;
6255 stop_waiting (ecs);
6256 break;
6258 case TARGET_WAITKIND_EXITED:
6259 case TARGET_WAITKIND_SIGNALLED:
6261 /* Depending on the system, ecs->ptid may point to a thread or
6262 to a process. On some targets, target_mourn_inferior may
6263 need to have access to the just-exited thread. That is the
6264 case of GNU/Linux's "checkpoint" support, for example.
6265 Call the switch_to_xxx routine as appropriate. */
6266 thread_info *thr = ecs->target->find_thread (ecs->ptid);
6267 if (thr != nullptr)
6268 switch_to_thread (thr);
6269 else
6271 inferior *inf = find_inferior_ptid (ecs->target, ecs->ptid);
6272 switch_to_inferior_no_thread (inf);
6275 handle_vfork_child_exec_or_exit (0);
6276 target_terminal::ours (); /* Must do this before mourn anyway. */
6278 /* Clear any previous state of convenience variables. */
6279 clear_exit_convenience_vars ();
6281 if (ecs->ws.kind () == TARGET_WAITKIND_EXITED)
6283 /* Record the exit code in the convenience variable $_exitcode, so
6284 that the user can inspect this again later. */
6285 set_internalvar_integer (lookup_internalvar ("_exitcode"),
6286 (LONGEST) ecs->ws.exit_status ());
6288 /* Also record this in the inferior itself. */
6289 current_inferior ()->has_exit_code = true;
6290 current_inferior ()->exit_code = (LONGEST) ecs->ws.exit_status ();
6292 /* Support the --return-child-result option. */
6293 return_child_result_value = ecs->ws.exit_status ();
6295 interps_notify_exited (ecs->ws.exit_status ());
6297 else
6299 struct gdbarch *gdbarch = current_inferior ()->arch ();
6301 if (gdbarch_gdb_signal_to_target_p (gdbarch))
6303 /* Set the value of the internal variable $_exitsignal,
6304 which holds the signal uncaught by the inferior. */
6305 set_internalvar_integer (lookup_internalvar ("_exitsignal"),
6306 gdbarch_gdb_signal_to_target (gdbarch,
6307 ecs->ws.sig ()));
6309 else
6311 /* We don't have access to the target's method used for
6312 converting between signal numbers (GDB's internal
6313 representation <-> target's representation).
6314 Therefore, we cannot do a good job at displaying this
6315 information to the user. It's better to just warn
6316 her about it (if infrun debugging is enabled), and
6317 give up. */
6318 infrun_debug_printf ("Cannot fill $_exitsignal with the correct "
6319 "signal number.");
6322 interps_notify_signal_exited (ecs->ws.sig ());
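/* Illustrative usage (an aside, not part of infrun.c; the values shown
   are hypothetical): once the inferior has ended, the convenience
   variables set above can be inspected, e.g.:

     (gdb) run
     ...
     (gdb) print $_exitcode
     $1 = 2

   or, for an inferior killed by a signal, $_exitsignal holds the
   target's signal number (when the gdbarch can translate it).  */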
6325 gdb_flush (gdb_stdout);
6326 target_mourn_inferior (inferior_ptid);
6327 stop_print_frame = false;
6328 stop_waiting (ecs);
6329 return;
6331 case TARGET_WAITKIND_FORKED:
6332 case TARGET_WAITKIND_VFORKED:
6333 case TARGET_WAITKIND_THREAD_CLONED:
6335 displaced_step_finish (ecs->event_thread, ecs->ws);
6337 /* Start a new step-over in another thread if there's one that
6338 needs it. */
6339 start_step_over ();
6341 context_switch (ecs);
6343 /* Immediately detach breakpoints from the child before there's
6344 any chance of letting the user delete breakpoints from the
6345 breakpoint lists. If we don't do this early, it's easy to
6346 leave leftover traps in the child, viz.: "break foo; catch
6347 fork; c; <fork>; del; c; <child calls foo>". We only follow
6348 the fork on the last `continue', and by that time the
6349 breakpoint at "foo" is long gone from the breakpoint table.
6350 If we vforked, then we don't need to unpatch here, since both
6351 parent and child are sharing the same memory pages; we'll
6352 need to unpatch at follow/detach time instead to be certain
6353 that new breakpoints added between catchpoint hit time and
6354 vfork follow are detached. */
6355 if (ecs->ws.kind () == TARGET_WAITKIND_FORKED)
6357 /* This won't actually modify the breakpoint list, but will
6358 physically remove the breakpoints from the child. */
6359 detach_breakpoints (ecs->ws.child_ptid ());
6362 delete_just_stopped_threads_single_step_breakpoints ();
6364 /* In case the event is caught by a catchpoint, remember that
6365 the event is to be followed at the next resume of the thread,
6366 and not immediately. */
6367 ecs->event_thread->pending_follow = ecs->ws;
6369 ecs->event_thread->set_stop_pc
6370 (regcache_read_pc (get_thread_regcache (ecs->event_thread)));
6372 ecs->event_thread->control.stop_bpstat
6373 = bpstat_stop_status_nowatch (ecs->event_thread->inf->aspace.get (),
6374 ecs->event_thread->stop_pc (),
6375 ecs->event_thread, ecs->ws);
6377 if (handle_stop_requested (ecs))
6378 return;
6380 /* If no catchpoint triggered for this, then keep going. Note
6381 that we're interested in whether the bpstat actually causes a
6382 stop, not just whether it may explain the signal. Software
6383 watchpoints, for example, always appear in the bpstat. */
6384 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
6386 bool follow_child
6387 = (ecs->ws.kind () != TARGET_WAITKIND_THREAD_CLONED
6388 && follow_fork_mode_string == follow_fork_mode_child);
6390 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
6392 process_stratum_target *targ
6393 = ecs->event_thread->inf->process_target ();
6395 bool should_resume;
6396 if (ecs->ws.kind () != TARGET_WAITKIND_THREAD_CLONED)
6397 should_resume = follow_fork ();
6398 else
6400 should_resume = true;
6401 inferior *inf = ecs->event_thread->inf;
6402 inf->top_target ()->follow_clone (ecs->ws.child_ptid ());
6403 ecs->event_thread->pending_follow.set_spurious ();
6406 /* Note that one of these may be an invalid pointer,
6407 depending on detach_fork. */
6408 thread_info *parent = ecs->event_thread;
6409 thread_info *child = targ->find_thread (ecs->ws.child_ptid ());
6411 /* At this point, the parent is marked running, and the
6412 child is marked stopped. */
6414 /* If not resuming the parent, mark it stopped. */
6415 if (ecs->ws.kind () != TARGET_WAITKIND_THREAD_CLONED
6416 && follow_child && !detach_fork && !non_stop && !sched_multi)
6417 parent->set_running (false);
6419 /* If resuming the child, mark it running. */
6420 if ((ecs->ws.kind () == TARGET_WAITKIND_THREAD_CLONED
6421 && !schedlock_applies (ecs->event_thread))
6422 || (ecs->ws.kind () != TARGET_WAITKIND_THREAD_CLONED
6423 && (follow_child
6424 || (!detach_fork && (non_stop || sched_multi)))))
6425 child->set_running (true);
6427 /* In non-stop mode, also resume the other branch. */
6428 if ((ecs->ws.kind () == TARGET_WAITKIND_THREAD_CLONED
6429 && target_is_non_stop_p ()
6430 && !schedlock_applies (ecs->event_thread))
6431 || (ecs->ws.kind () != TARGET_WAITKIND_THREAD_CLONED
6432 && (!detach_fork && (non_stop
6433 || (sched_multi
6434 && target_is_non_stop_p ())))))
6436 if (follow_child)
6437 switch_to_thread (parent);
6438 else
6439 switch_to_thread (child);
6441 ecs->event_thread = inferior_thread ();
6442 ecs->ptid = inferior_ptid;
6443 keep_going (ecs);
6446 if (follow_child)
6447 switch_to_thread (child);
6448 else
6449 switch_to_thread (parent);
6451 ecs->event_thread = inferior_thread ();
6452 ecs->ptid = inferior_ptid;
6454 if (should_resume)
6456 /* Never call switch_back_to_stepped_thread if we are waiting for
6457 vfork-done (waiting for an external vfork child to exec or
6458 exit). We will resume only the vforking thread for the purpose
6459 of collecting the vfork-done event, and we will restart any
6460 step once the critical shared address space window is done. */
6461 if ((!follow_child
6462 && detach_fork
6463 && parent->inf->thread_waiting_for_vfork_done != nullptr)
6464 || !switch_back_to_stepped_thread (ecs))
6465 keep_going (ecs);
6467 else
6468 stop_waiting (ecs);
6469 return;
6471 process_event_stop_test (ecs);
6472 return;
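/* Illustrative usage (an aside, not part of infrun.c): the
   follow/detach decisions above are driven by user settings and
   catchpoints such as:

     (gdb) set follow-fork-mode child
     (gdb) set detach-on-fork off
     (gdb) catch fork

   With these, a fork stops at the catchpoint, both parent and child
   stay under GDB's control, and resuming continues in the child.  */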
6474 case TARGET_WAITKIND_VFORK_DONE:
6475 /* Done with the shared memory region. Re-insert breakpoints in
6476 the parent, and keep going. */
6478 context_switch (ecs);
6480 handle_vfork_done (ecs->event_thread);
6481 gdb_assert (inferior_thread () == ecs->event_thread);
6483 if (handle_stop_requested (ecs))
6484 return;
6486 if (!switch_back_to_stepped_thread (ecs))
6488 gdb_assert (inferior_thread () == ecs->event_thread);
6489 /* This also takes care of reinserting breakpoints in the
6490 previously locked inferior. */
6491 keep_going (ecs);
6493 return;
6495 case TARGET_WAITKIND_EXECD:
6497 /* Note we can't read registers yet (the stop_pc), because we
6498 don't yet know the inferior's post-exec architecture.
6499 'stop_pc' is explicitly read below instead. */
6500 switch_to_thread_no_regs (ecs->event_thread);
6502 /* Do whatever is necessary to the parent branch of the vfork. */
6503 handle_vfork_child_exec_or_exit (1);
6505 /* This causes the eventpoints and symbol table to be reset.
6506 Must do this now, before trying to determine whether to
6507 stop. */
6508 follow_exec (inferior_ptid, ecs->ws.execd_pathname ());
6510 /* In follow_exec we may have deleted the original thread and
6511 created a new one. Make sure that the event thread is the
6512 execd thread for that case (this is a nop otherwise). */
6513 ecs->event_thread = inferior_thread ();
6515 ecs->event_thread->set_stop_pc
6516 (regcache_read_pc (get_thread_regcache (ecs->event_thread)));
6518 ecs->event_thread->control.stop_bpstat
6519 = bpstat_stop_status_nowatch (ecs->event_thread->inf->aspace.get (),
6520 ecs->event_thread->stop_pc (),
6521 ecs->event_thread, ecs->ws);
6523 if (handle_stop_requested (ecs))
6524 return;
6526 /* If no catchpoint triggered for this, then keep going. */
6527 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
6529 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
6530 keep_going (ecs);
6531 return;
6533 process_event_stop_test (ecs);
6534 return;
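/* Illustrative usage (an aside, not part of infrun.c): the bpstat
   check above is what makes "catch exec" stop here, e.g.:

     (gdb) catch exec
     (gdb) continue

   Without an exec catchpoint the event is not reported to the user;
   follow_exec has already reloaded symbols for the new executable and
   the inferior just keeps going.  */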
6536 /* Be careful not to try to gather much state about a thread
6537 that's in a syscall. It's frequently a losing proposition. */
6538 case TARGET_WAITKIND_SYSCALL_ENTRY:
6539 /* Get the current syscall number. */
6540 if (handle_syscall_event (ecs) == 0)
6541 process_event_stop_test (ecs);
6542 return;
6544 /* Before examining the threads further, step this thread to
6545 get it entirely out of the syscall. (We get notice of the
6546 event when the thread is just on the verge of exiting a
6547 syscall. Stepping one instruction seems to get it back
6548 into user code.) */
6549 case TARGET_WAITKIND_SYSCALL_RETURN:
6550 if (handle_syscall_event (ecs) == 0)
6551 process_event_stop_test (ecs);
6552 return;
6554 case TARGET_WAITKIND_STOPPED:
6555 handle_signal_stop (ecs);
6556 return;
6558 case TARGET_WAITKIND_NO_HISTORY:
6559 /* Reverse execution: target ran out of history info. */
6561 /* Switch to the stopped thread. */
6562 context_switch (ecs);
6563 infrun_debug_printf ("stopped");
6565 delete_just_stopped_threads_single_step_breakpoints ();
6566 ecs->event_thread->set_stop_pc
6567 (regcache_read_pc (get_thread_regcache (inferior_thread ())));
6569 if (handle_stop_requested (ecs))
6570 return;
6572 interps_notify_no_history ();
6573 stop_waiting (ecs);
6574 return;
6578 /* Restart threads to what they were trying to do back when we
6579 paused them (because of an in-line step-over or vfork, for example).
6580 The EVENT_THREAD thread is ignored (not restarted).
6582 If INF is non-nullptr, only resume threads from INF. */
6584 static void
6585 restart_threads (struct thread_info *event_thread, inferior *inf)
6587 INFRUN_SCOPED_DEBUG_START_END ("event_thread=%s, inf=%d",
6588 event_thread->ptid.to_string ().c_str (),
6589 inf != nullptr ? inf->num : -1);
6591 gdb_assert (!step_over_info_valid_p ());
6593 /* In case the instruction just stepped spawned a new thread. */
6594 update_thread_list ();
6596 for (thread_info *tp : all_non_exited_threads ())
6598 if (inf != nullptr && tp->inf != inf)
6599 continue;
6601 if (tp->inf->detaching)
6603 infrun_debug_printf ("restart threads: [%s] inferior detaching",
6604 tp->ptid.to_string ().c_str ());
6605 continue;
6608 switch_to_thread_no_regs (tp);
6610 if (tp == event_thread)
6612 infrun_debug_printf ("restart threads: [%s] is event thread",
6613 tp->ptid.to_string ().c_str ());
6614 continue;
6617 if (!(tp->state == THREAD_RUNNING || tp->control.in_infcall))
6619 infrun_debug_printf ("restart threads: [%s] not meant to be running",
6620 tp->ptid.to_string ().c_str ());
6621 continue;
6624 if (tp->resumed ())
6626 infrun_debug_printf ("restart threads: [%s] resumed",
6627 tp->ptid.to_string ().c_str ());
6628 gdb_assert (tp->executing () || tp->has_pending_waitstatus ());
6629 continue;
6632 if (thread_is_in_step_over_chain (tp))
6634 infrun_debug_printf ("restart threads: [%s] needs step-over",
6635 tp->ptid.to_string ().c_str ());
6636 gdb_assert (!tp->resumed ());
6637 continue;
6641 if (tp->has_pending_waitstatus ())
6643 infrun_debug_printf ("restart threads: [%s] has pending status",
6644 tp->ptid.to_string ().c_str ());
6645 tp->set_resumed (true);
6646 continue;
6649 gdb_assert (!tp->stop_requested);
6651 /* If some thread needs to start a step-over at this point, it
6652 should still be in the step-over queue, and thus skipped
6653 above. */
6654 if (thread_still_needs_step_over (tp))
6656 internal_error ("thread [%s] needs a step-over, but not in "
6657 "step-over queue\n",
6658 tp->ptid.to_string ().c_str ());
6661 if (currently_stepping (tp))
6663 infrun_debug_printf ("restart threads: [%s] was stepping",
6664 tp->ptid.to_string ().c_str ());
6665 keep_going_stepped_thread (tp);
6667 else
6669 infrun_debug_printf ("restart threads: [%s] continuing",
6670 tp->ptid.to_string ().c_str ());
6671 execution_control_state ecs (tp);
6672 switch_to_thread (tp);
6673 keep_going_pass_signal (&ecs);
6678 /* Callback for iterate_over_threads. Find a resumed thread that has
6679 a pending waitstatus. */
6681 static int
6682 resumed_thread_with_pending_status (struct thread_info *tp,
6683 void *arg)
6685 return tp->resumed () && tp->has_pending_waitstatus ();
6688 /* Called when we get an event that may finish an in-line or
6689 out-of-line (displaced stepping) step-over started previously.
6690 Return true if the event is processed and we should go back to the
6691 event loop; false if the caller should continue processing the
6692 event. */
6694 static int
6695 finish_step_over (struct execution_control_state *ecs)
6697 displaced_step_finish (ecs->event_thread, ecs->ws);
6699 bool had_step_over_info = step_over_info_valid_p ();
6701 if (had_step_over_info)
6703 /* If we're stepping over a breakpoint with all threads locked,
6704 then only the thread that was stepped should be reporting
6705 back an event. */
6706 gdb_assert (ecs->event_thread->control.trap_expected);
6708 update_thread_events_after_step_over (ecs->event_thread, ecs->ws);
6710 clear_step_over_info ();
6713 if (!target_is_non_stop_p ())
6714 return 0;
6716 /* Start a new step-over in another thread if there's one that
6717 needs it. */
6718 start_step_over ();
6720 /* If we were stepping over a breakpoint before, and haven't started
6721 a new in-line step-over sequence, then restart all other threads
6722 (except the event thread). We can't do this in all-stop, as then
6723 e.g., we wouldn't be able to issue any other remote packet until
6724 these other threads stop. */
6725 if (had_step_over_info && !step_over_info_valid_p ())
6727 struct thread_info *pending;
6729 /* If we only have threads with pending statuses, the restart
6730 below won't restart any thread and so nothing re-inserts the
6731 breakpoint we just stepped over. But we need it inserted
6732 when we later process the pending events, otherwise if
6733 another thread has a pending event for this breakpoint too,
6734 we'd discard its event (because the breakpoint that
6735 originally caused the event was no longer inserted). */
6736 context_switch (ecs);
6737 insert_breakpoints ();
6739 restart_threads (ecs->event_thread);
6741 /* If we have events pending, go through handle_inferior_event
6742 again, picking up a pending event at random. This avoids
6743 thread starvation. */
6745 /* But not if we just stepped over a watchpoint in order to let
6746 the instruction execute so we can evaluate its expression.
6747 The set of watchpoints that triggered is recorded in the
6748 breakpoint objects themselves (see bp->watchpoint_triggered).
6749 If we processed another event first, that other event could
6750 clobber this info. */
6751 if (ecs->event_thread->stepping_over_watchpoint)
6752 return 0;
6754 /* The code below is meant to avoid one thread hogging the event
6755 loop by doing constant in-line step overs. If the stepping
6756 thread exited, there's no risk of this happening, so we can
6757 safely let our caller process the event immediately. */
6758 if (ecs->ws.kind () == TARGET_WAITKIND_THREAD_EXITED)
6759 return 0;
6761 pending = iterate_over_threads (resumed_thread_with_pending_status,
6762 nullptr);
6763 if (pending != nullptr)
6765 struct thread_info *tp = ecs->event_thread;
6766 struct regcache *regcache;
6768 infrun_debug_printf ("found resumed threads with "
6769 "pending events, saving status");
6771 gdb_assert (pending != tp);
6773 /* Record the event thread's event for later. */
6774 save_waitstatus (tp, ecs->ws);
6775 /* This was cleared early, by handle_inferior_event. Set it
6776 so this pending event is considered by
6777 do_target_wait. */
6778 tp->set_resumed (true);
6780 gdb_assert (!tp->executing ());
6782 regcache = get_thread_regcache (tp);
6783 tp->set_stop_pc (regcache_read_pc (regcache));
6785 infrun_debug_printf ("saved stop_pc=%s for %s "
6786 "(currently_stepping=%d)",
6787 paddress (current_inferior ()->arch (),
6788 tp->stop_pc ()),
6789 tp->ptid.to_string ().c_str (),
6790 currently_stepping (tp));
6792 /* This in-line step-over finished; clear this so we won't
6793 start a new one. This is what handle_signal_stop would
6794 do, if we returned false. */
6795 tp->stepping_over_breakpoint = 0;
6797 /* Wake up the event loop again. */
6798 mark_async_event_handler (infrun_async_inferior_event_token);
6800 prepare_to_wait (ecs);
6801 return 1;
6805 return 0;
6808 /* See infrun.h. */
6810 void
6811 notify_signal_received (gdb_signal sig)
6813 interps_notify_signal_received (sig);
6814 gdb::observers::signal_received.notify (sig);
6817 /* See infrun.h. */
6819 void
6820 notify_normal_stop (bpstat *bs, int print_frame)
6822 interps_notify_normal_stop (bs, print_frame);
6823 gdb::observers::normal_stop.notify (bs, print_frame);
6826 /* See infrun.h. */
6828 void notify_user_selected_context_changed (user_selected_what selection)
6830 interps_notify_user_selected_context_changed (selection);
6831 gdb::observers::user_selected_context_changed.notify (selection);
6834 /* Come here when the program has stopped with a signal. */
6836 static void
6837 handle_signal_stop (struct execution_control_state *ecs)
6839 frame_info_ptr frame;
6840 struct gdbarch *gdbarch;
6841 int stopped_by_watchpoint;
6842 enum stop_kind stop_soon;
6843 int random_signal;
6845 gdb_assert (ecs->ws.kind () == TARGET_WAITKIND_STOPPED);
6847 ecs->event_thread->set_stop_signal (ecs->ws.sig ());
6849 /* Do we need to clean up the state of a thread that has
6850 completed a displaced single-step? (Doing so usually affects
6851 the PC, so do it here, before we set stop_pc.) */
6852 if (finish_step_over (ecs))
6853 return;
6855 /* If we either finished a single-step or hit a breakpoint, but
6856 the user wanted this thread to be stopped, pretend we got a
6857 SIG0 (generic unsignaled stop). */
6858 if (ecs->event_thread->stop_requested
6859 && ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP)
6860 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
6862 ecs->event_thread->set_stop_pc
6863 (regcache_read_pc (get_thread_regcache (ecs->event_thread)));
6865 context_switch (ecs);
6867 if (deprecated_context_hook)
6868 deprecated_context_hook (ecs->event_thread->global_num);
6870 if (debug_infrun)
6872 struct regcache *regcache = get_thread_regcache (ecs->event_thread);
6873 struct gdbarch *reg_gdbarch = regcache->arch ();
6875 infrun_debug_printf
6876 ("stop_pc=%s", paddress (reg_gdbarch, ecs->event_thread->stop_pc ()));
6877 if (target_stopped_by_watchpoint ())
6879 CORE_ADDR addr;
6881 infrun_debug_printf ("stopped by watchpoint");
6883 if (target_stopped_data_address (current_inferior ()->top_target (),
6884 &addr))
6885 infrun_debug_printf ("stopped data address=%s",
6886 paddress (reg_gdbarch, addr));
6887 else
6888 infrun_debug_printf ("(no data address available)");
6892 /* This originates from start_remote(), start_inferior() and
6893 the shared library hook functions. */
6894 stop_soon = get_inferior_stop_soon (ecs);
6895 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
6897 infrun_debug_printf ("quietly stopped");
6898 stop_print_frame = true;
6899 stop_waiting (ecs);
6900 return;
6903 /* This originates from attach_command(). We need to overwrite
6904 the stop_signal here, because some kernels don't ignore a
6905 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
6906 See more comments in inferior.h. On the other hand, if we
6907 get a non-SIGSTOP, report it to the user - assume the backend
6908 will handle the SIGSTOP if it should show up later.
6910 Also consider that the attach is complete when we see a
6911 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
6912 target extended-remote report it instead of a SIGSTOP
6913 (e.g. gdbserver). We already rely on SIGTRAP being our
6914 signal, so this is no exception.
6916 Also consider that the attach is complete when we see a
6917 GDB_SIGNAL_0. In non-stop mode, GDB will explicitly tell
6918 the target to stop all threads of the inferior, in case the
6919 low level attach operation doesn't stop them implicitly. If
6920 they weren't stopped implicitly, then the stub will report a
6921 GDB_SIGNAL_0, meaning: stopped for no particular reason
6922 other than GDB's request. */
6923 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
6924 && (ecs->event_thread->stop_signal () == GDB_SIGNAL_STOP
6925 || ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
6926 || ecs->event_thread->stop_signal () == GDB_SIGNAL_0))
6928 stop_print_frame = true;
6929 stop_waiting (ecs);
6930 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
6931 return;
6934 /* At this point, get hold of the now-current thread's frame. */
6935 frame = get_current_frame ();
6936 gdbarch = get_frame_arch (frame);
6938 /* Pull the single step breakpoints out of the target. */
6939 if (ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP)
6941 struct regcache *regcache;
6942 CORE_ADDR pc;
6944 regcache = get_thread_regcache (ecs->event_thread);
6945 const address_space *aspace = ecs->event_thread->inf->aspace.get ();
6947 pc = regcache_read_pc (regcache);
6949 /* However, before doing so, if this single-step breakpoint was
6950 actually for another thread, set this thread up for moving
6951 past it. */
6952 if (!thread_has_single_step_breakpoint_here (ecs->event_thread,
6953 aspace, pc))
6955 if (single_step_breakpoint_inserted_here_p (aspace, pc))
6957 infrun_debug_printf ("[%s] hit another thread's single-step "
6958 "breakpoint",
6959 ecs->ptid.to_string ().c_str ());
6960 ecs->hit_singlestep_breakpoint = 1;
6963 else
6965 infrun_debug_printf ("[%s] hit its single-step breakpoint",
6966 ecs->ptid.to_string ().c_str ());
6969 delete_just_stopped_threads_single_step_breakpoints ();
6971 if (ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
6972 && ecs->event_thread->control.trap_expected
6973 && ecs->event_thread->stepping_over_watchpoint)
6974 stopped_by_watchpoint = 0;
6975 else
6976 stopped_by_watchpoint = watchpoints_triggered (ecs->ws);
6978 /* If necessary, step over this watchpoint. We'll be back to display
6979 it in a moment. */
6980 if (stopped_by_watchpoint
6981 && (target_have_steppable_watchpoint ()
6982 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
6984 /* At this point, we are stopped at an instruction which has
6985 attempted to write to a piece of memory under control of
6986 a watchpoint. The instruction hasn't actually executed
6987 yet. If we were to evaluate the watchpoint expression
6988 now, we would get the old value, and therefore no change
6989 would seem to have occurred.
6991 In order to make watchpoints work `right', we really need
6992 to complete the memory write, and then evaluate the
6993 watchpoint expression. We do this by single-stepping the
6994 target.
6996 It may not be necessary to disable the watchpoint to step over
6997 it. For example, the PA can (with some kernel cooperation)
6998 single step over a watchpoint without disabling the watchpoint.
7000 It is far more common to need to disable a watchpoint to step
7001 the inferior over it. If we have non-steppable watchpoints,
7002 we must disable the current watchpoint; it's simplest to
7003 disable all watchpoints.
7005 Any breakpoint at PC must also be stepped over -- if there's
7006 one, it will have already triggered before the watchpoint
7007 triggered, and we either already reported it to the user, or
7008 it didn't cause a stop and we called keep_going. In either
7009 case, if there was a breakpoint at PC, we must be trying to
7010 step past it. */
7011 ecs->event_thread->stepping_over_watchpoint = 1;
7012 keep_going (ecs);
7013 return;
7016 ecs->event_thread->stepping_over_breakpoint = 0;
7017 ecs->event_thread->stepping_over_watchpoint = 0;
7018 bpstat_clear (&ecs->event_thread->control.stop_bpstat);
7019 ecs->event_thread->control.stop_step = 0;
7020 stop_print_frame = true;
7021 stopped_by_random_signal = 0;
7022 bpstat *stop_chain = nullptr;
7024 /* Hide inlined functions starting here, unless we just performed stepi or
7025 nexti. After stepi and nexti, always show the innermost frame (not any
7026 inline function call sites). */
7027 if (ecs->event_thread->control.step_range_end != 1)
7029 const address_space *aspace = ecs->event_thread->inf->aspace.get ();
7031 /* skip_inline_frames is expensive, so we avoid it if we can
7032 determine that the address is one where functions cannot have
7033 been inlined. This improves performance with inferiors that
7034 load a lot of shared libraries, because the solib event
7035 breakpoint is defined as the address of a function (i.e. not
7036 inline). Note that we have to check the previous PC as well
7037 as the current one to catch cases when we have just
7038 single-stepped off a breakpoint prior to reinstating it.
7039 Note that we're assuming that the code we single-step to is
7040 not inline, but that's not definitive: there's nothing
7041 preventing the event breakpoint function from containing
7042 inlined code, and the single-step ending up there. If the
7043 user had set a breakpoint on that inlined code, the missing
7044 skip_inline_frames call would break things. Fortunately
7045 that's an extremely unlikely scenario. */
7046 if (!pc_at_non_inline_function (aspace,
7047 ecs->event_thread->stop_pc (),
7048 ecs->ws)
7049 && !(ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
7050 && ecs->event_thread->control.trap_expected
7051 && pc_at_non_inline_function (aspace,
7052 ecs->event_thread->prev_pc,
7053 ecs->ws)))
7055 stop_chain = build_bpstat_chain (aspace,
7056 ecs->event_thread->stop_pc (),
7057 ecs->ws);
7058 skip_inline_frames (ecs->event_thread, stop_chain);
7062 if (ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
7063 && ecs->event_thread->control.trap_expected
7064 && gdbarch_single_step_through_delay_p (gdbarch)
7065 && currently_stepping (ecs->event_thread))
7067 /* We're trying to step off a breakpoint. Turns out that we're
7068 also on an instruction that needs to be stepped multiple
7069 times before it has fully executed. E.g., architectures
7070 with a delay slot. It needs to be stepped twice, once for
7071 the instruction and once for the delay slot. */
7072 int step_through_delay
7073 = gdbarch_single_step_through_delay (gdbarch, frame);
7075 if (step_through_delay)
7076 infrun_debug_printf ("step through delay");
7078 if (ecs->event_thread->control.step_range_end == 0
7079 && step_through_delay)
7081 /* The user issued a continue when stopped at a breakpoint.
7082 Set up for another trap and get out of here. */
7083 ecs->event_thread->stepping_over_breakpoint = 1;
7084 keep_going (ecs);
7085 return;
7087 else if (step_through_delay)
7089 /* The user issued a step when stopped at a breakpoint.
7090 Maybe we should stop, maybe we should not - the delay
7091 slot *might* correspond to a line of source. In any
7092 case, don't decide that here, just set
7093 ecs->stepping_over_breakpoint, making sure we
7094 single-step again before breakpoints are re-inserted. */
7095 ecs->event_thread->stepping_over_breakpoint = 1;
7099 /* See if there is a breakpoint/watchpoint/catchpoint/etc. that
7100 handles this event. */
7101 ecs->event_thread->control.stop_bpstat
7102 = bpstat_stop_status (ecs->event_thread->inf->aspace.get (),
7103 ecs->event_thread->stop_pc (),
7104 ecs->event_thread, ecs->ws, stop_chain);
7106 /* Following in case break condition called a
7107 function. */
7108 stop_print_frame = true;
7110 /* This is where we handle "moribund" watchpoints. Unlike
7111 software breakpoints traps, hardware watchpoint traps are
7112 always distinguishable from random traps. If no high-level
7113 watchpoint is associated with the reported stop data address
7114 anymore, then the bpstat does not explain the signal ---
7115 simply make sure to ignore it if `stopped_by_watchpoint' is
7116 set. */
7118 if (ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
7119 && !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
7120 GDB_SIGNAL_TRAP)
7121 && stopped_by_watchpoint)
7123 infrun_debug_printf ("no user watchpoint explains watchpoint SIGTRAP, "
7124 "ignoring");
7127 /* NOTE: cagney/2003-03-29: These checks for a random signal
7128 at one stage in the past included checks for an inferior
7129 function call's call dummy's return breakpoint. The original
7130 comment, that went with the test, read:
7132 ``End of a stack dummy. Some systems (e.g. Sony news) give
7133 another signal besides SIGTRAP, so check here as well as
7134 above.''
7136 If someone ever tries to get call dummies on a
7137 non-executable stack to work (where the target would stop
7138 with something like a SIGSEGV), then those tests might need
7139 to be re-instated. Given, however, that the tests were only
7140 enabled when momentary breakpoints were not being used, I
7141 suspect that it won't be the case.
7143 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
7144 be necessary for call dummies on a non-executable stack on
7145 SPARC. */
7147 /* See if the breakpoints module can explain the signal. */
7148 random_signal
7149 = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
7150 ecs->event_thread->stop_signal ());
7152 /* Maybe this was a trap for a software breakpoint that has since
7153 been removed. */
7154 if (random_signal && target_stopped_by_sw_breakpoint ())
7156 if (gdbarch_program_breakpoint_here_p (gdbarch,
7157 ecs->event_thread->stop_pc ()))
7159 struct regcache *regcache;
7160 int decr_pc;
7162 /* Re-adjust PC to what the program would see if GDB was not
7163 debugging it. */
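/* (Illustrative, not specific to the code below: on x86 the
   breakpoint instruction is the one-byte int3 and the reported PC is
   the address just past it, so gdbarch_decr_pc_after_break returns 1
   there and the write below moves the PC back onto the breakpoint
   address.) */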
7164 regcache = get_thread_regcache (ecs->event_thread);
7165 decr_pc = gdbarch_decr_pc_after_break (gdbarch);
7166 if (decr_pc != 0)
7168 std::optional<scoped_restore_tmpl<int>>
7169 restore_operation_disable;
7171 if (record_full_is_used ())
7172 restore_operation_disable.emplace
7173 (record_full_gdb_operation_disable_set ());
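/* (Roughly: while the full record target is in use, recording is
   disabled around the PC write below so that GDB's own adjustment is
   not logged as if it were inferior execution.) */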
7175 regcache_write_pc (regcache,
7176 ecs->event_thread->stop_pc () + decr_pc);
7179 else
7181 /* A delayed software breakpoint event. Ignore the trap. */
7182 infrun_debug_printf ("delayed software breakpoint trap, ignoring");
7183 random_signal = 0;
7187 /* Maybe this was a trap for a hardware breakpoint/watchpoint that
7188 has since been removed. */
7189 if (random_signal && target_stopped_by_hw_breakpoint ())
7191 /* A delayed hardware breakpoint event. Ignore the trap. */
7192 infrun_debug_printf ("delayed hardware breakpoint/watchpoint "
7193 "trap, ignoring");
7194 random_signal = 0;
7197 /* If not, perhaps stepping/nexting can. */
7198 if (random_signal)
7199 random_signal = !(ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
7200 && currently_stepping (ecs->event_thread));
7202 /* Perhaps the thread hit a single-step breakpoint of _another_
7203 thread. Single-step breakpoints are transparent to the
7204 breakpoints module. */
7205 if (random_signal)
7206 random_signal = !ecs->hit_singlestep_breakpoint;
7208 /* No? Perhaps we got a moribund watchpoint. */
7209 if (random_signal)
7210 random_signal = !stopped_by_watchpoint;
7212 /* Always stop if the user explicitly requested this thread to
7213 remain stopped. */
7214 if (ecs->event_thread->stop_requested)
7216 random_signal = 1;
7217 infrun_debug_printf ("user-requested stop");
7220 /* For the program's own signals, act according to
7221 the signal handling tables. */
7223 if (random_signal)
7225 /* Signal not for debugging purposes. */
7226 enum gdb_signal stop_signal = ecs->event_thread->stop_signal ();
7228 infrun_debug_printf ("random signal (%s)",
7229 gdb_signal_to_symbol_string (stop_signal));
7231 stopped_by_random_signal = 1;
7233 /* Always stop on signals if we're either just gaining control
7234 of the program, or the user explicitly requested this thread
7235 to remain stopped. */
7236 if (stop_soon != NO_STOP_QUIETLY
7237 || ecs->event_thread->stop_requested
7238 || signal_stop_state (ecs->event_thread->stop_signal ()))
7240 stop_waiting (ecs);
7241 return;
7244 /* Notify observers the signal has "handle print" set. Note we
7245 returned early above if stopping; normal_stop handles the
7246 printing in that case. */
7247 if (signal_print[ecs->event_thread->stop_signal ()])
7249 /* The signal table tells us to print about this signal. */
7250 target_terminal::ours_for_output ();
7251 notify_signal_received (ecs->event_thread->stop_signal ());
7252 target_terminal::inferior ();
7255 /* Clear the signal if it should not be passed. */
7256 if (signal_program[ecs->event_thread->stop_signal ()] == 0)
7257 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
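/* (Illustrative example: "handle SIGUSR1 nopass" leaves
   signal_program[GDB_SIGNAL_USR1] zero, so a SIGUSR1 that stops us
   here is swallowed instead of being delivered to the inferior on
   the next resume.) */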
7259 if (ecs->event_thread->prev_pc == ecs->event_thread->stop_pc ()
7260 && ecs->event_thread->control.trap_expected
7261 && ecs->event_thread->control.step_resume_breakpoint == nullptr)
7263 /* We were just starting a new sequence, attempting to
7264 single-step off of a breakpoint and expecting a SIGTRAP.
7265 Instead this signal arrives. This signal will take us out
7266 of the stepping range so GDB needs to remember to, when
7267 the signal handler returns, resume stepping off that
7268 breakpoint. */
7269 /* To simplify things, "continue" is forced to use the same
7270 code paths as single-step - set a breakpoint at the
7271 signal return address and then, once hit, step off that
7272 breakpoint. */
7273 infrun_debug_printf ("signal arrived while stepping over breakpoint");
7275 insert_hp_step_resume_breakpoint_at_frame (frame);
7276 ecs->event_thread->step_after_step_resume_breakpoint = 1;
7277 /* Reset trap_expected to ensure breakpoints are re-inserted. */
7278 ecs->event_thread->control.trap_expected = 0;
7280 /* If we were nexting/stepping some other thread, switch to
7281 it, so that we don't continue it, losing control. */
7282 if (!switch_back_to_stepped_thread (ecs))
7283 keep_going (ecs);
7284 return;
7287 if (ecs->event_thread->stop_signal () != GDB_SIGNAL_0
7288 && (pc_in_thread_step_range (ecs->event_thread->stop_pc (),
7289 ecs->event_thread)
7290 || ecs->event_thread->control.step_range_end == 1)
7291 && (get_stack_frame_id (frame)
7292 == ecs->event_thread->control.step_stack_frame_id)
7293 && ecs->event_thread->control.step_resume_breakpoint == nullptr)
7295 /* The inferior is about to take a signal that will take it
7296 out of the single step range. Set a breakpoint at the
7297 current PC (which is presumably where the signal handler
7298 will eventually return) and then allow the inferior to
7299 run free.
7301 Note that this is only needed for a signal delivered
7302 while in the single-step range. Nested signals aren't a
7303 problem as they eventually all return. */
7304 infrun_debug_printf ("signal may take us out of single-step range");
7306 clear_step_over_info ();
7307 insert_hp_step_resume_breakpoint_at_frame (frame);
7308 ecs->event_thread->step_after_step_resume_breakpoint = 1;
7309 /* Reset trap_expected to ensure breakpoints are re-inserted. */
7310 ecs->event_thread->control.trap_expected = 0;
7311 keep_going (ecs);
7312 return;
7315 /* Note: step_resume_breakpoint may be non-NULL. This occurs
7316 when either there's a nested signal, or when there's a
7317 pending signal enabled just as the signal handler returns
7318 (leaving the inferior at the step-resume-breakpoint without
7319 actually executing it). Either way continue until the
7320 breakpoint is really hit. */
7322 if (!switch_back_to_stepped_thread (ecs))
7324 infrun_debug_printf ("random signal, keep going");
7326 keep_going (ecs);
7328 return;
7331 process_event_stop_test (ecs);
7334 /* Return the address for the beginning of the line. */
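/* (Hypothetical illustration: if line 42 has line-table entries at
   0x1000, 0x1010 and 0x1020 and PC is 0x1010, this returns 0x1000,
   the lowest address recorded for that line, which is what reverse
   stepping wants as the start of its step range.) */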
7336 CORE_ADDR
7337 update_line_range_start (CORE_ADDR pc, struct execution_control_state *ecs)
7339 /* The line table may have multiple entries for the same source code line.
7340 Given the PC, check the line table and return the PC that corresponds
7341 to the line table entry for the source line that PC is in. */
7342 CORE_ADDR start_line_pc = ecs->event_thread->control.step_range_start;
7343 std::optional<CORE_ADDR> real_range_start;
7345 /* Call find_line_range_start to get the smallest address in the
7346 linetable for multiple Line X entries in the line table. */
7347 real_range_start = find_line_range_start (pc);
7349 if (real_range_start.has_value ())
7350 start_line_pc = *real_range_start;
7352 return start_line_pc;
7355 namespace {
7357 /* Helper class for process_event_stop_test implementing lazy evaluation. */
7358 template<typename T>
7359 class lazy_loader
7361 using fetcher_t = std::function<T ()>;
7363 public:
7364 explicit lazy_loader (fetcher_t &&f) : m_loader (std::move (f))
7367 T &operator* ()
7369 if (!m_value.has_value ())
7370 m_value.emplace (m_loader ());
7371 return m_value.value ();
7374 T *operator-> ()
7376 return &**this;
7379 private:
7380 std::optional<T> m_value;
7381 fetcher_t m_loader;
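/* (Usage sketch, illustrative only: process_event_stop_test below
   declares

     lazy_loader<frame_id> curr_frame_id
       ([] () { return get_frame_id (get_current_frame ()); });

   so the frame ID is computed only on the first dereference and then
   cached, and code paths that never look at it skip the relatively
   expensive frame-ID computation entirely.) */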
7386 /* Come here when we've got some debug event / signal we can explain
7387 (IOW, not a random signal), and test whether it should cause a
7388 stop, or whether we should resume the inferior (transparently).
7389 E.g., could be a breakpoint whose condition evaluates false; we
7390 could still be stepping within the line; etc. */
7392 static void
7393 process_event_stop_test (struct execution_control_state *ecs)
7395 struct symtab_and_line stop_pc_sal;
7396 frame_info_ptr frame;
7397 struct gdbarch *gdbarch;
7398 CORE_ADDR jmp_buf_pc;
7399 struct bpstat_what what;
7401 /* Handle cases caused by hitting a breakpoint. */
7403 frame = get_current_frame ();
7404 gdbarch = get_frame_arch (frame);
7406 what = bpstat_what (ecs->event_thread->control.stop_bpstat);
7408 if (what.call_dummy)
7410 stop_stack_dummy = what.call_dummy;
7413 /* A few breakpoint types have callbacks associated (e.g.,
7414 bp_jit_event). Run them now. */
7415 bpstat_run_callbacks (ecs->event_thread->control.stop_bpstat);
7417 /* Shorthand to make if statements smaller. */
7418 struct frame_id original_frame_id
7419 = ecs->event_thread->control.step_frame_id;
7420 lazy_loader<frame_id> curr_frame_id
7421 ([] () { return get_frame_id (get_current_frame ()); });
7423 switch (what.main_action)
7425 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
7426 /* If we hit the breakpoint at longjmp while stepping, we
7427 install a momentary breakpoint at the target of the
7428 jmp_buf. */
7430 infrun_debug_printf ("BPSTAT_WHAT_SET_LONGJMP_RESUME");
7432 ecs->event_thread->stepping_over_breakpoint = 1;
7434 if (what.is_longjmp)
7436 struct value *arg_value;
7438 /* If we set the longjmp breakpoint via a SystemTap probe,
7439 then use it to extract the arguments. The destination PC
7440 is the third argument to the probe. */
7441 arg_value = probe_safe_evaluate_at_pc (frame, 2);
7442 if (arg_value)
7444 jmp_buf_pc = value_as_address (arg_value);
7445 jmp_buf_pc = gdbarch_addr_bits_remove (gdbarch, jmp_buf_pc);
7447 else if (!gdbarch_get_longjmp_target_p (gdbarch)
7448 || !gdbarch_get_longjmp_target (gdbarch,
7449 frame, &jmp_buf_pc))
7451 infrun_debug_printf ("BPSTAT_WHAT_SET_LONGJMP_RESUME "
7452 "(!gdbarch_get_longjmp_target)");
7453 keep_going (ecs);
7454 return;
7457 /* Insert a breakpoint at resume address. */
7458 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
7460 else
7461 check_exception_resume (ecs, frame);
7462 keep_going (ecs);
7463 return;
7465 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
7467 frame_info_ptr init_frame;
7469 /* There are several cases to consider.
7471 1. The initiating frame no longer exists. In this case we
7472 must stop, because the exception or longjmp has gone too
7473 far.
7475 2. The initiating frame exists, and is the same as the
7476 current frame. We stop, because the exception or longjmp
7477 has been caught.
7479 3. The initiating frame exists and is different from the
7480 current frame. This means the exception or longjmp has
7481 been caught beneath the initiating frame, so keep going.
7483 4. longjmp breakpoint has been placed just to protect
7484 against stale dummy frames and user is not interested in
7485 stopping around longjmps. */
7487 infrun_debug_printf ("BPSTAT_WHAT_CLEAR_LONGJMP_RESUME");
7489 gdb_assert (ecs->event_thread->control.exception_resume_breakpoint
7490 != nullptr);
7491 delete_exception_resume_breakpoint (ecs->event_thread);
7493 if (what.is_longjmp)
7495 check_longjmp_breakpoint_for_call_dummy (ecs->event_thread);
7497 if (!frame_id_p (ecs->event_thread->initiating_frame))
7499 /* Case 4. */
7500 keep_going (ecs);
7501 return;
7505 init_frame = frame_find_by_id (ecs->event_thread->initiating_frame);
7507 if (init_frame)
7509 if (*curr_frame_id == ecs->event_thread->initiating_frame)
7511 /* Case 2. Fall through. */
7513 else
7515 /* Case 3. */
7516 keep_going (ecs);
7517 return;
7521 /* For Cases 1 and 2, remove the step-resume breakpoint, if it
7522 exists. */
7523 delete_step_resume_breakpoint (ecs->event_thread);
7525 end_stepping_range (ecs);
7527 return;
7529 case BPSTAT_WHAT_SINGLE:
7530 infrun_debug_printf ("BPSTAT_WHAT_SINGLE");
7531 ecs->event_thread->stepping_over_breakpoint = 1;
7532 /* Still need to check other stuff, at least the case where we
7533 are stepping and step out of the right range. */
7534 break;
7536 case BPSTAT_WHAT_STEP_RESUME:
7537 infrun_debug_printf ("BPSTAT_WHAT_STEP_RESUME");
7539 delete_step_resume_breakpoint (ecs->event_thread);
7540 if (ecs->event_thread->control.proceed_to_finish
7541 && execution_direction == EXEC_REVERSE)
7543 struct thread_info *tp = ecs->event_thread;
7545 /* We are finishing a function in reverse, and just hit the
7546 step-resume breakpoint at the start address of the
7547 function, and we're almost there -- just need to back up
7548 by one more single-step, which should take us back to the
7549 function call. */
7550 tp->control.step_range_start = tp->control.step_range_end = 1;
7551 keep_going (ecs);
7552 return;
7554 fill_in_stop_func (gdbarch, ecs);
7555 if (ecs->event_thread->stop_pc () == ecs->stop_func_start
7556 && execution_direction == EXEC_REVERSE)
7558 /* We are stepping over a function call in reverse, and just
7559 hit the step-resume breakpoint at the start address of
7560 the function. Go back to single-stepping, which should
7561 take us back to the function call. */
7562 ecs->event_thread->stepping_over_breakpoint = 1;
7563 keep_going (ecs);
7564 return;
7566 break;
7568 case BPSTAT_WHAT_STOP_NOISY:
7569 infrun_debug_printf ("BPSTAT_WHAT_STOP_NOISY");
7570 stop_print_frame = true;
7572 /* Assume the thread stopped for a breakpoint. We'll still check
7573 whether a/the breakpoint is there when the thread is next
7574 resumed. */
7575 ecs->event_thread->stepping_over_breakpoint = 1;
7577 stop_waiting (ecs);
7578 return;
7580 case BPSTAT_WHAT_STOP_SILENT:
7581 infrun_debug_printf ("BPSTAT_WHAT_STOP_SILENT");
7582 stop_print_frame = false;
7584 /* Assume the thread stopped for a breakpoint. We'll still check
7585 whether a/the breakpoint is there when the thread is next
7586 resumed. */
7587 ecs->event_thread->stepping_over_breakpoint = 1;
7588 stop_waiting (ecs);
7589 return;
7591 case BPSTAT_WHAT_HP_STEP_RESUME:
7592 infrun_debug_printf ("BPSTAT_WHAT_HP_STEP_RESUME");
7594 delete_step_resume_breakpoint (ecs->event_thread);
7595 if (ecs->event_thread->step_after_step_resume_breakpoint)
7597 /* Back when the step-resume breakpoint was inserted, we
7598 were trying to single-step off a breakpoint. Go back to
7599 doing that. */
7600 ecs->event_thread->step_after_step_resume_breakpoint = 0;
7601 ecs->event_thread->stepping_over_breakpoint = 1;
7602 keep_going (ecs);
7603 return;
7605 break;
7607 case BPSTAT_WHAT_KEEP_CHECKING:
7608 break;
7611 /* If we stepped a permanent breakpoint and we had a high priority
7612 step-resume breakpoint for the address we stepped, but we didn't
7613 hit it, then we must have stepped into the signal handler. The
7614 step-resume was only necessary to catch the case of _not_
7615 stepping into the handler, so delete it, and fall through to
7616 checking whether the step finished. */
7617 if (ecs->event_thread->stepped_breakpoint)
7619 struct breakpoint *sr_bp
7620 = ecs->event_thread->control.step_resume_breakpoint;
7622 if (sr_bp != nullptr
7623 && sr_bp->first_loc ().permanent
7624 && sr_bp->type == bp_hp_step_resume
7625 && sr_bp->first_loc ().address == ecs->event_thread->prev_pc)
7627 infrun_debug_printf ("stepped permanent breakpoint, stopped in handler");
7628 delete_step_resume_breakpoint (ecs->event_thread);
7629 ecs->event_thread->step_after_step_resume_breakpoint = 0;
7633 /* We come here if we hit a breakpoint but should not stop for it.
7634 Possibly we also were stepping and should stop for that. So fall
7635 through and test for stepping. But, if not stepping, do not
7636 stop. */
7638 /* In all-stop mode, if we're currently stepping but have stopped in
7639 some other thread, we need to switch back to the stepped thread. */
7640 if (switch_back_to_stepped_thread (ecs))
7641 return;
7643 if (ecs->event_thread->control.step_resume_breakpoint)
7645 infrun_debug_printf ("step-resume breakpoint is inserted");
7647 /* Having a step-resume breakpoint overrides anything
7648 else having to do with stepping commands until
7649 that breakpoint is reached. */
7650 keep_going (ecs);
7651 return;
7654 if (ecs->event_thread->control.step_range_end == 0)
7656 infrun_debug_printf ("no stepping, continue");
7657 /* Likewise if we aren't even stepping. */
7658 keep_going (ecs);
7659 return;
7662 fill_in_stop_func (gdbarch, ecs);
7664 /* If stepping through a line, keep going if still within it.
7666 Note that step_range_end is the address of the first instruction
7667 beyond the step range, and NOT the address of the last instruction
7668 within it!
7670 Note also that during reverse execution, we may be stepping
7671 through a function epilogue and therefore must detect when
7672 the current-frame changes in the middle of a line. */
7674 if (pc_in_thread_step_range (ecs->event_thread->stop_pc (),
7675 ecs->event_thread)
7676 && (execution_direction != EXEC_REVERSE
7677 || *curr_frame_id == original_frame_id))
7679 infrun_debug_printf
7680 ("stepping inside range [%s-%s]",
7681 paddress (gdbarch, ecs->event_thread->control.step_range_start),
7682 paddress (gdbarch, ecs->event_thread->control.step_range_end));
7684 /* Tentatively re-enable range stepping; `resume' disables it if
7685 necessary (e.g., if we're stepping over a breakpoint or we
7686 have software watchpoints). */
7687 ecs->event_thread->control.may_range_step = 1;
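/* (Background, roughly: when the target supports it -- e.g. a remote
   stub via the vCont;r packet -- range stepping lets the target run
   through the whole step range on its own instead of reporting a
   stop to GDB after every single instruction.) */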
7689 /* When stepping backward, stop at beginning of line range
7690 (unless it's the function entry point, in which case
7691 keep going back to the call point). */
7692 CORE_ADDR stop_pc = ecs->event_thread->stop_pc ();
7693 if (stop_pc == ecs->event_thread->control.step_range_start
7694 && stop_pc != ecs->stop_func_start
7695 && execution_direction == EXEC_REVERSE)
7696 end_stepping_range (ecs);
7697 else
7698 keep_going (ecs);
7700 return;
7703 /* We stepped out of the stepping range. */
7705 /* If we are stepping at the source level and entered the runtime
7706 loader dynamic symbol resolution code...
7708 EXEC_FORWARD: we keep on single stepping until we exit the run
7709 time loader code and reach the callee's address.
7711 EXEC_REVERSE: we've already executed the callee (backward), and
7712 the runtime loader code is handled just like any other
7713 undebuggable function call. Now we need only keep stepping
7714 backward through the trampoline code, and that's handled further
7715 down, so there is nothing for us to do here. */
7717 if (execution_direction != EXEC_REVERSE
7718 && ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
7719 && in_solib_dynsym_resolve_code (ecs->event_thread->stop_pc ())
7720 && (ecs->event_thread->control.step_start_function == nullptr
7721 || !in_solib_dynsym_resolve_code (
7722 ecs->event_thread->control.step_start_function->value_block ()
7723 ->entry_pc ())))
7725 CORE_ADDR pc_after_resolver =
7726 gdbarch_skip_solib_resolver (gdbarch, ecs->event_thread->stop_pc ());
7728 infrun_debug_printf ("stepped into dynsym resolve code");
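/* (Illustrative: on GNU/Linux this typically means the step went
   through a not-yet-resolved PLT entry into the dynamic linker's
   resolver.  Rather than single-step through ld.so, we plant a
   step-resume breakpoint at the address the resolver will eventually
   transfer control to.) */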
7730 if (pc_after_resolver)
7732 /* Set up a step-resume breakpoint at the address
7733 indicated by SKIP_SOLIB_RESOLVER. */
7734 symtab_and_line sr_sal;
7735 sr_sal.pc = pc_after_resolver;
7736 sr_sal.pspace = get_frame_program_space (frame);
7738 insert_step_resume_breakpoint_at_sal (gdbarch,
7739 sr_sal, null_frame_id);
7742 keep_going (ecs);
7743 return;
7746 /* Step through an indirect branch thunk. */
7747 if (ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
7748 && gdbarch_in_indirect_branch_thunk (gdbarch,
7749 ecs->event_thread->stop_pc ()))
7751 infrun_debug_printf ("stepped into indirect branch thunk");
7752 keep_going (ecs);
7753 return;
7756 if (ecs->event_thread->control.step_range_end != 1
7757 && (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
7758 || ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
7759 && get_frame_type (frame) == SIGTRAMP_FRAME)
7761 infrun_debug_printf ("stepped into signal trampoline");
7762 /* The inferior, while doing a "step" or "next", has ended up in
7763 a signal trampoline (either by a signal being delivered or by
7764 the signal handler returning). Just single-step until the
7765 inferior leaves the trampoline (either by calling the handler
7766 or returning). */
7767 keep_going (ecs);
7768 return;
7771 /* If we're in the return path from a shared library trampoline,
7772 we want to proceed through the trampoline when stepping. */
7773 /* macro/2012-04-25: This needs to come before the subroutine
7774 call check below as on some targets return trampolines look
7775 like subroutine calls (MIPS16 return thunks). */
7776 if (gdbarch_in_solib_return_trampoline (gdbarch,
7777 ecs->event_thread->stop_pc (),
7778 ecs->stop_func_name)
7779 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
7781 /* Determine where this trampoline returns. */
7782 CORE_ADDR stop_pc = ecs->event_thread->stop_pc ();
7783 CORE_ADDR real_stop_pc
7784 = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
7786 infrun_debug_printf ("stepped into solib return tramp");
7788 /* Only proceed through if we know where it's going. */
7789 if (real_stop_pc)
7791 /* And put the step-breakpoint there and go until there. */
7792 symtab_and_line sr_sal;
7793 sr_sal.pc = real_stop_pc;
7794 sr_sal.section = find_pc_overlay (sr_sal.pc);
7795 sr_sal.pspace = get_frame_program_space (frame);
7797 /* Do not specify what the fp should be when we stop since
7798 on some machines the prologue is where the new fp value
7799 is established. */
7800 insert_step_resume_breakpoint_at_sal (gdbarch,
7801 sr_sal, null_frame_id);
7803 /* Restart without fiddling with the step ranges or
7804 other state. */
7805 keep_going (ecs);
7806 return;
7810 /* Check for subroutine calls. The check for the current frame
7811 equalling the step ID is not necessary - the check of the
7812 previous frame's ID is sufficient - but it is a common case and
7813 cheaper than checking the previous frame's ID.
7815 NOTE: frame_id::operator== will never report two invalid frame IDs as
7816 being equal, so to get into this block, both the current and
7817 previous frame must have valid frame IDs. */
7818 /* The outer_frame_id check is a heuristic to detect stepping
7819 through startup code. If we step over an instruction which
7820 sets the stack pointer from an invalid value to a valid value,
7821 we may detect that as a subroutine call from the mythical
7822 "outermost" function. This could be fixed by marking
7823 outermost frames as !stack_p,code_p,special_p. Then the
7824 initial outermost frame, before sp was valid, would
7825 have code_addr == &_start. See the comment in frame_id::operator==
7826 for more. */
7828 /* We want "nexti" to step into, not over, signal handlers invoked
7829 by the kernel, therefore this subroutine check should not trigger
7830 for a signal handler invocation. On most platforms, this is already
7831 not the case, as the kernel puts a signal trampoline frame onto the
7832 stack to handle proper return after the handler, and therefore at this
7833 point, the current frame is a grandchild of the step frame, not a
7834 child. However, on some platforms, the kernel actually uses a
7835 trampoline to handle *invocation* of the handler. In that case,
7836 when executing the first instruction of the trampoline, this check
7837 would erroneously detect the trampoline invocation as a subroutine
7838 call. Fix this by checking for SIGTRAMP_FRAME. */
7839 if ((get_stack_frame_id (frame)
7840 != ecs->event_thread->control.step_stack_frame_id)
7841 && get_frame_type (frame) != SIGTRAMP_FRAME
7842 && ((frame_unwind_caller_id (frame)
7843 == ecs->event_thread->control.step_stack_frame_id)
7844 && ((ecs->event_thread->control.step_stack_frame_id
7845 != outer_frame_id)
7846 || (ecs->event_thread->control.step_start_function
7847 != find_pc_function (ecs->event_thread->stop_pc ())))))
7849 CORE_ADDR stop_pc = ecs->event_thread->stop_pc ();
7850 CORE_ADDR real_stop_pc;
7852 infrun_debug_printf ("stepped into subroutine");
7854 if (ecs->event_thread->control.step_over_calls == STEP_OVER_NONE)
7856 /* I presume that step_over_calls is only 0 when we're
7857 supposed to be stepping at the assembly language level
7858 ("stepi"). Just stop. */
7859 /* And this works the same backward as frontward. MVS */
7860 end_stepping_range (ecs);
7861 return;
7864 /* Reverse stepping through solib trampolines. */
7866 if (execution_direction == EXEC_REVERSE
7867 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
7868 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
7869 || (ecs->stop_func_start == 0
7870 && in_solib_dynsym_resolve_code (stop_pc))))
7872 /* Any solib trampoline code can be handled in reverse
7873 by simply continuing to single-step. We have already
7874 executed the solib function (backwards), and a few
7875 steps will take us back through the trampoline to the
7876 caller. */
7877 keep_going (ecs);
7878 return;
7881 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
7883 /* We're doing a "next".
7885 Normal (forward) execution: set a breakpoint at the
7886 callee's return address (the address at which the caller
7887 will resume).
7889 Reverse (backward) execution. set the step-resume
7890 breakpoint at the start of the function that we just
7891 stepped into (backwards), and continue to there. When we
7892 get there, we'll need to single-step back to the caller. */
7894 if (execution_direction == EXEC_REVERSE)
7896 /* If we're already at the start of the function, we've either
7897 just stepped backward into a single instruction function,
7898 or stepped back out of a signal handler to the first instruction
7899 of the function. Just keep going, which will single-step back
7900 to the caller. */
7901 if (ecs->stop_func_start != stop_pc && ecs->stop_func_start != 0)
7903 /* Normal function call return (static or dynamic). */
7904 symtab_and_line sr_sal;
7905 sr_sal.pc = ecs->stop_func_start;
7906 sr_sal.pspace = get_frame_program_space (frame);
7907 insert_step_resume_breakpoint_at_sal (gdbarch,
7908 sr_sal, get_stack_frame_id (frame));
7911 else
7912 insert_step_resume_breakpoint_at_caller (frame);
7914 keep_going (ecs);
7915 return;
7918 /* If we are in a function call trampoline (a stub between the
7919 calling routine and the real function), locate the real
7920 function. That's what tells us (a) whether we want to step
7921 into it at all, and (b) what prologue we want to run to the
7922 end of, if we do step into it. */
7923 real_stop_pc = skip_language_trampoline (frame, stop_pc);
7924 if (real_stop_pc == 0)
7925 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
7926 if (real_stop_pc != 0)
7927 ecs->stop_func_start = real_stop_pc;
7929 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
7931 symtab_and_line sr_sal;
7932 sr_sal.pc = ecs->stop_func_start;
7933 sr_sal.pspace = get_frame_program_space (frame);
7935 insert_step_resume_breakpoint_at_sal (gdbarch,
7936 sr_sal, null_frame_id);
7937 keep_going (ecs);
7938 return;
7941 /* If we have line number information for the function we are
7942 thinking of stepping into and the function isn't on the skip
7943 list, step into it.
7945 If there are several symtabs at that PC (e.g. with include
7946 files), we just want to know whether *any* of them have line
7947 numbers. find_pc_line handles this. */
7949 struct symtab_and_line tmp_sal;
7951 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
7952 if (tmp_sal.line != 0
7953 && !function_name_is_marked_for_skip (ecs->stop_func_name,
7954 tmp_sal)
7955 && !inline_frame_is_marked_for_skip (true, ecs->event_thread))
7957 if (execution_direction == EXEC_REVERSE)
7958 handle_step_into_function_backward (gdbarch, ecs);
7959 else
7960 handle_step_into_function (gdbarch, ecs);
7961 return;
7965 /* If we have no line number and the step-stop-if-no-debug is
7966 set, we stop the step so that the user has a chance to switch
7967 to assembly mode. */
7968 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
7969 && step_stop_if_no_debug)
7971 end_stepping_range (ecs);
7972 return;
7975 if (execution_direction == EXEC_REVERSE)
7977 /* If we're already at the start of the function, we've either just
7978 stepped backward into a single instruction function without line
7979 number info, or stepped back out of a signal handler to the first
7980 instruction of the function without line number info. Just keep
7981 going, which will single-step back to the caller. */
7982 if (ecs->stop_func_start != stop_pc)
7984 /* Set a breakpoint at callee's start address.
7985 From there we can step once and be back in the caller. */
7986 symtab_and_line sr_sal;
7987 sr_sal.pc = ecs->stop_func_start;
7988 sr_sal.pspace = get_frame_program_space (frame);
7989 insert_step_resume_breakpoint_at_sal (gdbarch,
7990 sr_sal, null_frame_id);
7993 else
7994 /* Set a breakpoint at callee's return address (the address
7995 at which the caller will resume). */
7996 insert_step_resume_breakpoint_at_caller (frame);
7998 keep_going (ecs);
7999 return;
8002 /* Reverse stepping through solib trampolines. */
8004 if (execution_direction == EXEC_REVERSE
8005 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
8007 CORE_ADDR stop_pc = ecs->event_thread->stop_pc ();
8009 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
8010 || (ecs->stop_func_start == 0
8011 && in_solib_dynsym_resolve_code (stop_pc)))
8013 /* Any solib trampoline code can be handled in reverse
8014 by simply continuing to single-step. We have already
8015 executed the solib function (backwards), and a few
8016 steps will take us back through the trampoline to the
8017 caller. */
8018 keep_going (ecs);
8019 return;
8021 else if (in_solib_dynsym_resolve_code (stop_pc))
8023 /* Stepped backward into the solib dynsym resolver.
8024 Set a breakpoint at its start and continue, then
8025 one more step will take us out. */
8026 symtab_and_line sr_sal;
8027 sr_sal.pc = ecs->stop_func_start;
8028 sr_sal.pspace = get_frame_program_space (frame);
8029 insert_step_resume_breakpoint_at_sal (gdbarch,
8030 sr_sal, null_frame_id);
8031 keep_going (ecs);
8032 return;
8036 /* This always returns the sal for the inner-most frame when we are in a
8037 stack of inlined frames, even if GDB actually believes that it is in a
8038 more outer frame. This is checked for below by calls to
8039 inline_skipped_frames. */
8040 stop_pc_sal = find_pc_line (ecs->event_thread->stop_pc (), 0);
8042 /* NOTE: tausq/2004-05-24: This if block used to be done before all
8043 the trampoline processing logic, however, there are some trampolines
8044 that have no names, so we should do trampoline handling first. */
8045 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
8046 && ecs->stop_func_name == nullptr
8047 && stop_pc_sal.line == 0)
8049 infrun_debug_printf ("stepped into undebuggable function");
8051 /* The inferior just stepped into, or returned to, an
8052 undebuggable function (where there is no debugging information
8053 and no line number corresponding to the address where the
8054 inferior stopped). Since we want to skip this kind of code,
8055 we keep going until the inferior returns from this
8056 function - unless the user has asked us not to (via
8057 set step-mode) or we no longer know how to get back
8058 to the call site. */
8059 if (step_stop_if_no_debug
8060 || !frame_id_p (frame_unwind_caller_id (frame)))
8062 /* If we have no line number and the step-stop-if-no-debug
8063 is set, we stop the step so that the user has a chance to
8064 switch to assembly mode. */
8065 end_stepping_range (ecs);
8066 return;
8068 else
8070 /* Set a breakpoint at callee's return address (the address
8071 at which the caller will resume). */
8072 insert_step_resume_breakpoint_at_caller (frame);
8073 keep_going (ecs);
8074 return;
8078 if (execution_direction == EXEC_REVERSE
8079 && ecs->event_thread->control.proceed_to_finish
8080 && ecs->event_thread->stop_pc () >= ecs->stop_func_alt_start
8081 && ecs->event_thread->stop_pc () < ecs->stop_func_start)
8083 /* We are executing the reverse-finish command.
8084 If the system supports multiple entry points and we are
8085 finishing a function in reverse, then: if we are between the
8086 entry points, single-step back to the alternate entry point;
8087 if we are at the alternate entry point, just back up by one more
8088 single-step, which should take us back to the function call. */
8089 ecs->event_thread->control.step_range_start
8090 = ecs->event_thread->control.step_range_end = 1;
8091 keep_going (ecs);
8092 return;
8096 if (ecs->event_thread->control.step_range_end == 1)
8098 /* It is stepi or nexti. We always want to stop stepping after
8099 one instruction. */
8100 infrun_debug_printf ("stepi/nexti");
8101 end_stepping_range (ecs);
8102 return;
8105 if (stop_pc_sal.line == 0)
8107 /* We have no line number information. That means to stop
8108 stepping (does this always happen right after one instruction,
8109 when we do "s" in a function with no line numbers,
8110 or can this happen as a result of a return or longjmp?). */
8111 infrun_debug_printf ("line number info");
8112 end_stepping_range (ecs);
8113 return;
8116 /* Look for "calls" to inlined functions, part one. If the inline
8117 frame machinery detected some skipped call sites, we have entered
8118 a new inline function. */
8120 if ((*curr_frame_id == original_frame_id)
8121 && inline_skipped_frames (ecs->event_thread))
8123 infrun_debug_printf ("stepped into inlined function");
8125 symtab_and_line call_sal = find_frame_sal (frame);
8127 if (ecs->event_thread->control.step_over_calls != STEP_OVER_ALL)
8129 /* For "step", we're going to stop. But if the call site
8130 for this inlined function is on the same source line as
8131 we were previously stepping, go down into the function
8132 first. Otherwise stop at the call site. */
8134 if (call_sal.line == ecs->event_thread->current_line
8135 && call_sal.symtab == ecs->event_thread->current_symtab)
8137 step_into_inline_frame (ecs->event_thread);
8138 if (inline_frame_is_marked_for_skip (false, ecs->event_thread))
8140 keep_going (ecs);
8141 return;
8145 end_stepping_range (ecs);
8146 return;
8148 else
8150 /* For "next", we should stop at the call site if it is on a
8151 different source line. Otherwise continue through the
8152 inlined function. */
8153 if (call_sal.line == ecs->event_thread->current_line
8154 && call_sal.symtab == ecs->event_thread->current_symtab)
8155 keep_going (ecs);
8156 else
8157 end_stepping_range (ecs);
8158 return;
8162 /* Look for "calls" to inlined functions, part two. If we are still
8163 in the same real function we were stepping through, but we have
8164 to go further up to find the exact frame ID, we are stepping
8165 through a more inlined call beyond its call site. */
8167 if (get_frame_type (frame) == INLINE_FRAME
8168 && (*curr_frame_id != original_frame_id)
8169 && stepped_in_from (frame, original_frame_id))
8171 infrun_debug_printf ("stepping through inlined function");
8173 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL
8174 || inline_frame_is_marked_for_skip (false, ecs->event_thread))
8175 keep_going (ecs);
8176 else
8177 end_stepping_range (ecs);
8178 return;
8181 bool refresh_step_info = true;
8182 if ((ecs->event_thread->stop_pc () == stop_pc_sal.pc)
8183 && (ecs->event_thread->current_line != stop_pc_sal.line
8184 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
8186 /* We are at a different line. */
8188 if (stop_pc_sal.is_stmt)
8190 if (execution_direction == EXEC_REVERSE)
8192 /* We are stepping backwards; make sure we have reached the
8193 beginning of the line. */
8194 CORE_ADDR stop_pc = ecs->event_thread->stop_pc ();
8195 CORE_ADDR start_line_pc
8196 = update_line_range_start (stop_pc, ecs);
8198 if (stop_pc != start_line_pc)
8200 /* Have not reached the beginning of the source code line.
8201 Set a step range. Execution should stop in any function
8202 calls we execute back into before reaching the beginning
8203 of the line. */
8204 ecs->event_thread->control.step_range_start
8205 = start_line_pc;
8206 ecs->event_thread->control.step_range_end = stop_pc;
8207 set_step_info (ecs->event_thread, frame, stop_pc_sal);
8208 keep_going (ecs);
8209 return;
8213 /* We are at the start of a statement.
8215 So stop. Note that we don't stop if we step into the middle of a
8216 statement. That is said to make things like for (;;) statements
8217 work better. */
8218 infrun_debug_printf ("stepped to a different line");
8219 end_stepping_range (ecs);
8220 return;
8222 else if (*curr_frame_id == original_frame_id)
8224 /* We are not at the start of a statement, and we have not changed
8225 frame.
8227 We ignore this line table entry, and continue stepping forward,
8228 looking for a better place to stop. */
8229 refresh_step_info = false;
8230 infrun_debug_printf ("stepped to a different line, but "
8231 "it's not the start of a statement");
8233 else
8235 /* We are not at the start of a statement, and we have changed frame.
8237 We ignore this line table entry, and continue stepping forward,
8238 looking for a better place to stop. Keep refresh_step_info at
8239 true to note that the frame has changed, but ignore the line
8240 number to make sure we don't ignore a subsequent entry with the
8241 same line number. */
8242 stop_pc_sal.line = 0;
8243 infrun_debug_printf ("stepped to a different frame, but "
8244 "it's not the start of a statement");
8247 else if (execution_direction == EXEC_REVERSE
8248 && *curr_frame_id != original_frame_id
8249 && original_frame_id.code_addr_p && curr_frame_id->code_addr_p
8250 && original_frame_id.code_addr == curr_frame_id->code_addr)
8252 /* If we enter here, we're leaving a recursive function call. In this
8253 situation, we shouldn't refresh the step information, because if we
8254 do, we'll lose the frame_id of when we started stepping, and this
8255 will make GDB not know we need to print frame information. */
8256 refresh_step_info = false;
8257 infrun_debug_printf ("reverse stepping, left a recursive call, don't "
8258 "update step info so we remember we left a frame");
8261 /* We aren't done stepping.
8263 Optimize by setting the stepping range to the line.
8264 (We might not be in the original line, but if we entered a
8265 new line in mid-statement, we continue stepping. This makes
8266 things like for(;;) statements work better.)
8268 If we entered a SAL that indicates a non-statement line table entry,
8269 then we update the stepping range, but we don't update the step info,
8270 which includes things like the line number we are stepping away from.
8271 This means we will stop when we find a line table entry that is marked
8272 as is-statement, even if it matches the non-statement one we just
8273 stepped into. */
8275 ecs->event_thread->control.step_range_start = stop_pc_sal.pc;
8276 ecs->event_thread->control.step_range_end = stop_pc_sal.end;
8277 ecs->event_thread->control.may_range_step = 1;
8278 infrun_debug_printf
8279 ("updated step range, start = %s, end = %s, may_range_step = %d",
8280 paddress (gdbarch, ecs->event_thread->control.step_range_start),
8281 paddress (gdbarch, ecs->event_thread->control.step_range_end),
8282 ecs->event_thread->control.may_range_step);
8283 if (refresh_step_info)
8284 set_step_info (ecs->event_thread, frame, stop_pc_sal);
8286 infrun_debug_printf ("keep going");
8288 if (execution_direction == EXEC_REVERSE)
8290 CORE_ADDR stop_pc = ecs->event_thread->stop_pc ();
8292 /* Make sure the stop_pc is set to the beginning of the line. */
8293 if (stop_pc != ecs->event_thread->control.step_range_start)
8294 ecs->event_thread->control.step_range_start
8295 = update_line_range_start (stop_pc, ecs);
8298 keep_going (ecs);
8301 static bool restart_stepped_thread (process_stratum_target *resume_target,
8302 ptid_t resume_ptid);
8304 /* In all-stop mode, if we're currently stepping but have stopped in
8305 some other thread, we may need to switch back to the stepped
8306 thread. Returns true if we set the inferior running, false if we left
8307 it stopped (and the event needs further processing). */
8309 static bool
8310 switch_back_to_stepped_thread (struct execution_control_state *ecs)
8312 if (!target_is_non_stop_p ())
8314 /* If any thread is blocked on some internal breakpoint, and we
8315 simply need to step over that breakpoint to get it going
8316 again, do that first. */
8318 /* However, if we see an event for the stepping thread, then we
8319 know all other threads have been moved past their breakpoints
8320 already. Let the caller check whether the step is finished,
8321 etc., before deciding to move it past a breakpoint. */
8322 if (ecs->event_thread->control.step_range_end != 0)
8323 return false;
8325 /* Check if the current thread is blocked on an incomplete
8326 step-over, interrupted by a random signal. */
8327 if (ecs->event_thread->control.trap_expected
8328 && ecs->event_thread->stop_signal () != GDB_SIGNAL_TRAP)
8330 infrun_debug_printf
8331 ("need to finish step-over of [%s]",
8332 ecs->event_thread->ptid.to_string ().c_str ());
8333 keep_going (ecs);
8334 return true;
8337 /* Check if the current thread is blocked by a single-step
8338 breakpoint of another thread. */
8339 if (ecs->hit_singlestep_breakpoint)
8341 infrun_debug_printf ("need to step [%s] over single-step breakpoint",
8342 ecs->ptid.to_string ().c_str ());
8343 keep_going (ecs);
8344 return true;
8347 /* If this thread needs yet another step-over (e.g., stepping
8348 through a delay slot), do it first before moving on to
8349 another thread. */
8350 if (thread_still_needs_step_over (ecs->event_thread))
8352 infrun_debug_printf
8353 ("thread [%s] still needs step-over",
8354 ecs->event_thread->ptid.to_string ().c_str ());
8355 keep_going (ecs);
8356 return true;
8359 /* If scheduler locking applies even if not stepping, there's no
8360 need to walk over threads. Above we've checked whether the
8361 current thread is stepping. If some other thread not the
8362 event thread is stepping, then it must be that scheduler
8363 locking is not in effect. */
8364 if (schedlock_applies (ecs->event_thread))
8365 return false;
8367 /* Otherwise, we no longer expect a trap in the current thread.
8368 Clear the trap_expected flag before switching back -- this is
8369 what keep_going does as well, if we call it. */
8370 ecs->event_thread->control.trap_expected = 0;
8372 /* Likewise, clear the signal if it should not be passed. */
8373 if (!signal_program[ecs->event_thread->stop_signal ()])
8374 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
8376 if (restart_stepped_thread (ecs->target, ecs->ptid))
8378 prepare_to_wait (ecs);
8379 return true;
8382 switch_to_thread (ecs->event_thread);
8385 return false;
8388 /* Look for the thread that was stepping, and resume it.
8389 RESUME_TARGET / RESUME_PTID indicate the set of threads the caller
8390 is resuming. Return true if a thread was started, false
8391 otherwise. */
8393 static bool
8394 restart_stepped_thread (process_stratum_target *resume_target,
8395 ptid_t resume_ptid)
8397 /* Do all pending step-overs before actually proceeding with
8398 step/next/etc. */
8399 if (start_step_over ())
8400 return true;
8402 for (thread_info *tp : all_threads_safe ())
8404 if (tp->state == THREAD_EXITED)
8405 continue;
8407 if (tp->has_pending_waitstatus ())
8408 continue;
8410 /* Ignore threads of processes the caller is not
8411 resuming. */
8412 if (!sched_multi
8413 && (tp->inf->process_target () != resume_target
8414 || tp->inf->pid != resume_ptid.pid ()))
8415 continue;
8417 if (tp->control.trap_expected)
8419 infrun_debug_printf ("switching back to stepped thread (step-over)");
8421 if (keep_going_stepped_thread (tp))
8422 return true;
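/* If no pending step-over had to be finished above, look for the
   thread that was actually stepping a source range and resume that
   one. */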
8426 for (thread_info *tp : all_threads_safe ())
8428 if (tp->state == THREAD_EXITED)
8429 continue;
8431 if (tp->has_pending_waitstatus ())
8432 continue;
8434 /* Ignore threads of processes the caller is not
8435 resuming. */
8436 if (!sched_multi
8437 && (tp->inf->process_target () != resume_target
8438 || tp->inf->pid != resume_ptid.pid ()))
8439 continue;
8441 /* Did we find the stepping thread? */
8442 if (tp->control.step_range_end)
8444 infrun_debug_printf ("switching back to stepped thread (stepping)");
8446 if (keep_going_stepped_thread (tp))
8447 return true;
8451 return false;
8454 /* See infrun.h. */
8456 void
8457 restart_after_all_stop_detach (process_stratum_target *proc_target)
8459 /* Note we don't check target_is_non_stop_p() here, because the
8460 current inferior may no longer have a process_stratum target
8461 pushed, as we just detached. */
8463 /* See if we have a THREAD_RUNNING thread that needs to be
8464 re-resumed. If we have any thread that is already executing,
8465 then we don't need to resume the target -- it has already been
8466 resumed. With the remote target (in all-stop), it's even
8467 impossible to issue another resumption if the target is already
8468 resumed, until the target reports a stop. */
8469 for (thread_info *thr : all_threads (proc_target))
8471 if (thr->state != THREAD_RUNNING)
8472 continue;
8474 /* If we have any thread that is already executing, then we
8475 don't need to resume the target -- it has already been
8476 resumed. */
8477 if (thr->executing ())
8478 return;
8480 /* If we have a pending event to process, skip resuming the
8481 target and go straight to processing it. */
8482 if (thr->resumed () && thr->has_pending_waitstatus ())
8483 return;
8486 /* Alright, we need to re-resume the target. If a thread was
8487 stepping, we need to restart it stepping. */
8488 if (restart_stepped_thread (proc_target, minus_one_ptid))
8489 return;
8491 /* Otherwise, find the first THREAD_RUNNING thread and resume
8492 it. */
8493 for (thread_info *thr : all_threads (proc_target))
8495 if (thr->state != THREAD_RUNNING)
8496 continue;
8498 execution_control_state ecs (thr);
8499 switch_to_thread (thr);
8500 keep_going (&ecs);
8501 return;
8505 /* Set a previously stepped thread back to stepping. Returns true on
8506 success, false if the resume is not possible (e.g., the thread
8507 vanished). */
8509 static bool
8510 keep_going_stepped_thread (struct thread_info *tp)
8512 frame_info_ptr frame;
8514 /* If the stepping thread exited, then don't try to switch back and
8515 resume it, which could fail in several different ways depending
8516 on the target. Instead, just keep going.
8518 We can find a stepping dead thread in the thread list in two
8519 cases:
8521 - The target supports thread exit events, and when the target
8522 tries to delete the thread from the thread list, inferior_ptid
8523 pointed at the exiting thread. In such a case, calling
8524 delete_thread does not really remove the thread from the list;
8525 instead, the thread is left listed, with 'exited' state.
8527 - The target's debug interface does not support thread exit
8528 events, and so we have no idea whatsoever if the previously
8529 stepping thread is still alive. For that reason, we need to
8530 synchronously query the target now. */
8532 if (tp->state == THREAD_EXITED || !target_thread_alive (tp->ptid))
8534 infrun_debug_printf ("not resuming previously stepped thread, it has "
8535 "vanished");
8537 delete_thread (tp);
8538 return false;
8541 infrun_debug_printf ("resuming previously stepped thread");
8543 execution_control_state ecs (tp);
8544 switch_to_thread (tp);
8546 tp->set_stop_pc (regcache_read_pc (get_thread_regcache (tp)));
8547 frame = get_current_frame ();
8549 /* If the PC of the thread we were trying to single-step has
8550 changed, then that thread has trapped or been signaled, but the
8551 event has not been reported to GDB yet. Re-poll the target
8552 looking for this particular thread's event (i.e. temporarily
8553 enable schedlock) by:
8555 - setting a break at the current PC
8556 - resuming that particular thread, only (by setting trap
8557 expected)
8559 This prevents us from continuously moving the single-step breakpoint
8560 forward, one instruction at a time, overstepping. */
8562 if (tp->stop_pc () != tp->prev_pc)
8564 ptid_t resume_ptid;
8566 infrun_debug_printf ("expected thread advanced also (%s -> %s)",
8567 paddress (current_inferior ()->arch (), tp->prev_pc),
8568 paddress (current_inferior ()->arch (),
8569 tp->stop_pc ()));
8571 /* Clear the info of the previous step-over, as it's no longer
8572 valid (if the thread was trying to step over a breakpoint, it
8573 has already succeeded). It's what keep_going would do too,
8574 if we called it. Do this before trying to insert the sss
8575 breakpoint, otherwise if we were previously trying to step
8576 over this exact address in another thread, the breakpoint is
8577 skipped. */
8578 clear_step_over_info ();
8579 tp->control.trap_expected = 0;
8581 insert_single_step_breakpoint (get_frame_arch (frame),
8582 get_frame_address_space (frame),
8583 tp->stop_pc ());
8585 tp->set_resumed (true);
8586 resume_ptid = internal_resume_ptid (tp->control.stepping_command);
8587 do_target_resume (resume_ptid, false, GDB_SIGNAL_0);
8589 else
8591 infrun_debug_printf ("expected thread still hasn't advanced");
8593 keep_going_pass_signal (&ecs);
8596 return true;
8599 /* Is thread TP in the middle of (software or hardware)
8600 single-stepping? (Note the result of this function must never be
8601 passed directly as target_resume's STEP parameter.) */
8603 static bool
8604 currently_stepping (struct thread_info *tp)
8606 return ((tp->control.step_range_end
8607 && tp->control.step_resume_breakpoint == nullptr)
8608 || tp->control.trap_expected
8609 || tp->stepped_breakpoint
8610 || bpstat_should_step ());
8613 /* Inferior has stepped into a subroutine call with source code that
8614 we should not step over. Step to the first line of code in
8615 it. */
8617 static void
8618 handle_step_into_function (struct gdbarch *gdbarch,
8619 struct execution_control_state *ecs)
8621 fill_in_stop_func (gdbarch, ecs);
8623 compunit_symtab *cust
8624 = find_pc_compunit_symtab (ecs->event_thread->stop_pc ());
8625 if (cust != nullptr && cust->language () != language_asm)
8626 ecs->stop_func_start
8627 = gdbarch_skip_prologue_noexcept (gdbarch, ecs->stop_func_start);
8629 symtab_and_line stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
8630 /* Use the step-resume breakpoint to step until the end of the prologue,
8631 even if that involves jumps (as it seems to on the vax under
8632 4.2). */
8633 /* If the prologue ends in the middle of a source line, continue to
8634 the end of that source line (if it is still within the function).
8635 Otherwise, just go to end of prologue. */
8636 if (stop_func_sal.end
8637 && stop_func_sal.pc != ecs->stop_func_start
8638 && stop_func_sal.end < ecs->stop_func_end)
8639 ecs->stop_func_start = stop_func_sal.end;
8641 /* Architectures which require breakpoint adjustment might not be able
8642 to place a breakpoint at the computed address. If so, the test
8643 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
8644 ecs->stop_func_start to an address at which a breakpoint may be
8645 legitimately placed.
8647 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
8648 made, GDB will enter an infinite loop when stepping through
8649 optimized code consisting of VLIW instructions which contain
8650 subinstructions corresponding to different source lines. On
8651 FR-V, it's not permitted to place a breakpoint on any but the
8652 first subinstruction of a VLIW instruction. When a breakpoint is
8653 set, GDB will adjust the breakpoint address to the beginning of
8654 the VLIW instruction. Thus, we need to make the corresponding
8655 adjustment here when computing the stop address. */
8657 if (gdbarch_adjust_breakpoint_address_p (gdbarch))
8659 ecs->stop_func_start
8660 = gdbarch_adjust_breakpoint_address (gdbarch,
8661 ecs->stop_func_start);
8664 if (ecs->stop_func_start == ecs->event_thread->stop_pc ())
8666 /* We are already there: stop now. */
8667 end_stepping_range (ecs);
8668 return;
8670 else
8672 /* Put the step-breakpoint there and go until there. */
8673 symtab_and_line sr_sal;
8674 sr_sal.pc = ecs->stop_func_start;
8675 sr_sal.section = find_pc_overlay (ecs->stop_func_start);
8676 sr_sal.pspace = get_frame_program_space (get_current_frame ());
8678 /* Do not specify what the fp should be when we stop since on
8679 some machines the prologue is where the new fp value is
8680 established. */
8681 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
8683 /* And make sure stepping stops right away then. */
8684 ecs->event_thread->control.step_range_end
8685 = ecs->event_thread->control.step_range_start;
8687 keep_going (ecs);
8690 /* Inferior has stepped backward into a subroutine call with source
8691 code that we should not step over. Step to the beginning of the
8692 last line of code in it. */
8694 static void
8695 handle_step_into_function_backward (struct gdbarch *gdbarch,
8696 struct execution_control_state *ecs)
8698 struct compunit_symtab *cust;
8699 struct symtab_and_line stop_func_sal;
8701 fill_in_stop_func (gdbarch, ecs);
8703 cust = find_pc_compunit_symtab (ecs->event_thread->stop_pc ());
8704 if (cust != nullptr && cust->language () != language_asm)
8705 ecs->stop_func_start
8706 = gdbarch_skip_prologue_noexcept (gdbarch, ecs->stop_func_start);
8708 stop_func_sal = find_pc_line (ecs->event_thread->stop_pc (), 0);
8710 /* OK, we're just going to keep stepping here. */
8711 if (stop_func_sal.pc == ecs->event_thread->stop_pc ())
8713 /* We're there already. Just stop stepping now. */
8714 end_stepping_range (ecs);
8716 else
8718 /* Else just reset the step range and keep going.
8719 No step-resume breakpoint, they don't work for
8720 epilogues, which can have multiple entry paths. */
8721 ecs->event_thread->control.step_range_start = stop_func_sal.pc;
8722 ecs->event_thread->control.step_range_end = stop_func_sal.end;
8723 keep_going (ecs);
8725 return;
8728 /* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
8729 This is used both to skip functions and to skip over code. */
8731 static void
8732 insert_step_resume_breakpoint_at_sal_1 (struct gdbarch *gdbarch,
8733 struct symtab_and_line sr_sal,
8734 struct frame_id sr_id,
8735 enum bptype sr_type)
8737 /* There should never be more than one step-resume or longjmp-resume
8738 breakpoint per thread, so we should never be setting a new
8739 step_resume_breakpoint when one is already active. */
8740 gdb_assert (inferior_thread ()->control.step_resume_breakpoint == nullptr);
8741 gdb_assert (sr_type == bp_step_resume || sr_type == bp_hp_step_resume);
8743 infrun_debug_printf ("inserting step-resume breakpoint at %s",
8744 paddress (gdbarch, sr_sal.pc));
8746 inferior_thread ()->control.step_resume_breakpoint
8747 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, sr_type).release ();
8750 void
8751 insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
8752 struct symtab_and_line sr_sal,
8753 struct frame_id sr_id)
8755 insert_step_resume_breakpoint_at_sal_1 (gdbarch,
8756 sr_sal, sr_id,
8757 bp_step_resume);
8760 /* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc.
8761 This is used to skip a potential signal handler.
8763 This is called with the interrupted function's frame. The signal
8764 handler, when it returns, will resume the interrupted function at
8765 RETURN_FRAME.pc. */
8767 static void
8768 insert_hp_step_resume_breakpoint_at_frame (const frame_info_ptr &return_frame)
8770 gdb_assert (return_frame != nullptr);
8772 struct gdbarch *gdbarch = get_frame_arch (return_frame);
8774 symtab_and_line sr_sal;
8775 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
8776 sr_sal.section = find_pc_overlay (sr_sal.pc);
8777 sr_sal.pspace = get_frame_program_space (return_frame);
8779 insert_step_resume_breakpoint_at_sal_1 (gdbarch, sr_sal,
8780 get_stack_frame_id (return_frame),
8781 bp_hp_step_resume);
8784 /* Insert a "step-resume breakpoint" at the previous frame's PC. This
8785 is used to skip a function after stepping into it (for "next" or if
8786 the called function has no debugging information).
8788 The current function has almost always been reached by single
8789 stepping a call or return instruction. NEXT_FRAME belongs to the
8790 current function, and the breakpoint will be set at the caller's
8791 resume address.
8793 This is a separate function rather than reusing
8794 insert_hp_step_resume_breakpoint_at_frame in order to avoid
8795 get_prev_frame, which may stop prematurely (see the implementation
8796 of frame_unwind_caller_id for an example). */
8798 static void
8799 insert_step_resume_breakpoint_at_caller (const frame_info_ptr &next_frame)
8801 /* We shouldn't have gotten here if we don't know where the call site
8802 is. */
8803 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
8805 struct gdbarch *gdbarch = frame_unwind_caller_arch (next_frame);
8807 symtab_and_line sr_sal;
8808 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
8809 frame_unwind_caller_pc (next_frame));
8810 sr_sal.section = find_pc_overlay (sr_sal.pc);
8811 sr_sal.pspace = frame_unwind_program_space (next_frame);
8813 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
8814 frame_unwind_caller_id (next_frame));
8817 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
8818 new breakpoint at the target of a jmp_buf. The handling of
8819 longjmp-resume uses the same mechanisms used for handling
8820 "step-resume" breakpoints. */
8822 static void
8823 insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
8825 /* There should never be more than one longjmp-resume breakpoint per
8826 thread, so we should never be setting a new
8827 longjmp_resume_breakpoint when one is already active. */
8828 gdb_assert (inferior_thread ()->control.exception_resume_breakpoint == nullptr);
8830 infrun_debug_printf ("inserting longjmp-resume breakpoint at %s",
8831 paddress (gdbarch, pc));
8833 inferior_thread ()->control.exception_resume_breakpoint =
8834 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume).release ();
8837 /* Insert an exception resume breakpoint. TP is the thread throwing
8838 the exception. The block B is the block of the unwinder debug hook
8839 function. FRAME is the frame corresponding to the call to this
8840 function. SYM is the symbol of the function argument holding the
8841 target PC of the exception. */
8843 static void
8844 insert_exception_resume_breakpoint (struct thread_info *tp,
8845 const struct block *b,
8846 const frame_info_ptr &frame,
8847 struct symbol *sym)
8851 struct block_symbol vsym;
8852 struct value *value;
8853 CORE_ADDR handler;
8854 struct breakpoint *bp;
8856 vsym = lookup_symbol_search_name (sym->search_name (),
8857 b, SEARCH_VAR_DOMAIN);
8858 value = read_var_value (vsym.symbol, vsym.block, frame);
8859 /* If the value was optimized out, revert to the old behavior. */
8860 if (! value->optimized_out ())
8862 handler = value_as_address (value);
8864 infrun_debug_printf ("exception resume at %lx",
8865 (unsigned long) handler);
8867 /* set_momentary_breakpoint_at_pc creates a thread-specific
8868 breakpoint for the current inferior thread. */
8869 gdb_assert (tp == inferior_thread ());
8870 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
8871 handler,
8872 bp_exception_resume).release ();
8874 tp->control.exception_resume_breakpoint = bp;
8877 catch (const gdb_exception_error &e)
8879 /* We want to ignore errors here. */
8883 /* A helper for check_exception_resume that sets an
8884 exception-breakpoint based on a SystemTap probe. */
8886 static void
8887 insert_exception_resume_from_probe (struct thread_info *tp,
8888 const struct bound_probe *probe,
8889 const frame_info_ptr &frame)
8891 struct value *arg_value;
8892 CORE_ADDR handler;
8893 struct breakpoint *bp;
8895 arg_value = probe_safe_evaluate_at_pc (frame, 1);
8896 if (!arg_value)
8897 return;
8899 handler = value_as_address (arg_value);
8901 infrun_debug_printf ("exception resume at %s",
8902 paddress (probe->objfile->arch (), handler));
8904 /* set_momentary_breakpoint_at_pc creates a thread-specific breakpoint
8905 for the current inferior thread. */
8906 gdb_assert (tp == inferior_thread ());
8907 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
8908 handler, bp_exception_resume).release ();
8909 tp->control.exception_resume_breakpoint = bp;
8912 /* This is called when an exception has been intercepted. Check to
8913 see whether the exception's destination is of interest, and if so,
8914 set an exception resume breakpoint there. */
8916 static void
8917 check_exception_resume (struct execution_control_state *ecs,
8918 const frame_info_ptr &frame)
8920 struct bound_probe probe;
8921 struct symbol *func;
8923 /* First see if this exception unwinding breakpoint was set via a
8924 SystemTap probe point. If so, the probe has two arguments: the
8925 CFA and the HANDLER. We ignore the CFA, extract the handler, and
8926 set a breakpoint there. */
8927 probe = find_probe_by_pc (get_frame_pc (frame));
8928 if (probe.prob)
8930 insert_exception_resume_from_probe (ecs->event_thread, &probe, frame);
8931 return;
8934 func = get_frame_function (frame);
8935 if (!func)
8936 return;
8940 const struct block *b;
8941 int argno = 0;
8943 /* The exception breakpoint is a thread-specific breakpoint on
8944 the unwinder's debug hook, declared as:
8946 void _Unwind_DebugHook (void *cfa, void *handler);
8948 The CFA argument indicates the frame to which control is
8949 about to be transferred. HANDLER is the destination PC.
8951 We ignore the CFA and set a temporary breakpoint at HANDLER.
8952 This is not extremely efficient but it avoids issues in gdb
8953 with computing the DWARF CFA, and it also works even in weird
8954 cases such as throwing an exception from inside a signal
8955 handler. */
8957 b = func->value_block ();
8958 for (struct symbol *sym : block_iterator_range (b))
8960 if (!sym->is_argument ())
8961 continue;
8963 if (argno == 0)
8964 ++argno;
8965 else
8967 insert_exception_resume_breakpoint (ecs->event_thread,
8968 b, frame, sym);
8969 break;
8973 catch (const gdb_exception_error &e)
8978 static void
8979 stop_waiting (struct execution_control_state *ecs)
8981 infrun_debug_printf ("stop_waiting");
8983 /* Let callers know we don't want to wait for the inferior anymore. */
8984 ecs->wait_some_more = 0;
8987 /* Like keep_going, but passes the signal to the inferior, even if the
8988 signal is set to nopass. */
8990 static void
8991 keep_going_pass_signal (struct execution_control_state *ecs)
8993 gdb_assert (ecs->event_thread->ptid == inferior_ptid);
8994 gdb_assert (!ecs->event_thread->resumed ());
8996 /* Save the pc before execution, to compare with pc after stop. */
8997 ecs->event_thread->prev_pc
8998 = regcache_read_pc_protected (get_thread_regcache (ecs->event_thread));
9000 if (ecs->event_thread->control.trap_expected)
9002 struct thread_info *tp = ecs->event_thread;
9004 infrun_debug_printf ("%s has trap_expected set, "
9005 "resuming to collect trap",
9006 tp->ptid.to_string ().c_str ());
9008 /* We haven't yet gotten our trap, and either: intercepted a
9009 non-signal event (e.g., a fork); or took a signal which we
9010 are supposed to pass through to the inferior. Simply
9011 continue. */
9012 resume (ecs->event_thread->stop_signal ());
9014 else if (step_over_info_valid_p ())
9016 /* Another thread is stepping over a breakpoint in-line. If
9017 this thread needs a step-over too, queue the request. In
9018 either case, this resume must be deferred for later. */
9019 struct thread_info *tp = ecs->event_thread;
9021 if (ecs->hit_singlestep_breakpoint
9022 || thread_still_needs_step_over (tp))
9024 infrun_debug_printf ("step-over already in progress: "
9025 "step-over for %s deferred",
9026 tp->ptid.to_string ().c_str ());
9027 global_thread_step_over_chain_enqueue (tp);
9029 else
9030 infrun_debug_printf ("step-over in progress: resume of %s deferred",
9031 tp->ptid.to_string ().c_str ());
9033 else
9035 regcache *regcache = get_thread_regcache (ecs->event_thread);
9036 int remove_bp;
9037 int remove_wps;
9038 step_over_what step_what;
9040 /* Either the trap was not expected, but we are continuing
9041 anyway (if we got a signal, the user asked it be passed to
9042 the child)
9043 -- or --
9044 We got our expected trap, but decided we should resume from it.
9047 We're going to run this baby now!
9049 Note that insert_breakpoints won't try to re-insert
9050 already inserted breakpoints. Therefore, we don't
9051 care if breakpoints were already inserted, or not. */
9053 /* If we need to step over a breakpoint, and we're not using
9054 displaced stepping to do so, insert all breakpoints
9055 (watchpoints, etc.) but the one we're stepping over, step one
9056 instruction, and then re-insert the breakpoint when that step
9057 is finished. */
9059 step_what = thread_still_needs_step_over (ecs->event_thread);
9061 remove_bp = (ecs->hit_singlestep_breakpoint
9062 || (step_what & STEP_OVER_BREAKPOINT));
9063 remove_wps = (step_what & STEP_OVER_WATCHPOINT);
9065 /* We can't use displaced stepping if we need to step past a
9066 watchpoint. The instruction copied to the scratch pad would
9067 still trigger the watchpoint. */
9068 if (remove_bp
9069 && (remove_wps || !use_displaced_stepping (ecs->event_thread)))
9071 set_step_over_info (ecs->event_thread->inf->aspace.get (),
9072 regcache_read_pc (regcache), remove_wps,
9073 ecs->event_thread->global_num);
9075 else if (remove_wps)
9076 set_step_over_info (nullptr, 0, remove_wps, -1);
9078 /* If we now need to do an in-line step-over, we need to stop
9079 all other threads. Note this must be done before
9080 insert_breakpoints below, because that removes the breakpoint
9081 we're about to step over, otherwise other threads could miss
9082 it. */
9083 if (step_over_info_valid_p () && target_is_non_stop_p ())
9084 stop_all_threads ("starting in-line step-over");
9086 /* Stop stepping if inserting breakpoints fails. */
9089 insert_breakpoints ();
9091 catch (const gdb_exception_error &e)
9093 exception_print (gdb_stderr, e);
9094 stop_waiting (ecs);
9095 clear_step_over_info ();
9096 return;
9099 ecs->event_thread->control.trap_expected = (remove_bp || remove_wps);
9101 resume (ecs->event_thread->stop_signal ());
9104 prepare_to_wait (ecs);
9107 /* Called when we should continue running the inferior, because the
9108 current event doesn't cause a user visible stop. This does the
9109 resuming part; waiting for the next event is done elsewhere. */
9111 static void
9112 keep_going (struct execution_control_state *ecs)
9114 if (ecs->event_thread->control.trap_expected
9115 && ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP)
9116 ecs->event_thread->control.trap_expected = 0;
9118 if (!signal_program[ecs->event_thread->stop_signal ()])
9119 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
9120 keep_going_pass_signal (ecs);
9123 /* This function normally comes after a resume, before
9124 handle_inferior_event exits. It takes care of any last bits of
9125 housekeeping, and sets the all-important wait_some_more flag. */
9127 static void
9128 prepare_to_wait (struct execution_control_state *ecs)
9130 infrun_debug_printf ("prepare_to_wait");
9132 ecs->wait_some_more = 1;
9134 /* If the target can't async, emulate it by marking the infrun event
9135 handler such that as soon as we get back to the event-loop, we
9136 immediately end up in fetch_inferior_event again calling
9137 target_wait. */
9138 if (!target_can_async_p ())
9139 mark_infrun_async_event_handler ();
9142 /* We are done with the step range of a step/next/si/ni command.
9143 Called once for each n of a "step n" operation. */
9145 static void
9146 end_stepping_range (struct execution_control_state *ecs)
9148 ecs->event_thread->control.stop_step = 1;
9149 stop_waiting (ecs);
9152 /* Several print_*_reason functions to print why the inferior has stopped.
9153 We always print something when the inferior exits, or receives a signal.
9154 The rest of the cases are dealt with later on in normal_stop and
9155 print_it_typical. Ideally there should be a call to one of these
9156 print_*_reason functions from handle_inferior_event each time
9157 stop_waiting is called.
9159 Note that we don't call these directly; instead we delegate that to
9160 the interpreters, through observers. Interpreters then call these
9161 with whatever uiout is right. */
9163 void
9164 print_signal_exited_reason (struct ui_out *uiout, enum gdb_signal siggnal)
9166 annotate_signalled ();
9167 if (uiout->is_mi_like_p ())
9168 uiout->field_string
9169 ("reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
9170 uiout->text ("\nProgram terminated with signal ");
9171 annotate_signal_name ();
9172 uiout->field_string ("signal-name",
9173 gdb_signal_to_name (siggnal));
9174 annotate_signal_name_end ();
9175 uiout->text (", ");
9176 annotate_signal_string ();
9177 uiout->field_string ("signal-meaning",
9178 gdb_signal_to_string (siggnal));
9179 annotate_signal_string_end ();
9180 uiout->text (".\n");
9181 uiout->text ("The program no longer exists.\n");
9184 void
9185 print_exited_reason (struct ui_out *uiout, int exitstatus)
9187 struct inferior *inf = current_inferior ();
9188 std::string pidstr = target_pid_to_str (ptid_t (inf->pid));
9190 annotate_exited (exitstatus);
9191 if (exitstatus)
9193 if (uiout->is_mi_like_p ())
9194 uiout->field_string ("reason", async_reason_lookup (EXEC_ASYNC_EXITED));
9195 std::string exit_code_str
9196 = string_printf ("0%o", (unsigned int) exitstatus);
9197 uiout->message ("[Inferior %s (%s) exited with code %pF]\n",
9198 plongest (inf->num), pidstr.c_str (),
9199 string_field ("exit-code", exit_code_str.c_str ()));
9201 else
9203 if (uiout->is_mi_like_p ())
9204 uiout->field_string
9205 ("reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
9206 uiout->message ("[Inferior %s (%s) exited normally]\n",
9207 plongest (inf->num), pidstr.c_str ());
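/* For illustration only (inferior and process numbers here are made up):
   given the format strings above, the CLI renders these stops along the
   lines of

     [Inferior 1 (process 1234) exited normally]
     [Inferior 1 (process 1234) exited with code 02]

   where the exit code is printed in octal, and MI consumers additionally
   receive the "reason" and "exit-code" fields.  */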
9211 void
9212 print_signal_received_reason (struct ui_out *uiout, enum gdb_signal siggnal)
9214 struct thread_info *thr = inferior_thread ();
9216 infrun_debug_printf ("signal = %s", gdb_signal_to_string (siggnal));
9218 annotate_signal ();
9220 if (uiout->is_mi_like_p ())
9222 else if (show_thread_that_caused_stop ())
9224 uiout->text ("\nThread ");
9225 uiout->field_string ("thread-id", print_thread_id (thr));
9227 const char *name = thread_name (thr);
9228 if (name != nullptr)
9230 uiout->text (" \"");
9231 uiout->field_string ("name", name);
9232 uiout->text ("\"");
9235 else
9236 uiout->text ("\nProgram");
9238 if (siggnal == GDB_SIGNAL_0 && !uiout->is_mi_like_p ())
9239 uiout->text (" stopped");
9240 else
9242 uiout->text (" received signal ");
9243 annotate_signal_name ();
9244 if (uiout->is_mi_like_p ())
9245 uiout->field_string
9246 ("reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
9247 uiout->field_string ("signal-name", gdb_signal_to_name (siggnal));
9248 annotate_signal_name_end ();
9249 uiout->text (", ");
9250 annotate_signal_string ();
9251 uiout->field_string ("signal-meaning", gdb_signal_to_string (siggnal));
9253 regcache *regcache = get_thread_regcache (thr);
9254 struct gdbarch *gdbarch = regcache->arch ();
9255 if (gdbarch_report_signal_info_p (gdbarch))
9256 gdbarch_report_signal_info (gdbarch, uiout, siggnal);
9258 annotate_signal_string_end ();
9260 uiout->text (".\n");
9263 void
9264 print_no_history_reason (struct ui_out *uiout)
9266 if (uiout->is_mi_like_p ())
9267 uiout->field_string ("reason", async_reason_lookup (EXEC_ASYNC_NO_HISTORY));
9268 else
9269 uiout->text ("\nNo more reverse-execution history.\n");
9272 /* Print current location without a level number, if we have changed
9273 functions or hit a breakpoint. Print source line if we have one.
9274 bpstat_print contains the logic deciding in detail what to print,
9275 based on the event(s) that just occurred. */
9277 static void
9278 print_stop_location (const target_waitstatus &ws)
9280 int bpstat_ret;
9281 enum print_what source_flag;
9282 int do_frame_printing = 1;
9283 struct thread_info *tp = inferior_thread ();
9285 bpstat_ret = bpstat_print (tp->control.stop_bpstat, ws.kind ());
9286 switch (bpstat_ret)
9288 case PRINT_UNKNOWN:
9289 /* FIXME: cagney/2002-12-01: Given that a frame ID does (or
9290 should) carry around the function and does (or should) use
9291 that when doing a frame comparison. */
9292 if (tp->control.stop_step
9293 && (tp->control.step_frame_id
9294 == get_frame_id (get_current_frame ()))
9295 && (tp->control.step_start_function
9296 == find_pc_function (tp->stop_pc ())))
9298 /* Finished step, just print source line. */
9299 source_flag = SRC_LINE;
9301 else
9303 /* Print location and source line. */
9304 source_flag = SRC_AND_LOC;
9306 break;
9307 case PRINT_SRC_AND_LOC:
9308 /* Print location and source line. */
9309 source_flag = SRC_AND_LOC;
9310 break;
9311 case PRINT_SRC_ONLY:
9312 source_flag = SRC_LINE;
9313 break;
9314 case PRINT_NOTHING:
9315 /* Something bogus. */
9316 source_flag = SRC_LINE;
9317 do_frame_printing = 0;
9318 break;
9319 default:
9320 internal_error (_("Unknown value."));
9323 /* The behavior of this routine with respect to the source
9324 flag is:
9325 SRC_LINE: Print only source line
9326 LOCATION: Print only location
9327 SRC_AND_LOC: Print location and source line. */
9328 if (do_frame_printing)
9329 print_stack_frame (get_selected_frame (nullptr), 0, source_flag, 1);
9332 /* See `print_stop_event` in infrun.h. */
9334 static void
9335 do_print_stop_event (struct ui_out *uiout, bool displays)
9337 struct target_waitstatus last;
9338 struct thread_info *tp;
9340 get_last_target_status (nullptr, nullptr, &last);
9343 scoped_restore save_uiout = make_scoped_restore (&current_uiout, uiout);
9345 print_stop_location (last);
9347 /* Display the auto-display expressions. */
9348 if (displays)
9349 do_displays ();
9352 tp = inferior_thread ();
9353 if (tp->thread_fsm () != nullptr
9354 && tp->thread_fsm ()->finished_p ())
9356 struct return_value_info *rv;
9358 rv = tp->thread_fsm ()->return_value ();
9359 if (rv != nullptr)
9360 print_return_value (uiout, rv);
9364 /* See infrun.h. This function itself sets up buffered output for the
9365 duration of do_print_stop_event, which performs the actual event
9366 printing. */
9368 void
9369 print_stop_event (struct ui_out *uiout, bool displays)
9371 do_with_buffered_output (do_print_stop_event, uiout, displays);
9374 /* See infrun.h. */
9376 void
9377 maybe_remove_breakpoints (void)
9379 if (!breakpoints_should_be_inserted_now () && target_has_execution ())
9381 if (remove_breakpoints ())
9383 target_terminal::ours_for_output ();
9384 gdb_printf (_("Cannot remove breakpoints because "
9385 "program is no longer writable.\nFurther "
9386 "execution is probably impossible.\n"));
9391 /* The execution context that just caused a normal stop. */
9393 struct stop_context
9395 stop_context ();
9397 DISABLE_COPY_AND_ASSIGN (stop_context);
9399 bool changed () const;
9401 /* The stop ID. */
9402 ULONGEST stop_id;
9404 /* The event PTID. */
9406 ptid_t ptid;
9408 /* If stopped for a thread event, this is the thread that caused the
9409 stop. */
9410 thread_info_ref thread;
9412 /* The inferior that caused the stop. */
9413 int inf_num;
9416 /* Initializes a new stop context. If stopped for a thread event, this
9417 takes a strong reference to the thread. */
9419 stop_context::stop_context ()
9421 stop_id = get_stop_id ();
9422 ptid = inferior_ptid;
9423 inf_num = current_inferior ()->num;
9425 if (inferior_ptid != null_ptid)
9427 /* Take a strong reference so that the thread can't be deleted
9428 yet. */
9429 thread = thread_info_ref::new_reference (inferior_thread ());
9433 /* Return true if the current context no longer matches the saved stop
9434 context. */
9436 bool
9437 stop_context::changed () const
9439 if (ptid != inferior_ptid)
9440 return true;
9441 if (inf_num != current_inferior ()->num)
9442 return true;
9443 if (thread != nullptr && thread->state != THREAD_STOPPED)
9444 return true;
9445 if (get_stop_id () != stop_id)
9446 return true;
9447 return false;
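/* The intended use is the pattern in normal_stop below: take a stop_context
   snapshot before running user-supplied code (the stop hook), and skip
   presenting the stop if changed () reports that said code resumed the
   target, switched thread/inferior, or otherwise invalidated the stop.  */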
9450 /* See infrun.h. */
9452 bool
9453 normal_stop ()
9455 struct target_waitstatus last;
9457 get_last_target_status (nullptr, nullptr, &last);
9459 new_stop_id ();
9461 /* If an exception is thrown from this point on, make sure to
9462 propagate GDB's knowledge of the executing state to the
9463 frontend/user running state. A QUIT is an easy exception to see
9464 here, so do this before any filtered output. */
9466 ptid_t finish_ptid = null_ptid;
9468 if (!non_stop)
9469 finish_ptid = minus_one_ptid;
9470 else if (last.kind () == TARGET_WAITKIND_SIGNALLED
9471 || last.kind () == TARGET_WAITKIND_EXITED)
9473 /* On some targets, we may still have live threads in the
9474 inferior when we get a process exit event. E.g., for
9475 "checkpoint", when the current checkpoint/fork exits,
9476 linux-fork.c automatically switches to another fork from
9477 within target_mourn_inferior. */
9478 if (inferior_ptid != null_ptid)
9479 finish_ptid = ptid_t (inferior_ptid.pid ());
9481 else if (last.kind () != TARGET_WAITKIND_NO_RESUMED
9482 && last.kind () != TARGET_WAITKIND_THREAD_EXITED)
9483 finish_ptid = inferior_ptid;
9485 std::optional<scoped_finish_thread_state> maybe_finish_thread_state;
9486 if (finish_ptid != null_ptid)
9488 maybe_finish_thread_state.emplace
9489 (user_visible_resume_target (finish_ptid), finish_ptid);
9492 /* As we're presenting a stop, and potentially removing breakpoints,
9493 update the thread list so we can tell whether there are threads
9494 running on the target. With target remote, for example, we can
9495 only learn about new threads when we explicitly update the thread
9496 list. Do this before notifying the interpreters about signal
9497 stops, end of stepping ranges, etc., so that the "new thread"
9498 output is emitted before e.g., "Program received signal FOO",
9499 instead of after. */
9500 update_thread_list ();
9502 if (last.kind () == TARGET_WAITKIND_STOPPED && stopped_by_random_signal)
9503 notify_signal_received (inferior_thread ()->stop_signal ());
9505 /* As with the notification of thread events, we want to delay
9506 notifying the user that we've switched thread context until
9507 the inferior actually stops.
9509 There's no point in saying anything if the inferior has exited.
9510 Note that SIGNALLED here means "exited with a signal", not
9511 "received a signal".
9513 Also skip saying anything in non-stop mode. In that mode, as we
9514 don't want GDB to switch threads behind the user's back, to avoid
9515 races where the user is typing a command to apply to thread x,
9516 but GDB switches to thread y before the user finishes entering
9517 the command, fetch_inferior_event installs a cleanup to restore
9518 the current thread back to the thread the user had selected right
9519 after this event is handled, so we're not really switching, only
9520 informing of a stop. */
9521 if (!non_stop)
9523 if ((last.kind () != TARGET_WAITKIND_SIGNALLED
9524 && last.kind () != TARGET_WAITKIND_EXITED
9525 && last.kind () != TARGET_WAITKIND_NO_RESUMED
9526 && last.kind () != TARGET_WAITKIND_THREAD_EXITED)
9527 && target_has_execution ()
9528 && previous_thread != inferior_thread ())
9530 SWITCH_THRU_ALL_UIS ()
9532 target_terminal::ours_for_output ();
9533 gdb_printf (_("[Switching to %s]\n"),
9534 target_pid_to_str (inferior_ptid).c_str ());
9535 annotate_thread_changed ();
9539 update_previous_thread ();
9542 if (last.kind () == TARGET_WAITKIND_NO_RESUMED
9543 || last.kind () == TARGET_WAITKIND_THREAD_EXITED)
9545 stop_print_frame = false;
9547 SWITCH_THRU_ALL_UIS ()
9548 if (current_ui->prompt_state == PROMPT_BLOCKED)
9550 target_terminal::ours_for_output ();
9551 if (last.kind () == TARGET_WAITKIND_NO_RESUMED)
9552 gdb_printf (_("No unwaited-for children left.\n"));
9553 else if (last.kind () == TARGET_WAITKIND_THREAD_EXITED)
9554 gdb_printf (_("Command aborted, thread exited.\n"));
9555 else
9556 gdb_assert_not_reached ("unhandled");
9560 /* Note: this depends on the update_thread_list call above. */
9561 maybe_remove_breakpoints ();
9563 /* If an auto-display called a function and that got a signal,
9564 delete that auto-display to avoid an infinite recursion. */
9566 if (stopped_by_random_signal)
9567 disable_current_display ();
9569 SWITCH_THRU_ALL_UIS ()
9571 async_enable_stdin ();
9574 /* Let the user/frontend see the threads as stopped. */
9575 maybe_finish_thread_state.reset ();
9577 /* Select innermost stack frame - i.e., current frame is frame 0,
9578 and current location is based on that. Handle the case where the
9579 dummy call is returning after being stopped. E.g. the dummy call
9580 previously hit a breakpoint. (If the dummy call returns
9581 normally, we won't reach here.) Do this before the stop hook is
9582 run, so that it doesn't get to see the temporary dummy frame,
9583 which is not where we'll present the stop. */
9584 if (has_stack_frames ())
9586 if (stop_stack_dummy == STOP_STACK_DUMMY)
9588 /* Pop the empty frame that contains the stack dummy. This
9589 also restores inferior state prior to the call (struct
9590 infcall_suspend_state). */
9591 frame_info_ptr frame = get_current_frame ();
9593 gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
9594 frame_pop (frame);
9595 /* frame_pop calls reinit_frame_cache as the last thing it
9596 does which means there's now no selected frame. */
9599 select_frame (get_current_frame ());
9601 /* Set the current source location. */
9602 set_current_sal_from_frame (get_current_frame ());
9605 /* Look up the hook_stop and run it (CLI internally handles problem
9606 of stop_command's pre-hook not existing). */
9607 stop_context saved_context;
9611 execute_cmd_pre_hook (stop_command);
9613 catch (const gdb_exception_error &ex)
9615 exception_fprintf (gdb_stderr, ex,
9616 "Error while running hook_stop:\n");
9619 /* If the stop hook resumes the target, then there's no point in
9620 trying to notify about the previous stop; its context is
9621 gone. Likewise if the command switches thread or inferior --
9622 the observers would print a stop for the wrong
9623 thread/inferior. */
9624 if (saved_context.changed ())
9625 return true;
9627 /* Notify observers about the stop. This is where the interpreters
9628 print the stop event. */
9629 notify_normal_stop ((inferior_ptid != null_ptid
9630 ? inferior_thread ()->control.stop_bpstat
9631 : nullptr),
9632 stop_print_frame);
9633 annotate_stopped ();
9635 if (target_has_execution ())
9637 if (last.kind () != TARGET_WAITKIND_SIGNALLED
9638 && last.kind () != TARGET_WAITKIND_EXITED
9639 && last.kind () != TARGET_WAITKIND_NO_RESUMED
9640 && last.kind () != TARGET_WAITKIND_THREAD_EXITED)
9641 /* Delete the breakpoint we stopped at, if it wants to be deleted.
9642 Delete any breakpoint that is to be deleted at the next stop. */
9643 breakpoint_auto_delete (inferior_thread ()->control.stop_bpstat);
9646 return false;
9650 signal_stop_state (int signo)
9652 return signal_stop[signo];
9656 signal_print_state (int signo)
9658 return signal_print[signo];
9662 signal_pass_state (int signo)
9664 return signal_program[signo];
9667 static void
9668 signal_cache_update (int signo)
9670 if (signo == -1)
9672 for (signo = 0; signo < (int) GDB_SIGNAL_LAST; signo++)
9673 signal_cache_update (signo);
9675 return;
9678 signal_pass[signo] = (signal_stop[signo] == 0
9679 && signal_print[signo] == 0
9680 && signal_program[signo] == 1
9681 && signal_catch[signo] == 0);
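/* Worked example (illustrative): after "handle SIGUSR1 nostop noprint pass"
   the SIGUSR1 entries are signal_stop = 0, signal_print = 0 and
   signal_program = 1, so -- assuming no signal catchpoint, i.e.
   signal_catch = 0 -- signal_pass becomes 1 and the target side is allowed
   to deliver the signal to the inferior without reporting it to GDB.  */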
9685 signal_stop_update (int signo, int state)
9687 int ret = signal_stop[signo];
9689 signal_stop[signo] = state;
9690 signal_cache_update (signo);
9691 return ret;
9695 signal_print_update (int signo, int state)
9697 int ret = signal_print[signo];
9699 signal_print[signo] = state;
9700 signal_cache_update (signo);
9701 return ret;
9705 signal_pass_update (int signo, int state)
9707 int ret = signal_program[signo];
9709 signal_program[signo] = state;
9710 signal_cache_update (signo);
9711 return ret;
9714 /* Update the global 'signal_catch' from INFO and notify the
9715 target. */
9717 void
9718 signal_catch_update (const unsigned int *info)
9720 int i;
9722 for (i = 0; i < GDB_SIGNAL_LAST; ++i)
9723 signal_catch[i] = info[i] > 0;
9724 signal_cache_update (-1);
9725 target_pass_signals (signal_pass);
9728 static void
9729 sig_print_header (void)
9731 gdb_printf (_("Signal Stop\tPrint\tPass "
9732 "to program\tDescription\n"));
9735 static void
9736 sig_print_info (enum gdb_signal oursig)
9738 const char *name = gdb_signal_to_name (oursig);
9739 int name_padding = 13 - strlen (name);
9741 if (name_padding <= 0)
9742 name_padding = 0;
9744 gdb_printf ("%s", name);
9745 gdb_printf ("%*.*s ", name_padding, name_padding, " ");
9746 gdb_printf ("%s\t", signal_stop[oursig] ? "Yes" : "No");
9747 gdb_printf ("%s\t", signal_print[oursig] ? "Yes" : "No");
9748 gdb_printf ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
9749 gdb_printf ("%s\n", gdb_signal_to_string (oursig));
9752 /* Specify how various signals in the inferior should be handled. */
9754 static void
9755 handle_command (const char *args, int from_tty)
9757 int digits, wordlen;
9758 int sigfirst, siglast;
9759 enum gdb_signal oursig;
9760 int allsigs;
9762 if (args == nullptr)
9764 error_no_arg (_("signal to handle"));
9767 /* Allocate and zero an array of flags for which signals to handle. */
9769 const size_t nsigs = GDB_SIGNAL_LAST;
9770 unsigned char sigs[nsigs] {};
9772 /* Break the command line up into args. */
9774 gdb_argv built_argv (args);
9776 /* Walk through the args, looking for signal oursigs, signal names, and
9777 actions. Signal numbers and signal names may be interspersed with
9778 actions, with the actions being performed for all signals cumulatively
9779 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
9781 for (char *arg : built_argv)
9783 wordlen = strlen (arg);
9784 for (digits = 0; isdigit (arg[digits]); digits++)
9787 allsigs = 0;
9788 sigfirst = siglast = -1;
9790 if (wordlen >= 1 && !strncmp (arg, "all", wordlen))
9792 /* Apply action to all signals except those used by the
9793 debugger. Silently skip those. */
9794 allsigs = 1;
9795 sigfirst = 0;
9796 siglast = nsigs - 1;
9798 else if (wordlen >= 1 && !strncmp (arg, "stop", wordlen))
9800 SET_SIGS (nsigs, sigs, signal_stop);
9801 SET_SIGS (nsigs, sigs, signal_print);
9803 else if (wordlen >= 1 && !strncmp (arg, "ignore", wordlen))
9805 UNSET_SIGS (nsigs, sigs, signal_program);
9807 else if (wordlen >= 2 && !strncmp (arg, "print", wordlen))
9809 SET_SIGS (nsigs, sigs, signal_print);
9811 else if (wordlen >= 2 && !strncmp (arg, "pass", wordlen))
9813 SET_SIGS (nsigs, sigs, signal_program);
9815 else if (wordlen >= 3 && !strncmp (arg, "nostop", wordlen))
9817 UNSET_SIGS (nsigs, sigs, signal_stop);
9819 else if (wordlen >= 3 && !strncmp (arg, "noignore", wordlen))
9821 SET_SIGS (nsigs, sigs, signal_program);
9823 else if (wordlen >= 4 && !strncmp (arg, "noprint", wordlen))
9825 UNSET_SIGS (nsigs, sigs, signal_print);
9826 UNSET_SIGS (nsigs, sigs, signal_stop);
9828 else if (wordlen >= 4 && !strncmp (arg, "nopass", wordlen))
9830 UNSET_SIGS (nsigs, sigs, signal_program);
9832 else if (digits > 0)
9834 /* It is numeric. The numeric signal refers to our own
9835 internal signal numbering from target.h, not to host/target
9836 signal number. This is a feature; users really should be
9837 using symbolic names anyway, and the common ones like
9838 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
9840 sigfirst = siglast = (int)
9841 gdb_signal_from_command (atoi (arg));
9842 if (arg[digits] == '-')
9844 siglast = (int)
9845 gdb_signal_from_command (atoi (arg + digits + 1));
9847 if (sigfirst > siglast)
9849 /* Bet he didn't figure we'd think of this case... */
9850 std::swap (sigfirst, siglast);
9853 else
9855 oursig = gdb_signal_from_name (arg);
9856 if (oursig != GDB_SIGNAL_UNKNOWN)
9858 sigfirst = siglast = (int) oursig;
9860 else
9862 /* Not a number and not a recognized flag word => complain. */
9863 error (_("Unrecognized or ambiguous flag word: \"%s\"."), arg);
9867 /* If any signal numbers or symbol names were found, set flags for
9868 which signals to apply actions to. */
9870 for (int signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
9872 switch ((enum gdb_signal) signum)
9874 case GDB_SIGNAL_TRAP:
9875 case GDB_SIGNAL_INT:
9876 if (!allsigs && !sigs[signum])
9878 if (query (_("%s is used by the debugger.\n\
9879 Are you sure you want to change it? "),
9880 gdb_signal_to_name ((enum gdb_signal) signum)))
9882 sigs[signum] = 1;
9884 else
9885 gdb_printf (_("Not confirmed, unchanged.\n"));
9887 break;
9888 case GDB_SIGNAL_0:
9889 case GDB_SIGNAL_DEFAULT:
9890 case GDB_SIGNAL_UNKNOWN:
9891 /* Make sure that "all" doesn't print these. */
9892 break;
9893 default:
9894 sigs[signum] = 1;
9895 break;
9900 for (int signum = 0; signum < nsigs; signum++)
9901 if (sigs[signum])
9903 signal_cache_update (-1);
9904 target_pass_signals (signal_pass);
9905 target_program_signals (signal_program);
9907 if (from_tty)
9909 /* Show the results. */
9910 sig_print_header ();
9911 for (; signum < nsigs; signum++)
9912 if (sigs[signum])
9913 sig_print_info ((enum gdb_signal) signum);
9916 break;
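/* Illustrative CLI usage of the command implemented above (the signal
   names and numbers are examples only):

     (gdb) handle SIGUSR1 nostop noprint pass
     (gdb) handle 5-9 stop print
     (gdb) handle all nopass

   Each invocation updates the signal_stop / signal_print / signal_program
   tables for the selected signals and then pushes the result to the target
   via signal_cache_update, target_pass_signals and target_program_signals,
   as done at the end of the loop above.  */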
9920 /* Complete the "handle" command. */
9922 static void
9923 handle_completer (struct cmd_list_element *ignore,
9924 completion_tracker &tracker,
9925 const char *text, const char *word)
9927 static const char * const keywords[] =
9929 "all",
9930 "stop",
9931 "ignore",
9932 "print",
9933 "pass",
9934 "nostop",
9935 "noignore",
9936 "noprint",
9937 "nopass",
9938 nullptr,
9941 signal_completer (ignore, tracker, text, word);
9942 complete_on_enum (tracker, keywords, word, word);
9945 enum gdb_signal
9946 gdb_signal_from_command (int num)
9948 if (num >= 1 && num <= 15)
9949 return (enum gdb_signal) num;
9950 error (_("Only signals 1-15 are valid as numeric signals.\n\
9951 Use \"info signals\" for a list of symbolic signals."));
9954 /* Print current contents of the tables set by the handle command.
9955 It is possible we should just be printing signals actually used
9956 by the current target (but for things to work right when switching
9957 targets, all signals should be in the signal tables). */
9959 static void
9960 info_signals_command (const char *signum_exp, int from_tty)
9962 enum gdb_signal oursig;
9964 sig_print_header ();
9966 if (signum_exp)
9968 /* First see if this is a symbol name. */
9969 oursig = gdb_signal_from_name (signum_exp);
9970 if (oursig == GDB_SIGNAL_UNKNOWN)
9972 /* No, try numeric. */
9973 oursig =
9974 gdb_signal_from_command (parse_and_eval_long (signum_exp));
9976 sig_print_info (oursig);
9977 return;
9980 gdb_printf ("\n");
9981 /* These ugly casts brought to you by the native VAX compiler. */
9982 for (oursig = GDB_SIGNAL_FIRST;
9983 (int) oursig < (int) GDB_SIGNAL_LAST;
9984 oursig = (enum gdb_signal) ((int) oursig + 1))
9986 QUIT;
9988 if (oursig != GDB_SIGNAL_UNKNOWN
9989 && oursig != GDB_SIGNAL_DEFAULT && oursig != GDB_SIGNAL_0)
9990 sig_print_info (oursig);
9993 gdb_printf (_("\nUse the \"handle\" command "
9994 "to change these tables.\n"));
9997 /* The $_siginfo convenience variable is a bit special. We don't know
9998 for sure the type of the value until we actually have a chance to
9999 fetch the data. The type can change depending on gdbarch, so it is
10000 also dependent on which thread you have selected. We handle this by:
10002 1. making $_siginfo be an internalvar that creates a new value on
10003 access.
10005 2. making the value of $_siginfo be an lval_computed value. */
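/* For example (illustrative, using the Linux siginfo field names), this is
   what lets a user inspect the last signal's details lazily:

     (gdb) print $_siginfo.si_signo
     (gdb) print $_siginfo._sifields._sigfault.si_addr

   The data is only transferred from the target (see siginfo_value_read
   below) when such an expression actually accesses the value.  */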
10007 /* This function implements the lval_computed support for reading a
10008 $_siginfo value. */
10010 static void
10011 siginfo_value_read (struct value *v)
10013 LONGEST transferred;
10015 /* If we can access registers, then we can also access $_siginfo,
10016 and vice versa. */
10017 validate_registers_access ();
10019 transferred =
10020 target_read (current_inferior ()->top_target (),
10021 TARGET_OBJECT_SIGNAL_INFO,
10022 nullptr,
10023 v->contents_all_raw ().data (),
10024 v->offset (),
10025 v->type ()->length ());
10027 if (transferred != v->type ()->length ())
10028 error (_("Unable to read siginfo"));
10031 /* This function implements the lval_computed support for writing a
10032 $_siginfo value. */
10034 static void
10035 siginfo_value_write (struct value *v, struct value *fromval)
10037 LONGEST transferred;
10039 /* If we can access registers, then we can also access $_siginfo,
10040 and vice versa. */
10041 validate_registers_access ();
10043 transferred = target_write (current_inferior ()->top_target (),
10044 TARGET_OBJECT_SIGNAL_INFO,
10045 nullptr,
10046 fromval->contents_all_raw ().data (),
10047 v->offset (),
10048 fromval->type ()->length ());
10050 if (transferred != fromval->type ()->length ())
10051 error (_("Unable to write siginfo"));
10054 static const struct lval_funcs siginfo_value_funcs =
10056 siginfo_value_read,
10057 siginfo_value_write
10060 /* Return a new value with the correct type for the siginfo object of
10061 the current thread using architecture GDBARCH. Return a void value
10062 if there's no object available. */
10064 static struct value *
10065 siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var,
10066 void *ignore)
10068 if (target_has_stack ()
10069 && inferior_ptid != null_ptid
10070 && gdbarch_get_siginfo_type_p (gdbarch))
10072 struct type *type = gdbarch_get_siginfo_type (gdbarch);
10074 return value::allocate_computed (type, &siginfo_value_funcs, nullptr);
10077 return value::allocate (builtin_type (gdbarch)->builtin_void);
10081 /* infcall_suspend_state contains state about the program itself like its
10082 registers and any signal it received when it last stopped.
10083 This state must be restored regardless of how the inferior function call
10084 ends (either successfully, or after it hits a breakpoint or signal)
10085 if the program is to properly continue where it left off. */
10087 class infcall_suspend_state
10089 public:
10090 /* Capture state from GDBARCH, TP, and REGCACHE that must be restored
10091 once the inferior function call has finished. */
10092 infcall_suspend_state (struct gdbarch *gdbarch,
10093 const struct thread_info *tp,
10094 struct regcache *regcache)
10095 : m_registers (new readonly_detached_regcache (*regcache))
10097 tp->save_suspend_to (m_thread_suspend);
10099 gdb::unique_xmalloc_ptr<gdb_byte> siginfo_data;
10101 if (gdbarch_get_siginfo_type_p (gdbarch))
10103 struct type *type = gdbarch_get_siginfo_type (gdbarch);
10104 size_t len = type->length ();
10106 siginfo_data.reset ((gdb_byte *) xmalloc (len));
10108 if (target_read (current_inferior ()->top_target (),
10109 TARGET_OBJECT_SIGNAL_INFO, nullptr,
10110 siginfo_data.get (), 0, len) != len)
10112 /* Errors ignored. */
10113 siginfo_data.reset (nullptr);
10117 if (siginfo_data)
10119 m_siginfo_gdbarch = gdbarch;
10120 m_siginfo_data = std::move (siginfo_data);
10124 /* Return a pointer to the stored register state. */
10126 readonly_detached_regcache *registers () const
10128 return m_registers.get ();
10131 /* Restores the stored state into GDBARCH, TP, and REGCACHE. */
10133 void restore (struct gdbarch *gdbarch,
10134 struct thread_info *tp,
10135 struct regcache *regcache) const
10137 tp->restore_suspend_from (m_thread_suspend);
10139 if (m_siginfo_gdbarch == gdbarch)
10141 struct type *type = gdbarch_get_siginfo_type (gdbarch);
10143 /* Errors ignored. */
10144 target_write (current_inferior ()->top_target (),
10145 TARGET_OBJECT_SIGNAL_INFO, nullptr,
10146 m_siginfo_data.get (), 0, type->length ());
10149 /* The inferior can be gone if the user types "print exit(0)"
10150 (and perhaps other times). */
10151 if (target_has_execution ())
10152 /* NB: The register write goes through to the target. */
10153 regcache->restore (registers ());
10156 private:
10157 /* How the current thread stopped before the inferior function call was
10158 executed. */
10159 struct thread_suspend_state m_thread_suspend;
10161 /* The registers before the inferior function call was executed. */
10162 std::unique_ptr<readonly_detached_regcache> m_registers;
10164 /* Format of SIGINFO_DATA or NULL if it is not present. */
10165 struct gdbarch *m_siginfo_gdbarch = nullptr;
10167 /* The inferior format depends on SIGINFO_GDBARCH, and the data has a
10168 length of gdbarch_get_siginfo_type ()->length (). For a different
10169 gdbarch the content would be invalid. */
10170 gdb::unique_xmalloc_ptr<gdb_byte> m_siginfo_data;
10173 infcall_suspend_state_up
10174 save_infcall_suspend_state ()
10176 struct thread_info *tp = inferior_thread ();
10177 regcache *regcache = get_thread_regcache (tp);
10178 struct gdbarch *gdbarch = regcache->arch ();
10180 infcall_suspend_state_up inf_state
10181 (new struct infcall_suspend_state (gdbarch, tp, regcache));
10183 /* Having saved the current state, adjust the thread state, discarding
10184 any stop signal information. The stop signal is not useful when
10185 starting an inferior function call, and run_inferior_call will not use
10186 the signal due to its `proceed' call with GDB_SIGNAL_0. */
10187 tp->set_stop_signal (GDB_SIGNAL_0);
10189 return inf_state;
10192 /* Restore inferior session state to INF_STATE. */
10194 void
10195 restore_infcall_suspend_state (struct infcall_suspend_state *inf_state)
10197 struct thread_info *tp = inferior_thread ();
10198 regcache *regcache = get_thread_regcache (inferior_thread ());
10199 struct gdbarch *gdbarch = regcache->arch ();
10201 inf_state->restore (gdbarch, tp, regcache);
10202 discard_infcall_suspend_state (inf_state);
10205 void
10206 discard_infcall_suspend_state (struct infcall_suspend_state *inf_state)
10208 delete inf_state;
10211 readonly_detached_regcache *
10212 get_infcall_suspend_state_regcache (struct infcall_suspend_state *inf_state)
10214 return inf_state->registers ();
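/* A rough sketch of how these are meant to be paired (illustrative only;
   the real callers, e.g. in infcall.c, add scope-exit guards and error
   handling):

     infcall_suspend_state_up suspend = save_infcall_suspend_state ();
     ... set up the dummy frame and let the inferior call run ...
     restore_infcall_suspend_state (suspend.release ());

   restore_infcall_suspend_state restores the thread and register state and
   then frees it, so the pointer must not be reused; a caller that wants to
   keep the post-call state instead frees it with
   discard_infcall_suspend_state.  */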
10217 /* infcall_control_state contains state regarding gdb's control of the
10218 inferior itself like stepping control. It also contains session state like
10219 the user's currently selected frame. */
10221 struct infcall_control_state
10223 struct thread_control_state thread_control;
10224 struct inferior_control_state inferior_control;
10226 /* Other fields: */
10227 enum stop_stack_kind stop_stack_dummy = STOP_NONE;
10228 int stopped_by_random_signal = 0;
10230 /* ID and level of the selected frame when the inferior function
10231 call was made. */
10232 struct frame_id selected_frame_id {};
10233 int selected_frame_level = -1;
10236 /* Save all of the information associated with the inferior<==>gdb
10237 connection. */
10239 infcall_control_state_up
10240 save_infcall_control_state ()
10242 infcall_control_state_up inf_status (new struct infcall_control_state);
10243 struct thread_info *tp = inferior_thread ();
10244 struct inferior *inf = current_inferior ();
10246 inf_status->thread_control = tp->control;
10247 inf_status->inferior_control = inf->control;
10249 tp->control.step_resume_breakpoint = nullptr;
10250 tp->control.exception_resume_breakpoint = nullptr;
10252 /* Save original bpstat chain to INF_STATUS; replace it in TP with copy of
10253 chain. If caller's caller is walking the chain, they'll be happier if we
10254 hand them back the original chain when restore_infcall_control_state is
10255 called. */
10256 tp->control.stop_bpstat = bpstat_copy (tp->control.stop_bpstat);
10258 /* Other fields: */
10259 inf_status->stop_stack_dummy = stop_stack_dummy;
10260 inf_status->stopped_by_random_signal = stopped_by_random_signal;
10262 save_selected_frame (&inf_status->selected_frame_id,
10263 &inf_status->selected_frame_level);
10265 return inf_status;
10268 /* Restore inferior session state to INF_STATUS. */
10270 void
10271 restore_infcall_control_state (struct infcall_control_state *inf_status)
10273 struct thread_info *tp = inferior_thread ();
10274 struct inferior *inf = current_inferior ();
10276 if (tp->control.step_resume_breakpoint)
10277 tp->control.step_resume_breakpoint->disposition = disp_del_at_next_stop;
10279 if (tp->control.exception_resume_breakpoint)
10280 tp->control.exception_resume_breakpoint->disposition
10281 = disp_del_at_next_stop;
10283 /* Handle the bpstat_copy of the chain. */
10284 bpstat_clear (&tp->control.stop_bpstat);
10286 tp->control = inf_status->thread_control;
10287 inf->control = inf_status->inferior_control;
10289 /* Other fields: */
10290 stop_stack_dummy = inf_status->stop_stack_dummy;
10291 stopped_by_random_signal = inf_status->stopped_by_random_signal;
10293 if (target_has_stack ())
10295 restore_selected_frame (inf_status->selected_frame_id,
10296 inf_status->selected_frame_level);
10299 delete inf_status;
10302 void
10303 discard_infcall_control_state (struct infcall_control_state *inf_status)
10305 if (inf_status->thread_control.step_resume_breakpoint)
10306 inf_status->thread_control.step_resume_breakpoint->disposition
10307 = disp_del_at_next_stop;
10309 if (inf_status->thread_control.exception_resume_breakpoint)
10310 inf_status->thread_control.exception_resume_breakpoint->disposition
10311 = disp_del_at_next_stop;
10313 /* See save_infcall_control_state for info on stop_bpstat. */
10314 bpstat_clear (&inf_status->thread_control.stop_bpstat);
10316 delete inf_status;
10319 /* See infrun.h. */
10321 void
10322 clear_exit_convenience_vars (void)
10324 clear_internalvar (lookup_internalvar ("_exitsignal"));
10325 clear_internalvar (lookup_internalvar ("_exitcode"));
10329 /* User interface for reverse debugging:
10330 Set exec-direction / show exec-direction commands
10331 (returns error unless target implements to_set_exec_direction method). */
10333 enum exec_direction_kind execution_direction = EXEC_FORWARD;
10334 static const char exec_forward[] = "forward";
10335 static const char exec_reverse[] = "reverse";
10336 static const char *exec_direction = exec_forward;
10337 static const char *const exec_direction_names[] = {
10338 exec_forward,
10339 exec_reverse,
10340 nullptr
10343 static void
10344 set_exec_direction_func (const char *args, int from_tty,
10345 struct cmd_list_element *cmd)
10347 if (target_can_execute_reverse ())
10349 if (!strcmp (exec_direction, exec_forward))
10350 execution_direction = EXEC_FORWARD;
10351 else if (!strcmp (exec_direction, exec_reverse))
10352 execution_direction = EXEC_REVERSE;
10354 else
10356 exec_direction = exec_forward;
10357 error (_("Target does not support this operation."));
10361 static void
10362 show_exec_direction_func (struct ui_file *out, int from_tty,
10363 struct cmd_list_element *cmd, const char *value)
10365 switch (execution_direction) {
10366 case EXEC_FORWARD:
10367 gdb_printf (out, _("Forward.\n"));
10368 break;
10369 case EXEC_REVERSE:
10370 gdb_printf (out, _("Reverse.\n"));
10371 break;
10372 default:
10373 internal_error (_("bogus execution_direction value: %d"),
10374 (int) execution_direction);
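/* Illustrative CLI usage (requires a target that can execute in reverse,
   for instance after enabling process record):

     (gdb) record
     (gdb) set exec-direction reverse
     (gdb) step                       # now steps backwards
     (gdb) set exec-direction forward

   On targets without reverse execution support, set_exec_direction_func
   above rejects the change and resets the variable to "forward".  */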
10378 static void
10379 show_schedule_multiple (struct ui_file *file, int from_tty,
10380 struct cmd_list_element *c, const char *value)
10382 gdb_printf (file, _("Resuming the execution of threads "
10383 "of all processes is %s.\n"), value);
10386 /* Implementation of `siginfo' variable. */
10388 static const struct internalvar_funcs siginfo_funcs =
10390 siginfo_make_value,
10391 nullptr,
10394 /* Callback for infrun's target events source. This is marked when a
10395 thread has a pending status to process. */
10397 static void
10398 infrun_async_inferior_event_handler (gdb_client_data data)
10400 clear_async_event_handler (infrun_async_inferior_event_token);
10401 inferior_event_handler (INF_REG_EVENT);

#if GDB_SELF_TEST
namespace selftests
{

/* Verify that when two threads with the same ptid exist (from two different
   targets) and one of them changes ptid, we only update inferior_ptid if
   it is appropriate.  */

static void
infrun_thread_ptid_changed ()
{
  gdbarch *arch = current_inferior ()->arch ();

  /* The thread which inferior_ptid represents changes ptid.  */
  {
    scoped_restore_current_pspace_and_thread restore;

    scoped_mock_context<test_target_ops> target1 (arch);
    scoped_mock_context<test_target_ops> target2 (arch);

    ptid_t old_ptid (111, 222);
    ptid_t new_ptid (111, 333);

    target1.mock_inferior.pid = old_ptid.pid ();
    target1.mock_thread.ptid = old_ptid;
    target1.mock_inferior.ptid_thread_map.clear ();
    target1.mock_inferior.ptid_thread_map[old_ptid] = &target1.mock_thread;

    target2.mock_inferior.pid = old_ptid.pid ();
    target2.mock_thread.ptid = old_ptid;
    target2.mock_inferior.ptid_thread_map.clear ();
    target2.mock_inferior.ptid_thread_map[old_ptid] = &target2.mock_thread;

    auto restore_inferior_ptid = make_scoped_restore (&inferior_ptid, old_ptid);
    set_current_inferior (&target1.mock_inferior);

    thread_change_ptid (&target1.mock_target, old_ptid, new_ptid);

    gdb_assert (inferior_ptid == new_ptid);
  }

  /* A thread with the same ptid as inferior_ptid, but from another target,
     changes ptid.  */
  {
    scoped_restore_current_pspace_and_thread restore;

    scoped_mock_context<test_target_ops> target1 (arch);
    scoped_mock_context<test_target_ops> target2 (arch);

    ptid_t old_ptid (111, 222);
    ptid_t new_ptid (111, 333);

    target1.mock_inferior.pid = old_ptid.pid ();
    target1.mock_thread.ptid = old_ptid;
    target1.mock_inferior.ptid_thread_map.clear ();
    target1.mock_inferior.ptid_thread_map[old_ptid] = &target1.mock_thread;

    target2.mock_inferior.pid = old_ptid.pid ();
    target2.mock_thread.ptid = old_ptid;
    target2.mock_inferior.ptid_thread_map.clear ();
    target2.mock_inferior.ptid_thread_map[old_ptid] = &target2.mock_thread;

    auto restore_inferior_ptid = make_scoped_restore (&inferior_ptid, old_ptid);
    set_current_inferior (&target2.mock_inferior);

    thread_change_ptid (&target1.mock_target, old_ptid, new_ptid);

    gdb_assert (inferior_ptid == old_ptid);
  }
}

} /* namespace selftests */

#endif /* GDB_SELF_TEST */

void _initialize_infrun ();
void
_initialize_infrun ()
{
  struct cmd_list_element *c;

  /* Register extra event sources in the event loop.  */
  infrun_async_inferior_event_token
    = create_async_event_handler (infrun_async_inferior_event_handler, nullptr,
                                  "infrun");

  cmd_list_element *info_signals_cmd
    = add_info ("signals", info_signals_command, _("\
What debugger does when program gets various signals.\n\
Specify a signal as argument to print info on that signal only."));
  add_info_alias ("handle", info_signals_cmd, 0);

  c = add_com ("handle", class_run, handle_command, _("\
Specify how to handle signals.\n\
Usage: handle SIGNAL [ACTIONS]\n\
Args are signals and actions to apply to those signals.\n\
If no actions are specified, the current settings for the specified signals\n\
will be displayed instead.\n\
\n\
Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
from 1-15 are allowed for compatibility with old versions of GDB.\n\
Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
The special arg \"all\" is recognized to mean all signals except those\n\
used by the debugger, typically SIGTRAP and SIGINT.\n\
\n\
Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
\"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
Stop means reenter debugger if this signal happens (implies print).\n\
Print means print a message if this signal happens.\n\
Pass means let program see this signal; otherwise program doesn't know.\n\
Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
Pass and Stop may be combined.\n\
\n\
Multiple signals may be specified.  Signal numbers and signal names\n\
may be interspersed with actions, with the actions being performed for\n\
all signals cumulatively specified."));
  set_cmd_completer (c, handle_completer);
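
  /* Examples of the "handle" usage described above (the signal choices
     are only illustrative):

       (gdb) handle SIGALRM nostop noprint pass
       (gdb) handle SIGUSR1 stop print
       (gdb) info signals SIGUSR1

     The first form quietly forwards SIGALRM to the program; the second
     re-enters the debugger and prints a message when SIGUSR1 arrives.  */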

  stop_command = add_cmd ("stop", class_obscure,
                          not_just_help_class_command, _("\
There is no `stop' command, but you can set a hook on `stop'.\n\
This allows you to set a list of commands to be run each time execution\n\
of the program stops."), &cmdlist);
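
  /* One possible hook, defined from the GDB prompt (the command used
     inside the hook is just an example):

       (gdb) define hook-stop
       > info registers
       > end

     The listed commands then run every time the program stops.  */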

  add_setshow_boolean_cmd
    ("infrun", class_maintenance, &debug_infrun,
     _("Set inferior debugging."),
     _("Show inferior debugging."),
     _("When non-zero, inferior specific debugging is enabled."),
     nullptr, show_debug_infrun, &setdebuglist, &showdebuglist);

  add_setshow_boolean_cmd ("non-stop", no_class,
                           &non_stop_1, _("\
Set whether gdb controls the inferior in non-stop mode."), _("\
Show whether gdb controls the inferior in non-stop mode."), _("\
When debugging a multi-threaded program and this setting is\n\
off (the default, also called all-stop mode), when one thread stops\n\
(for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
all other threads in the program while you interact with the thread of\n\
interest.  When you continue or step a thread, you can allow the other\n\
threads to run, or have them remain stopped, but while you inspect any\n\
thread's state, all threads stop.\n\
\n\
In non-stop mode, when one thread stops, other threads can continue\n\
to run freely.  You'll be able to step each thread independently,\n\
leave it stopped or free to run as needed."),
                           set_non_stop,
                           show_non_stop,
                           &setlist,
                           &showlist);
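
  /* "non-stop" takes effect for the next run, so it must be selected
     before the program is started, e.g.:

       (gdb) set non-stop on
       (gdb) run &

     "run &" is the background form, which leaves the prompt usable
     while the threads execute.  */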

  for (size_t i = 0; i < GDB_SIGNAL_LAST; i++)
    {
      signal_stop[i] = 1;
      signal_print[i] = 1;
      signal_program[i] = 1;
      signal_catch[i] = 0;
    }

  /* Signals caused by debugger's own actions should not be given to
     the program afterwards.

     Do not deliver GDB_SIGNAL_TRAP by default, except when the user
     explicitly specifies that it should be delivered to the target
     program.  Typically, that would occur when a user is debugging a
     target monitor on a simulator: the target monitor sets a
     breakpoint; the simulator encounters this breakpoint and halts
     the simulation handing control to GDB; GDB, noting that the stop
     address doesn't map to any known breakpoint, returns control back
     to the simulator; the simulator then delivers the hardware
     equivalent of a GDB_SIGNAL_TRAP to the program being
     debugged.  */
  signal_program[GDB_SIGNAL_TRAP] = 0;
  signal_program[GDB_SIGNAL_INT] = 0;

  /* Signals that are not errors should not normally enter the debugger.  */
  signal_stop[GDB_SIGNAL_ALRM] = 0;
  signal_print[GDB_SIGNAL_ALRM] = 0;
  signal_stop[GDB_SIGNAL_VTALRM] = 0;
  signal_print[GDB_SIGNAL_VTALRM] = 0;
  signal_stop[GDB_SIGNAL_PROF] = 0;
  signal_print[GDB_SIGNAL_PROF] = 0;
  signal_stop[GDB_SIGNAL_CHLD] = 0;
  signal_print[GDB_SIGNAL_CHLD] = 0;
  signal_stop[GDB_SIGNAL_IO] = 0;
  signal_print[GDB_SIGNAL_IO] = 0;
  signal_stop[GDB_SIGNAL_POLL] = 0;
  signal_print[GDB_SIGNAL_POLL] = 0;
  signal_stop[GDB_SIGNAL_URG] = 0;
  signal_print[GDB_SIGNAL_URG] = 0;
  signal_stop[GDB_SIGNAL_WINCH] = 0;
  signal_print[GDB_SIGNAL_WINCH] = 0;
  signal_stop[GDB_SIGNAL_PRIO] = 0;
  signal_print[GDB_SIGNAL_PRIO] = 0;

  /* These signals are used internally by user-level thread
     implementations.  (See signal(5) on Solaris.)  Like the above
     signals, a healthy program receives and handles them as part of
     its normal operation.  */
  signal_stop[GDB_SIGNAL_LWP] = 0;
  signal_print[GDB_SIGNAL_LWP] = 0;
  signal_stop[GDB_SIGNAL_WAITING] = 0;
  signal_print[GDB_SIGNAL_WAITING] = 0;
  signal_stop[GDB_SIGNAL_CANCEL] = 0;
  signal_print[GDB_SIGNAL_CANCEL] = 0;
  signal_stop[GDB_SIGNAL_LIBRT] = 0;
  signal_print[GDB_SIGNAL_LIBRT] = 0;

  /* Update cached state.  */
  signal_cache_update (-1);
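
  /* These defaults are what "info signals" reports at startup; they map
     onto the "handle" actions as signal_stop -> stop/nostop,
     signal_print -> print/noprint and signal_program -> pass/nopass.
     The GDB_SIGNAL_CHLD entries above, for instance, are equivalent to
     having issued "handle SIGCHLD nostop noprint".  */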

  add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
                            &stop_on_solib_events, _("\
Set stopping for shared library events."), _("\
Show stopping for shared library events."), _("\
If nonzero, gdb will give control to the user when the dynamic linker\n\
notifies gdb of shared library events.  The most common event of interest\n\
to the user would be loading/unloading of a new library."),
                            set_stop_on_solib_events,
                            show_stop_on_solib_events,
                            &setlist, &showlist);
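
  /* For example, "set stop-on-solib-events 1" makes GDB return to the
     prompt at each shared library load or unload, where "info
     sharedlibrary" can then be used to inspect what is mapped.  */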

  add_setshow_enum_cmd ("follow-fork-mode", class_run,
                        follow_fork_mode_kind_names,
                        &follow_fork_mode_string, _("\
Set debugger response to a program call of fork or vfork."), _("\
Show debugger response to a program call of fork or vfork."), _("\
A fork or vfork creates a new process.  follow-fork-mode can be:\n\
  parent - the original process is debugged after a fork\n\
  child  - the new process is debugged after a fork\n\
The unfollowed process will continue to run.\n\
By default, the debugger will follow the parent process."),
                        nullptr,
                        show_follow_fork_mode_string,
                        &setlist, &showlist);
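
  /* Typical use, e.g. to debug the child of a fork while the parent
     keeps running:

       (gdb) set follow-fork-mode child
       (gdb) run

     Combine with "set detach-on-fork off" (registered below) to keep
     both processes under GDB's control.  */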

  add_setshow_enum_cmd ("follow-exec-mode", class_run,
                        follow_exec_mode_names,
                        &follow_exec_mode_string, _("\
Set debugger response to a program call of exec."), _("\
Show debugger response to a program call of exec."), _("\
An exec call replaces the program image of a process.\n\
\n\
follow-exec-mode can be:\n\
\n\
  new - the debugger creates a new inferior and rebinds the process\n\
to this new inferior.  The program the process was running before\n\
the exec call can be restarted afterwards by restarting the original\n\
inferior.\n\
\n\
  same - the debugger keeps the process bound to the same inferior.\n\
The new executable image replaces the previous executable loaded in\n\
the inferior.  Restarting the inferior after the exec call restarts\n\
the executable the process was running after the exec call.\n\
\n\
By default, the debugger will use the same inferior."),
                        nullptr,
                        show_follow_exec_mode_string,
                        &setlist, &showlist);
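
  /* For example, "set follow-exec-mode new" keeps the pre-exec program
     around as the original inferior, so switching back to it
     ("inferior 1") and running again restarts that program.  */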

  add_setshow_enum_cmd ("scheduler-locking", class_run,
                        scheduler_enums, &scheduler_mode, _("\
Set mode for locking scheduler during execution."), _("\
Show mode for locking scheduler during execution."), _("\
off    == no locking (threads may preempt at any time)\n\
on     == full locking (no thread except the current thread may run)\n\
          This applies to both normal execution and replay mode.\n\
step   == scheduler locked during stepping commands (step, next, stepi, nexti).\n\
          In this mode, other threads may run during other commands.\n\
          This applies to both normal execution and replay mode.\n\
replay == scheduler locked in replay mode and unlocked during normal execution."),
                        set_schedlock_func,  /* traps on target vector */
                        show_scheduler_mode,
                        &setlist, &showlist);
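
  /* For example, "set scheduler-locking step" keeps other threads
     stopped while single-stepping ("step", "next", "stepi", "nexti")
     but lets them run again for commands such as "continue".  */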

  add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
Set mode for resuming threads of all processes."), _("\
Show mode for resuming threads of all processes."), _("\
When on, execution commands (such as 'continue' or 'next') resume all\n\
threads of all processes.  When off (which is the default), execution\n\
commands only resume the threads of the current process.  The set of\n\
threads that are resumed is further refined by the scheduler-locking\n\
mode (see help set scheduler-locking)."),
                           nullptr,
                           show_schedule_multiple,
                           &setlist, &showlist);
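
  /* For example, with two inferiors loaded, "set schedule-multiple on"
     makes a single "continue" resume the threads of both processes
     rather than only those of the current one.  */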

  add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
Set mode of the step operation."), _("\
Show mode of the step operation."), _("\
When set, doing a step over a function without debug line information\n\
will stop at the first instruction of that function.  Otherwise, the\n\
function is skipped and the step command stops at a different source line."),
                           nullptr,
                           show_step_stop_if_no_debug,
                           &setlist, &showlist);

  add_setshow_auto_boolean_cmd ("displaced-stepping", class_run,
                                &can_use_displaced_stepping, _("\
Set debugger's willingness to use displaced stepping."), _("\
Show debugger's willingness to use displaced stepping."), _("\
If on, gdb will use displaced stepping to step over breakpoints if it is\n\
supported by the target architecture.  If off, gdb will not use displaced\n\
stepping to step over breakpoints, even if such is supported by the target\n\
architecture.  If auto (which is the default), gdb will use displaced stepping\n\
if the target architecture supports it and non-stop mode is active, but will not\n\
use it in all-stop mode (see help set non-stop)."),
                                nullptr,
                                show_can_use_displaced_stepping,
                                &setlist, &showlist);
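
  /* For example, "set displaced-stepping on" requests out-of-line
     stepping over breakpoints whenever the architecture supports it,
     and "show displaced-stepping" reports the current choice.  */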

  add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
                        &exec_direction, _("Set direction of execution.\n\
Options are 'forward' or 'reverse'."),
                        _("Show direction of execution (forward/reverse)."),
                        _("Tells gdb whether to execute forward or backward."),
                        set_exec_direction_func, show_exec_direction_func,
                        &setlist, &showlist);
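
  /* Reverse execution needs a target that can run backwards; one way to
     get that is process record, e.g. (assuming "record" is supported
     for the current target):

       (gdb) record
       (gdb) set exec-direction reverse
       (gdb) step

     Each "step" then moves backwards until the direction is set back
     to "forward".  */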

  /* Set/show detach-on-fork: user-settable mode.  */

  add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
Set whether gdb will detach the child of a fork."), _("\
Show whether gdb will detach the child of a fork."), _("\
Tells gdb whether to detach the child of a fork."),
                           nullptr, nullptr, &setlist, &showlist);

  /* Set/show disable address space randomization mode.  */

  add_setshow_boolean_cmd ("disable-randomization", class_support,
                           &disable_randomization, _("\
Set disabling of debuggee's virtual address space randomization."), _("\
Show disabling of debuggee's virtual address space randomization."), _("\
When this mode is on (which is the default), randomization of the virtual\n\
address space is disabled.  Standalone programs run with the randomization\n\
enabled by default on some platforms."),
                           &set_disable_randomization,
                           &show_disable_randomization,
                           &setlist, &showlist);
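
  /* For example, "set disable-randomization off" lets the program run
     with the system's usual address space layout randomization, at the
     cost of addresses that are less reproducible from run to run.  */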

  /* ptid initializations */
  inferior_ptid = null_ptid;
  target_last_wait_ptid = minus_one_ptid;

  gdb::observers::thread_ptid_changed.attach (infrun_thread_ptid_changed,
                                              "infrun");
  gdb::observers::thread_stop_requested.attach (infrun_thread_stop_requested,
                                                "infrun");
  gdb::observers::inferior_exit.attach (infrun_inferior_exit, "infrun");
  gdb::observers::inferior_execd.attach (infrun_inferior_execd, "infrun");

  /* Explicitly create without lookup, since that tries to create a
     value with a void typed value, and when we get here, gdbarch
     isn't initialized yet.  At this point, we're quite sure there
     isn't another convenience variable of the same name.  */
  create_internalvar_type_lazy ("_siginfo", &siginfo_funcs, nullptr);
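
  /* The variable created here is read from the CLI as "print $_siginfo";
     its layout is target-dependent (on GNU/Linux it mirrors the
     system's siginfo_t, so fields such as $_siginfo.si_signo can be
     inspected).  */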

  add_setshow_boolean_cmd ("observer", no_class,
                           &observer_mode_1, _("\
Set whether gdb controls the inferior in observer mode."), _("\
Show whether gdb controls the inferior in observer mode."), _("\
In observer mode, GDB can get data from the inferior, but not\n\
affect its execution.  Registers and memory may not be changed,\n\
breakpoints may not be set, and the program cannot be interrupted\n\
or signalled."),
                           set_observer_mode,
                           show_observer_mode,
                           &setlist,
                           &showlist);
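
  /* For example, "set observer on" is appropriate when the program must
     keep running undisturbed: GDB will then refuse operations that
     would modify or interrupt it, as described in the help text above.  */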

#if GDB_SELF_TEST
  selftests::register_test ("infrun_thread_ptid_changed",
                            selftests::infrun_thread_ptid_changed);
#endif
}