gdb/infrun.c
1 /* Target-struct-independent code to start (run) and stop an inferior
2 process.
4 Copyright (C) 1986-2024 Free Software Foundation, Inc.
6 This file is part of GDB.
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21 #include "defs.h"
22 #include "displaced-stepping.h"
23 #include "infrun.h"
24 #include <ctype.h>
25 #include "symtab.h"
26 #include "frame.h"
27 #include "inferior.h"
28 #include "breakpoint.h"
29 #include "gdbcore.h"
30 #include "gdbcmd.h"
31 #include "target.h"
32 #include "target-connection.h"
33 #include "gdbthread.h"
34 #include "annotate.h"
35 #include "symfile.h"
36 #include "top.h"
37 #include "ui.h"
38 #include "inf-loop.h"
39 #include "regcache.h"
40 #include "value.h"
41 #include "observable.h"
42 #include "language.h"
43 #include "solib.h"
44 #include "main.h"
45 #include "block.h"
46 #include "mi/mi-common.h"
47 #include "event-top.h"
48 #include "record.h"
49 #include "record-full.h"
50 #include "inline-frame.h"
51 #include "jit.h"
52 #include "tracepoint.h"
53 #include "skip.h"
54 #include "probe.h"
55 #include "objfiles.h"
56 #include "completer.h"
57 #include "target-descriptions.h"
58 #include "target-dcache.h"
59 #include "terminal.h"
60 #include "solist.h"
61 #include "gdbsupport/event-loop.h"
62 #include "thread-fsm.h"
63 #include "gdbsupport/enum-flags.h"
64 #include "progspace-and-thread.h"
65 #include <optional>
66 #include "arch-utils.h"
67 #include "gdbsupport/scope-exit.h"
68 #include "gdbsupport/forward-scope-exit.h"
69 #include "gdbsupport/gdb_select.h"
70 #include <unordered_map>
71 #include "async-event.h"
72 #include "gdbsupport/selftest.h"
73 #include "scoped-mock-context.h"
74 #include "test-target.h"
75 #include "gdbsupport/common-debug.h"
76 #include "gdbsupport/buildargv.h"
77 #include "extension.h"
78 #include "disasm.h"
79 #include "interps.h"
81 /* Prototypes for local functions */
83 static void sig_print_info (enum gdb_signal);
85 static void sig_print_header (void);
87 static void follow_inferior_reset_breakpoints (void);
89 static bool currently_stepping (struct thread_info *tp);
91 static void insert_hp_step_resume_breakpoint_at_frame (frame_info_ptr);
93 static void insert_step_resume_breakpoint_at_caller (frame_info_ptr);
95 static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);
97 static bool maybe_software_singlestep (struct gdbarch *gdbarch);
99 static void resume (gdb_signal sig);
101 static void wait_for_inferior (inferior *inf);
103 static void restart_threads (struct thread_info *event_thread,
104 inferior *inf = nullptr);
106 static bool start_step_over (void);
108 static bool step_over_info_valid_p (void);
110 static bool schedlock_applies (struct thread_info *tp);
112 /* Asynchronous signal handler registered as event loop source for
113 when we have pending events ready to be passed to the core. */
114 static struct async_event_handler *infrun_async_inferior_event_token;
116 /* Stores whether infrun_async was previously enabled or disabled.
117 Starts off as -1, indicating "never enabled/disabled". */
118 static int infrun_is_async = -1;
119 static CORE_ADDR update_line_range_start (CORE_ADDR pc,
120 struct execution_control_state *ecs);
122 /* See infrun.h. */
124 void
125 infrun_async (int enable)
127 if (infrun_is_async != enable)
129 infrun_is_async = enable;
131 infrun_debug_printf ("enable=%d", enable);
133 if (enable)
134 mark_async_event_handler (infrun_async_inferior_event_token);
135 else
136 clear_async_event_handler (infrun_async_inferior_event_token);
140 /* See infrun.h. */
142 void
143 mark_infrun_async_event_handler (void)
145 mark_async_event_handler (infrun_async_inferior_event_token);
148 /* When set, stop the 'step' command if we enter a function which has
149 no line number information. The normal behavior is that we step
150 over such functions. */
151 bool step_stop_if_no_debug = false;
152 static void
153 show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
154 struct cmd_list_element *c, const char *value)
156 gdb_printf (file, _("Mode of the step operation is %s.\n"), value);
159 /* proceed and normal_stop use this to notify the user when the
160 inferior stopped in a different thread than it had been running in.
161 It can also be used to find for which thread normal_stop last
162 reported a stop. */
163 static thread_info_ref previous_thread;
165 /* See infrun.h. */
167 void
168 update_previous_thread ()
170 if (inferior_ptid == null_ptid)
171 previous_thread = nullptr;
172 else
173 previous_thread = thread_info_ref::new_reference (inferior_thread ());
176 /* See infrun.h. */
178 thread_info *
179 get_previous_thread ()
181 return previous_thread.get ();
184 /* If set (default for legacy reasons), when following a fork, GDB
185 will detach from one of the fork branches, child or parent.
186 Exactly which branch is detached depends on the 'set follow-fork-mode'
187 setting. */
189 static bool detach_fork = true;
191 bool debug_infrun = false;
192 static void
193 show_debug_infrun (struct ui_file *file, int from_tty,
194 struct cmd_list_element *c, const char *value)
196 gdb_printf (file, _("Inferior debugging is %s.\n"), value);
199 /* Support for disabling address space randomization. */
201 bool disable_randomization = true;
203 static void
204 show_disable_randomization (struct ui_file *file, int from_tty,
205 struct cmd_list_element *c, const char *value)
207 if (target_supports_disable_randomization ())
208 gdb_printf (file,
209 _("Disabling randomization of debuggee's "
210 "virtual address space is %s.\n"),
211 value);
212 else
213 gdb_puts (_("Disabling randomization of debuggee's "
214 "virtual address space is unsupported on\n"
215 "this platform.\n"), file);
218 static void
219 set_disable_randomization (const char *args, int from_tty,
220 struct cmd_list_element *c)
222 if (!target_supports_disable_randomization ())
223 error (_("Disabling randomization of debuggee's "
224 "virtual address space is unsupported on\n"
225 "this platform."));
228 /* User interface for non-stop mode. */
230 bool non_stop = false;
231 static bool non_stop_1 = false;
233 static void
234 set_non_stop (const char *args, int from_tty,
235 struct cmd_list_element *c)
237 if (target_has_execution ())
239 non_stop_1 = non_stop;
240 error (_("Cannot change this setting while the inferior is running."));
243 non_stop = non_stop_1;
246 static void
247 show_non_stop (struct ui_file *file, int from_tty,
248 struct cmd_list_element *c, const char *value)
250 gdb_printf (file,
251 _("Controlling the inferior in non-stop mode is %s.\n"),
252 value);
255 /* "Observer mode" is somewhat like a more extreme version of
256 non-stop, in which all GDB operations that might affect the
257 target's execution have been disabled. */
259 static bool observer_mode = false;
260 static bool observer_mode_1 = false;
262 static void
263 set_observer_mode (const char *args, int from_tty,
264 struct cmd_list_element *c)
266 if (target_has_execution ())
268 observer_mode_1 = observer_mode;
269 error (_("Cannot change this setting while the inferior is running."));
272 observer_mode = observer_mode_1;
274 may_write_registers = !observer_mode;
275 may_write_memory = !observer_mode;
276 may_insert_breakpoints = !observer_mode;
277 may_insert_tracepoints = !observer_mode;
278 /* We can insert fast tracepoints in or out of observer mode,
279 but enable them if we're going into this mode. */
280 if (observer_mode)
281 may_insert_fast_tracepoints = true;
282 may_stop = !observer_mode;
283 update_target_permissions ();
285 /* Going *into* observer mode we must force non-stop, then
286 going out we leave it that way. */
287 if (observer_mode)
289 pagination_enabled = false;
290 non_stop = non_stop_1 = true;
293 if (from_tty)
294 gdb_printf (_("Observer mode is now %s.\n"),
295 (observer_mode ? "on" : "off"));
298 static void
299 show_observer_mode (struct ui_file *file, int from_tty,
300 struct cmd_list_element *c, const char *value)
302 gdb_printf (file, _("Observer mode is %s.\n"), value);
305 /* This updates the value of observer mode based on changes in
306 permissions. Note that we are deliberately ignoring the values of
307 may-write-registers and may-write-memory, since the user may have
308 reason to enable these during a session, for instance to turn on a
309 debugging-related global. */
311 void
312 update_observer_mode (void)
314 bool newval = (!may_insert_breakpoints
315 && !may_insert_tracepoints
316 && may_insert_fast_tracepoints
317 && !may_stop
318 && non_stop);
320 /* Let the user know if things change. */
321 if (newval != observer_mode)
322 gdb_printf (_("Observer mode is now %s.\n"),
323 (newval ? "on" : "off"));
325 observer_mode = observer_mode_1 = newval;
328 /* Tables of how to react to signals; the user sets them. */
330 static unsigned char signal_stop[GDB_SIGNAL_LAST];
331 static unsigned char signal_print[GDB_SIGNAL_LAST];
332 static unsigned char signal_program[GDB_SIGNAL_LAST];
334 /* Table of signals that are registered with "catch signal". A
335 non-zero entry indicates that the signal is caught by some "catch
336 signal" command. */
337 static unsigned char signal_catch[GDB_SIGNAL_LAST];
339 /* Table of signals that the target may silently handle.
340 This is automatically determined from the flags above,
341 and simply cached here. */
342 static unsigned char signal_pass[GDB_SIGNAL_LAST];
344 #define SET_SIGS(nsigs,sigs,flags) \
345 do { \
346 int signum = (nsigs); \
347 while (signum-- > 0) \
348 if ((sigs)[signum]) \
349 (flags)[signum] = 1; \
350 } while (0)
352 #define UNSET_SIGS(nsigs,sigs,flags) \
353 do { \
354 int signum = (nsigs); \
355 while (signum-- > 0) \
356 if ((sigs)[signum]) \
357 (flags)[signum] = 0; \
358 } while (0)
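/* A minimal usage sketch (hypothetical caller, not part of the original
   file): the "handle" command machinery builds a scratch table of the
   signals named by the user and then folds it into the tables above,
   roughly like this:

     unsigned char sigs[GDB_SIGNAL_LAST] = { 0 };
     sigs[GDB_SIGNAL_USR1] = 1;
     SET_SIGS (GDB_SIGNAL_LAST, sigs, signal_program);
     UNSET_SIGS (GDB_SIGNAL_LAST, sigs, signal_stop);  */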
360 /* Update the target's copy of SIGNAL_PROGRAM. The sole purpose of
361 this function is to avoid exporting `signal_program'. */
363 void
364 update_signals_program_target (void)
366 target_program_signals (signal_program);
369 /* Value to pass to target_resume() to cause all threads to resume. */
371 #define RESUME_ALL minus_one_ptid
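/* A usage sketch (assumed call shape, for illustration only): resuming
   every thread, without stepping and without delivering a signal, would
   look roughly like:

     target_resume (RESUME_ALL, 0, GDB_SIGNAL_0);  */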
373 /* Command list pointer for the "stop" placeholder. */
375 static struct cmd_list_element *stop_command;
377 /* Nonzero if we want to give control to the user when we're notified
378 of shared library events by the dynamic linker. */
379 int stop_on_solib_events;
381 /* Enable or disable optional shared library event breakpoints
382 as appropriate when the above flag is changed. */
384 static void
385 set_stop_on_solib_events (const char *args,
386 int from_tty, struct cmd_list_element *c)
388 update_solib_breakpoints ();
391 static void
392 show_stop_on_solib_events (struct ui_file *file, int from_tty,
393 struct cmd_list_element *c, const char *value)
395 gdb_printf (file, _("Stopping for shared library events is %s.\n"),
396 value);
399 /* True after stop if current stack frame should be printed. */
401 static bool stop_print_frame;
403 /* This is a cached copy of the target/ptid/waitstatus of the last
404 event returned by target_wait().
405 This information is returned by get_last_target_status(). */
406 static process_stratum_target *target_last_proc_target;
407 static ptid_t target_last_wait_ptid;
408 static struct target_waitstatus target_last_waitstatus;
410 void init_thread_stepping_state (struct thread_info *tss);
412 static const char follow_fork_mode_child[] = "child";
413 static const char follow_fork_mode_parent[] = "parent";
415 static const char *const follow_fork_mode_kind_names[] = {
416 follow_fork_mode_child,
417 follow_fork_mode_parent,
418 nullptr
421 static const char *follow_fork_mode_string = follow_fork_mode_parent;
422 static void
423 show_follow_fork_mode_string (struct ui_file *file, int from_tty,
424 struct cmd_list_element *c, const char *value)
426 gdb_printf (file,
427 _("Debugger response to a program "
428 "call of fork or vfork is \"%s\".\n"),
429 value);
433 /* Handle changes to the inferior list based on the type of fork,
434 which process is being followed, and whether the other process
435 should be detached. On entry inferior_ptid must be the ptid of
436 the fork parent. At return inferior_ptid is the ptid of the
437 followed inferior. */
439 static bool
440 follow_fork_inferior (bool follow_child, bool detach_fork)
442 INFRUN_SCOPED_DEBUG_ENTER_EXIT;
444 infrun_debug_printf ("follow_child = %d, detach_fork = %d",
445 follow_child, detach_fork);
447 target_waitkind fork_kind = inferior_thread ()->pending_follow.kind ();
448 gdb_assert (fork_kind == TARGET_WAITKIND_FORKED
449 || fork_kind == TARGET_WAITKIND_VFORKED);
450 bool has_vforked = fork_kind == TARGET_WAITKIND_VFORKED;
451 ptid_t parent_ptid = inferior_ptid;
452 ptid_t child_ptid = inferior_thread ()->pending_follow.child_ptid ();
454 if (has_vforked
455 && !non_stop /* Non-stop always resumes both branches. */
456 && current_ui->prompt_state == PROMPT_BLOCKED
457 && !(follow_child || detach_fork || sched_multi))
459 /* The parent stays blocked inside the vfork syscall until the
460 child execs or exits. If we don't let the child run, then
461 the parent stays blocked. If we're telling the parent to run
462 in the foreground, the user will not be able to ctrl-c to get
463 back the terminal, effectively hanging the debug session. */
464 gdb_printf (gdb_stderr, _("\
465 Can not resume the parent process over vfork in the foreground while\n\
466 holding the child stopped. Try \"set detach-on-fork\" or \
467 \"set schedule-multiple\".\n"));
468 return true;
471 inferior *parent_inf = current_inferior ();
472 inferior *child_inf = nullptr;
474 gdb_assert (parent_inf->thread_waiting_for_vfork_done == nullptr);
476 if (!follow_child)
478 /* Detach new forked process? */
479 if (detach_fork)
481 /* Before detaching from the child, remove all breakpoints
482 from it. If we forked, then this has already been taken
483 care of by infrun.c. If we vforked however, any
484 breakpoint inserted in the parent is visible in the
485 child, even those added while stopped in a vfork
486 catchpoint. This will remove the breakpoints from the
487 parent also, but they'll be reinserted below. */
488 if (has_vforked)
490 /* Keep breakpoints list in sync. */
491 remove_breakpoints_inf (current_inferior ());
494 if (print_inferior_events)
496 /* Ensure that we have a process ptid. */
497 ptid_t process_ptid = ptid_t (child_ptid.pid ());
499 target_terminal::ours_for_output ();
500 gdb_printf (_("[Detaching after %s from child %s]\n"),
501 has_vforked ? "vfork" : "fork",
502 target_pid_to_str (process_ptid).c_str ());
505 else
507 /* Add process to GDB's tables. */
508 child_inf = add_inferior (child_ptid.pid ());
510 child_inf->attach_flag = parent_inf->attach_flag;
511 copy_terminal_info (child_inf, parent_inf);
512 child_inf->set_arch (parent_inf->arch ());
513 child_inf->tdesc_info = parent_inf->tdesc_info;
515 child_inf->symfile_flags = SYMFILE_NO_READ;
517 /* If this is a vfork child, then the address-space is
518 shared with the parent. */
519 if (has_vforked)
521 child_inf->pspace = parent_inf->pspace;
522 child_inf->aspace = parent_inf->aspace;
524 exec_on_vfork (child_inf);
526 /* The parent will be frozen until the child is done
527 with the shared region. Keep track of the
528 parent. */
529 child_inf->vfork_parent = parent_inf;
530 child_inf->pending_detach = false;
531 parent_inf->vfork_child = child_inf;
532 parent_inf->pending_detach = false;
534 else
536 child_inf->pspace = new program_space (new_address_space ());
537 child_inf->aspace = child_inf->pspace->aspace;
538 child_inf->removable = true;
539 clone_program_space (child_inf->pspace, parent_inf->pspace);
543 if (has_vforked)
545 /* If we detached from the child, then we have to be careful
546 to not insert breakpoints in the parent until the child
547 is done with the shared memory region. However, if we're
548 staying attached to the child, then we can and should
549 insert breakpoints, so that we can debug it. A
550 subsequent child exec or exit is enough to know when the
551 child stops using the parent's address space. */
552 parent_inf->thread_waiting_for_vfork_done
553 = detach_fork ? inferior_thread () : nullptr;
554 parent_inf->pspace->breakpoints_not_allowed = detach_fork;
556 infrun_debug_printf
557 ("parent_inf->thread_waiting_for_vfork_done == %s",
558 (parent_inf->thread_waiting_for_vfork_done == nullptr
559 ? "nullptr"
560 : (parent_inf->thread_waiting_for_vfork_done
561 ->ptid.to_string ().c_str ())));
564 else
566 /* Follow the child. */
568 if (print_inferior_events)
570 std::string parent_pid = target_pid_to_str (parent_ptid);
571 std::string child_pid = target_pid_to_str (child_ptid);
573 target_terminal::ours_for_output ();
574 gdb_printf (_("[Attaching after %s %s to child %s]\n"),
575 parent_pid.c_str (),
576 has_vforked ? "vfork" : "fork",
577 child_pid.c_str ());
580 /* Add the new inferior first, so that the target_detach below
581 doesn't unpush the target. */
583 child_inf = add_inferior (child_ptid.pid ());
585 child_inf->attach_flag = parent_inf->attach_flag;
586 copy_terminal_info (child_inf, parent_inf);
587 child_inf->set_arch (parent_inf->arch ());
588 child_inf->tdesc_info = parent_inf->tdesc_info;
590 if (has_vforked)
592 /* If this is a vfork child, then the address-space is shared
593 with the parent. */
594 child_inf->aspace = parent_inf->aspace;
595 child_inf->pspace = parent_inf->pspace;
597 exec_on_vfork (child_inf);
599 else if (detach_fork)
601 /* We follow the child and detach from the parent: move the parent's
602 program space to the child. This simplifies some things, like
603 doing "next" over fork() and landing on the expected line in the
604 child (note, that is broken with "set detach-on-fork off").
606 Before assigning brand new spaces for the parent, remove
607 breakpoints from it: because the new pspace won't match
608 currently inserted locations, the normal detach procedure
609 wouldn't remove them, and we would leave them inserted when
610 detaching. */
611 remove_breakpoints_inf (parent_inf);
613 child_inf->aspace = parent_inf->aspace;
614 child_inf->pspace = parent_inf->pspace;
615 parent_inf->pspace = new program_space (new_address_space ());
616 parent_inf->aspace = parent_inf->pspace->aspace;
617 clone_program_space (parent_inf->pspace, child_inf->pspace);
619 /* The parent inferior is still the current one, so keep things
620 in sync. */
621 set_current_program_space (parent_inf->pspace);
623 else
625 child_inf->pspace = new program_space (new_address_space ());
626 child_inf->aspace = child_inf->pspace->aspace;
627 child_inf->removable = true;
628 child_inf->symfile_flags = SYMFILE_NO_READ;
629 clone_program_space (child_inf->pspace, parent_inf->pspace);
633 gdb_assert (current_inferior () == parent_inf);
635 /* If we are setting up an inferior for the child, target_follow_fork is
636 responsible for pushing the appropriate targets on the new inferior's
637 target stack and adding the initial thread (with ptid CHILD_PTID).
639 If we are not setting up an inferior for the child (because following
640 the parent and detach_fork is true), it is responsible for detaching
641 from CHILD_PTID. */
642 target_follow_fork (child_inf, child_ptid, fork_kind, follow_child,
643 detach_fork);
645 gdb::observers::inferior_forked.notify (parent_inf, child_inf, fork_kind);
647 /* target_follow_fork must leave the parent as the current inferior. If we
648 want to follow the child, we make it the current one below. */
649 gdb_assert (current_inferior () == parent_inf);
651 /* If there is a child inferior, target_follow_fork must have created a thread
652 for it. */
653 if (child_inf != nullptr)
654 gdb_assert (!child_inf->thread_list.empty ());
656 /* Clear the parent thread's pending follow field. Do this before calling
657 target_detach, so that the target can differentiate the two following
658 cases:
660 - We continue past a fork with "follow-fork-mode == child" &&
661 "detach-on-fork on", and therefore detach the parent. In that
662 case the target should not detach the fork child.
663 - We run to a fork catchpoint and the user types "detach". In that
664 case, the target should detach the fork child in addition to the
665 parent.
667 The former case will have pending_follow cleared, the latter will have
668 pending_follow set. */
669 thread_info *parent_thread = parent_inf->find_thread (parent_ptid);
670 gdb_assert (parent_thread != nullptr);
671 parent_thread->pending_follow.set_spurious ();
673 /* Detach the parent if needed. */
674 if (follow_child)
676 /* If we're vforking, we want to hold on to the parent until
677 the child exits or execs. At child exec or exit time we
678 can remove the old breakpoints from the parent and detach
679 or resume debugging it. Otherwise, detach the parent now;
680 we'll want to reuse its program/address spaces, but we
681 can't set them to the child before removing breakpoints
682 from the parent, otherwise, the breakpoints module could
683 decide to remove breakpoints from the wrong process (since
684 they'd be assigned to the same address space). */
686 if (has_vforked)
688 gdb_assert (child_inf->vfork_parent == nullptr);
689 gdb_assert (parent_inf->vfork_child == nullptr);
690 child_inf->vfork_parent = parent_inf;
691 child_inf->pending_detach = false;
692 parent_inf->vfork_child = child_inf;
693 parent_inf->pending_detach = detach_fork;
695 else if (detach_fork)
697 if (print_inferior_events)
699 /* Ensure that we have a process ptid. */
700 ptid_t process_ptid = ptid_t (parent_ptid.pid ());
702 target_terminal::ours_for_output ();
703 gdb_printf (_("[Detaching after fork from "
704 "parent %s]\n"),
705 target_pid_to_str (process_ptid).c_str ());
708 target_detach (parent_inf, 0);
712 /* If we ended up creating a new inferior, call post_create_inferior to inform
713 the various subcomponents. */
714 if (child_inf != nullptr)
716 /* If FOLLOW_CHILD, we leave CHILD_INF as the current inferior
717 (do not restore the parent as the current inferior). */
718 std::optional<scoped_restore_current_thread> maybe_restore;
720 if (!follow_child && !sched_multi)
721 maybe_restore.emplace ();
723 switch_to_thread (*child_inf->threads ().begin ());
724 post_create_inferior (0);
727 return false;
730 /* Set the last target status as TP having stopped. */
732 static void
733 set_last_target_status_stopped (thread_info *tp)
735 set_last_target_status (tp->inf->process_target (), tp->ptid,
736 target_waitstatus {}.set_stopped (GDB_SIGNAL_0));
739 /* Tell the target to follow the fork we're stopped at. Returns true
740 if the inferior should be resumed; false, if the target for some
741 reason decided it's best not to resume. */
743 static bool
744 follow_fork ()
746 INFRUN_SCOPED_DEBUG_ENTER_EXIT;
748 bool follow_child = (follow_fork_mode_string == follow_fork_mode_child);
749 bool should_resume = true;
751 /* Copy user stepping state to the new inferior thread. FIXME: the
752 followed fork child thread should have a copy of most of the
753 parent thread structure's run control related fields, not just these.
754 Initialized to avoid "may be used uninitialized" warnings from gcc. */
755 struct breakpoint *step_resume_breakpoint = nullptr;
756 struct breakpoint *exception_resume_breakpoint = nullptr;
757 CORE_ADDR step_range_start = 0;
758 CORE_ADDR step_range_end = 0;
759 int current_line = 0;
760 symtab *current_symtab = nullptr;
761 struct frame_id step_frame_id = { 0 };
763 if (!non_stop)
765 thread_info *cur_thr = inferior_thread ();
767 ptid_t resume_ptid
768 = user_visible_resume_ptid (cur_thr->control.stepping_command);
769 process_stratum_target *resume_target
770 = user_visible_resume_target (resume_ptid);
772 /* Check if there's a thread that we're about to resume, other
773 than the current, with an unfollowed fork/vfork. If so,
774 switch back to it, to tell the target to follow it (in either
775 direction). We'll afterwards refuse to resume, and inform
776 the user what happened. */
777 for (thread_info *tp : all_non_exited_threads (resume_target,
778 resume_ptid))
780 if (tp == cur_thr)
781 continue;
783 /* follow_fork_inferior clears tp->pending_follow, and below
784 we'll need the value after the follow_fork_inferior
785 call. */
786 target_waitkind kind = tp->pending_follow.kind ();
788 if (kind != TARGET_WAITKIND_SPURIOUS)
790 infrun_debug_printf ("need to follow-fork [%s] first",
791 tp->ptid.to_string ().c_str ());
793 switch_to_thread (tp);
795 /* Set up inferior(s) as specified by the caller, and
796 tell the target to do whatever is necessary to follow
797 either parent or child. */
798 if (follow_child)
800 /* The thread that started the execution command
801 won't exist in the child. Abort the command and
802 immediately stop in this thread, in the child,
803 inside fork. */
804 should_resume = false;
806 else
808 /* Following the parent, so let the thread fork its
809 child freely, it won't influence the current
810 execution command. */
811 if (follow_fork_inferior (follow_child, detach_fork))
813 /* Target refused to follow, or there's some
814 other reason we shouldn't resume. */
815 switch_to_thread (cur_thr);
816 set_last_target_status_stopped (cur_thr);
817 return false;
820 /* If we're following a vfork, then we need to leave
821 the just-forked thread as selected, as we need to
822 solo-resume it to collect the VFORK_DONE event.
823 If we're following a fork, however, switch back
824 to the original thread so that we continue stepping
825 it, etc. */
826 if (kind != TARGET_WAITKIND_VFORKED)
828 gdb_assert (kind == TARGET_WAITKIND_FORKED);
829 switch_to_thread (cur_thr);
833 break;
838 thread_info *tp = inferior_thread ();
840 /* If there were any forks/vforks that were caught and are now to be
841 followed, then do so now. */
842 switch (tp->pending_follow.kind ())
844 case TARGET_WAITKIND_FORKED:
845 case TARGET_WAITKIND_VFORKED:
847 ptid_t parent, child;
848 std::unique_ptr<struct thread_fsm> thread_fsm;
850 /* If the user did a next/step, etc, over a fork call,
851 preserve the stepping state in the fork child. */
852 if (follow_child && should_resume)
854 step_resume_breakpoint = clone_momentary_breakpoint
855 (tp->control.step_resume_breakpoint);
856 step_range_start = tp->control.step_range_start;
857 step_range_end = tp->control.step_range_end;
858 current_line = tp->current_line;
859 current_symtab = tp->current_symtab;
860 step_frame_id = tp->control.step_frame_id;
861 exception_resume_breakpoint
862 = clone_momentary_breakpoint (tp->control.exception_resume_breakpoint);
863 thread_fsm = tp->release_thread_fsm ();
865 /* For now, delete the parent's sr breakpoint, otherwise,
866 parent/child sr breakpoints are considered duplicates,
867 and the child version will not be installed. Remove
868 this when the breakpoints module becomes aware of
869 inferiors and address spaces. */
870 delete_step_resume_breakpoint (tp);
871 tp->control.step_range_start = 0;
872 tp->control.step_range_end = 0;
873 tp->control.step_frame_id = null_frame_id;
874 delete_exception_resume_breakpoint (tp);
877 parent = inferior_ptid;
878 child = tp->pending_follow.child_ptid ();
880 /* If handling a vfork, stop all the inferior's threads, they will be
881 restarted when the vfork shared region is complete. */
882 if (tp->pending_follow.kind () == TARGET_WAITKIND_VFORKED
883 && target_is_non_stop_p ())
884 stop_all_threads ("handling vfork", tp->inf);
886 process_stratum_target *parent_targ = tp->inf->process_target ();
887 /* Set up inferior(s) as specified by the caller, and tell the
888 target to do whatever is necessary to follow either parent
889 or child. */
890 if (follow_fork_inferior (follow_child, detach_fork))
892 /* Target refused to follow, or there's some other reason
893 we shouldn't resume. */
894 should_resume = 0;
896 else
898 /* If we followed the child, switch to it... */
899 if (follow_child)
901 tp = parent_targ->find_thread (child);
902 switch_to_thread (tp);
904 /* ... and preserve the stepping state, in case the
905 user was stepping over the fork call. */
906 if (should_resume)
908 tp->control.step_resume_breakpoint
909 = step_resume_breakpoint;
910 tp->control.step_range_start = step_range_start;
911 tp->control.step_range_end = step_range_end;
912 tp->current_line = current_line;
913 tp->current_symtab = current_symtab;
914 tp->control.step_frame_id = step_frame_id;
915 tp->control.exception_resume_breakpoint
916 = exception_resume_breakpoint;
917 tp->set_thread_fsm (std::move (thread_fsm));
919 else
921 /* If we get here, it was because we're trying to
922 resume from a fork catchpoint, but the user
923 has switched threads away from the thread that
924 forked. In that case, the resume command
925 issued is most likely not applicable to the
926 child, so just warn, and refuse to resume. */
927 warning (_("Not resuming: switched threads "
928 "before following fork child."));
931 /* Reset breakpoints in the child as appropriate. */
932 follow_inferior_reset_breakpoints ();
936 break;
937 case TARGET_WAITKIND_SPURIOUS:
938 /* Nothing to follow. */
939 break;
940 default:
941 internal_error ("Unexpected pending_follow.kind %d\n",
942 tp->pending_follow.kind ());
943 break;
946 if (!should_resume)
947 set_last_target_status_stopped (tp);
948 return should_resume;
951 static void
952 follow_inferior_reset_breakpoints (void)
954 struct thread_info *tp = inferior_thread ();
956 /* Was there a step_resume breakpoint? (There was if the user
957 did a "next" at the fork() call.) If so, explicitly reset its
958 thread number. Cloned step_resume breakpoints are disabled on
959 creation, so enable it here now that it is associated with the
960 correct thread.
962 step_resumes are a form of bp that are made to be per-thread.
963 Since we created the step_resume bp when the parent process
964 was being debugged, and now are switching to the child process,
965 from the breakpoint package's viewpoint, that's a switch of
966 "threads". We must update the bp's notion of which thread
967 it is for, or it'll be ignored when it triggers. */
969 if (tp->control.step_resume_breakpoint)
971 breakpoint_re_set_thread (tp->control.step_resume_breakpoint);
972 tp->control.step_resume_breakpoint->first_loc ().enabled = 1;
975 /* Treat exception_resume breakpoints like step_resume breakpoints. */
976 if (tp->control.exception_resume_breakpoint)
978 breakpoint_re_set_thread (tp->control.exception_resume_breakpoint);
979 tp->control.exception_resume_breakpoint->first_loc ().enabled = 1;
982 /* Reinsert all breakpoints in the child. The user may have set
983 breakpoints after catching the fork, in which case those
984 were never set in the child, but only in the parent. This makes
985 sure the inserted breakpoints match the breakpoint list. */
987 breakpoint_re_set ();
988 insert_breakpoints ();
991 /* The child has exited or execed: resume THREAD, a thread of the parent,
992 if it was meant to be executing. */
994 static void
995 proceed_after_vfork_done (thread_info *thread)
997 if (thread->state == THREAD_RUNNING
998 && !thread->executing ()
999 && !thread->stop_requested
1000 && thread->stop_signal () == GDB_SIGNAL_0)
1002 infrun_debug_printf ("resuming vfork parent thread %s",
1003 thread->ptid.to_string ().c_str ());
1005 switch_to_thread (thread);
1006 clear_proceed_status (0);
1007 proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT);
1011 /* Called whenever we notice an exec or exit event, to handle
1012 detaching or resuming a vfork parent. */
1014 static void
1015 handle_vfork_child_exec_or_exit (int exec)
1017 INFRUN_SCOPED_DEBUG_ENTER_EXIT;
1019 struct inferior *inf = current_inferior ();
1021 if (inf->vfork_parent)
1023 inferior *resume_parent = nullptr;
1025 /* This exec or exit marks the end of the shared memory region
1026 between the parent and the child. Break the bonds. */
1027 inferior *vfork_parent = inf->vfork_parent;
1028 inf->vfork_parent->vfork_child = nullptr;
1029 inf->vfork_parent = nullptr;
1031 /* If the user wanted to detach from the parent, now is the
1032 time. */
1033 if (vfork_parent->pending_detach)
1035 struct program_space *pspace;
1037 /* follow-fork child, detach-on-fork on. */
1039 vfork_parent->pending_detach = false;
1041 scoped_restore_current_pspace_and_thread restore_thread;
1043 /* We're letting loose of the parent. */
1044 thread_info *tp = any_live_thread_of_inferior (vfork_parent);
1045 switch_to_thread (tp);
1047 /* We're about to detach from the parent, which implicitly
1048 removes breakpoints from its address space. There's a
1049 catch here: we want to reuse the spaces for the child,
1050 but, parent/child are still sharing the pspace at this
1051 point, although the exec in reality makes the kernel give
1052 the child a fresh set of new pages. The problem here is
1053 that the breakpoints module, being unaware of this, would
1054 likely choose the child process to write to the parent
1055 address space. Swapping the child temporarily away from
1056 the spaces has the desired effect. Yes, this is "sort
1057 of" a hack. */
1059 pspace = inf->pspace;
1060 inf->pspace = nullptr;
1061 address_space_ref_ptr aspace = std::move (inf->aspace);
1063 if (print_inferior_events)
1065 std::string pidstr
1066 = target_pid_to_str (ptid_t (vfork_parent->pid));
1068 target_terminal::ours_for_output ();
1070 if (exec)
1072 gdb_printf (_("[Detaching vfork parent %s "
1073 "after child exec]\n"), pidstr.c_str ());
1075 else
1077 gdb_printf (_("[Detaching vfork parent %s "
1078 "after child exit]\n"), pidstr.c_str ());
1082 target_detach (vfork_parent, 0);
1084 /* Put it back. */
1085 inf->pspace = pspace;
1086 inf->aspace = aspace;
1088 else if (exec)
1090 /* We're staying attached to the parent, so, really give the
1091 child a new address space. */
1092 inf->pspace = new program_space (maybe_new_address_space ());
1093 inf->aspace = inf->pspace->aspace;
1094 inf->removable = true;
1095 set_current_program_space (inf->pspace);
1097 resume_parent = vfork_parent;
1099 else
1101 /* If this is a vfork child exiting, then the pspace and
1102 aspaces were shared with the parent. Since we're
1103 reporting the process exit, we'll be mourning all that is
1104 found in the address space, and switching to null_ptid,
1105 preparing to start a new inferior. But, since we don't
1106 want to clobber the parent's address/program spaces, we
1107 go ahead and create a new one for this exiting
1108 inferior. */
1110 scoped_restore_current_thread restore_thread;
1112 /* Temporarily switch to the vfork parent, to facilitate ptrace
1113 calls done during maybe_new_address_space. */
1114 switch_to_thread (any_live_thread_of_inferior (vfork_parent));
1115 address_space_ref_ptr aspace = maybe_new_address_space ();
1117 /* Switch back to the vfork child inferior. Switch to no-thread
1118 while running clone_program_space, so that clone_program_space
1119 doesn't want to read the selected frame of a dead process. */
1120 switch_to_inferior_no_thread (inf);
1122 inf->pspace = new program_space (std::move (aspace));
1123 inf->aspace = inf->pspace->aspace;
1124 set_current_program_space (inf->pspace);
1125 inf->removable = true;
1126 inf->symfile_flags = SYMFILE_NO_READ;
1127 clone_program_space (inf->pspace, vfork_parent->pspace);
1129 resume_parent = vfork_parent;
1132 gdb_assert (current_program_space == inf->pspace);
1134 if (non_stop && resume_parent != nullptr)
1136 /* If the user wanted the parent to be running, let it go
1137 free now. */
1138 scoped_restore_current_thread restore_thread;
1140 infrun_debug_printf ("resuming vfork parent process %d",
1141 resume_parent->pid);
1143 for (thread_info *thread : resume_parent->threads ())
1144 proceed_after_vfork_done (thread);
1149 /* Handle TARGET_WAITKIND_VFORK_DONE. */
1151 static void
1152 handle_vfork_done (thread_info *event_thread)
1154 INFRUN_SCOPED_DEBUG_ENTER_EXIT;
1156 /* We only care about this event if inferior::thread_waiting_for_vfork_done is
1157 set, that is if we are waiting for a vfork child not under our control
1158 (because we detached it) to exec or exit.
1160 If an inferior has vforked and we are debugging the child, we don't use
1161 the vfork-done event to get notified about the end of the shared address
1162 space window. We rely instead on the child's exec or exit event, and the
1163 inferior::vfork_{parent,child} fields are used instead. See
1164 handle_vfork_child_exec_or_exit for that. */
1165 if (event_thread->inf->thread_waiting_for_vfork_done == nullptr)
1167 infrun_debug_printf ("not waiting for a vfork-done event");
1168 return;
1171 /* We stopped all threads (other than the vforking thread) of the inferior in
1172 follow_fork and kept them stopped until now. It should therefore not be
1173 possible for another thread to have reported a vfork during that window.
1174 If THREAD_WAITING_FOR_VFORK_DONE is set, it has to be the same thread whose
1175 vfork-done we are handling right now. */
1176 gdb_assert (event_thread->inf->thread_waiting_for_vfork_done == event_thread);
1178 event_thread->inf->thread_waiting_for_vfork_done = nullptr;
1179 event_thread->inf->pspace->breakpoints_not_allowed = 0;
1181 /* On non-stop targets, we stopped all the inferior's threads in follow_fork,
1182 resume them now. On all-stop targets, everything that needs to be resumed
1183 will be when we resume the event thread. */
1184 if (target_is_non_stop_p ())
1186 /* restart_threads and start_step_over may change the current thread, make
1187 sure we leave the event thread as the current thread. */
1188 scoped_restore_current_thread restore_thread;
1190 insert_breakpoints ();
1191 start_step_over ();
1193 if (!step_over_info_valid_p ())
1194 restart_threads (event_thread, event_thread->inf);
1198 /* Enum strings for "set|show follow-exec-mode". */
1200 static const char follow_exec_mode_new[] = "new";
1201 static const char follow_exec_mode_same[] = "same";
1202 static const char *const follow_exec_mode_names[] =
1204 follow_exec_mode_new,
1205 follow_exec_mode_same,
1206 nullptr,
1209 static const char *follow_exec_mode_string = follow_exec_mode_same;
1210 static void
1211 show_follow_exec_mode_string (struct ui_file *file, int from_tty,
1212 struct cmd_list_element *c, const char *value)
1214 gdb_printf (file, _("Follow exec mode is \"%s\".\n"), value);
1217 /* EXEC_FILE_TARGET is assumed to be non-NULL. */
1219 static void
1220 follow_exec (ptid_t ptid, const char *exec_file_target)
1222 int pid = ptid.pid ();
1223 ptid_t process_ptid;
1225 /* Switch terminal for any messages produced e.g. by
1226 breakpoint_re_set. */
1227 target_terminal::ours_for_output ();
1229 /* This is an exec event that we actually wish to pay attention to.
1230 Refresh our symbol table to the newly exec'd program, remove any
1231 momentary bp's, etc.
1233 If there are breakpoints, they aren't really inserted now,
1234 since the exec() transformed our inferior into a fresh set
1235 of instructions.
1237 We want to preserve symbolic breakpoints on the list, since
1238 we have hopes that they can be reset after the new a.out's
1239 symbol table is read.
1241 However, any "raw" breakpoints must be removed from the list
1242 (e.g., the solib bp's), since their address is probably invalid
1243 now.
1245 And, we DON'T want to call delete_breakpoints() here, since
1246 that may write the bp's "shadow contents" (the instruction
1247 value that was overwritten with a TRAP instruction). Since
1248 we now have a new a.out, those shadow contents aren't valid. */
1250 mark_breakpoints_out ();
1252 /* The target reports the exec event to the main thread, even if
1253 some other thread does the exec, and even if the main thread was
1254 stopped or already gone. We may still have non-leader threads of
1255 the process on our list. E.g., on targets that don't have thread
256 exit events (like remote), where nothing forces an update of the
1257 thread list up to here. When debugging remotely, it's best to
1258 avoid extra traffic, when possible, so avoid syncing the thread
1259 list with the target, and instead go ahead and delete all threads
1260 of the process but the one that reported the event. Note this must
1261 be done before calling update_breakpoints_after_exec, as
1262 otherwise clearing the threads' resources would reference stale
1263 thread breakpoints -- it may have been one of these threads that
1264 stepped across the exec. We could just clear their stepping
1265 states, but as long as we're iterating, might as well delete
1266 them. Deleting them now rather than at the next user-visible
1267 stop provides a nicer sequence of events for user and MI
1268 notifications. */
1269 for (thread_info *th : all_threads_safe ())
1270 if (th->ptid.pid () == pid && th->ptid != ptid)
1271 delete_thread (th);
1273 /* We also need to clear any left over stale state for the
1274 leader/event thread. E.g., if there was any step-resume
1275 breakpoint or similar, it's gone now. We cannot truly
1276 step-to-next statement through an exec(). */
1277 thread_info *th = inferior_thread ();
1278 th->control.step_resume_breakpoint = nullptr;
1279 th->control.exception_resume_breakpoint = nullptr;
1280 th->control.single_step_breakpoints = nullptr;
1281 th->control.step_range_start = 0;
1282 th->control.step_range_end = 0;
1284 /* The user may have had the main thread held stopped in the
1285 previous image (e.g., schedlock on, or non-stop). Release
1286 it now. */
1287 th->stop_requested = 0;
1289 update_breakpoints_after_exec ();
1291 /* What is this a.out's name? */
1292 process_ptid = ptid_t (pid);
1293 gdb_printf (_("%s is executing new program: %s\n"),
1294 target_pid_to_str (process_ptid).c_str (),
1295 exec_file_target);
1297 /* We've followed the inferior through an exec. Therefore, the
1298 inferior has essentially been killed & reborn. */
1300 breakpoint_init_inferior (inf_execd);
1302 gdb::unique_xmalloc_ptr<char> exec_file_host
1303 = exec_file_find (exec_file_target, nullptr);
1305 /* If we were unable to map the executable target pathname onto a host
1306 pathname, tell the user that. Otherwise GDB's subsequent behavior
1307 is confusing. Maybe it would even be better to stop at this point
1308 so that the user can specify a file manually before continuing. */
1309 if (exec_file_host == nullptr)
1310 warning (_("Could not load symbols for executable %s.\n"
1311 "Do you need \"set sysroot\"?"),
1312 exec_file_target);
1314 /* Reset the shared library package. This ensures that we get a
1315 shlib event when the child reaches "_start", at which point the
1316 dld will have had a chance to initialize the child. */
1317 /* Also, loading a symbol file below may trigger symbol lookups, and
1318 we don't want those to be satisfied by the libraries of the
1319 previous incarnation of this process. */
1320 no_shared_libraries (nullptr, 0);
1322 inferior *execing_inferior = current_inferior ();
1323 inferior *following_inferior;
1325 if (follow_exec_mode_string == follow_exec_mode_new)
1327 /* The user wants to keep the old inferior and program spaces
1328 around. Create a new fresh one, and switch to it. */
1330 /* Do exit processing for the original inferior before setting the new
1331 inferior's pid. Having two inferiors with the same pid would confuse
1332 find_inferior_p(t)id. Transfer the terminal state and info from the
1333 old to the new inferior. */
1334 following_inferior = add_inferior_with_spaces ();
1336 swap_terminal_info (following_inferior, execing_inferior);
1337 exit_inferior (execing_inferior);
1339 following_inferior->pid = pid;
1341 else
1343 /* follow-exec-mode is "same", we continue execution in the execing
1344 inferior. */
1345 following_inferior = execing_inferior;
1347 /* The old description may no longer be fit for the new image.
1348 E.g, a 64-bit process exec'ed a 32-bit process. Clear the
1349 old description; we'll read a new one below. No need to do
1350 this on "follow-exec-mode new", as the old inferior stays
1351 around (its description is later cleared/refetched on
1352 restart). */
1353 target_clear_description ();
1356 target_follow_exec (following_inferior, ptid, exec_file_target);
1358 gdb_assert (current_inferior () == following_inferior);
1359 gdb_assert (current_program_space == following_inferior->pspace);
1361 /* Attempt to open the exec file. SYMFILE_DEFER_BP_RESET is used
1362 because the proper displacement for a PIE (Position Independent
1363 Executable) main symbol file will only be computed by
1364 solib_create_inferior_hook below. breakpoint_re_set would fail
1365 to insert the breakpoints with the zero displacement. */
1366 try_open_exec_file (exec_file_host.get (), following_inferior,
1367 SYMFILE_DEFER_BP_RESET);
1369 /* If the target can specify a description, read it. Must do this
1370 after flipping to the new executable (because the target supplied
1371 description must be compatible with the executable's
1372 architecture, and the old executable may e.g., be 32-bit, while
1373 the new one 64-bit), and before anything involving memory or
1374 registers. */
1375 target_find_description ();
1377 gdb::observers::inferior_execd.notify (execing_inferior, following_inferior);
1379 breakpoint_re_set ();
1381 /* Reinsert all breakpoints. (Those which were symbolic have
1382 been reset to the proper address in the new a.out, thanks
1383 to symbol_file_command...). */
1384 insert_breakpoints ();
1386 /* The next resume of this inferior should bring it to the shlib
1387 startup breakpoints. (If the user had also set bp's on
1388 "main" from the old (parent) process, then they'll auto-
1389 matically get reset there in the new process.). */
1392 /* The chain of threads that need to do a step-over operation to get
1393 past e.g., a breakpoint. What technique is used to step over the
1394 breakpoint/watchpoint does not matter -- all threads end up in the
1395 same queue, to maintain rough temporal order of execution, in order
1396 to avoid starvation; otherwise, we could e.g., find ourselves
1397 constantly stepping the same couple of threads past their breakpoints
1398 over and over, if the single-step finishes fast enough. */
1399 thread_step_over_list global_thread_step_over_list;
1401 /* Bit flags indicating what the thread needs to step over. */
1403 enum step_over_what_flag
1405 /* Step over a breakpoint. */
1406 STEP_OVER_BREAKPOINT = 1,
1408 /* Step past a non-continuable watchpoint, in order to let the
1409 instruction execute so we can evaluate the watchpoint
1410 expression. */
1411 STEP_OVER_WATCHPOINT = 2
1413 DEF_ENUM_FLAGS_TYPE (enum step_over_what_flag, step_over_what);
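/* Illustration (not from the original file): DEF_ENUM_FLAGS_TYPE makes
   STEP_OVER_WHAT a type-safe flag set, so the reasons above can be
   combined and tested with the usual bit operators:

     step_over_what what = STEP_OVER_BREAKPOINT | STEP_OVER_WATCHPOINT;
     if (what & STEP_OVER_WATCHPOINT)
       ... the thread must also step past a watchpoint ...  */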
1415 /* Info about an instruction that is being stepped over. */
1417 struct step_over_info
1419 /* If we're stepping past a breakpoint, this is the address space
1420 and address of the instruction the breakpoint is set at. We'll
1421 skip inserting all breakpoints here. Valid iff ASPACE is
1422 non-NULL. */
1423 const address_space *aspace = nullptr;
1424 CORE_ADDR address = 0;
1426 /* The instruction being stepped over triggers a nonsteppable
1427 watchpoint. If true, we'll skip inserting watchpoints. */
1428 int nonsteppable_watchpoint_p = 0;
1430 /* The thread's global number. */
1431 int thread = -1;
1434 /* The step-over info of the location that is being stepped over.
1436 Note that with async/breakpoint always-inserted mode, a user might
1437 set a new breakpoint/watchpoint/etc. exactly while a breakpoint is
1438 being stepped over. As setting a new breakpoint inserts all
1439 breakpoints, we need to make sure the breakpoint being stepped over
1440 isn't inserted then. We do that by only clearing the step-over
1441 info when the step-over is actually finished (or aborted).
1443 Presently GDB can only step over one breakpoint at any given time.
1444 Given that threads which can't run code in the same address space as the
1445 breakpoint's can't really miss the breakpoint, GDB could be taught
1446 to step-over at most one breakpoint per address space (so this info
1447 could move to the address space object if/when GDB is extended).
1448 The set of breakpoints being stepped over will normally be much
1449 smaller than the set of all breakpoints, so a flag in the
1450 breakpoint location structure would be wasteful. A separate list
1451 also saves complexity and run-time, as otherwise we'd have to go
1452 through all breakpoint locations clearing their flag whenever we
1453 start a new sequence. Similar considerations weigh against storing
1454 this info in the thread object. Plus, not all step overs actually
1455 have breakpoint locations -- e.g., stepping past a single-step
1456 breakpoint, or stepping to complete a non-continuable
1457 watchpoint. */
1458 static struct step_over_info step_over_info;
1460 /* Record the address of the breakpoint/instruction we're currently
1461 stepping over.
1462 N.B. We record the aspace and address now, instead of say just the thread,
1463 because when we need the info later the thread may be running. */
1465 static void
1466 set_step_over_info (const address_space *aspace, CORE_ADDR address,
1467 int nonsteppable_watchpoint_p,
1468 int thread)
1470 step_over_info.aspace = aspace;
1471 step_over_info.address = address;
1472 step_over_info.nonsteppable_watchpoint_p = nonsteppable_watchpoint_p;
1473 step_over_info.thread = thread;
1476 /* Called when we're no longer stepping over a breakpoint / an
1477 instruction, so all breakpoints are free to be (re)inserted. */
1479 static void
1480 clear_step_over_info (void)
1482 infrun_debug_printf ("clearing step over info");
1483 step_over_info.aspace = nullptr;
1484 step_over_info.address = 0;
1485 step_over_info.nonsteppable_watchpoint_p = 0;
1486 step_over_info.thread = -1;
1489 /* See infrun.h. */
1492 stepping_past_instruction_at (struct address_space *aspace,
1493 CORE_ADDR address)
1495 return (step_over_info.aspace != nullptr
1496 && breakpoint_address_match (aspace, address,
1497 step_over_info.aspace,
1498 step_over_info.address));
1501 /* See infrun.h. */
1504 thread_is_stepping_over_breakpoint (int thread)
1506 return (step_over_info.thread != -1
1507 && thread == step_over_info.thread);
1510 /* See infrun.h. */
1513 stepping_past_nonsteppable_watchpoint (void)
1515 return step_over_info.nonsteppable_watchpoint_p;
1518 /* Returns true if step-over info is valid. */
1520 static bool
1521 step_over_info_valid_p (void)
1523 return (step_over_info.aspace != nullptr
1524 || stepping_past_nonsteppable_watchpoint ());
1528 /* Displaced stepping. */
1530 /* In non-stop debugging mode, we must take special care to manage
1531 breakpoints properly; in particular, the traditional strategy for
1532 stepping a thread past a breakpoint it has hit is unsuitable.
1533 'Displaced stepping' is a tactic for stepping one thread past a
1534 breakpoint it has hit while ensuring that other threads running
1535 concurrently will hit the breakpoint as they should.
1537 The traditional way to step a thread T off a breakpoint in a
1538 multi-threaded program in all-stop mode is as follows:
1540 a0) Initially, all threads are stopped, and breakpoints are not
1541 inserted.
1542 a1) We single-step T, leaving breakpoints uninserted.
1543 a2) We insert breakpoints, and resume all threads.
1545 In non-stop debugging, however, this strategy is unsuitable: we
1546 don't want to have to stop all threads in the system in order to
1547 continue or step T past a breakpoint. Instead, we use displaced
1548 stepping:
1550 n0) Initially, T is stopped, other threads are running, and
1551 breakpoints are inserted.
1552 n1) We copy the instruction "under" the breakpoint to a separate
1553 location, outside the main code stream, making any adjustments
1554 to the instruction, register, and memory state as directed by
1555 T's architecture.
1556 n2) We single-step T over the instruction at its new location.
1557 n3) We adjust the resulting register and memory state as directed
1558 by T's architecture. This includes resetting T's PC to point
1559 back into the main instruction stream.
1560 n4) We resume T.
1562 This approach depends on the following gdbarch methods:
1564 - gdbarch_max_insn_length and gdbarch_displaced_step_location
1565 indicate where to copy the instruction, and how much space must
1566 be reserved there. We use these in step n1.
1568 - gdbarch_displaced_step_copy_insn copies an instruction to a new
1569 address, and makes any necessary adjustments to the instruction,
1570 register contents, and memory. We use this in step n1.
1572 - gdbarch_displaced_step_fixup adjusts registers and memory after
1573 we have successfully single-stepped the instruction, to yield the
1574 same effect the instruction would have had if we had executed it
1575 at its original address. We use this in step n3.
1577 The gdbarch_displaced_step_copy_insn and
1578 gdbarch_displaced_step_fixup functions must be written so that
1579 copying an instruction with gdbarch_displaced_step_copy_insn,
1580 single-stepping across the copied instruction, and then applying
1581 gdbarch_displaced_step_fixup should have the same effects on the
1582 thread's memory and registers as stepping the instruction in place
1583 would have. Exactly which responsibilities fall to the copy and
1584 which fall to the fixup is up to the author of those functions.
1586 See the comments in gdbarch.sh for details.
1588 Note that displaced stepping and software single-step cannot
1589 currently be used in combination, although with some care I think
1590 they could be made to. Software single-step works by placing
1591 breakpoints on all possible subsequent instructions; if the
1592 displaced instruction is a PC-relative jump, those breakpoints
1593 could fall in very strange places --- on pages that aren't
1594 executable, or at addresses that are not proper instruction
1595 boundaries. (We do generally let other threads run while we wait
1596 to hit the software single-step breakpoint, and they might
1597 encounter such a corrupted instruction.) One way to work around
1598 this would be to have gdbarch_displaced_step_copy_insn fully
1599 simulate the effect of PC-relative instructions (and return NULL)
1600 on architectures that use software single-stepping.
1602 In non-stop mode, we can have independent and simultaneous step
1603 requests, so more than one thread may need to simultaneously step
1604 over a breakpoint. The current implementation assumes there is
1605 only one scratch space per process. In this case, we have to
1606 serialize access to the scratch space. If thread A wants to step
1607 over a breakpoint, but we are currently waiting for some other
1608 thread to complete a displaced step, we leave thread A stopped and
1609 place it in the displaced_step_request_queue. Whenever a displaced
1610 step finishes, we pick the next thread in the queue and start a new
1611 displaced step operation on it. See displaced_step_prepare and
1612 displaced_step_finish for details. */
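/* A simplified sketch of steps n1-n4 above in terms of the gdbarch hooks
   named in this comment.  Argument lists are abbreviated, and the buffer
   bookkeeping, error handling and restart logic of the real code
   (displaced_step_prepare / displaced_step_finish) are omitted:

     CORE_ADDR from = regcache_read_pc (regcache);             // original PC
     CORE_ADDR to = gdbarch_displaced_step_location (gdbarch); // scratch area
     closure = gdbarch_displaced_step_copy_insn (gdbarch, from, to,
                                                 regcache);    // step n1
     ... single-step the thread at TO (step n2), then once it stops ...
     gdbarch_displaced_step_fixup (gdbarch, closure, from, to, regcache);
                                                               // step n3
     ... resume the thread normally (step n4) ...  */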
1614 /* Return true if THREAD is doing a displaced step. */
1616 static bool
1617 displaced_step_in_progress_thread (thread_info *thread)
1619 gdb_assert (thread != nullptr);
1621 return thread->displaced_step_state.in_progress ();
1624 /* Return true if INF has a thread doing a displaced step. */
1626 static bool
1627 displaced_step_in_progress (inferior *inf)
1629 return inf->displaced_step_state.in_progress_count > 0;
1632 /* Return true if any thread is doing a displaced step. */
1634 static bool
1635 displaced_step_in_progress_any_thread ()
1637 for (inferior *inf : all_non_exited_inferiors ())
1639 if (displaced_step_in_progress (inf))
1640 return true;
1643 return false;
1646 static void
1647 infrun_inferior_exit (struct inferior *inf)
1649 inf->displaced_step_state.reset ();
1650 inf->thread_waiting_for_vfork_done = nullptr;
1653 static void
1654 infrun_inferior_execd (inferior *exec_inf, inferior *follow_inf)
1656 /* If some threads were doing a displaced step in this inferior at the
1657 moment of the exec, they no longer exist. Even if the exec'ing thread
1658 was doing a displaced step, we don't want to do any fixup nor restore
1659 displaced stepping buffer bytes. */
1660 follow_inf->displaced_step_state.reset ();
1662 for (thread_info *thread : follow_inf->threads ())
1663 thread->displaced_step_state.reset ();
1665 /* Since an in-line step is done with everything else stopped, if there was
1666 one in progress at the time of the exec, it must have been the exec'ing
1667 thread. */
1668 clear_step_over_info ();
1670 follow_inf->thread_waiting_for_vfork_done = nullptr;
1673 /* If ON, and the architecture supports it, GDB will use displaced
1674 stepping to step over breakpoints. If OFF, or if the architecture
1675 doesn't support it, GDB will instead use the traditional
1676 hold-and-step approach. If AUTO (which is the default), GDB will
1677 decide which technique to use to step over breakpoints depending on
1678 whether the target works in a non-stop way (see use_displaced_stepping). */
1680 static enum auto_boolean can_use_displaced_stepping = AUTO_BOOLEAN_AUTO;
1682 static void
1683 show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
1684 struct cmd_list_element *c,
1685 const char *value)
1687 if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO)
1688 gdb_printf (file,
1689 _("Debugger's willingness to use displaced stepping "
1690 "to step over breakpoints is %s (currently %s).\n"),
1691 value, target_is_non_stop_p () ? "on" : "off");
1692 else
1693 gdb_printf (file,
1694 _("Debugger's willingness to use displaced stepping "
1695 "to step over breakpoints is %s.\n"), value);
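/* For example, with "set displaced-stepping auto" on a target that is not
   running in non-stop mode, "show displaced-stepping" would print something
   along the lines of (illustrative output, wrapped here for readability):

     Debugger's willingness to use displaced stepping to step over
     breakpoints is auto (currently off).  */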
1698 /* Return true if the gdbarch implements the required methods to use
1699 displaced stepping. */
1701 static bool
1702 gdbarch_supports_displaced_stepping (gdbarch *arch)
1704 /* Only check for the presence of `prepare`. The gdbarch verification ensures
1705 that if `prepare` is provided, so is `finish`. */
1706 return gdbarch_displaced_step_prepare_p (arch);
1709 /* Return true if displaced stepping can/should be used to step
1710 over breakpoints of thread TP. */
1712 static bool
1713 use_displaced_stepping (thread_info *tp)
1715 /* If the user disabled it explicitly, don't use displaced stepping. */
1716 if (can_use_displaced_stepping == AUTO_BOOLEAN_FALSE)
1717 return false;
1719 /* If "auto", only use displaced stepping if the target operates in a non-stop
1720 way. */
1721 if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO
1722 && !target_is_non_stop_p ())
1723 return false;
1725 gdbarch *gdbarch = get_thread_regcache (tp)->arch ();
1727 /* If the architecture doesn't implement displaced stepping, don't use
1728 it. */
1729 if (!gdbarch_supports_displaced_stepping (gdbarch))
1730 return false;
1732 /* If recording, don't use displaced stepping. */
1733 if (find_record_target () != nullptr)
1734 return false;
1736 /* If displaced stepping failed before for this inferior, don't bother trying
1737 again. */
1738 if (tp->inf->displaced_step_state.failed_before)
1739 return false;
1741 return true;
1744 /* Simple function wrapper around displaced_step_thread_state::reset. */
1746 static void
1747 displaced_step_reset (displaced_step_thread_state *displaced)
1749 displaced->reset ();
1752 /* A cleanup that wraps displaced_step_reset. We use this instead of, say,
1753 SCOPE_EXIT, because it needs to be discardable with "cleanup.release ()". */
1755 using displaced_step_reset_cleanup = FORWARD_SCOPE_EXIT (displaced_step_reset);
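/* A minimal usage sketch (illustrative, not taken from the code below): the
   cleanup resets the thread's displaced-step state on error paths and is
   discarded on the success path:

     displaced_step_reset_cleanup cleanup (&tp->displaced_step_state);
     ... work that may throw or bail out early ...
     cleanup.release ();	// success: keep the in-progress state
*/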
1757 /* Prepare to single-step, using displaced stepping.
1759 Note that we cannot use displaced stepping when we have a signal to
1760 deliver. If we have a signal to deliver and an instruction to step
1761 over, then after the step, there will be no indication from the
1762 target whether the thread entered a signal handler or ignored the
1763 signal and stepped over the instruction successfully --- both cases
1764 result in a simple SIGTRAP. In the first case we mustn't do a
1765 fixup, and in the second case we must --- but we can't tell which.
1766 Comments in the code for 'random signals' in handle_inferior_event
1767 explain how we handle this case instead.
1769 Returns DISPLACED_STEP_PREPARE_STATUS_OK if preparing was successful -- this
1770 thread is going to be stepped now; DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE
1771 if displaced stepping this thread got queued; or
1772 DISPLACED_STEP_PREPARE_STATUS_CANT if this instruction can't be displaced
1773 stepped. */
1775 static displaced_step_prepare_status
1776 displaced_step_prepare_throw (thread_info *tp)
1778 regcache *regcache = get_thread_regcache (tp);
1779 struct gdbarch *gdbarch = regcache->arch ();
1780 displaced_step_thread_state &disp_step_thread_state
1781 = tp->displaced_step_state;
1783 /* We should never reach this function if the architecture does not
1784 support displaced stepping. */
1785 gdb_assert (gdbarch_supports_displaced_stepping (gdbarch));
1787 /* Nor if the thread isn't meant to step over a breakpoint. */
1788 gdb_assert (tp->control.trap_expected);
1790 /* Disable range stepping while executing in the scratch pad. We
1791 want a single-step even if executing the displaced instruction in
1792 the scratch buffer lands within the stepping range (e.g., a
1793 jump/branch). */
1794 tp->control.may_range_step = 0;
1796 /* We are about to start a displaced step for this thread. If one is already
1797 in progress, something's wrong. */
1798 gdb_assert (!disp_step_thread_state.in_progress ());
1800 if (tp->inf->displaced_step_state.unavailable)
1802 /* The gdbarch tells us it's not worth asking to try a prepare because
1803 it is likely that it will return unavailable, so don't bother asking. */
1805 displaced_debug_printf ("deferring step of %s",
1806 tp->ptid.to_string ().c_str ());
1808 global_thread_step_over_chain_enqueue (tp);
1809 return DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE;
1812 displaced_debug_printf ("displaced-stepping %s now",
1813 tp->ptid.to_string ().c_str ());
1815 scoped_restore_current_thread restore_thread;
1817 switch_to_thread (tp);
1819 CORE_ADDR original_pc = regcache_read_pc (regcache);
1820 CORE_ADDR displaced_pc;
1822 /* Display the instruction we are going to displaced step. */
1823 if (debug_displaced)
1825 string_file tmp_stream;
1826 int dislen = gdb_print_insn (gdbarch, original_pc, &tmp_stream,
1827 nullptr);
1829 if (dislen > 0)
1831 gdb::byte_vector insn_buf (dislen);
1832 read_memory (original_pc, insn_buf.data (), insn_buf.size ());
1834 std::string insn_bytes = bytes_to_string (insn_buf);
1836 displaced_debug_printf ("original insn %s: %s \t %s",
1837 paddress (gdbarch, original_pc),
1838 insn_bytes.c_str (),
1839 tmp_stream.string ().c_str ());
1841 else
1842 displaced_debug_printf ("original insn %s: invalid length: %d",
1843 paddress (gdbarch, original_pc), dislen);
1846 displaced_step_prepare_status status
1847 = gdbarch_displaced_step_prepare (gdbarch, tp, displaced_pc);
1849 if (status == DISPLACED_STEP_PREPARE_STATUS_CANT)
1851 displaced_debug_printf ("failed to prepare (%s)",
1852 tp->ptid.to_string ().c_str ());
1854 return DISPLACED_STEP_PREPARE_STATUS_CANT;
1856 else if (status == DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE)
1858 /* Not enough displaced stepping resources available, defer this
1859 request by placing it in the queue. */
1861 displaced_debug_printf ("not enough resources available, "
1862 "deferring step of %s",
1863 tp->ptid.to_string ().c_str ());
1865 global_thread_step_over_chain_enqueue (tp);
1867 return DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE;
1870 gdb_assert (status == DISPLACED_STEP_PREPARE_STATUS_OK);
1872 /* Save the information we need to fix things up if the step
1873 succeeds. */
1874 disp_step_thread_state.set (gdbarch);
1876 tp->inf->displaced_step_state.in_progress_count++;
1878 displaced_debug_printf ("prepared successfully thread=%s, "
1879 "original_pc=%s, displaced_pc=%s",
1880 tp->ptid.to_string ().c_str (),
1881 paddress (gdbarch, original_pc),
1882 paddress (gdbarch, displaced_pc));
1884 /* Display the new displaced instruction(s). */
1885 if (debug_displaced)
1887 string_file tmp_stream;
1888 CORE_ADDR addr = displaced_pc;
1890 /* If displaced stepping is going to use h/w single step then we know
1891 that the replacement instruction can only be a single instruction,
1892 in that case set the end address at the next byte.
1894 Otherwise the displaced stepping copy instruction routine could
1895 have generated multiple instructions, and all we know is that they
1896 must fit within the LEN bytes of the buffer. */
1897 CORE_ADDR end
1898 = addr + (gdbarch_displaced_step_hw_singlestep (gdbarch)
1899 ? 1 : gdbarch_displaced_step_buffer_length (gdbarch));
1901 while (addr < end)
1903 int dislen = gdb_print_insn (gdbarch, addr, &tmp_stream, nullptr);
1904 if (dislen <= 0)
1906 displaced_debug_printf
1907 ("replacement insn %s: invalid length: %d",
1908 paddress (gdbarch, addr), dislen);
1909 break;
1912 gdb::byte_vector insn_buf (dislen);
1913 read_memory (addr, insn_buf.data (), insn_buf.size ());
1915 std::string insn_bytes = bytes_to_string (insn_buf);
1916 std::string insn_str = tmp_stream.release ();
1917 displaced_debug_printf ("replacement insn %s: %s \t %s",
1918 paddress (gdbarch, addr),
1919 insn_bytes.c_str (),
1920 insn_str.c_str ());
1921 addr += dislen;
1925 return DISPLACED_STEP_PREPARE_STATUS_OK;
1928 /* Wrapper for displaced_step_prepare_throw that disables further
1929 attempts at displaced stepping if we get a memory error. */
1931 static displaced_step_prepare_status
1932 displaced_step_prepare (thread_info *thread)
1934 displaced_step_prepare_status status
1935 = DISPLACED_STEP_PREPARE_STATUS_CANT;
1939 status = displaced_step_prepare_throw (thread);
1941 catch (const gdb_exception_error &ex)
1943 if (ex.error != MEMORY_ERROR
1944 && ex.error != NOT_SUPPORTED_ERROR)
1945 throw;
1947 infrun_debug_printf ("caught exception, disabling displaced stepping: %s",
1948 ex.what ());
1950 /* Be verbose if "set displaced-stepping" is "on", silent if
1951 "auto". */
1952 if (can_use_displaced_stepping == AUTO_BOOLEAN_TRUE)
1954 warning (_("disabling displaced stepping: %s"),
1955 ex.what ());
1958 /* Disable further displaced stepping attempts. */
1959 thread->inf->displaced_step_state.failed_before = 1;
1962 return status;
1965 /* True if any thread of TARGET that matches RESUME_PTID requires
1966 target_thread_events enabled. This assumes TARGET does not support
1967 target thread options. */
1969 static bool
1970 any_thread_needs_target_thread_events (process_stratum_target *target,
1971 ptid_t resume_ptid)
1973 for (thread_info *tp : all_non_exited_threads (target, resume_ptid))
1974 if (displaced_step_in_progress_thread (tp)
1975 || schedlock_applies (tp)
1976 || tp->thread_fsm () != nullptr)
1977 return true;
1978 return false;
1981 /* Maybe disable thread-{cloned,created,exited} event reporting after
1982 a step-over (either in-line or displaced) finishes. */
1984 static void
1985 update_thread_events_after_step_over (thread_info *event_thread,
1986 const target_waitstatus &event_status)
1988 if (schedlock_applies (event_thread))
1990 /* If scheduler-locking applies, continue reporting
1991 thread-created/thread-cloned events. */
1992 return;
1994 else if (target_supports_set_thread_options (0))
1996 /* We can control per-thread options. Disable events for the
1997 event thread, unless the thread is gone. */
1998 if (event_status.kind () != TARGET_WAITKIND_THREAD_EXITED)
1999 event_thread->set_thread_options (0);
2001 else
2003 /* We can only control the target-wide target_thread_events
2004 setting. Disable it, but only if other threads in the target
2005 don't need it enabled. */
2006 process_stratum_target *target = event_thread->inf->process_target ();
2007 if (!any_thread_needs_target_thread_events (target, minus_one_ptid))
2008 target_thread_events (false);
2012 /* If we displaced stepped an instruction successfully, adjust registers and
2013 memory to yield the same effect the instruction would have had if we had
2014 executed it at its original address, and return
2015 DISPLACED_STEP_FINISH_STATUS_OK. If the instruction didn't complete,
2016 relocate the PC and return DISPLACED_STEP_FINISH_STATUS_NOT_EXECUTED.
2018 If the thread wasn't displaced stepping, return
2019 DISPLACED_STEP_FINISH_STATUS_OK as well. */
2021 static displaced_step_finish_status
2022 displaced_step_finish (thread_info *event_thread,
2023 const target_waitstatus &event_status)
2025 /* Check whether the parent is displaced stepping. */
2026 inferior *parent_inf = event_thread->inf;
2028 /* If this was a fork/vfork/clone, this event indicates that the
2029 displaced stepping of the syscall instruction has been done, so
2030 we perform cleanup for parent here. Also note that this
2031 operation also cleans up the child for vfork, because their pages
2032 are shared. */
2034 /* If this is a fork (child gets its own address space copy) and
2035 some displaced step buffers were in use at the time of the fork,
2036 restore the displaced step buffer bytes in the child process.
2038 Architectures which support displaced stepping and fork events
2039 must supply an implementation of
2040 gdbarch_displaced_step_restore_all_in_ptid. This is not enforced
2041 during gdbarch validation to support architectures which support
2042 displaced stepping but not forks. */
2043 if (event_status.kind () == TARGET_WAITKIND_FORKED)
2045 struct regcache *parent_regcache = get_thread_regcache (event_thread);
2046 struct gdbarch *gdbarch = parent_regcache->arch ();
2048 if (gdbarch_supports_displaced_stepping (gdbarch))
2049 gdbarch_displaced_step_restore_all_in_ptid
2050 (gdbarch, parent_inf, event_status.child_ptid ());
2053 displaced_step_thread_state *displaced = &event_thread->displaced_step_state;
2055 /* Was this thread performing a displaced step? */
2056 if (!displaced->in_progress ())
2057 return DISPLACED_STEP_FINISH_STATUS_OK;
2059 update_thread_events_after_step_over (event_thread, event_status);
2061 gdb_assert (event_thread->inf->displaced_step_state.in_progress_count > 0);
2062 event_thread->inf->displaced_step_state.in_progress_count--;
2064 /* Fixup may need to read memory/registers. Switch to the thread
2065 that we're fixing up. Also, target_stopped_by_watchpoint checks
2066 the current thread, and displaced_step_restore performs ptid-dependent
2067 memory accesses using current_inferior(). */
2068 switch_to_thread (event_thread);
2070 displaced_step_reset_cleanup cleanup (displaced);
2072 /* Do the fixup, and release the resources acquired to do the displaced
2073 step. */
2074 displaced_step_finish_status status
2075 = gdbarch_displaced_step_finish (displaced->get_original_gdbarch (),
2076 event_thread, event_status);
2078 if (event_status.kind () == TARGET_WAITKIND_FORKED
2079 || event_status.kind () == TARGET_WAITKIND_VFORKED
2080 || event_status.kind () == TARGET_WAITKIND_THREAD_CLONED)
2082 /* Since the vfork/fork/clone syscall instruction was executed
2083 in the scratchpad, the child's PC is also within the
2084 scratchpad. Set the child's PC to the parent's PC value,
2085 which has already been fixed up. Note: we use the parent's
2086 aspace here, although we're touching the child, because the
2087 child hasn't been added to the inferior list yet at this
2088 point. */
2090 struct regcache *parent_regcache = get_thread_regcache (event_thread);
2091 struct gdbarch *gdbarch = parent_regcache->arch ();
2092 struct regcache *child_regcache
2093 = get_thread_arch_regcache (parent_inf, event_status.child_ptid (),
2094 gdbarch);
2095 /* Read PC value of parent. */
2096 CORE_ADDR parent_pc = regcache_read_pc (parent_regcache);
2098 displaced_debug_printf ("write child pc from %s to %s",
2099 paddress (gdbarch,
2100 regcache_read_pc (child_regcache)),
2101 paddress (gdbarch, parent_pc));
2103 regcache_write_pc (child_regcache, parent_pc);
2106 return status;
2109 /* Data to be passed around while handling an event. This data is
2110 discarded between events. */
2111 struct execution_control_state
2113 explicit execution_control_state (thread_info *thr = nullptr)
2114 : ptid (thr == nullptr ? null_ptid : thr->ptid),
2115 event_thread (thr)
2119 process_stratum_target *target = nullptr;
2120 ptid_t ptid;
2121 /* The thread that got the event, if this was a thread event; NULL
2122 otherwise. */
2123 struct thread_info *event_thread;
2125 struct target_waitstatus ws;
2126 int stop_func_filled_in = 0;
2127 CORE_ADDR stop_func_alt_start = 0;
2128 CORE_ADDR stop_func_start = 0;
2129 CORE_ADDR stop_func_end = 0;
2130 const char *stop_func_name = nullptr;
2131 int wait_some_more = 0;
2133 /* True if the event thread hit the single-step breakpoint of
2134 another thread. Thus the event doesn't cause a stop, the thread
2135 needs to be single-stepped past the single-step breakpoint before
2136 we can switch back to the original stepping thread. */
2137 int hit_singlestep_breakpoint = 0;
2140 static void keep_going_pass_signal (struct execution_control_state *ecs);
2141 static void prepare_to_wait (struct execution_control_state *ecs);
2142 static bool keep_going_stepped_thread (struct thread_info *tp);
2143 static step_over_what thread_still_needs_step_over (struct thread_info *tp);
2145 /* Are there any pending step-over requests? If so, run all we can
2146 now and return true. Otherwise, return false. */
2148 static bool
2149 start_step_over (void)
2151 INFRUN_SCOPED_DEBUG_ENTER_EXIT;
2153 /* Don't start a new step-over if we already have an in-line
2154 step-over operation ongoing. */
2155 if (step_over_info_valid_p ())
2156 return false;
2158 /* Steal the global thread step over chain. As we try to initiate displaced
2159 steps, threads will be enqueued in the global chain if no buffers are
2160 available. If we iterated on the global chain directly, we might iterate
2161 indefinitely. */
2162 thread_step_over_list threads_to_step
2163 = std::move (global_thread_step_over_list);
2165 infrun_debug_printf ("stealing global queue of threads to step, length = %d",
2166 thread_step_over_chain_length (threads_to_step));
2168 bool started = false;
2170 /* On scope exit (whatever the reason, return or exception), if there are
2171 threads left in the THREADS_TO_STEP chain, put back these threads in the
2172 global list. */
2173 SCOPE_EXIT
2175 if (threads_to_step.empty ())
2176 infrun_debug_printf ("step-over queue now empty");
2177 else
2179 infrun_debug_printf ("putting back %d threads to step in global queue",
2180 thread_step_over_chain_length (threads_to_step));
2182 global_thread_step_over_chain_enqueue_chain
2183 (std::move (threads_to_step));
2187 thread_step_over_list_safe_range range
2188 = make_thread_step_over_list_safe_range (threads_to_step);
2190 for (thread_info *tp : range)
2192 step_over_what step_what;
2193 int must_be_in_line;
2195 gdb_assert (!tp->stop_requested);
2197 if (tp->inf->displaced_step_state.unavailable)
2199 /* The arch told us to not even try preparing another displaced step
2200 for this inferior. Just leave the thread in THREADS_TO_STEP, it
2201 will get moved to the global chain on scope exit. */
2202 continue;
2205 if (tp->inf->thread_waiting_for_vfork_done != nullptr)
2207 /* When we stop all threads, handling a vfork, any thread in the step
2208 over chain remains there. A user could also try to continue a
2209 thread stopped at a breakpoint while another thread is waiting for
2210 a vfork-done event. In any case, we don't want to start a step
2211 over right now. */
2212 continue;
2215 /* Remove thread from the THREADS_TO_STEP chain. If anything goes wrong
2216 while we try to prepare the displaced step, we don't add it back to
2217 the global step over chain. This is to avoid a thread staying in the
2218 step over chain indefinitely if something goes wrong when resuming it.
2219 If the error is intermittent and it still needs a step over, it will
2220 get enqueued again when we try to resume it normally. */
2221 threads_to_step.erase (threads_to_step.iterator_to (*tp));
2223 step_what = thread_still_needs_step_over (tp);
2224 must_be_in_line = ((step_what & STEP_OVER_WATCHPOINT)
2225 || ((step_what & STEP_OVER_BREAKPOINT)
2226 && !use_displaced_stepping (tp)));
2228 /* We currently stop all threads of all processes to step-over
2229 in-line. If we need to start a new in-line step-over, let
2230 any pending displaced steps finish first. */
2231 if (must_be_in_line && displaced_step_in_progress_any_thread ())
2233 global_thread_step_over_chain_enqueue (tp);
2234 continue;
2237 if (tp->control.trap_expected
2238 || tp->resumed ()
2239 || tp->executing ())
2241 internal_error ("[%s] has inconsistent state: "
2242 "trap_expected=%d, resumed=%d, executing=%d\n",
2243 tp->ptid.to_string ().c_str (),
2244 tp->control.trap_expected,
2245 tp->resumed (),
2246 tp->executing ());
2249 infrun_debug_printf ("resuming [%s] for step-over",
2250 tp->ptid.to_string ().c_str ());
2252 /* keep_going_pass_signal skips the step-over if the breakpoint
2253 is no longer inserted. In all-stop, we want to keep looking
2254 for a thread that needs a step-over instead of resuming TP,
2255 because we wouldn't be able to resume anything else until the
2256 target stops again. In non-stop, the resume always resumes
2257 only TP, so it's OK to let the thread resume freely. */
2258 if (!target_is_non_stop_p () && !step_what)
2259 continue;
2261 switch_to_thread (tp);
2262 execution_control_state ecs (tp);
2263 keep_going_pass_signal (&ecs);
2265 if (!ecs.wait_some_more)
2266 error (_("Command aborted."));
2268 /* If the thread's step over could not be initiated because no buffers
2269 were available, it was re-added to the global step over chain. */
2270 if (tp->resumed ())
2272 infrun_debug_printf ("[%s] was resumed.",
2273 tp->ptid.to_string ().c_str ());
2274 gdb_assert (!thread_is_in_step_over_chain (tp));
2276 else
2278 infrun_debug_printf ("[%s] was NOT resumed.",
2279 tp->ptid.to_string ().c_str ());
2280 gdb_assert (thread_is_in_step_over_chain (tp));
2283 /* If we started a new in-line step-over, we're done. */
2284 if (step_over_info_valid_p ())
2286 gdb_assert (tp->control.trap_expected);
2287 started = true;
2288 break;
2291 if (!target_is_non_stop_p ())
2293 /* On all-stop, shouldn't have resumed unless we needed a
2294 step over. */
2295 gdb_assert (tp->control.trap_expected
2296 || tp->step_after_step_resume_breakpoint);
2298 /* With remote targets (at least), in all-stop, we can't
2299 issue any further remote commands until the program stops
2300 again. */
2301 started = true;
2302 break;
2305 /* Either the thread no longer needed a step-over, or a new
2306 displaced stepping sequence started. Even in the latter
2307 case, continue looking. Maybe we can also start another
2308 displaced step on a thread of another process. */
2311 return started;
2314 /* Update global variables holding ptids to hold NEW_PTID if they were
2315 holding OLD_PTID. */
2316 static void
2317 infrun_thread_ptid_changed (process_stratum_target *target,
2318 ptid_t old_ptid, ptid_t new_ptid)
2320 if (inferior_ptid == old_ptid
2321 && current_inferior ()->process_target () == target)
2322 inferior_ptid = new_ptid;
2327 static const char schedlock_off[] = "off";
2328 static const char schedlock_on[] = "on";
2329 static const char schedlock_step[] = "step";
2330 static const char schedlock_replay[] = "replay";
2331 static const char *const scheduler_enums[] = {
2332 schedlock_off,
2333 schedlock_on,
2334 schedlock_step,
2335 schedlock_replay,
2336 nullptr
2338 static const char *scheduler_mode = schedlock_replay;
2339 static void
2340 show_scheduler_mode (struct ui_file *file, int from_tty,
2341 struct cmd_list_element *c, const char *value)
2343 gdb_printf (file,
2344 _("Mode for locking scheduler "
2345 "during execution is \"%s\".\n"),
2346 value);
2349 static void
2350 set_schedlock_func (const char *args, int from_tty, struct cmd_list_element *c)
2352 if (!target_can_lock_scheduler ())
2354 scheduler_mode = schedlock_off;
2355 error (_("Target '%s' cannot support this command."),
2356 target_shortname ());
2360 /* True if execution commands resume all threads of all processes by
2361 default; otherwise, resume only threads of the current inferior
2362 process. */
2363 bool sched_multi = false;
2365 /* Try to set up for software single stepping. Return true if target_resume()
2366 should use hardware single step.
2368 GDBARCH is the current gdbarch. */
2370 static bool
2371 maybe_software_singlestep (struct gdbarch *gdbarch)
2373 bool hw_step = true;
2375 if (execution_direction == EXEC_FORWARD
2376 && gdbarch_software_single_step_p (gdbarch))
2377 hw_step = !insert_single_step_breakpoints (gdbarch);
2379 return hw_step;
2382 /* See infrun.h. */
2384 ptid_t
2385 user_visible_resume_ptid (int step)
2387 ptid_t resume_ptid;
2389 if (non_stop)
2391 /* With non-stop mode on, threads are always handled
2392 individually. */
2393 resume_ptid = inferior_ptid;
2395 else if ((scheduler_mode == schedlock_on)
2396 || (scheduler_mode == schedlock_step && step))
2398 /* User-settable 'scheduler' mode requires solo thread
2399 resume. */
2400 resume_ptid = inferior_ptid;
2402 else if ((scheduler_mode == schedlock_replay)
2403 && target_record_will_replay (minus_one_ptid, execution_direction))
2405 /* User-settable 'scheduler' mode requires solo thread resume in replay
2406 mode. */
2407 resume_ptid = inferior_ptid;
2409 else if (!sched_multi && target_supports_multi_process ())
2411 /* Resume all threads of the current process (and none of other
2412 processes). */
2413 resume_ptid = ptid_t (inferior_ptid.pid ());
2415 else
2417 /* Resume all threads of all processes. */
2418 resume_ptid = RESUME_ALL;
2421 return resume_ptid;
2424 /* See infrun.h. */
2426 process_stratum_target *
2427 user_visible_resume_target (ptid_t resume_ptid)
2429 return (resume_ptid == minus_one_ptid && sched_multi
2430 ? nullptr
2431 : current_inferior ()->process_target ());
2434 /* Find a thread from the inferiors that we'll resume that is waiting
2435 for a vfork-done event. */
2437 static thread_info *
2438 find_thread_waiting_for_vfork_done ()
2440 gdb_assert (!target_is_non_stop_p ());
2442 if (sched_multi)
2444 for (inferior *inf : all_non_exited_inferiors ())
2445 if (inf->thread_waiting_for_vfork_done != nullptr)
2446 return inf->thread_waiting_for_vfork_done;
2448 else
2450 inferior *cur_inf = current_inferior ();
2451 if (cur_inf->thread_waiting_for_vfork_done != nullptr)
2452 return cur_inf->thread_waiting_for_vfork_done;
2454 return nullptr;
2457 /* Return a ptid representing the set of threads that we will resume,
2458 in the perspective of the target, assuming run control handling
2459 does not require leaving some threads stopped (e.g., stepping past
2460 breakpoint). USER_STEP indicates whether we're about to start the
2461 target for a stepping command. */
2463 static ptid_t
2464 internal_resume_ptid (int user_step)
2466 /* In non-stop, we always control threads individually. Note that
2467 the target may always work in non-stop mode even with "set
2468 non-stop off", in which case user_visible_resume_ptid could
2469 return a wildcard ptid. */
2470 if (target_is_non_stop_p ())
2471 return inferior_ptid;
2473 /* The rest of the function assumes non-stop==off and
2474 target-non-stop==off.
2476 If a thread is waiting for a vfork-done event, it means breakpoints are out
2477 for this inferior (well, program space in fact). We don't want to resume
2478 any thread other than the one waiting for vfork done, otherwise these other
2479 threads could miss breakpoints. So if a thread in the resumption set is
2480 waiting for a vfork-done event, resume only that thread.
2482 The resumption set width depends on whether schedule-multiple is on or off.
2484 Note that if the target_resume interface was more flexible, we could be
2485 smarter here when schedule-multiple is on. For example, imagine 3
2486 inferiors with 2 threads each (1.1, 1.2, 2.1, 2.2, 3.1 and 3.2). Threads
2487 2.1 and 3.2 are both waiting for a vfork-done event. Then we could ask the
2488 target(s) to resume:
2490 - All threads of inferior 1
2491 - Thread 2.1
2492 - Thread 3.2
2494 Since we don't have that flexibility (we can only pass one ptid), just
2495 resume the first thread waiting for a vfork-done event we find (e.g. thread
2496 2.1). */
2497 thread_info *thr = find_thread_waiting_for_vfork_done ();
2498 if (thr != nullptr)
2500 /* If we have a thread that is waiting for a vfork-done event,
2501 then we should have switched to it earlier. Calling
2502 target_resume with thread scope is only possible when the
2503 current thread matches the thread scope. */
2504 gdb_assert (thr->ptid == inferior_ptid);
2505 gdb_assert (thr->inf->process_target ()
2506 == inferior_thread ()->inf->process_target ());
2507 return thr->ptid;
2510 return user_visible_resume_ptid (user_step);
2513 /* Wrapper for target_resume, that handles infrun-specific
2514 bookkeeping. */
2516 static void
2517 do_target_resume (ptid_t resume_ptid, bool step, enum gdb_signal sig)
2519 struct thread_info *tp = inferior_thread ();
2521 gdb_assert (!tp->stop_requested);
2523 /* Install inferior's terminal modes. */
2524 target_terminal::inferior ();
2526 /* Avoid confusing the next resume, if the next stop/resume
2527 happens to apply to another thread. */
2528 tp->set_stop_signal (GDB_SIGNAL_0);
2530 /* Advise target which signals may be handled silently.
2532 If we have removed breakpoints because we are stepping over one
2533 in-line (in any thread), we need to receive all signals to avoid
2534 accidentally skipping a breakpoint during execution of a signal
2535 handler.
2537 Likewise if we're displaced stepping, otherwise a trap for a
2538 breakpoint in a signal handler might be confused with the
2539 displaced step finishing. We don't make displaced_step_finish
2540 distinguish the cases instead, because:
2542 - a backtrace while stopped in the signal handler would show the
2543 scratch pad as frame older than the signal handler, instead of
2544 the real mainline code.
2546 - when the thread is later resumed, the signal handler would
2547 return to the scratch pad area, which would no longer be
2548 valid. */
2549 if (step_over_info_valid_p ()
2550 || displaced_step_in_progress (tp->inf))
2551 target_pass_signals ({});
2552 else
2553 target_pass_signals (signal_pass);
2555 /* Request that the target report thread-{created,cloned,exited}
2556 events in the following situations:
2558 - If we are performing an in-line step-over-breakpoint, then we
2559 will remove a breakpoint from the target and only run the
2560 current thread. We don't want any new thread (spawned by the
2561 step) to start running, as it might miss the breakpoint. We
2562 need to clear the step-over state if the stepped thread exits,
2563 so we also enable thread-exit events.
2565 - If we are stepping over a breakpoint out of line (displaced
2566 stepping) then we won't remove a breakpoint from the target,
2567 but, if the step spawns a new clone thread, then we will need
2568 to fixup the $pc address in the clone child too, so we need it
2569 to start stopped. We need to release the displaced stepping
2570 buffer if the stepped thread exits, so we also enable
2571 thread-exit events.
2573 - If scheduler-locking applies, threads that the current thread
2574 spawns should remain halted. It's not strictly necessary to
2575 enable thread-exit events in this case, but it doesn't hurt.
2577 if (step_over_info_valid_p ()
2578 || displaced_step_in_progress_thread (tp)
2579 || schedlock_applies (tp))
2581 gdb_thread_options options
2582 = GDB_THREAD_OPTION_CLONE | GDB_THREAD_OPTION_EXIT;
2583 if (target_supports_set_thread_options (options))
2584 tp->set_thread_options (options);
2585 else
2586 target_thread_events (true);
2588 else if (tp->thread_fsm () != nullptr)
2590 gdb_thread_options options = GDB_THREAD_OPTION_EXIT;
2591 if (target_supports_set_thread_options (options))
2592 tp->set_thread_options (options);
2593 else
2594 target_thread_events (true);
2596 else
2598 if (target_supports_set_thread_options (0))
2599 tp->set_thread_options (0);
2600 else
2602 process_stratum_target *resume_target = tp->inf->process_target ();
2603 if (!any_thread_needs_target_thread_events (resume_target,
2604 resume_ptid))
2605 target_thread_events (false);
2609 /* If we're resuming more than one thread simultaneously, then any
2610 thread other than the leader is being set to run free. Clear any
2611 previous thread option for those threads. */
2612 if (resume_ptid != inferior_ptid && target_supports_set_thread_options (0))
2614 process_stratum_target *resume_target = tp->inf->process_target ();
2615 for (thread_info *thr_iter : all_non_exited_threads (resume_target,
2616 resume_ptid))
2617 if (thr_iter != tp)
2618 thr_iter->set_thread_options (0);
2621 infrun_debug_printf ("resume_ptid=%s, step=%d, sig=%s",
2622 resume_ptid.to_string ().c_str (),
2623 step, gdb_signal_to_symbol_string (sig));
2625 target_resume (resume_ptid, step, sig);
2628 /* Resume the inferior. SIG is the signal to give the inferior
2629 (GDB_SIGNAL_0 for none). Note: don't call this directly; instead
2630 call 'resume', which handles exceptions. */
2632 static void
2633 resume_1 (enum gdb_signal sig)
2635 struct thread_info *tp = inferior_thread ();
2636 regcache *regcache = get_thread_regcache (tp);
2637 struct gdbarch *gdbarch = regcache->arch ();
2638 ptid_t resume_ptid;
2639 /* This represents the user's step vs continue request. When
2640 deciding whether "set scheduler-locking step" applies, it's the
2641 user's intention that counts. */
2642 const int user_step = tp->control.stepping_command;
2643 /* This represents what we'll actually request the target to do.
2644 This can decay from a step to a continue, if e.g., we need to
2645 implement single-stepping with breakpoints (software
2646 single-step). */
2647 bool step;
2649 gdb_assert (!tp->stop_requested);
2650 gdb_assert (!thread_is_in_step_over_chain (tp));
2652 if (tp->has_pending_waitstatus ())
2654 infrun_debug_printf
2655 ("thread %s has pending wait "
2656 "status %s (currently_stepping=%d).",
2657 tp->ptid.to_string ().c_str (),
2658 tp->pending_waitstatus ().to_string ().c_str (),
2659 currently_stepping (tp));
2661 tp->inf->process_target ()->threads_executing = true;
2662 tp->set_resumed (true);
2664 /* FIXME: What should we do if we are supposed to resume this
2665 thread with a signal? Maybe we should maintain a queue of
2666 pending signals to deliver. */
2667 if (sig != GDB_SIGNAL_0)
2669 warning (_("Couldn't deliver signal %s to %s."),
2670 gdb_signal_to_name (sig),
2671 tp->ptid.to_string ().c_str ());
2674 tp->set_stop_signal (GDB_SIGNAL_0);
2676 if (target_can_async_p ())
2678 target_async (true);
2679 /* Tell the event loop we have an event to process. */
2680 mark_async_event_handler (infrun_async_inferior_event_token);
2682 return;
2685 tp->stepped_breakpoint = 0;
2687 /* Depends on stepped_breakpoint. */
2688 step = currently_stepping (tp);
2690 if (current_inferior ()->thread_waiting_for_vfork_done != nullptr)
2692 /* Don't try to single-step a vfork parent that is waiting for
2693 the child to get out of the shared memory region (by exec'ing
2694 or exiting). This is particularly important on software
2695 single-step archs, as the child process would trip on the
2696 software single step breakpoint inserted for the parent
2697 process. Since the parent will not actually execute any
2698 instruction until the child is out of the shared region (such
2699 are vfork's semantics), it is safe to simply continue it.
2700 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
2701 the parent, and tell it to `keep_going', which automatically
2702 re-sets it stepping. */
2703 infrun_debug_printf ("resume : clear step");
2704 step = false;
2707 CORE_ADDR pc = regcache_read_pc (regcache);
2709 infrun_debug_printf ("step=%d, signal=%s, trap_expected=%d, "
2710 "current thread [%s] at %s",
2711 step, gdb_signal_to_symbol_string (sig),
2712 tp->control.trap_expected,
2713 inferior_ptid.to_string ().c_str (),
2714 paddress (gdbarch, pc));
2716 const address_space *aspace = tp->inf->aspace.get ();
2718 /* Normally, by the time we reach `resume', the breakpoints are either
2719 removed or inserted, as appropriate. The exception is if we're sitting
2720 at a permanent breakpoint; we need to step over it, but permanent
2721 breakpoints can't be removed. So we have to test for it here. */
2722 if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
2724 if (sig != GDB_SIGNAL_0)
2726 /* We have a signal to pass to the inferior. The resume
2727 may, or may not take us to the signal handler. If this
2728 is a step, we'll need to stop in the signal handler, if
2729 there's one, (if the target supports stepping into
2730 handlers), or in the next mainline instruction, if
2731 there's no handler. If this is a continue, we need to be
2732 sure to run the handler with all breakpoints inserted.
2733 In all cases, set a breakpoint at the current address
2734 (where the handler returns to), and once that breakpoint
2735 is hit, resume skipping the permanent breakpoint. If
2736 that breakpoint isn't hit, then we've stepped into the
2737 signal handler (or hit some other event). We'll delete
2738 the step-resume breakpoint then. */
2740 infrun_debug_printf ("resume: skipping permanent breakpoint, "
2741 "deliver signal first");
2743 clear_step_over_info ();
2744 tp->control.trap_expected = 0;
2746 if (tp->control.step_resume_breakpoint == nullptr)
2748 /* Set a "high-priority" step-resume, as we don't want
2749 user breakpoints at PC to trigger (again) when this
2750 hits. */
2751 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
2752 gdb_assert (tp->control.step_resume_breakpoint->first_loc ()
2753 .permanent);
2755 tp->step_after_step_resume_breakpoint = step;
2758 insert_breakpoints ();
2760 else
2762 /* There's no signal to pass, we can go ahead and skip the
2763 permanent breakpoint manually. */
2764 infrun_debug_printf ("skipping permanent breakpoint");
2765 gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
2766 /* Update pc to reflect the new address from which we will
2767 execute instructions. */
2768 pc = regcache_read_pc (regcache);
2770 if (step)
2772 /* We've already advanced the PC, so the stepping part
2773 is done. Now we need to arrange for a trap to be
2774 reported to handle_inferior_event. Set a breakpoint
2775 at the current PC, and run to it. Don't update
2776 prev_pc, because if we end in
2777 switch_back_to_stepped_thread, we want the "expected
2778 thread advanced also" branch to be taken. IOW, we
2779 don't want this thread to step further from PC
2780 (overstep). */
2781 gdb_assert (!step_over_info_valid_p ());
2782 insert_single_step_breakpoint (gdbarch, aspace, pc);
2783 insert_breakpoints ();
2785 resume_ptid = internal_resume_ptid (user_step);
2786 do_target_resume (resume_ptid, false, GDB_SIGNAL_0);
2787 tp->set_resumed (true);
2788 return;
2793 /* If we have a breakpoint to step over, make sure to do a single
2794 step only. Same if we have software watchpoints. */
2795 if (tp->control.trap_expected || bpstat_should_step ())
2796 tp->control.may_range_step = 0;
2798 /* If displaced stepping is enabled, step over breakpoints by executing a
2799 copy of the instruction at a different address.
2801 We can't use displaced stepping when we have a signal to deliver;
2802 the comments for displaced_step_prepare explain why. The
2803 comments in the handle_inferior event for dealing with 'random
2804 signals' explain what we do instead.
2806 We can't use displaced stepping while we are waiting for a vfork-done
2807 event; displaced stepping breaks the vfork child in much the same way
2808 that a software single-step breakpoint would. */
2809 if (tp->control.trap_expected
2810 && use_displaced_stepping (tp)
2811 && !step_over_info_valid_p ()
2812 && sig == GDB_SIGNAL_0
2813 && current_inferior ()->thread_waiting_for_vfork_done == nullptr)
2815 displaced_step_prepare_status prepare_status
2816 = displaced_step_prepare (tp);
2818 if (prepare_status == DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE)
2820 infrun_debug_printf ("Got placed in step-over queue");
2822 tp->control.trap_expected = 0;
2823 return;
2825 else if (prepare_status == DISPLACED_STEP_PREPARE_STATUS_CANT)
2827 /* Fallback to stepping over the breakpoint in-line. */
2829 if (target_is_non_stop_p ())
2830 stop_all_threads ("displaced stepping falling back on inline stepping");
2832 set_step_over_info (aspace, regcache_read_pc (regcache), 0,
2833 tp->global_num);
2835 step = maybe_software_singlestep (gdbarch);
2837 insert_breakpoints ();
2839 else if (prepare_status == DISPLACED_STEP_PREPARE_STATUS_OK)
2841 /* Update pc to reflect the new address from which we will
2842 execute instructions due to displaced stepping. */
2843 pc = regcache_read_pc (get_thread_regcache (tp));
2845 step = gdbarch_displaced_step_hw_singlestep (gdbarch);
2847 else
2848 gdb_assert_not_reached ("Invalid displaced_step_prepare_status "
2849 "value.");
2852 /* Do we need to do it the hard way, w/temp breakpoints? */
2853 else if (step)
2854 step = maybe_software_singlestep (gdbarch);
2856 /* Currently, our software single-step implementation leads to different
2857 results than hardware single-stepping in one situation: when stepping
2858 into delivering a signal which has an associated signal handler,
2859 hardware single-step will stop at the first instruction of the handler,
2860 while software single-step will simply skip execution of the handler.
2862 For now, this difference in behavior is accepted since there is no
2863 easy way to actually implement single-stepping into a signal handler
2864 without kernel support.
2866 However, there is one scenario where this difference leads to follow-on
2867 problems: if we're stepping off a breakpoint by removing all breakpoints
2868 and then single-stepping. In this case, the software single-step
2869 behavior means that even if there is a *breakpoint* in the signal
2870 handler, GDB still would not stop.
2872 Fortunately, we can at least fix this particular issue. We detect
2873 here the case where we are about to deliver a signal while software
2874 single-stepping with breakpoints removed. In this situation, we
2875 revert the decisions to remove all breakpoints and insert single-
2876 step breakpoints, and instead we install a step-resume breakpoint
2877 at the current address, deliver the signal without stepping, and
2878 once we arrive back at the step-resume breakpoint, actually step
2879 over the breakpoint we originally wanted to step over. */
2880 if (thread_has_single_step_breakpoints_set (tp)
2881 && sig != GDB_SIGNAL_0
2882 && step_over_info_valid_p ())
2884 /* If we have nested signals or a pending signal is delivered
2885 immediately after a handler returns, we might already have
2886 a step-resume breakpoint set on the earlier handler. We cannot
2887 set another step-resume breakpoint; just continue on until the
2888 original breakpoint is hit. */
2889 if (tp->control.step_resume_breakpoint == nullptr)
2891 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
2892 tp->step_after_step_resume_breakpoint = 1;
2895 delete_single_step_breakpoints (tp);
2897 clear_step_over_info ();
2898 tp->control.trap_expected = 0;
2900 insert_breakpoints ();
2903 /* If STEP is set, it's a request to use hardware stepping
2904 facilities. But in that case, we should never
2905 use singlestep breakpoint. */
2906 gdb_assert (!(thread_has_single_step_breakpoints_set (tp) && step));
2908 /* Decide the set of threads to ask the target to resume. */
2909 if (tp->control.trap_expected)
2911 /* We're allowing a thread to run past a breakpoint it has
2912 hit, either by single-stepping the thread with the breakpoint
2913 removed, or by displaced stepping, with the breakpoint inserted.
2914 In the former case, we need to single-step only this thread,
2915 and keep others stopped, as they can miss this breakpoint if
2916 allowed to run. That's not really a problem for displaced
2917 stepping, but, we still keep other threads stopped, in case
2918 another thread is also stopped for a breakpoint waiting for
2919 its turn in the displaced stepping queue. */
2920 resume_ptid = inferior_ptid;
2922 else
2923 resume_ptid = internal_resume_ptid (user_step);
2925 if (execution_direction != EXEC_REVERSE
2926 && step && breakpoint_inserted_here_p (aspace, pc))
2928 /* There are two cases where we currently need to step a
2929 breakpoint instruction when we have a signal to deliver:
2931 - See handle_signal_stop where we handle random signals that
2932 could take us out of the stepping range. Normally, in
2933 that case we end up continuing (instead of stepping) over the
2934 signal handler with a breakpoint at PC, but there are cases
2935 where we should _always_ single-step, even if we have a
2936 step-resume breakpoint, like when a software watchpoint is
2937 set. Assuming single-stepping and delivering a signal at the
2938 same time would take us to the signal handler, then we could
2939 have removed the breakpoint at PC to step over it. However,
2940 some hardware step targets (like e.g., Mac OS) can't step
2941 into signal handlers, and for those, we need to leave the
2942 breakpoint at PC inserted, as otherwise if the handler
2943 recurses and executes PC again, it'll miss the breakpoint.
2944 So we leave the breakpoint inserted anyway, but we need to
2945 record that we tried to step a breakpoint instruction, so
2946 that adjust_pc_after_break doesn't end up confused.
2948 - In non-stop if we insert a breakpoint (e.g., a step-resume)
2949 in one thread after another thread that was stepping had been
2950 momentarily paused for a step-over. When we re-resume the
2951 stepping thread, it may be resumed from that address with a
2952 breakpoint that hasn't trapped yet. Seen with
2953 gdb.threads/non-stop-fair-events.exp, on targets that don't
2954 do displaced stepping. */
2956 infrun_debug_printf ("resume: [%s] stepped breakpoint",
2957 tp->ptid.to_string ().c_str ());
2959 tp->stepped_breakpoint = 1;
2961 /* Most targets can step a breakpoint instruction, thus
2962 executing it normally. But if this one cannot, just
2963 continue and we will hit it anyway. */
2964 if (gdbarch_cannot_step_breakpoint (gdbarch))
2965 step = false;
2968 if (tp->control.may_range_step)
2970 /* If we're resuming a thread with the PC out of the step
2971 range, then we're doing some nested/finer run control
2972 operation, like stepping the thread out of the dynamic
2973 linker or the displaced stepping scratch pad. We
2974 shouldn't have allowed a range step then. */
2975 gdb_assert (pc_in_thread_step_range (pc, tp));
2978 do_target_resume (resume_ptid, step, sig);
2979 tp->set_resumed (true);
2982 /* Resume the inferior. SIG is the signal to give the inferior
2983 (GDB_SIGNAL_0 for none). This is a wrapper around 'resume_1' that
2984 rolls back state on error. */
2986 static void
2987 resume (gdb_signal sig)
2991 resume_1 (sig);
2993 catch (const gdb_exception &ex)
2995 /* If resuming is being aborted for any reason, delete any
2996 single-step breakpoint resume_1 may have created, to avoid
2997 confusing the following resumption, and to avoid leaving
2998 single-step breakpoints perturbing other threads, in case
2999 we're running in non-stop mode. */
3000 if (inferior_ptid != null_ptid)
3001 delete_single_step_breakpoints (inferior_thread ());
3002 throw;
3007 /* Proceeding. */
3009 /* See infrun.h. */
3011 /* Counter that tracks number of user visible stops. This can be used
3012 to tell whether a command has proceeded the inferior past the
3013 current location. This allows e.g., inferior function calls in
3014 breakpoint commands to not interrupt the command list. When the
3015 call finishes successfully, the inferior is standing at the same
3016 breakpoint as if nothing happened (and so we don't call
3017 normal_stop). */
3018 static ULONGEST current_stop_id;
3020 /* See infrun.h. */
3022 ULONGEST
3023 get_stop_id (void)
3025 return current_stop_id;
3028 /* Called when we report a user visible stop. */
3030 static void
3031 new_stop_id (void)
3033 current_stop_id++;
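/* Illustrative sketch of how a caller can use the stop id to detect that a
   user-visible stop happened in between (the operation in the middle is a
   placeholder):

     ULONGEST start_id = get_stop_id ();
     ... proceed the inferior, e.g. for an inferior function call ...
     if (get_stop_id () != start_id)
       {
         // The inferior presented a user-visible stop, so it is no longer
         // standing where it was; adjust accordingly.
       }
*/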
3036 /* Clear out all variables saying what to do when inferior is continued.
3037 First do this, then set the ones you want, then call `proceed'. */
3039 static void
3040 clear_proceed_status_thread (struct thread_info *tp)
3042 infrun_debug_printf ("%s", tp->ptid.to_string ().c_str ());
3044 /* If we're starting a new sequence, then the previous finished
3045 single-step is no longer relevant. */
3046 if (tp->has_pending_waitstatus ())
3048 if (tp->stop_reason () == TARGET_STOPPED_BY_SINGLE_STEP)
3050 infrun_debug_printf ("pending event of %s was a finished step. "
3051 "Discarding.",
3052 tp->ptid.to_string ().c_str ());
3054 tp->clear_pending_waitstatus ();
3055 tp->set_stop_reason (TARGET_STOPPED_BY_NO_REASON);
3057 else
3059 infrun_debug_printf
3060 ("thread %s has pending wait status %s (currently_stepping=%d).",
3061 tp->ptid.to_string ().c_str (),
3062 tp->pending_waitstatus ().to_string ().c_str (),
3063 currently_stepping (tp));
3067 /* If this signal should not be seen by program, give it zero.
3068 Used for debugging signals. */
3069 if (!signal_pass_state (tp->stop_signal ()))
3070 tp->set_stop_signal (GDB_SIGNAL_0);
3072 tp->release_thread_fsm ();
3074 tp->control.trap_expected = 0;
3075 tp->control.step_range_start = 0;
3076 tp->control.step_range_end = 0;
3077 tp->control.may_range_step = 0;
3078 tp->control.step_frame_id = null_frame_id;
3079 tp->control.step_stack_frame_id = null_frame_id;
3080 tp->control.step_over_calls = STEP_OVER_UNDEBUGGABLE;
3081 tp->control.step_start_function = nullptr;
3082 tp->stop_requested = 0;
3084 tp->control.stop_step = 0;
3086 tp->control.proceed_to_finish = 0;
3088 tp->control.stepping_command = 0;
3090 /* Discard any remaining commands or status from previous stop. */
3091 bpstat_clear (&tp->control.stop_bpstat);
3094 /* Notify the current interpreter and observers that the target is about to
3095 proceed. */
3097 static void
3098 notify_about_to_proceed ()
3100 top_level_interpreter ()->on_about_to_proceed ();
3101 gdb::observers::about_to_proceed.notify ();
3104 void
3105 clear_proceed_status (int step)
3107 /* With scheduler-locking replay, stop replaying other threads if we're
3108 not replaying the user-visible resume ptid.
3110 This is a convenience feature to not require the user to explicitly
3111 stop replaying the other threads. We're assuming that the user's
3112 intent is to resume tracing the recorded process. */
3113 if (!non_stop && scheduler_mode == schedlock_replay
3114 && target_record_is_replaying (minus_one_ptid)
3115 && !target_record_will_replay (user_visible_resume_ptid (step),
3116 execution_direction))
3117 target_record_stop_replaying ();
3119 if (!non_stop && inferior_ptid != null_ptid)
3121 ptid_t resume_ptid = user_visible_resume_ptid (step);
3122 process_stratum_target *resume_target
3123 = user_visible_resume_target (resume_ptid);
3125 /* In all-stop mode, delete the per-thread status of all threads
3126 we're about to resume, implicitly and explicitly. */
3127 for (thread_info *tp : all_non_exited_threads (resume_target, resume_ptid))
3128 clear_proceed_status_thread (tp);
3131 if (inferior_ptid != null_ptid)
3133 struct inferior *inferior;
3135 if (non_stop)
3137 /* If in non-stop mode, only delete the per-thread status of
3138 the current thread. */
3139 clear_proceed_status_thread (inferior_thread ());
3142 inferior = current_inferior ();
3143 inferior->control.stop_soon = NO_STOP_QUIETLY;
3146 notify_about_to_proceed ();
3149 /* Returns true if TP is still stopped at a breakpoint that needs
3150 stepping-over in order to make progress. If the breakpoint is gone
3151 meanwhile, we can skip the whole step-over dance. */
3153 static bool
3154 thread_still_needs_step_over_bp (struct thread_info *tp)
3156 if (tp->stepping_over_breakpoint)
3158 struct regcache *regcache = get_thread_regcache (tp);
3160 if (breakpoint_here_p (tp->inf->aspace.get (),
3161 regcache_read_pc (regcache))
3162 == ordinary_breakpoint_here)
3163 return true;
3165 tp->stepping_over_breakpoint = 0;
3168 return false;
3171 /* Check whether thread TP still needs to start a step-over in order
3172 to make progress when resumed. Returns a bitwise or of enum
3173 step_over_what bits, indicating what needs to be stepped over. */
3175 static step_over_what
3176 thread_still_needs_step_over (struct thread_info *tp)
3178 step_over_what what = 0;
3180 if (thread_still_needs_step_over_bp (tp))
3181 what |= STEP_OVER_BREAKPOINT;
3183 if (tp->stepping_over_watchpoint
3184 && !target_have_steppable_watchpoint ())
3185 what |= STEP_OVER_WATCHPOINT;
3187 return what;
3190 /* Returns true if scheduler locking applies to thread TP, taking into
3191 account whether TP is executing a step/next-like command. */
3193 static bool
3194 schedlock_applies (struct thread_info *tp)
3196 return (scheduler_mode == schedlock_on
3197 || (scheduler_mode == schedlock_step
3198 && tp->control.stepping_command)
3199 || (scheduler_mode == schedlock_replay
3200 && target_record_will_replay (minus_one_ptid,
3201 execution_direction)));
3204 /* Set process_stratum_target::COMMIT_RESUMED_STATE in all target
3205 stacks that have threads executing and don't have threads with
3206 pending events. */
3208 static void
3209 maybe_set_commit_resumed_all_targets ()
3211 scoped_restore_current_thread restore_thread;
3213 for (inferior *inf : all_non_exited_inferiors ())
3215 process_stratum_target *proc_target = inf->process_target ();
3217 if (proc_target->commit_resumed_state)
3219 /* We already set this in a previous iteration, via another
3220 inferior sharing the process_stratum target. */
3221 continue;
3224 /* If the target has no resumed threads, it would be useless to
3225 ask it to commit the resumed threads. */
3226 if (!proc_target->threads_executing)
3228 infrun_debug_printf ("not requesting commit-resumed for target "
3229 "%s, no resumed threads",
3230 proc_target->shortname ());
3231 continue;
3234 /* As an optimization, if a thread from this target has some
3235 status to report, handle it before requiring the target to
3236 commit its resumed threads: handling the status might lead to
3237 resuming more threads. */
3238 if (proc_target->has_resumed_with_pending_wait_status ())
3240 infrun_debug_printf ("not requesting commit-resumed for target %s, a"
3241 " thread has a pending waitstatus",
3242 proc_target->shortname ());
3243 continue;
3246 switch_to_inferior_no_thread (inf);
3248 if (target_has_pending_events ())
3250 infrun_debug_printf ("not requesting commit-resumed for target %s, "
3251 "target has pending events",
3252 proc_target->shortname ());
3253 continue;
3256 infrun_debug_printf ("enabling commit-resumed for target %s",
3257 proc_target->shortname ());
3259 proc_target->commit_resumed_state = true;
3263 /* See infrun.h. */
3265 void
3266 maybe_call_commit_resumed_all_targets ()
3268 scoped_restore_current_thread restore_thread;
3270 for (inferior *inf : all_non_exited_inferiors ())
3272 process_stratum_target *proc_target = inf->process_target ();
3274 if (!proc_target->commit_resumed_state)
3275 continue;
3277 switch_to_inferior_no_thread (inf);
3279 infrun_debug_printf ("calling commit_resumed for target %s",
3280 proc_target->shortname());
3282 target_commit_resumed ();
3286 /* To track nesting of scoped_disable_commit_resumed objects, ensuring
3287 that only the outermost one attempts to re-enable
3288 commit-resumed. */
3289 static bool enable_commit_resumed = true;
3291 /* See infrun.h. */
3293 scoped_disable_commit_resumed::scoped_disable_commit_resumed
3294 (const char *reason)
3295 : m_reason (reason),
3296 m_prev_enable_commit_resumed (enable_commit_resumed)
3298 infrun_debug_printf ("reason=%s", m_reason);
3300 enable_commit_resumed = false;
3302 for (inferior *inf : all_non_exited_inferiors ())
3304 process_stratum_target *proc_target = inf->process_target ();
3306 if (m_prev_enable_commit_resumed)
3308 /* This is the outermost instance: force all
3309 COMMIT_RESUMED_STATE to false. */
3310 proc_target->commit_resumed_state = false;
3312 else
3314 /* This is not the outermost instance, we expect
3315 COMMIT_RESUMED_STATE to have been cleared by the
3316 outermost instance. */
3317 gdb_assert (!proc_target->commit_resumed_state);
3322 /* See infrun.h. */
3324 void
3325 scoped_disable_commit_resumed::reset ()
3327 if (m_reset)
3328 return;
3329 m_reset = true;
3331 infrun_debug_printf ("reason=%s", m_reason);
3333 gdb_assert (!enable_commit_resumed);
3335 enable_commit_resumed = m_prev_enable_commit_resumed;
3337 if (m_prev_enable_commit_resumed)
3339 /* This is the outermost instance, re-enable
3340 COMMIT_RESUMED_STATE on the targets where it's possible. */
3341 maybe_set_commit_resumed_all_targets ();
3343 else
3345 /* This is not the outermost instance, we expect
3346 COMMIT_RESUMED_STATE to still be false. */
3347 for (inferior *inf : all_non_exited_inferiors ())
3349 process_stratum_target *proc_target = inf->process_target ();
3350 gdb_assert (!proc_target->commit_resumed_state);
3355 /* See infrun.h. */
3357 scoped_disable_commit_resumed::~scoped_disable_commit_resumed ()
3359 reset ();
3362 /* See infrun.h. */
3364 void
3365 scoped_disable_commit_resumed::reset_and_commit ()
3367 reset ();
3368 maybe_call_commit_resumed_all_targets ();
3371 /* See infrun.h. */
3373 scoped_enable_commit_resumed::scoped_enable_commit_resumed
3374 (const char *reason)
3375 : m_reason (reason),
3376 m_prev_enable_commit_resumed (enable_commit_resumed)
3378 infrun_debug_printf ("reason=%s", m_reason);
3380 if (!enable_commit_resumed)
3382 enable_commit_resumed = true;
3384 /* Re-enable COMMIT_RESUMED_STATE on the targets where it's
3385 possible. */
3386 maybe_set_commit_resumed_all_targets ();
3388 maybe_call_commit_resumed_all_targets ();
3392 /* See infrun.h. */
3394 scoped_enable_commit_resumed::~scoped_enable_commit_resumed ()
3396 infrun_debug_printf ("reason=%s", m_reason);
3398 gdb_assert (enable_commit_resumed);
3400 enable_commit_resumed = m_prev_enable_commit_resumed;
3402 if (!enable_commit_resumed)
3404 /* Force all COMMIT_RESUMED_STATE back to false. */
3405 for (inferior *inf : all_non_exited_inferiors ())
3407 process_stratum_target *proc_target = inf->process_target ();
3408 proc_target->commit_resumed_state = false;
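/* Illustrative use of the RAII pair above (a sketch only; the reason
   string "example" is arbitrary):

     {
       scoped_disable_commit_resumed disable ("example");
       ... set threads resumed / not resumed ...
       disable.reset_and_commit ();
     }

   The ENABLE_COMMIT_RESUMED bookkeeping makes nesting safe: only the
   outermost such scope re-enables commit-resumed and commits.  */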
3413 /* Check that all the targets we're about to resume are in non-stop
3414 mode. Ideally, we'd only care whether all targets support
3415 target-async, but we're not there yet. E.g., stop_all_threads
3416 doesn't know how to handle all-stop targets. Also, the remote
3417 protocol in all-stop mode is synchronous, irrespective of
3418 target-async, which means that things like a breakpoint re-set
3419 triggered by one target would try to read memory from all targets
3420 and fail. */
3422 static void
3423 check_multi_target_resumption (process_stratum_target *resume_target)
3425 if (!non_stop && resume_target == nullptr)
3427 scoped_restore_current_thread restore_thread;
3429 /* This is used to track whether we're resuming more than one
3430 target. */
3431 process_stratum_target *first_connection = nullptr;
3433 /* The first inferior we see with a target that does not work in
3434 always-non-stop mode. */
3435 inferior *first_not_non_stop = nullptr;
3437 for (inferior *inf : all_non_exited_inferiors ())
3439 switch_to_inferior_no_thread (inf);
3441 if (!target_has_execution ())
3442 continue;
3444 process_stratum_target *proc_target
3445 = current_inferior ()->process_target ();
3447 if (!target_is_non_stop_p ())
3448 first_not_non_stop = inf;
3450 if (first_connection == nullptr)
3451 first_connection = proc_target;
3452 else if (first_connection != proc_target
3453 && first_not_non_stop != nullptr)
3455 switch_to_inferior_no_thread (first_not_non_stop);
3457 proc_target = current_inferior ()->process_target ();
3459 error (_("Connection %d (%s) does not support "
3460 "multi-target resumption."),
3461 proc_target->connection_number,
3462 make_target_connection_string (proc_target).c_str ());
3468 /* Helper function for `proceed`. Check if thread TP is suitable for
3469 resuming, and, if it is, switch to the thread and call
3470 `keep_going_pass_signal`. If TP is not suitable for resuming then this
3471 function will just return without switching threads. */
3473 static void
3474 proceed_resume_thread_checked (thread_info *tp)
3476 if (!tp->inf->has_execution ())
3478 infrun_debug_printf ("[%s] target has no execution",
3479 tp->ptid.to_string ().c_str ());
3480 return;
3483 if (tp->resumed ())
3485 infrun_debug_printf ("[%s] resumed",
3486 tp->ptid.to_string ().c_str ());
3487 gdb_assert (tp->executing () || tp->has_pending_waitstatus ());
3488 return;
3491 if (thread_is_in_step_over_chain (tp))
3493 infrun_debug_printf ("[%s] needs step-over",
3494 tp->ptid.to_string ().c_str ());
3495 return;
3498 /* When handling a vfork GDB removes all breakpoints from the program
3499 space in which the vfork is being handled. If we are following the
3500 parent then GDB will set the thread_waiting_for_vfork_done member of
3501 the parent inferior. In this case we should take care to only resume
3502 the vfork parent thread; the kernel will hold this thread suspended
3503 until the vfork child has exited or execd, at which point the parent
3504 will be resumed and a VFORK_DONE event sent to GDB. */
3505 if (tp->inf->thread_waiting_for_vfork_done != nullptr)
3507 if (target_is_non_stop_p ())
3509 /* For non-stop targets, regardless of whether GDB is using
3510 all-stop or non-stop mode, threads are controlled
3511 individually.
3513 When a thread is handling a vfork, breakpoints are removed
3514 from the inferior (well, program space in fact), so it is
3515 critical that we don't try to resume any thread other than the
3516 vfork parent. */
3517 if (tp != tp->inf->thread_waiting_for_vfork_done)
3519 infrun_debug_printf ("[%s] thread %s of this inferior is "
3520 "waiting for vfork-done",
3521 tp->ptid.to_string ().c_str (),
3522 tp->inf->thread_waiting_for_vfork_done
3523 ->ptid.to_string ().c_str ());
3524 return;
3527 else
3529 /* For all-stop targets, when we attempt to resume the inferior,
3530 we will only resume the vfork parent thread, this is handled
3531 in internal_resume_ptid.
3533 Additionally, we will always be called with the vfork parent
3534 thread as the current thread (TP) thanks to follow_fork; as
3535 such, the following assertion should hold.
3537 Beyond this there is nothing more that needs to be done
3538 here. */
3539 gdb_assert (tp == tp->inf->thread_waiting_for_vfork_done);
3543 /* When handling a vfork GDB removes all breakpoints from the program
3544 space in which the vfork is being handled. If we are following the
3545 child then GDB will set vfork_child member of the vfork parent
3546 inferior. Once the child has either exited or execd then GDB will
3547 detach from the parent process. Until that point GDB should not
3548 resume any thread in the parent process. */
3549 if (tp->inf->vfork_child != nullptr)
3551 infrun_debug_printf ("[%s] thread is part of a vfork parent, child is %d",
3552 tp->ptid.to_string ().c_str (),
3553 tp->inf->vfork_child->pid);
3554 return;
3557 infrun_debug_printf ("resuming %s",
3558 tp->ptid.to_string ().c_str ());
3560 execution_control_state ecs (tp);
3561 switch_to_thread (tp);
3562 keep_going_pass_signal (&ecs);
3563 if (!ecs.wait_some_more)
3564 error (_("Command aborted."));
3567 /* Basic routine for continuing the program in various fashions.
3569 ADDR is the address to resume at, or -1 for resume where stopped.
3570 SIGGNAL is the signal to give it, or GDB_SIGNAL_0 for none,
3571 or GDB_SIGNAL_DEFAULT to act according to how it stopped.
3573 You should call clear_proceed_status before calling proceed. */
3575 void
3576 proceed (CORE_ADDR addr, enum gdb_signal siggnal)
3578 INFRUN_SCOPED_DEBUG_ENTER_EXIT;
3580 struct gdbarch *gdbarch;
3581 CORE_ADDR pc;
3583 /* If we're stopped at a fork/vfork, switch to either the parent or child
3584 thread as defined by the "set follow-fork-mode" command, or, if both
3585 the parent and child are controlled by GDB, and schedule-multiple is
3586 on, follow the child. If none of the above apply then we just proceed
3587 resuming the current thread. */
3588 if (!follow_fork ())
3590 /* The target for some reason decided not to resume. */
3591 normal_stop ();
3592 if (target_can_async_p ())
3593 inferior_event_handler (INF_EXEC_COMPLETE);
3594 return;
3597 /* We'll update this if & when we switch to a new thread. */
3598 update_previous_thread ();
3600 thread_info *cur_thr = inferior_thread ();
3601 infrun_debug_printf ("cur_thr = %s", cur_thr->ptid.to_string ().c_str ());
3603 regcache *regcache = get_thread_regcache (cur_thr);
3604 gdbarch = regcache->arch ();
3605 pc = regcache_read_pc_protected (regcache);
3607 /* Fill in with reasonable starting values. */
3608 init_thread_stepping_state (cur_thr);
3610 gdb_assert (!thread_is_in_step_over_chain (cur_thr));
3612 ptid_t resume_ptid
3613 = user_visible_resume_ptid (cur_thr->control.stepping_command);
3614 process_stratum_target *resume_target
3615 = user_visible_resume_target (resume_ptid);
3617 check_multi_target_resumption (resume_target);
3619 if (addr == (CORE_ADDR) -1)
3621 const address_space *aspace = cur_thr->inf->aspace.get ();
3623 if (cur_thr->stop_pc_p ()
3624 && pc == cur_thr->stop_pc ()
3625 && breakpoint_here_p (aspace, pc) == ordinary_breakpoint_here
3626 && execution_direction != EXEC_REVERSE)
3627 /* There is a breakpoint at the address we will resume at,
3628 step one instruction before inserting breakpoints so that
3629 we do not stop right away (and report a second hit at this
3630 breakpoint).
3632 Note, we don't do this in reverse, because we won't
3633 actually be executing the breakpoint insn anyway.
3634 We'll be (un-)executing the previous instruction. */
3635 cur_thr->stepping_over_breakpoint = 1;
3636 else if (gdbarch_single_step_through_delay_p (gdbarch)
3637 && gdbarch_single_step_through_delay (gdbarch,
3638 get_current_frame ()))
3639 /* We stepped onto an instruction that needs to be stepped
3640 again before re-inserting the breakpoint, do so. */
3641 cur_thr->stepping_over_breakpoint = 1;
3643 else
3645 regcache_write_pc (regcache, addr);
3648 if (siggnal != GDB_SIGNAL_DEFAULT)
3649 cur_thr->set_stop_signal (siggnal);
3651 /* If an exception is thrown from this point on, make sure to
3652 propagate GDB's knowledge of the executing state to the
3653 frontend/user running state. */
3654 scoped_finish_thread_state finish_state (resume_target, resume_ptid);
3656 /* Even if RESUME_PTID is a wildcard, and we end up resuming fewer
3657 threads (e.g., we might need to set threads stepping over
3658 breakpoints first), from the user/frontend's point of view, all
3659 threads in RESUME_PTID are now running. Unless we're calling an
3660 inferior function, as in that case we pretend the inferior
3661 doesn't run at all. */
3662 if (!cur_thr->control.in_infcall)
3663 set_running (resume_target, resume_ptid, true);
3665 infrun_debug_printf ("addr=%s, signal=%s, resume_ptid=%s",
3666 paddress (gdbarch, addr),
3667 gdb_signal_to_symbol_string (siggnal),
3668 resume_ptid.to_string ().c_str ());
3670 annotate_starting ();
3672 /* Make sure that output from GDB appears before output from the
3673 inferior. */
3674 gdb_flush (gdb_stdout);
3676 /* Since we've marked the inferior running, give it the terminal. A
3677 QUIT/Ctrl-C from here on is forwarded to the target (which can
3678 still detect attempts to unblock a stuck connection with repeated
3679 Ctrl-C from within target_pass_ctrlc). */
3680 target_terminal::inferior ();
3682 /* In a multi-threaded task we may select another thread and
3683 then continue or step.
3685 But if a thread that we're resuming had stopped at a breakpoint,
3686 it will immediately cause another breakpoint stop without any
3687 execution (i.e. it will report a breakpoint hit incorrectly). So
3688 we must step over it first.
3690 Look for threads other than the current (TP) that reported a
3691 breakpoint hit and haven't been resumed yet since. */
3693 /* If scheduler locking applies, we can avoid iterating over all
3694 threads. */
3695 if (!non_stop && !schedlock_applies (cur_thr))
3697 for (thread_info *tp : all_non_exited_threads (resume_target,
3698 resume_ptid))
3700 switch_to_thread_no_regs (tp);
3702 /* Ignore the current thread here. It's handled
3703 afterwards. */
3704 if (tp == cur_thr)
3705 continue;
3707 if (!thread_still_needs_step_over (tp))
3708 continue;
3710 gdb_assert (!thread_is_in_step_over_chain (tp));
3712 infrun_debug_printf ("need to step-over [%s] first",
3713 tp->ptid.to_string ().c_str ());
3715 global_thread_step_over_chain_enqueue (tp);
3718 switch_to_thread (cur_thr);
3721 /* Enqueue the current thread last, so that we move all other
3722 threads over their breakpoints first. */
3723 if (cur_thr->stepping_over_breakpoint)
3724 global_thread_step_over_chain_enqueue (cur_thr);
3726 /* If the thread isn't started, we'll still need to set its prev_pc,
3727 so that switch_back_to_stepped_thread knows the thread hasn't
3728 advanced. Must do this before resuming any thread, as in
3729 all-stop/remote, once we resume we can't send any other packet
3730 until the target stops again. */
3731 cur_thr->prev_pc = regcache_read_pc_protected (regcache);
3734 scoped_disable_commit_resumed disable_commit_resumed ("proceeding");
3735 bool step_over_started = start_step_over ();
3737 if (step_over_info_valid_p ())
3739 /* Either this thread started a new in-line step over, or some
3740 other thread was already doing one. In either case, don't
3741 resume anything else until the step-over is finished. */
3743 else if (step_over_started && !target_is_non_stop_p ())
3745 /* A new displaced stepping sequence was started. In all-stop,
3746 we can't talk to the target anymore until it next stops. */
3748 else if (!non_stop && target_is_non_stop_p ())
3750 INFRUN_SCOPED_DEBUG_START_END
3751 ("resuming threads, all-stop-on-top-of-non-stop");
3753 /* We're in all-stop mode, but the target is always in non-stop mode.
3754 Start all other threads that are implicitly resumed too. */
3755 for (thread_info *tp : all_non_exited_threads (resume_target,
3756 resume_ptid))
3758 switch_to_thread_no_regs (tp);
3759 proceed_resume_thread_checked (tp);
3762 else
3763 proceed_resume_thread_checked (cur_thr);
3765 disable_commit_resumed.reset_and_commit ();
3768 finish_state.release ();
3770 /* If we've switched threads above, switch back to the previously
3771 current thread. We don't want the user to see a different
3772 selected thread. */
3773 switch_to_thread (cur_thr);
3775 /* Tell the event loop to wait for it to stop. If the target
3776 supports asynchronous execution, it'll do this from within
3777 target_resume. */
3778 if (!target_can_async_p ())
3779 mark_async_event_handler (infrun_async_inferior_event_token);
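/* A minimal calling sequence for the function above, as a sketch of what
   an execution command would do (error handling omitted):

     clear_proceed_status (0);
     proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT);

   i.e., resume at the current PC and let each thread act on whatever
   signal it last stopped with, per the header comment for proceed.  */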
3783 /* Start remote-debugging of a machine over a serial link. */
3785 void
3786 start_remote (int from_tty)
3788 inferior *inf = current_inferior ();
3789 inf->control.stop_soon = STOP_QUIETLY_REMOTE;
3791 /* Always go on waiting for the target, regardless of the mode. */
3792 /* FIXME: cagney/1999-09-23: At present it isn't possible to
3793 indicate to wait_for_inferior that a target should timeout if
3794 nothing is returned (instead of just blocking). Because of this,
3795 targets expecting an immediate response need to, internally, set
3796 things up so that the target_wait() is forced to eventually
3797 timeout. */
3798 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
3799 differentiate to its caller what the state of the target is after
3800 the initial open has been performed. Here we're assuming that
3801 the target has stopped. It should be possible to eventually have
3802 target_open() return to the caller an indication that the target
3803 is currently running and GDB state should be set to the same as
3804 for an async run. */
3805 wait_for_inferior (inf);
3807 /* Now that the inferior has stopped, do any bookkeeping like
3808 loading shared libraries. We want to do this before normal_stop,
3809 so that the displayed frame is up to date. */
3810 post_create_inferior (from_tty);
3812 normal_stop ();
3815 /* Initialize static vars when a new inferior begins. */
3817 void
3818 init_wait_for_inferior (void)
3820 /* These are meaningless until the first time through wait_for_inferior. */
3822 breakpoint_init_inferior (inf_starting);
3824 clear_proceed_status (0);
3826 nullify_last_target_wait_ptid ();
3828 update_previous_thread ();
3833 static void handle_inferior_event (struct execution_control_state *ecs);
3835 static void handle_step_into_function (struct gdbarch *gdbarch,
3836 struct execution_control_state *ecs);
3837 static void handle_step_into_function_backward (struct gdbarch *gdbarch,
3838 struct execution_control_state *ecs);
3839 static void handle_signal_stop (struct execution_control_state *ecs);
3840 static void check_exception_resume (struct execution_control_state *,
3841 frame_info_ptr);
3843 static void end_stepping_range (struct execution_control_state *ecs);
3844 static void stop_waiting (struct execution_control_state *ecs);
3845 static void keep_going (struct execution_control_state *ecs);
3846 static void process_event_stop_test (struct execution_control_state *ecs);
3847 static bool switch_back_to_stepped_thread (struct execution_control_state *ecs);
3849 /* This function is attached as a "thread_stop_requested" observer.
3850 Cleanup local state that assumed the PTID was to be resumed, and
3851 report the stop to the frontend. */
3853 static void
3854 infrun_thread_stop_requested (ptid_t ptid)
3856 process_stratum_target *curr_target = current_inferior ()->process_target ();
3858 /* PTID was requested to stop. If the thread was already stopped,
3859 but the user/frontend doesn't know about that yet (e.g., the
3860 thread had been temporarily paused for some step-over), set up
3861 for reporting the stop now. */
3862 for (thread_info *tp : all_threads (curr_target, ptid))
3864 if (tp->state != THREAD_RUNNING)
3865 continue;
3866 if (tp->executing ())
3867 continue;
3869 /* Remove matching threads from the step-over queue, so
3870 start_step_over doesn't try to resume them
3871 automatically. */
3872 if (thread_is_in_step_over_chain (tp))
3873 global_thread_step_over_chain_remove (tp);
3875 /* If the thread is stopped, but the user/frontend doesn't
3876 know about that yet, queue a pending event, as if the
3877 thread had just stopped now. Unless the thread already had
3878 a pending event. */
3879 if (!tp->has_pending_waitstatus ())
3881 target_waitstatus ws;
3882 ws.set_stopped (GDB_SIGNAL_0);
3883 tp->set_pending_waitstatus (ws);
3886 /* Clear the inline-frame state, since we're re-processing the
3887 stop. */
3888 clear_inline_frame_state (tp);
3890 /* If this thread was paused because some other thread was
3891 doing an inline-step over, let that finish first. Once
3892 that happens, we'll restart all threads and consume pending
3893 stop events then. */
3894 if (step_over_info_valid_p ())
3895 continue;
3897 /* Otherwise we can process the (new) pending event now. Set
3898 it so this pending event is considered by
3899 do_target_wait. */
3900 tp->set_resumed (true);
3904 /* Delete the step resume, single-step and longjmp/exception resume
3905 breakpoints of TP. */
3907 static void
3908 delete_thread_infrun_breakpoints (struct thread_info *tp)
3910 delete_step_resume_breakpoint (tp);
3911 delete_exception_resume_breakpoint (tp);
3912 delete_single_step_breakpoints (tp);
3915 /* If the target still has execution, call FUNC for each thread that
3916 just stopped. In all-stop, that's all the non-exited threads; in
3917 non-stop, that's the current thread, only. */
3919 typedef void (*for_each_just_stopped_thread_callback_func)
3920 (struct thread_info *tp);
3922 static void
3923 for_each_just_stopped_thread (for_each_just_stopped_thread_callback_func func)
3925 if (!target_has_execution () || inferior_ptid == null_ptid)
3926 return;
3928 if (target_is_non_stop_p ())
3930 /* If in non-stop mode, only the current thread stopped. */
3931 func (inferior_thread ());
3933 else
3935 /* In all-stop mode, all threads have stopped. */
3936 for (thread_info *tp : all_non_exited_threads ())
3937 func (tp);
3941 /* Delete the step resume and longjmp/exception resume breakpoints of
3942 the threads that just stopped. */
3944 static void
3945 delete_just_stopped_threads_infrun_breakpoints (void)
3947 for_each_just_stopped_thread (delete_thread_infrun_breakpoints);
3950 /* Delete the single-step breakpoints of the threads that just
3951 stopped. */
3953 static void
3954 delete_just_stopped_threads_single_step_breakpoints (void)
3956 for_each_just_stopped_thread (delete_single_step_breakpoints);
3959 /* See infrun.h. */
3961 void
3962 print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
3963 const struct target_waitstatus &ws)
3965 infrun_debug_printf ("target_wait (%s [%s], status) =",
3966 waiton_ptid.to_string ().c_str (),
3967 target_pid_to_str (waiton_ptid).c_str ());
3968 infrun_debug_printf (" %s [%s],",
3969 result_ptid.to_string ().c_str (),
3970 target_pid_to_str (result_ptid).c_str ());
3971 infrun_debug_printf (" %s", ws.to_string ().c_str ());
3974 /* Select a thread at random, out of those which are resumed and have
3975 had events. */
3977 static struct thread_info *
3978 random_pending_event_thread (inferior *inf, ptid_t waiton_ptid)
3980 process_stratum_target *proc_target = inf->process_target ();
3981 thread_info *thread
3982 = proc_target->random_resumed_with_pending_wait_status (inf, waiton_ptid);
3984 if (thread == nullptr)
3986 infrun_debug_printf ("None found.");
3987 return nullptr;
3990 infrun_debug_printf ("Found %s.", thread->ptid.to_string ().c_str ());
3991 gdb_assert (thread->resumed ());
3992 gdb_assert (thread->has_pending_waitstatus ());
3994 return thread;
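/* The random choice above is about fairness: if several resumed threads
   have events queued, always reporting the first one found could starve
   the others, so the target picks a candidate at random instead.  */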
3997 /* Wrapper for target_wait that first checks whether threads have
3998 pending statuses to report before actually asking the target for
3999 more events. INF is the inferior we're using to call target_wait
4000 on. */
4002 static ptid_t
4003 do_target_wait_1 (inferior *inf, ptid_t ptid,
4004 target_waitstatus *status, target_wait_flags options)
4006 struct thread_info *tp;
4008 /* We know that we are looking for an event in the target of inferior
4009 INF, but we don't know which thread the event might come from. As
4010 such we want to make sure that INFERIOR_PTID is reset so that none of
4011 the wait code relies on it - doing so is always a mistake. */
4012 switch_to_inferior_no_thread (inf);
4014 /* First check if there is a resumed thread with a wait status
4015 pending. */
4016 if (ptid == minus_one_ptid || ptid.is_pid ())
4018 tp = random_pending_event_thread (inf, ptid);
4020 else
4022 infrun_debug_printf ("Waiting for specific thread %s.",
4023 ptid.to_string ().c_str ());
4025 /* We have a specific thread to check. */
4026 tp = inf->find_thread (ptid);
4027 gdb_assert (tp != nullptr);
4028 if (!tp->has_pending_waitstatus ())
4029 tp = nullptr;
4032 if (tp != nullptr
4033 && (tp->stop_reason () == TARGET_STOPPED_BY_SW_BREAKPOINT
4034 || tp->stop_reason () == TARGET_STOPPED_BY_HW_BREAKPOINT))
4036 struct regcache *regcache = get_thread_regcache (tp);
4037 struct gdbarch *gdbarch = regcache->arch ();
4038 CORE_ADDR pc;
4039 int discard = 0;
4041 pc = regcache_read_pc (regcache);
4043 if (pc != tp->stop_pc ())
4045 infrun_debug_printf ("PC of %s changed. was=%s, now=%s",
4046 tp->ptid.to_string ().c_str (),
4047 paddress (gdbarch, tp->stop_pc ()),
4048 paddress (gdbarch, pc));
4049 discard = 1;
4051 else if (!breakpoint_inserted_here_p (tp->inf->aspace.get (), pc))
4053 infrun_debug_printf ("previous breakpoint of %s, at %s gone",
4054 tp->ptid.to_string ().c_str (),
4055 paddress (gdbarch, pc));
4057 discard = 1;
4060 if (discard)
4062 infrun_debug_printf ("pending event of %s cancelled.",
4063 tp->ptid.to_string ().c_str ());
4065 tp->clear_pending_waitstatus ();
4066 target_waitstatus ws;
4067 ws.set_spurious ();
4068 tp->set_pending_waitstatus (ws);
4069 tp->set_stop_reason (TARGET_STOPPED_BY_NO_REASON);
4073 if (tp != nullptr)
4075 infrun_debug_printf ("Using pending wait status %s for %s.",
4076 tp->pending_waitstatus ().to_string ().c_str (),
4077 tp->ptid.to_string ().c_str ());
4079 /* Now that we've selected our final event LWP, un-adjust its PC
4080 if it was a software breakpoint (and the target doesn't
4081 always adjust the PC itself). */
4082 if (tp->stop_reason () == TARGET_STOPPED_BY_SW_BREAKPOINT
4083 && !target_supports_stopped_by_sw_breakpoint ())
4085 struct regcache *regcache;
4086 struct gdbarch *gdbarch;
4087 int decr_pc;
4089 regcache = get_thread_regcache (tp);
4090 gdbarch = regcache->arch ();
4092 decr_pc = gdbarch_decr_pc_after_break (gdbarch);
4093 if (decr_pc != 0)
4095 CORE_ADDR pc;
4097 pc = regcache_read_pc (regcache);
4098 regcache_write_pc (regcache, pc + decr_pc);
4102 tp->set_stop_reason (TARGET_STOPPED_BY_NO_REASON);
4103 *status = tp->pending_waitstatus ();
4104 tp->clear_pending_waitstatus ();
4106 /* Wake up the event loop again, until all pending events are
4107 processed. */
4108 if (target_is_async_p ())
4109 mark_async_event_handler (infrun_async_inferior_event_token);
4110 return tp->ptid;
4113 /* But if we don't find one, we'll have to wait. */
4115 /* We can't ask a non-async target to do a non-blocking wait, so this will be
4116 a blocking wait. */
4117 if (!target_can_async_p ())
4118 options &= ~TARGET_WNOHANG;
4120 return target_wait (ptid, status, options);
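/* Within this file, the wrapper above is reached either directly
   (wait_for_inferior, prepare_for_detach) or through do_target_wait
   below, which adds round-robin polling across all inferiors.  */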
4123 /* Wrapper for target_wait that first checks whether threads have
4124 pending statuses to report before actually asking the target for
4125 more events. Polls for events from all inferiors/targets. */
4127 static bool
4128 do_target_wait (execution_control_state *ecs, target_wait_flags options)
4130 int num_inferiors = 0;
4131 int random_selector;
4133 /* For fairness, we pick the first inferior/target to poll at random
4134 out of all inferiors that may report events, and then continue
4135 polling the rest of the inferior list starting from that one in a
4136 circular fashion until the whole list is polled once. */
4138 auto inferior_matches = [] (inferior *inf)
4140 return inf->process_target () != nullptr;
4143 /* First see how many matching inferiors we have. */
4144 for (inferior *inf : all_inferiors ())
4145 if (inferior_matches (inf))
4146 num_inferiors++;
4148 if (num_inferiors == 0)
4150 ecs->ws.set_ignore ();
4151 return false;
4154 /* Now randomly pick an inferior out of those that matched. */
4155 random_selector = (int)
4156 ((num_inferiors * (double) rand ()) / (RAND_MAX + 1.0));
4158 if (num_inferiors > 1)
4159 infrun_debug_printf ("Found %d inferiors, starting at #%d",
4160 num_inferiors, random_selector);
4162 /* Select the Nth inferior that matched. */
4164 inferior *selected = nullptr;
4166 for (inferior *inf : all_inferiors ())
4167 if (inferior_matches (inf))
4168 if (random_selector-- == 0)
4170 selected = inf;
4171 break;
4174 /* Now poll for events out of each of the matching inferior's
4175 targets, starting from the selected one. */
4177 auto do_wait = [&] (inferior *inf)
4179 ecs->ptid = do_target_wait_1 (inf, minus_one_ptid, &ecs->ws, options);
4180 ecs->target = inf->process_target ();
4181 return (ecs->ws.kind () != TARGET_WAITKIND_IGNORE);
4184 /* Needed in 'all-stop + target-non-stop' mode, because we end up
4185 here spuriously after the target is all stopped and we've already
4186 reported the stop to the user, polling for events. */
4187 scoped_restore_current_thread restore_thread;
4189 intrusive_list_iterator<inferior> start
4190 = inferior_list.iterator_to (*selected);
4192 for (intrusive_list_iterator<inferior> it = start;
4193 it != inferior_list.end ();
4194 ++it)
4196 inferior *inf = &*it;
4198 if (inferior_matches (inf) && do_wait (inf))
4199 return true;
4202 for (intrusive_list_iterator<inferior> it = inferior_list.begin ();
4203 it != start;
4204 ++it)
4206 inferior *inf = &*it;
4208 if (inferior_matches (inf) && do_wait (inf))
4209 return true;
4212 ecs->ws.set_ignore ();
4213 return false;
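/* For example, with three matching inferiors #1, #2 and #3 and
   RANDOM_SELECTOR coming out as 1, SELECTED is inferior #2 and the
   polling order is #2, #3, then #1: the second loop above wraps around
   to the inferiors that precede the randomly chosen starting point.  */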
4216 /* An event reported by wait_one. */
4218 struct wait_one_event
4220 /* The target the event came out of. */
4221 process_stratum_target *target;
4223 /* The PTID the event was for. */
4224 ptid_t ptid;
4226 /* The waitstatus. */
4227 target_waitstatus ws;
4230 static bool handle_one (const wait_one_event &event);
4231 static int finish_step_over (struct execution_control_state *ecs);
4233 /* Prepare and stabilize the inferior for detaching it. E.g.,
4234 detaching while a thread is displaced stepping is a recipe for
4235 crashing it, as nothing would readjust the PC out of the scratch
4236 pad. */
4238 void
4239 prepare_for_detach (void)
4241 struct inferior *inf = current_inferior ();
4242 ptid_t pid_ptid = ptid_t (inf->pid);
4243 scoped_restore_current_thread restore_thread;
4245 scoped_restore restore_detaching = make_scoped_restore (&inf->detaching, true);
4247 /* Remove all threads of INF from the global step-over chain. We
4248 want to stop any ongoing step-over, not start any new one. */
4249 thread_step_over_list_safe_range range
4250 = make_thread_step_over_list_safe_range (global_thread_step_over_list);
4252 for (thread_info *tp : range)
4253 if (tp->inf == inf)
4255 infrun_debug_printf ("removing thread %s from global step over chain",
4256 tp->ptid.to_string ().c_str ());
4257 global_thread_step_over_chain_remove (tp);
4260 /* If we were already in the middle of an inline step-over, and the
4261 thread stepping belongs to the inferior we're detaching, we need
4262 to restart the threads of other inferiors. */
4263 if (step_over_info.thread != -1)
4265 infrun_debug_printf ("inline step-over in-process while detaching");
4267 thread_info *thr = find_thread_global_id (step_over_info.thread);
4268 if (thr->inf == inf)
4270 /* Since we removed threads of INF from the step-over chain,
4271 we know this won't start a step-over for INF. */
4272 clear_step_over_info ();
4274 if (target_is_non_stop_p ())
4276 /* Start a new step-over in another thread if there's
4277 one that needs it. */
4278 start_step_over ();
4280 /* Restart all other threads (except the
4281 previously-stepping thread, since that one is still
4282 running). */
4283 if (!step_over_info_valid_p ())
4284 restart_threads (thr);
4289 if (displaced_step_in_progress (inf))
4291 infrun_debug_printf ("displaced-stepping in-process while detaching");
4293 /* Stop threads currently displaced stepping, aborting it. */
4295 for (thread_info *thr : inf->non_exited_threads ())
4297 if (thr->displaced_step_state.in_progress ())
4299 if (thr->executing ())
4301 if (!thr->stop_requested)
4303 target_stop (thr->ptid);
4304 thr->stop_requested = true;
4307 else
4308 thr->set_resumed (false);
4312 while (displaced_step_in_progress (inf))
4314 wait_one_event event;
4316 event.target = inf->process_target ();
4317 event.ptid = do_target_wait_1 (inf, pid_ptid, &event.ws, 0);
4319 if (debug_infrun)
4320 print_target_wait_results (pid_ptid, event.ptid, event.ws);
4322 handle_one (event);
4325 /* It's OK to leave some of the threads of INF stopped, since
4326 they'll be detached shortly. */
4330 /* If all-stop, but there exists a non-stop target, stop all threads
4331 now that we're presenting the stop to the user. */
4333 static void
4334 stop_all_threads_if_all_stop_mode ()
4336 if (!non_stop && exists_non_stop_target ())
4337 stop_all_threads ("presenting stop to user in all-stop");
4340 /* Wait for control to return from inferior to debugger.
4342 If inferior gets a signal, we may decide to start it up again
4343 instead of returning. That is why there is a loop in this function.
4344 When this function actually returns it means the inferior
4345 should be left stopped and GDB should read more commands. */
4347 static void
4348 wait_for_inferior (inferior *inf)
4350 infrun_debug_printf ("wait_for_inferior ()");
4352 SCOPE_EXIT { delete_just_stopped_threads_infrun_breakpoints (); };
4354 /* If an error happens while handling the event, propagate GDB's
4355 knowledge of the executing state to the frontend/user running
4356 state. */
4357 scoped_finish_thread_state finish_state
4358 (inf->process_target (), minus_one_ptid);
4360 while (1)
4362 execution_control_state ecs;
4364 overlay_cache_invalid = 1;
4366 /* Flush target cache before starting to handle each event.
4367 Target was running and cache could be stale. This is just a
4368 heuristic. Running threads may modify target memory, but we
4369 don't get any event. */
4370 target_dcache_invalidate (current_program_space->aspace);
4372 ecs.ptid = do_target_wait_1 (inf, minus_one_ptid, &ecs.ws, 0);
4373 ecs.target = inf->process_target ();
4375 if (debug_infrun)
4376 print_target_wait_results (minus_one_ptid, ecs.ptid, ecs.ws);
4378 /* Now figure out what to do with the result. */
4379 handle_inferior_event (&ecs);
4381 if (!ecs.wait_some_more)
4382 break;
4385 stop_all_threads_if_all_stop_mode ();
4387 /* No error, don't finish the state yet. */
4388 finish_state.release ();
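/* fetch_inferior_event, further below, is the asynchronous counterpart
   of the loop above: rather than blocking until the command finishes, it
   is called from the event loop and handles one event per invocation.  */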
4391 /* Cleanup that reinstalls the readline callback handler, if the
4392 target is running in the background. If while handling the target
4393 event something triggered a secondary prompt, like e.g., a
4394 pagination prompt, we'll have removed the callback handler (see
4395 gdb_readline_wrapper_line). Need to do this as we go back to the
4396 event loop, ready to process further input. Note this has no
4397 effect if the handler hasn't actually been removed, because calling
4398 rl_callback_handler_install resets the line buffer, thus losing
4399 input. */
4401 static void
4402 reinstall_readline_callback_handler_cleanup ()
4404 struct ui *ui = current_ui;
4406 if (!ui->async)
4408 /* We're not going back to the top level event loop yet. Don't
4409 install the readline callback, as it'd prep the terminal,
4410 readline-style (raw, noecho) (e.g., --batch). We'll install
4411 it the next time the prompt is displayed, when we're ready
4412 for input. */
4413 return;
4416 if (ui->command_editing && ui->prompt_state != PROMPT_BLOCKED)
4417 gdb_rl_callback_handler_reinstall ();
4420 /* Clean up the FSMs of threads that are now stopped. In non-stop,
4421 that's just the event thread. In all-stop, that's all threads. In
4422 all-stop, threads that had a pending exit no longer have a reason
4423 to be around, as their FSMs/commands are canceled, so we delete
4424 them. This avoids "info threads" listing such threads as if they
4425 were alive (and failing to read their registers), the user being
4426 able to select and resume them (and that failing), etc. */
4428 static void
4429 clean_up_just_stopped_threads_fsms (struct execution_control_state *ecs)
4431 /* The first clean_up call below assumes the event thread is the current
4432 one. */
4433 if (ecs->event_thread != nullptr)
4434 gdb_assert (ecs->event_thread == inferior_thread ());
4436 if (ecs->event_thread != nullptr
4437 && ecs->event_thread->thread_fsm () != nullptr)
4438 ecs->event_thread->thread_fsm ()->clean_up (ecs->event_thread);
4440 if (!non_stop)
4442 scoped_restore_current_thread restore_thread;
4444 for (thread_info *thr : all_threads_safe ())
4446 if (thr->state == THREAD_EXITED)
4447 continue;
4449 if (thr == ecs->event_thread)
4450 continue;
4452 if (thr->thread_fsm () != nullptr)
4454 switch_to_thread (thr);
4455 thr->thread_fsm ()->clean_up (thr);
4458 /* As we are cancelling the command/FSM of this thread,
4459 whatever was the reason we needed to report a thread
4460 exited event to the user, that reason is gone. Delete
4461 the thread, so that the user doesn't see it in the thread
4462 list, the next proceed doesn't try to resume it, etc. */
4463 if (thr->has_pending_waitstatus ()
4464 && (thr->pending_waitstatus ().kind ()
4465 == TARGET_WAITKIND_THREAD_EXITED))
4466 delete_thread (thr);
4471 /* Helper for all_uis_check_sync_execution_done that works on the
4472 current UI. */
4474 static void
4475 check_curr_ui_sync_execution_done (void)
4477 struct ui *ui = current_ui;
4479 if (ui->prompt_state == PROMPT_NEEDED
4480 && ui->async
4481 && !gdb_in_secondary_prompt_p (ui))
4483 target_terminal::ours ();
4484 top_level_interpreter ()->on_sync_execution_done ();
4485 ui->register_file_handler ();
4489 /* See infrun.h. */
4491 void
4492 all_uis_check_sync_execution_done (void)
4494 SWITCH_THRU_ALL_UIS ()
4496 check_curr_ui_sync_execution_done ();
4500 /* See infrun.h. */
4502 void
4503 all_uis_on_sync_execution_starting (void)
4505 SWITCH_THRU_ALL_UIS ()
4507 if (current_ui->prompt_state == PROMPT_NEEDED)
4508 async_disable_stdin ();
4512 /* A quit_handler callback installed while we're handling inferior
4513 events. */
4515 static void
4516 infrun_quit_handler ()
4518 if (target_terminal::is_ours ())
4520 /* Do nothing.
4522 default_quit_handler would throw a quit in this case, but if
4523 we're handling an event while we have the terminal, it means
4524 the target is running a background execution command, and
4525 thus when users press Ctrl-C, they're wanting to interrupt
4526 whatever command they were executing in the command line.
4527 E.g.:
4529 (gdb) c&
4530 (gdb) foo bar whatever<ctrl-c>
4532 That Ctrl-C should clear the input line, not interrupt event
4533 handling if it happens that the user types Ctrl-C at just the
4534 "wrong" time!
4536 It's as-if background event handling was handled by a
4537 separate background thread.
4539 To be clear, the Ctrl-C is not lost -- it will be processed
4540 by the next QUIT call once we're out of fetch_inferior_event
4541 again. */
4543 else
4545 if (check_quit_flag ())
4546 target_pass_ctrlc ();
4550 /* Asynchronous version of wait_for_inferior. It is called by the
4551 event loop whenever a change of state is detected on the file
4552 descriptor corresponding to the target. It can be called more than
4553 once to complete a single execution command. In such cases we need
4554 to keep state across calls. If it is the last time
4555 that this function is called for a single execution command, then
4556 report to the user that the inferior has stopped, and do the
4557 necessary cleanups. */
4559 void
4560 fetch_inferior_event ()
4562 INFRUN_SCOPED_DEBUG_ENTER_EXIT;
4564 execution_control_state ecs;
4565 int cmd_done = 0;
4567 /* Events are always processed with the main UI as current UI. This
4568 way, warnings, debug output, etc. are always consistently sent to
4569 the main console. */
4570 scoped_restore save_ui = make_scoped_restore (&current_ui, main_ui);
4572 /* Temporarily disable pagination. Otherwise, the user would be
4573 given an option to press 'q' to quit, which would cause an early
4574 exit and could leave GDB in a half-baked state. */
4575 scoped_restore save_pagination
4576 = make_scoped_restore (&pagination_enabled, false);
4578 /* Install a quit handler that does nothing if we have the terminal
4579 (meaning the target is running a background execution command),
4580 so that Ctrl-C never interrupts GDB before the event is fully
4581 handled. */
4582 scoped_restore restore_quit_handler
4583 = make_scoped_restore (&quit_handler, infrun_quit_handler);
4585 /* Make sure a SIGINT does not interrupt an extension language while
4586 we're handling an event. That could interrupt a Python unwinder
4587 or a Python observer or some such. A Ctrl-C should either be
4588 forwarded to the inferior if the inferior has the terminal, or,
4589 if GDB has the terminal, should interrupt the command the user is
4590 typing in the CLI. */
4591 scoped_disable_cooperative_sigint_handling restore_coop_sigint;
4593 /* End up with readline processing input, if necessary. */
4595 SCOPE_EXIT { reinstall_readline_callback_handler_cleanup (); };
4597 /* We're handling a live event, so make sure we're doing live
4598 debugging. If we're looking at traceframes while the target is
4599 running, we're going to need to get back to that mode after
4600 handling the event. */
4601 std::optional<scoped_restore_current_traceframe> maybe_restore_traceframe;
4602 if (non_stop)
4604 maybe_restore_traceframe.emplace ();
4605 set_current_traceframe (-1);
4608 /* The user/frontend should not notice a thread switch due to
4609 internal events. Make sure we revert to the user selected
4610 thread and frame after handling the event and running any
4611 breakpoint commands. */
4612 scoped_restore_current_thread restore_thread;
4614 overlay_cache_invalid = 1;
4615 /* Flush target cache before starting to handle each event. Target
4616 was running and cache could be stale. This is just a heuristic.
4617 Running threads may modify target memory, but we don't get any
4618 event. */
4619 target_dcache_invalidate (current_program_space->aspace);
4621 scoped_restore save_exec_dir
4622 = make_scoped_restore (&execution_direction,
4623 target_execution_direction ());
4625 /* Allow targets to pause their resumed threads while we handle
4626 the event. */
4627 scoped_disable_commit_resumed disable_commit_resumed ("handling event");
4629 if (!do_target_wait (&ecs, TARGET_WNOHANG))
4631 infrun_debug_printf ("do_target_wait returned no event");
4632 disable_commit_resumed.reset_and_commit ();
4633 return;
4636 gdb_assert (ecs.ws.kind () != TARGET_WAITKIND_IGNORE);
4638 /* Switch to the inferior that generated the event, so we can do
4639 target calls. If the event was not associated to a ptid, just
4640 switch to the target the event came from. */
4640 if (ecs.ptid != null_ptid
4641 && ecs.ptid != minus_one_ptid)
4642 switch_to_inferior_no_thread (find_inferior_ptid (ecs.target, ecs.ptid));
4643 else
4644 switch_to_target_no_thread (ecs.target);
4646 if (debug_infrun)
4647 print_target_wait_results (minus_one_ptid, ecs.ptid, ecs.ws);
4649 /* If an error happens while handling the event, propagate GDB's
4650 knowledge of the executing state to the frontend/user running
4651 state. */
4652 ptid_t finish_ptid = !target_is_non_stop_p () ? minus_one_ptid : ecs.ptid;
4653 scoped_finish_thread_state finish_state (ecs.target, finish_ptid);
4655 /* Get executed before scoped_restore_current_thread above to apply
4656 still for the thread which has thrown the exception. */
4657 auto defer_bpstat_clear
4658 = make_scope_exit (bpstat_clear_actions);
4659 auto defer_delete_threads
4660 = make_scope_exit (delete_just_stopped_threads_infrun_breakpoints);
4662 int stop_id = get_stop_id ();
4664 /* Now figure out what to do with the result. */
4665 handle_inferior_event (&ecs);
4667 if (!ecs.wait_some_more)
4669 struct inferior *inf = find_inferior_ptid (ecs.target, ecs.ptid);
4670 bool should_stop = true;
4671 struct thread_info *thr = ecs.event_thread;
4673 delete_just_stopped_threads_infrun_breakpoints ();
4675 if (thr != nullptr && thr->thread_fsm () != nullptr)
4676 should_stop = thr->thread_fsm ()->should_stop (thr);
4678 if (!should_stop)
4680 keep_going (&ecs);
4682 else
4684 bool should_notify_stop = true;
4685 bool proceeded = false;
4687 stop_all_threads_if_all_stop_mode ();
4689 clean_up_just_stopped_threads_fsms (&ecs);
4691 if (stop_id != get_stop_id ())
4693 /* If the stop-id has changed then a stop has already been
4694 presented to the user in handle_inferior_event; this is
4695 likely a failed inferior call. As the stop has already
4696 been announced then we should not notify again.
4698 Also, if the prompt state is not PROMPT_NEEDED then GDB
4699 will not be ready for user input after this function. */
4700 should_notify_stop = false;
4701 gdb_assert (current_ui->prompt_state == PROMPT_NEEDED);
4703 else if (thr != nullptr && thr->thread_fsm () != nullptr)
4704 should_notify_stop
4705 = thr->thread_fsm ()->should_notify_stop ();
4707 if (should_notify_stop)
4709 /* We may not find an inferior if this was a process exit. */
4710 if (inf == nullptr || inf->control.stop_soon == NO_STOP_QUIETLY)
4711 proceeded = normal_stop ();
4714 if (!proceeded)
4716 inferior_event_handler (INF_EXEC_COMPLETE);
4717 cmd_done = 1;
4720 /* If we got a TARGET_WAITKIND_NO_RESUMED event, then the
4721 previously selected thread is gone. We have two
4722 choices - switch to no thread selected, or restore the
4723 previously selected thread (now exited). We chose the
4724 later, just because that's what GDB used to do. After
4725 this, "info threads" says "The current thread <Thread
4726 ID 2> has terminated." instead of "No thread
4727 selected.". */
4728 if (!non_stop
4729 && cmd_done
4730 && ecs.ws.kind () != TARGET_WAITKIND_NO_RESUMED)
4731 restore_thread.dont_restore ();
4735 defer_delete_threads.release ();
4736 defer_bpstat_clear.release ();
4738 /* No error, don't finish the thread states yet. */
4739 finish_state.release ();
4741 disable_commit_resumed.reset_and_commit ();
4743 /* This scope is used to ensure that readline callbacks are
4744 reinstalled here. */
4747 /* Handling this event might have caused some inferiors to become prunable.
4748 For example, the exit of an inferior that was automatically added. Try
4749 to get rid of them. Keeping those around slows things down linearly.
4751 Note that this never removes the current inferior. Therefore, call this
4752 after RESTORE_THREAD went out of scope, in case the event inferior (which was
4753 temporarily made the current inferior) is meant to be deleted.
4755 Call this before all_uis_check_sync_execution_done, so that notifications about
4756 removed inferiors appear before the prompt. */
4757 prune_inferiors ();
4759 /* If a UI was in sync execution mode, and now isn't, restore its
4760 prompt (a synchronous execution command has finished, and we're
4761 ready for input). */
4762 all_uis_check_sync_execution_done ();
4764 if (cmd_done
4765 && exec_done_display_p
4766 && (inferior_ptid == null_ptid
4767 || inferior_thread ()->state != THREAD_RUNNING))
4768 gdb_printf (_("completed.\n"));
4771 /* See infrun.h. */
4773 void
4774 set_step_info (thread_info *tp, frame_info_ptr frame,
4775 struct symtab_and_line sal)
4777 /* This can be removed once this function no longer implicitly relies on the
4778 inferior_ptid value. */
4779 gdb_assert (inferior_ptid == tp->ptid);
4781 tp->control.step_frame_id = get_frame_id (frame);
4782 tp->control.step_stack_frame_id = get_stack_frame_id (frame);
4784 tp->current_symtab = sal.symtab;
4785 tp->current_line = sal.line;
4787 infrun_debug_printf
4788 ("symtab = %s, line = %d, step_frame_id = %s, step_stack_frame_id = %s",
4789 tp->current_symtab != nullptr ? tp->current_symtab->filename : "<null>",
4790 tp->current_line,
4791 tp->control.step_frame_id.to_string ().c_str (),
4792 tp->control.step_stack_frame_id.to_string ().c_str ());
4795 /* Clear context switchable stepping state. */
4797 void
4798 init_thread_stepping_state (struct thread_info *tss)
4800 tss->stepped_breakpoint = 0;
4801 tss->stepping_over_breakpoint = 0;
4802 tss->stepping_over_watchpoint = 0;
4803 tss->step_after_step_resume_breakpoint = 0;
4806 /* See infrun.h. */
4808 void
4809 set_last_target_status (process_stratum_target *target, ptid_t ptid,
4810 const target_waitstatus &status)
4812 target_last_proc_target = target;
4813 target_last_wait_ptid = ptid;
4814 target_last_waitstatus = status;
4817 /* See infrun.h. */
4819 void
4820 get_last_target_status (process_stratum_target **target, ptid_t *ptid,
4821 target_waitstatus *status)
4823 if (target != nullptr)
4824 *target = target_last_proc_target;
4825 if (ptid != nullptr)
4826 *ptid = target_last_wait_ptid;
4827 if (status != nullptr)
4828 *status = target_last_waitstatus;
4831 /* See infrun.h. */
4833 void
4834 nullify_last_target_wait_ptid (void)
4836 target_last_proc_target = nullptr;
4837 target_last_wait_ptid = minus_one_ptid;
4838 target_last_waitstatus = {};
4841 /* Switch thread contexts. */
4843 static void
4844 context_switch (execution_control_state *ecs)
4846 if (ecs->ptid != inferior_ptid
4847 && (inferior_ptid == null_ptid
4848 || ecs->event_thread != inferior_thread ()))
4850 infrun_debug_printf ("Switching context from %s to %s",
4851 inferior_ptid.to_string ().c_str (),
4852 ecs->ptid.to_string ().c_str ());
4855 switch_to_thread (ecs->event_thread);
4858 /* If the target can't tell whether we've hit breakpoints
4859 (target_supports_stopped_by_sw_breakpoint), and we got a SIGTRAP,
4860 check whether that could have been caused by a breakpoint. If so,
4861 adjust the PC, per gdbarch_decr_pc_after_break. */
4863 static void
4864 adjust_pc_after_break (struct thread_info *thread,
4865 const target_waitstatus &ws)
4867 struct regcache *regcache;
4868 struct gdbarch *gdbarch;
4869 CORE_ADDR breakpoint_pc, decr_pc;
4871 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
4872 we aren't, just return.
4874 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
4875 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
4876 implemented by software breakpoints should be handled through the normal
4877 breakpoint layer.
4879 NOTE drow/2004-01-31: On some targets, breakpoints may generate
4880 different signals (SIGILL or SIGEMT for instance), but it is less
4881 clear where the PC is pointing afterwards. It may not match
4882 gdbarch_decr_pc_after_break. I don't know any specific target that
4883 generates these signals at breakpoints (the code has been in GDB since at
4884 least 1992) so I can not guess how to handle them here.
4886 In earlier versions of GDB, a target with
4887 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
4888 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
4889 target with both of these set in GDB history, and it seems unlikely to be
4890 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
4892 if (ws.kind () != TARGET_WAITKIND_STOPPED)
4893 return;
4895 if (ws.sig () != GDB_SIGNAL_TRAP)
4896 return;
4898 /* In reverse execution, when a breakpoint is hit, the instruction
4899 under it has already been de-executed. The reported PC always
4900 points at the breakpoint address, so adjusting it further would
4901 be wrong. E.g., consider this case on a decr_pc_after_break == 1
4902 architecture:
4904 B1 0x08000000 : INSN1
4905 B2 0x08000001 : INSN2
4906 0x08000002 : INSN3
4907 PC -> 0x08000003 : INSN4
4909 Say you're stopped at 0x08000003 as above. Reverse continuing
4910 from that point should hit B2 as below. Reading the PC when the
4911 SIGTRAP is reported should read 0x08000001 and INSN2 should have
4912 been de-executed already.
4914 B1 0x08000000 : INSN1
4915 B2 PC -> 0x08000001 : INSN2
4916 0x08000002 : INSN3
4917 0x08000003 : INSN4
4919 We can't apply the same logic as for forward execution, because
4920 we would wrongly adjust the PC to 0x08000000, since there's a
4921 breakpoint at PC - 1. We'd then report a hit on B1, although
4922 INSN1 hadn't been de-executed yet. Doing nothing is the correct
4923 behaviour. */
4924 if (execution_direction == EXEC_REVERSE)
4925 return;
4927 /* If the target can tell whether the thread hit a SW breakpoint,
4928 trust it. Targets that can tell also adjust the PC
4929 themselves. */
4930 if (target_supports_stopped_by_sw_breakpoint ())
4931 return;
4933 /* Note that relying on whether a breakpoint is planted in memory to
4934 determine this can fail. E.g., the breakpoint could have been
4935 removed since. Or the thread could have been told to step an
4936 instruction the size of a breakpoint instruction, and only
4937 _after_ was a breakpoint inserted at its address. */
4939 /* If this target does not decrement the PC after breakpoints, then
4940 we have nothing to do. */
4941 regcache = get_thread_regcache (thread);
4942 gdbarch = regcache->arch ();
4944 decr_pc = gdbarch_decr_pc_after_break (gdbarch);
4945 if (decr_pc == 0)
4946 return;
4948 const address_space *aspace = thread->inf->aspace.get ();
4950 /* Find the location where (if we've hit a breakpoint) the
4951 breakpoint would be. */
4952 breakpoint_pc = regcache_read_pc (regcache) - decr_pc;
4954 /* If the target can't tell whether a software breakpoint triggered,
4955 fallback to figuring it out based on breakpoints we think were
4956 inserted in the target, and on whether the thread was stepped or
4957 continued. */
4959 /* Check whether there actually is a software breakpoint inserted at
4960 that location.
4962 If in non-stop mode, a race condition is possible where we've
4963 removed a breakpoint, but stop events for that breakpoint were
4964 already queued and arrive later. To suppress those spurious
4965 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
4966 and retire them after a number of stop events are reported. Note
4967 this is a heuristic and can thus get confused. The real fix is
4968 to get the "stopped by SW BP and needs adjustment" info out of
4969 the target/kernel (and thus never reach here; see above). */
4970 if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
4971 || (target_is_non_stop_p ()
4972 && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
4974 std::optional<scoped_restore_tmpl<int>> restore_operation_disable;
4976 if (record_full_is_used ())
4977 restore_operation_disable.emplace
4978 (record_full_gdb_operation_disable_set ());
4980 /* When using hardware single-step, a SIGTRAP is reported for both
4981 a completed single-step and a software breakpoint. Need to
4982 differentiate between the two, as the latter needs adjusting
4983 but the former does not.
4985 The SIGTRAP can be due to a completed hardware single-step only if
4986 - we didn't insert software single-step breakpoints
4987 - this thread is currently being stepped
4989 If either of these conditions does not hold, we must have stopped due
4990 to hitting a software breakpoint, and have to back up to the
4991 breakpoint address.
4993 As a special case, we could have hardware single-stepped a
4994 software breakpoint. In this case (prev_pc == breakpoint_pc),
4995 we also need to back up to the breakpoint address. */
4997 if (thread_has_single_step_breakpoints_set (thread)
4998 || !currently_stepping (thread)
4999 || (thread->stepped_breakpoint
5000 && thread->prev_pc == breakpoint_pc))
5001 regcache_write_pc (regcache, breakpoint_pc);
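/* A concrete forward-execution example (illustrative; assume an x86-like
   target whose breakpoint instruction is one byte, so decr_pc_after_break
   is 1): a breakpoint planted at 0x1000 reports its SIGTRAP with
   PC == 0x1001, and the code above rewinds the PC to
   BREAKPOINT_PC == 0x1000, so the stop is attributed to the breakpoint
   the user set rather than to the following instruction.  */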
5005 static bool
5006 stepped_in_from (frame_info_ptr frame, struct frame_id step_frame_id)
5008 for (frame = get_prev_frame (frame);
5009 frame != nullptr;
5010 frame = get_prev_frame (frame))
5012 if (get_frame_id (frame) == step_frame_id)
5013 return true;
5015 if (get_frame_type (frame) != INLINE_FRAME)
5016 break;
5019 return false;
5022 /* Look for an inline frame that is marked for skip.
5023 If PREV_FRAME is TRUE start at the previous frame,
5024 otherwise start at the current frame. Stop at the
5025 first non-inline frame, or at the frame where the
5026 step started. */
5028 static bool
5029 inline_frame_is_marked_for_skip (bool prev_frame, struct thread_info *tp)
5031 frame_info_ptr frame = get_current_frame ();
5033 if (prev_frame)
5034 frame = get_prev_frame (frame);
5036 for (; frame != nullptr; frame = get_prev_frame (frame))
5038 const char *fn = nullptr;
5039 symtab_and_line sal;
5040 struct symbol *sym;
5042 if (get_frame_id (frame) == tp->control.step_frame_id)
5043 break;
5044 if (get_frame_type (frame) != INLINE_FRAME)
5045 break;
5047 sal = find_frame_sal (frame);
5048 sym = get_frame_function (frame);
5050 if (sym != nullptr)
5051 fn = sym->print_name ();
5053 if (sal.line != 0
5054 && function_name_is_marked_for_skip (fn, sal))
5055 return true;
5058 return false;
5061 /* If the event thread has the stop requested flag set, pretend it
5062 stopped for a GDB_SIGNAL_0 (i.e., as if it stopped due to
5063 target_stop). */
5065 static bool
5066 handle_stop_requested (struct execution_control_state *ecs)
5068 if (ecs->event_thread->stop_requested)
5070 ecs->ws.set_stopped (GDB_SIGNAL_0);
5071 handle_signal_stop (ecs);
5072 return true;
5074 return false;
5077 /* Auxiliary function that handles syscall entry/return events.
5078 It returns true if the inferior should keep going (and GDB
5079 should ignore the event), or false if the event deserves to be
5080 processed. */
5082 static bool
5083 handle_syscall_event (struct execution_control_state *ecs)
5085 struct regcache *regcache;
5086 int syscall_number;
5088 context_switch (ecs);
5090 regcache = get_thread_regcache (ecs->event_thread);
5091 syscall_number = ecs->ws.syscall_number ();
5092 ecs->event_thread->set_stop_pc (regcache_read_pc (regcache));
5094 if (catch_syscall_enabled ()
5095 && catching_syscall_number (syscall_number))
5097 infrun_debug_printf ("syscall number=%d", syscall_number);
5099 ecs->event_thread->control.stop_bpstat
5100 = bpstat_stop_status_nowatch (ecs->event_thread->inf->aspace.get (),
5101 ecs->event_thread->stop_pc (),
5102 ecs->event_thread, ecs->ws);
5104 if (handle_stop_requested (ecs))
5105 return false;
5107 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
5109 /* Catchpoint hit. */
5110 return false;
5114 if (handle_stop_requested (ecs))
5115 return false;
5117 /* If no catchpoint triggered for this, then keep going. */
5118 keep_going (ecs);
5120 return true;
5123 /* Lazily fill in the execution_control_state's stop_func_* fields. */
5125 static void
5126 fill_in_stop_func (struct gdbarch *gdbarch,
5127 struct execution_control_state *ecs)
5129 if (!ecs->stop_func_filled_in)
5131 const block *block;
5132 const general_symbol_info *gsi;
5134 /* Don't care about return value; stop_func_start and stop_func_name
5135 will both be 0 if it doesn't work. */
5136 find_pc_partial_function_sym (ecs->event_thread->stop_pc (),
5137 &gsi,
5138 &ecs->stop_func_start,
5139 &ecs->stop_func_end,
5140 &block);
5141 ecs->stop_func_name = gsi == nullptr ? nullptr : gsi->print_name ();
5143 /* The call to find_pc_partial_function_sym, above, will set
5144 stop_func_start and stop_func_end to the start and end
5145 of the range containing the stop pc. If this range
5146 contains the entry pc for the block (which is always the
5147 case for contiguous blocks), advance stop_func_start past
5148 the function's start offset and entrypoint. Note that
5149 stop_func_start is NOT advanced when in a range of a
5150 non-contiguous block that does not contain the entry pc. */
5151 if (block != nullptr
5152 && ecs->stop_func_start <= block->entry_pc ()
5153 && block->entry_pc () < ecs->stop_func_end)
5155 ecs->stop_func_start
5156 += gdbarch_deprecated_function_start_offset (gdbarch);
5158 /* PowerPC functions have a Local Entry Point (LEP) and a Global
5159 Entry Point (GEP). There is only one Entry Point (GEP = LEP) for
5160 other architectures. */
5161 ecs->stop_func_alt_start = ecs->stop_func_start;
5163 if (gdbarch_skip_entrypoint_p (gdbarch))
5164 ecs->stop_func_start
5165 = gdbarch_skip_entrypoint (gdbarch, ecs->stop_func_start);
5168 ecs->stop_func_filled_in = 1;
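For illustration, a minimal self-contained sketch (not part of infrun.c) of the GEP/LEP relationship described above.  The addresses are made up, and the 8-byte distance is only the common ppc64 ELFv2 layout, not something fill_in_stop_func assumes:

  #include <cstdint>

  /* Hypothetical function layout: the global entry point (GEP) is what
     stop_func_alt_start keeps, and gdbarch_skip_entrypoint advances
     stop_func_start to the local entry point (LEP).  */
  constexpr std::uint64_t gep = 0x10000700;   /* stop_func_alt_start */
  constexpr std::uint64_t lep = gep + 8;      /* stop_func_start afterwards */
  static_assert (lep == 0x10000708, "LEP two instructions past the GEP");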
5173 /* Return the STOP_SOON field of the inferior pointed at by ECS. */
5175 static enum stop_kind
5176 get_inferior_stop_soon (execution_control_state *ecs)
5178 struct inferior *inf = find_inferior_ptid (ecs->target, ecs->ptid);
5180 gdb_assert (inf != nullptr);
5181 return inf->control.stop_soon;
5184 /* Poll for one event out of the current target. Store the resulting
5185 waitstatus in WS, and return the event ptid. Does not block. */
5187 static ptid_t
5188 poll_one_curr_target (struct target_waitstatus *ws)
5190 ptid_t event_ptid;
5192 overlay_cache_invalid = 1;
5194 /* Flush target cache before starting to handle each event.
5195 Target was running and cache could be stale. This is just a
5196 heuristic. Running threads may modify target memory, but we
5197 don't get any event. */
5198 target_dcache_invalidate (current_program_space->aspace);
5200 event_ptid = target_wait (minus_one_ptid, ws, TARGET_WNOHANG);
5202 if (debug_infrun)
5203 print_target_wait_results (minus_one_ptid, event_ptid, *ws);
5205 return event_ptid;
5208 /* Wait for one event out of any target. */
5210 static wait_one_event
5211 wait_one ()
5213 while (1)
5215 for (inferior *inf : all_inferiors ())
5217 process_stratum_target *target = inf->process_target ();
5218 if (target == nullptr
5219 || !target->is_async_p ()
5220 || !target->threads_executing)
5221 continue;
5223 switch_to_inferior_no_thread (inf);
5225 wait_one_event event;
5226 event.target = target;
5227 event.ptid = poll_one_curr_target (&event.ws);
5229 if (event.ws.kind () == TARGET_WAITKIND_NO_RESUMED)
5231 /* If nothing is resumed, remove the target from the
5232 event loop. */
5233 target_async (false);
5235 else if (event.ws.kind () != TARGET_WAITKIND_IGNORE)
5236 return event;
5239 /* Block waiting for some event. */
5241 fd_set readfds;
5242 int nfds = 0;
5244 FD_ZERO (&readfds);
5246 for (inferior *inf : all_inferiors ())
5248 process_stratum_target *target = inf->process_target ();
5249 if (target == nullptr
5250 || !target->is_async_p ()
5251 || !target->threads_executing)
5252 continue;
5254 int fd = target->async_wait_fd ();
5255 FD_SET (fd, &readfds);
5256 if (nfds <= fd)
5257 nfds = fd + 1;
5260 if (nfds == 0)
5262 /* No waitable targets left. All must be stopped. */
5263 infrun_debug_printf ("no waitable targets left");
5265 target_waitstatus ws;
5266 ws.set_no_resumed ();
5267 return {nullptr, minus_one_ptid, std::move (ws)};
5270 QUIT;
5272 int numfds = interruptible_select (nfds, &readfds, 0, nullptr, 0);
5273 if (numfds < 0)
5275 if (errno == EINTR)
5276 continue;
5277 else
5278 perror_with_name ("interruptible_select");
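The shape of wait_one above -- poll every async-capable target without blocking, and only block in a select over each target's event descriptor when nothing is pending -- can be reduced to a standalone sketch.  The event_source struct, its poll callback, and the descriptors below are hypothetical stand-ins, not GDB's API:

  #include <sys/select.h>
  #include <algorithm>
  #include <cerrno>
  #include <optional>
  #include <vector>

  struct event_source              /* hypothetical stand-in for a process target */
  {
    int fd;                        /* becomes readable when an event arrives */
    std::optional<int> (*poll) (); /* non-blocking, like poll_one_curr_target */
  };

  /* Return the first pending event, blocking only while all sources are idle.  */
  static int
  wait_one_sketch (const std::vector<event_source> &sources)
  {
    while (true)
      {
        /* Phase 1: non-blocking poll of every source.  */
        for (const event_source &src : sources)
          if (std::optional<int> ev = src.poll ())
            return *ev;

        /* Phase 2: nothing pending anywhere; block until some source's
           descriptor becomes readable, then poll again.  */
        fd_set readfds;
        FD_ZERO (&readfds);
        int nfds = 0;
        for (const event_source &src : sources)
          {
            FD_SET (src.fd, &readfds);
            nfds = std::max (nfds, src.fd + 1);
          }

        if (nfds == 0)
          return -1;               /* no waitable sources left (cf. no-resumed) */

        if (select (nfds, &readfds, nullptr, nullptr, nullptr) < 0
            && errno != EINTR)
          return -1;               /* a real caller would report the error */
      }
  }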
5283 /* Save the thread's event and stop reason to process it later. */
5285 static void
5286 save_waitstatus (struct thread_info *tp, const target_waitstatus &ws)
5288 infrun_debug_printf ("saving status %s for %s",
5289 ws.to_string ().c_str (),
5290 tp->ptid.to_string ().c_str ());
5292 /* Record for later. */
5293 tp->set_pending_waitstatus (ws);
5295 if (ws.kind () == TARGET_WAITKIND_STOPPED
5296 && ws.sig () == GDB_SIGNAL_TRAP)
5298 struct regcache *regcache = get_thread_regcache (tp);
5299 const address_space *aspace = tp->inf->aspace.get ();
5300 CORE_ADDR pc = regcache_read_pc (regcache);
5302 adjust_pc_after_break (tp, tp->pending_waitstatus ());
5304 scoped_restore_current_thread restore_thread;
5305 switch_to_thread (tp);
5307 if (target_stopped_by_watchpoint ())
5308 tp->set_stop_reason (TARGET_STOPPED_BY_WATCHPOINT);
5309 else if (target_supports_stopped_by_sw_breakpoint ()
5310 && target_stopped_by_sw_breakpoint ())
5311 tp->set_stop_reason (TARGET_STOPPED_BY_SW_BREAKPOINT);
5312 else if (target_supports_stopped_by_hw_breakpoint ()
5313 && target_stopped_by_hw_breakpoint ())
5314 tp->set_stop_reason (TARGET_STOPPED_BY_HW_BREAKPOINT);
5315 else if (!target_supports_stopped_by_hw_breakpoint ()
5316 && hardware_breakpoint_inserted_here_p (aspace, pc))
5317 tp->set_stop_reason (TARGET_STOPPED_BY_HW_BREAKPOINT);
5318 else if (!target_supports_stopped_by_sw_breakpoint ()
5319 && software_breakpoint_inserted_here_p (aspace, pc))
5320 tp->set_stop_reason (TARGET_STOPPED_BY_SW_BREAKPOINT);
5321 else if (!thread_has_single_step_breakpoints_set (tp)
5322 && currently_stepping (tp))
5323 tp->set_stop_reason (TARGET_STOPPED_BY_SINGLE_STEP);
5327 /* Mark the non-executing threads accordingly. In all-stop, all
5328 threads of all processes are stopped when we get any event
5329 reported. In non-stop mode, only the event thread stops. */
5331 static void
5332 mark_non_executing_threads (process_stratum_target *target,
5333 ptid_t event_ptid,
5334 const target_waitstatus &ws)
5336 ptid_t mark_ptid;
5338 if (!target_is_non_stop_p ())
5339 mark_ptid = minus_one_ptid;
5340 else if (ws.kind () == TARGET_WAITKIND_SIGNALLED
5341 || ws.kind () == TARGET_WAITKIND_EXITED)
5343 /* If we're handling a process exit in non-stop mode, even
5344 though threads haven't been deleted yet, one would think
5345 that there is nothing to do, as threads of the dead process
5346 will soon be deleted, and threads of any other process were
5347 left running. However, on some targets, threads survive a
5348 process exit event. E.g., for the "checkpoint" command,
5349 when the current checkpoint/fork exits, linux-fork.c
5350 automatically switches to another fork from within
5351 target_mourn_inferior, by associating the same
5352 inferior/thread to another fork. We haven't mourned yet at
5353 this point, but we must mark any threads left in the
5354 process as not-executing so that finish_thread_state marks
5355 them stopped (from the user's perspective) if/when we present
5356 the stop to the user. */
5357 mark_ptid = ptid_t (event_ptid.pid ());
5359 else
5360 mark_ptid = event_ptid;
5362 set_executing (target, mark_ptid, false);
5364 /* Likewise the resumed flag. */
5365 set_resumed (target, mark_ptid, false);
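A minimal illustration of the filter semantics of mark_ptid (the numbers are made up, and this is only a sketch of ptid_t's usual matches() behaviour, not code from this file): a pid-only ptid matches every thread of that process, while minus_one_ptid matches every thread of every process.

  ptid_t event_ptid (1234, 0, 7);             /* thread 7 of process 1234 */
  ptid_t whole_process (event_ptid.pid ());   /* pid-only filter */

  gdb_assert (event_ptid.matches (whole_process));    /* same process */
  gdb_assert (event_ptid.matches (minus_one_ptid));   /* global wildcard */
  gdb_assert (!ptid_t (4321, 0, 7).matches (whole_process));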
5368 /* Handle one event after stopping threads. If the eventing thread
5369 reports back any interesting event, we leave it pending. If the
5370 eventing thread was in the middle of a displaced step, we
5371 cancel/finish it, and unless the thread's inferior is being
5372 detached, put the thread back in the step-over chain. Returns true
5373 if there are no resumed threads left in the target (thus there's no
5374 point in waiting further), false otherwise. */
5376 static bool
5377 handle_one (const wait_one_event &event)
5379 infrun_debug_printf
5380 ("%s %s", event.ws.to_string ().c_str (),
5381 event.ptid.to_string ().c_str ());
5383 if (event.ws.kind () == TARGET_WAITKIND_NO_RESUMED)
5385 /* All resumed threads exited. */
5386 return true;
5388 else if (event.ws.kind () == TARGET_WAITKIND_THREAD_EXITED
5389 || event.ws.kind () == TARGET_WAITKIND_EXITED
5390 || event.ws.kind () == TARGET_WAITKIND_SIGNALLED)
5392 /* One thread/process exited/signalled. */
5394 thread_info *t = nullptr;
5396 /* The target may have reported just a pid. If so, try
5397 the first non-exited thread. */
5398 if (event.ptid.is_pid ())
5400 int pid = event.ptid.pid ();
5401 inferior *inf = find_inferior_pid (event.target, pid);
5402 for (thread_info *tp : inf->non_exited_threads ())
5404 t = tp;
5405 break;
5408 /* If there is no available thread, the event would
5409 have to be appended to a per-inferior event list,
5410 which does not exist (and if it did, we'd have
5411 to adjust the run control commands to be able to
5412 resume such an inferior). We assert here instead
5413 of going into an infinite loop. */
5414 gdb_assert (t != nullptr);
5416 infrun_debug_printf
5417 ("using %s", t->ptid.to_string ().c_str ());
5419 else
5421 t = event.target->find_thread (event.ptid);
5422 /* Check if this is the first time we see this thread.
5423 Don't bother adding if it individually exited. */
5424 if (t == nullptr
5425 && event.ws.kind () != TARGET_WAITKIND_THREAD_EXITED)
5426 t = add_thread (event.target, event.ptid);
5429 if (t != nullptr)
5431 /* Set the threads as non-executing to avoid
5432 another stop attempt on them. */
5433 switch_to_thread_no_regs (t);
5434 mark_non_executing_threads (event.target, event.ptid,
5435 event.ws);
5436 save_waitstatus (t, event.ws);
5437 t->stop_requested = false;
5439 if (event.ws.kind () == TARGET_WAITKIND_THREAD_EXITED)
5441 if (displaced_step_finish (t, event.ws)
5442 != DISPLACED_STEP_FINISH_STATUS_OK)
5444 gdb_assert_not_reached ("displaced_step_finish on "
5445 "exited thread failed");
5450 else
5452 thread_info *t = event.target->find_thread (event.ptid);
5453 if (t == nullptr)
5454 t = add_thread (event.target, event.ptid);
5456 t->stop_requested = 0;
5457 t->set_executing (false);
5458 t->set_resumed (false);
5459 t->control.may_range_step = 0;
5461 /* This may be the first time we see the inferior report
5462 a stop. */
5463 if (t->inf->needs_setup)
5465 switch_to_thread_no_regs (t);
5466 setup_inferior (0);
5469 if (event.ws.kind () == TARGET_WAITKIND_STOPPED
5470 && event.ws.sig () == GDB_SIGNAL_0)
5472 /* We caught the event that we intended to catch, so
5473 there's no event to save as pending. */
5475 if (displaced_step_finish (t, event.ws)
5476 == DISPLACED_STEP_FINISH_STATUS_NOT_EXECUTED)
5478 /* Add it back to the step-over queue. */
5479 infrun_debug_printf
5480 ("displaced-step of %s canceled",
5481 t->ptid.to_string ().c_str ());
5483 t->control.trap_expected = 0;
5484 if (!t->inf->detaching)
5485 global_thread_step_over_chain_enqueue (t);
5488 else
5490 struct regcache *regcache;
5492 infrun_debug_printf
5493 ("target_wait %s, saving status for %s",
5494 event.ws.to_string ().c_str (),
5495 t->ptid.to_string ().c_str ());
5497 /* Record for later. */
5498 save_waitstatus (t, event.ws);
5500 if (displaced_step_finish (t, event.ws)
5501 == DISPLACED_STEP_FINISH_STATUS_NOT_EXECUTED)
5503 /* Add it back to the step-over queue. */
5504 t->control.trap_expected = 0;
5505 if (!t->inf->detaching)
5506 global_thread_step_over_chain_enqueue (t);
5509 regcache = get_thread_regcache (t);
5510 t->set_stop_pc (regcache_read_pc (regcache));
5512 infrun_debug_printf ("saved stop_pc=%s for %s "
5513 "(currently_stepping=%d)",
5514 paddress (current_inferior ()->arch (),
5515 t->stop_pc ()),
5516 t->ptid.to_string ().c_str (),
5517 currently_stepping (t));
5521 return false;
5524 /* Helper for stop_all_threads.  wait_one waits for events until it
5525 sees a TARGET_WAITKIND_NO_RESUMED event.  When it sees one, it
5526 disables target_async for the target to stop waiting for events
5527 from it.  TARGET_WAITKIND_NO_RESUMED can be delayed, though;
5528 consider debugging against gdbserver:
5530 #1 - Threads 1-5 are running, and thread 1 hits a breakpoint.
5532 #2 - gdb processes the breakpoint hit for thread 1, stops all
5533 threads, and steps thread 1 over the breakpoint.  While
5534 stopping threads, some other threads reported interesting
5535 events, which were left pending in the thread's objects
5536 (infrun's queue).
5538 #3 - Thread 1 exits (it stepped an exit syscall), and gdbserver
5539 reports the thread exit for thread 1.  The event ends up in
5540 remote's stop reply queue.
5542 #4 - That was the last resumed thread, so gdbserver reports
5543 no-resumed, and that event also ends up in remote's stop
5544 reply queue, queued after the thread exit from #3.
5546 #5 - gdb processes the thread exit event, which finishes the
5547 step-over, and so gdb restarts all threads (threads with
5548 pending events are left marked resumed, but aren't set
5549 executing).  The no-resumed event is still left pending in
5550 the remote stop reply queue.
5552 #6 - Since there are now resumed threads with pending breakpoint
5553 hits, gdb picks one at random to process next.
5555 #7 - gdb picks the breakpoint hit for thread 2 this time, and that
5556 breakpoint also needs to be stepped over, so gdb stops all
5557 threads again.
5559 #8 - stop_all_threads counts the number of expected stops and calls
5560 wait_one once for each.
5562 #9 - The first wait_one call collects the no-resumed event from #4
5563 above.
5565 #10 - Seeing the no-resumed event, wait_one disables target async
5566 for the remote target, to stop waiting for events from it.
5567 wait_one from here on always returns no-resumed directly
5568 without reaching the target.
5570 #11 - stop_all_threads still hasn't seen all the stops it expects,
5571 so it does another pass.
5573 #12 - Since the remote target is not async (disabled in #10),
5574 wait_one doesn't wait on it, so it won't see the expected
5575 stops, and instead returns no-resumed directly.
5577 #13 - stop_all_threads still hasn't seen all the stops, so it
5578 does another pass.  Go to #12, looping forever.
5580 To handle this, we explicitly (re-)enable target async on all
5581 targets that can async every time stop_all_threads goes to wait
5582 for the expected stops.  */
5584 static void
5585 reenable_target_async ()
5587 for (inferior *inf : all_inferiors ())
5589 process_stratum_target *target = inf->process_target ();
5590 if (target != nullptr
5591 && target->threads_executing
5592 && target->can_async_p ()
5593 && !target->is_async_p ())
5595 switch_to_inferior_no_thread (inf);
5596 target_async (1);
5601 /* See infrun.h. */
5603 void
5604 stop_all_threads (const char *reason, inferior *inf)
5606 /* We may need multiple passes to discover all threads. */
5607 int pass;
5608 int iterations = 0;
5610 gdb_assert (exists_non_stop_target ());
5612 INFRUN_SCOPED_DEBUG_START_END ("reason=%s, inf=%d", reason,
5613 inf != nullptr ? inf->num : -1);
5615 infrun_debug_show_threads ("non-exited threads",
5616 all_non_exited_threads ());
5618 scoped_restore_current_thread restore_thread;
5620 /* Enable thread events on relevant targets. */
5621 for (auto *target : all_non_exited_process_targets ())
5623 if (inf != nullptr && inf->process_target () != target)
5624 continue;
5626 switch_to_target_no_thread (target);
5627 target_thread_events (true);
5630 SCOPE_EXIT
5632 /* Disable thread events on relevant targets. */
5633 for (auto *target : all_non_exited_process_targets ())
5635 if (inf != nullptr && inf->process_target () != target)
5636 continue;
5638 switch_to_target_no_thread (target);
5639 target_thread_events (false);
5642 /* Use debug_prefixed_printf directly to get a meaningful function
5643 name. */
5644 if (debug_infrun)
5645 debug_prefixed_printf ("infrun", "stop_all_threads", "done");
5648 /* Request threads to stop, and then wait for the stops. Because
5649 threads we already know about can spawn more threads while we're
5650 trying to stop them, and we only learn about new threads when we
5651 update the thread list, do this in a loop, and keep iterating
5652 until two passes find no threads that need to be stopped. */
5653 for (pass = 0; pass < 2; pass++, iterations++)
5655 infrun_debug_printf ("pass=%d, iterations=%d", pass, iterations);
5656 while (1)
5658 int waits_needed = 0;
5660 for (auto *target : all_non_exited_process_targets ())
5662 if (inf != nullptr && inf->process_target () != target)
5663 continue;
5665 switch_to_target_no_thread (target);
5666 update_thread_list ();
5669 /* Go through all threads looking for threads that we need
5670 to tell the target to stop. */
5671 for (thread_info *t : all_non_exited_threads ())
5673 if (inf != nullptr && t->inf != inf)
5674 continue;
5676 /* For a single-target setting with an all-stop target,
5677 we would not even arrive here. For a multi-target
5678 setting, until GDB is able to handle a mixture of
5679 all-stop and non-stop targets, simply skip all-stop
5680 targets' threads. This should be fine due to the
5681 protection of 'check_multi_target_resumption'. */
5683 switch_to_thread_no_regs (t);
5684 if (!target_is_non_stop_p ())
5685 continue;
5687 if (t->executing ())
5689 /* If already stopping, don't request a stop again.
5690 We just haven't seen the notification yet. */
5691 if (!t->stop_requested)
5693 infrun_debug_printf (" %s executing, need stop",
5694 t->ptid.to_string ().c_str ());
5695 target_stop (t->ptid);
5696 t->stop_requested = 1;
5698 else
5700 infrun_debug_printf (" %s executing, already stopping",
5701 t->ptid.to_string ().c_str ());
5704 if (t->stop_requested)
5705 waits_needed++;
5707 else
5709 infrun_debug_printf (" %s not executing",
5710 t->ptid.to_string ().c_str ());
5712 /* The thread may not be executing, but still be
5713 resumed with a pending status to process. */
5714 t->set_resumed (false);
5718 if (waits_needed == 0)
5719 break;
5721 /* If we find new threads on the second iteration, restart
5722 over. We want to see two iterations in a row with all
5723 threads stopped. */
5724 if (pass > 0)
5725 pass = -1;
5727 reenable_target_async ();
5729 for (int i = 0; i < waits_needed; i++)
5731 wait_one_event event = wait_one ();
5732 if (handle_one (event))
5733 break;
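The loop above implements a "two consecutive quiet passes" idiom: finding new work on a later pass resets pass to -1, so the function only finishes after two passes in a row found nothing left to stop.  A standalone distillation of just that idiom, with a hypothetical do_one_pass callback standing in for the update/stop/wait work:

  #include <functional>

  /* Run DO_ONE_PASS repeatedly until two consecutive passes report that
     they found no new work.  DO_ONE_PASS returns true if it found (and
     handled) something.  */
  static void
  run_until_two_quiet_passes (const std::function<bool ()> &do_one_pass)
  {
    for (int pass = 0; pass < 2; pass++)
      if (do_one_pass ())
        pass = -1;   /* found work: require two fresh quiet passes */
  }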
5739 /* Handle a TARGET_WAITKIND_NO_RESUMED event. Return true if we
5740 handled the event and should continue waiting. Return false if we
5741 should stop and report the event to the user. */
5743 static bool
5744 handle_no_resumed (struct execution_control_state *ecs)
5746 if (target_can_async_p ())
5748 bool any_sync = false;
5750 for (ui *ui : all_uis ())
5752 if (ui->prompt_state == PROMPT_BLOCKED)
5754 any_sync = true;
5755 break;
5758 if (!any_sync)
5760 /* There were no unwaited-for children left in the target, but
5761 we're not synchronously waiting for events either. Just
5762 ignore. */
5764 infrun_debug_printf ("TARGET_WAITKIND_NO_RESUMED (ignoring: bg)");
5765 prepare_to_wait (ecs);
5766 return true;
5770 /* Otherwise, if we were running a synchronous execution command, we
5771 may need to cancel it and give the user back the terminal.
5773 In non-stop mode, the target can't tell whether we've already
5774 consumed previous stop events, so it can end up sending us a
5775 no-resumed event like so:
5777 #0 - thread 1 is left stopped
5779 #1 - thread 2 is resumed and hits breakpoint
5780 -> TARGET_WAITKIND_STOPPED
5782 #2 - thread 3 is resumed and exits
5783 this is the last resumed thread, so
5784 -> TARGET_WAITKIND_NO_RESUMED
5786 #3 - gdb processes stop for thread 2 and decides to re-resume
5789 #4 - gdb processes the TARGET_WAITKIND_NO_RESUMED event.
5790 thread 2 is now resumed, so the event should be ignored.
5792 IOW, if the stop for thread 2 doesn't end a foreground command,
5793 then we need to ignore the following TARGET_WAITKIND_NO_RESUMED
5794 event. But it could be that the event meant that thread 2 itself
5795 (or whatever other thread was the last resumed thread) exited.
5797 To address this we refresh the thread list and check whether we
5798 have resumed threads _now_. In the example above, this removes
5799 thread 3 from the thread list. If thread 2 was re-resumed, we
5800 ignore this event. If we find no thread resumed, then we cancel
5801 the synchronous command and show "no unwaited-for " to the
5802 user. */
5804 inferior *curr_inf = current_inferior ();
5806 scoped_restore_current_thread restore_thread;
5807 update_thread_list ();
5809 /* If:
5811 - the current target has no thread executing, and
5812 - the current inferior is native, and
5813 - the current inferior is the one which has the terminal, and
5814 - we did nothing,
5816 then a Ctrl-C from this point on would remain stuck in the
5817 kernel, until a thread resumes and dequeues it. That would
5818 result in the GDB CLI not reacting to Ctrl-C, not able to
5819 interrupt the program. To address this, if the current inferior
5820 no longer has any thread executing, we give the terminal to some
5821 other inferior that has at least one thread executing. */
5822 bool swap_terminal = true;
5824 /* Whether to ignore this TARGET_WAITKIND_NO_RESUMED event, or
5825 whether to report it to the user. */
5826 bool ignore_event = false;
5828 for (thread_info *thread : all_non_exited_threads ())
5830 if (swap_terminal && thread->executing ())
5832 if (thread->inf != curr_inf)
5834 target_terminal::ours ();
5836 switch_to_thread (thread);
5837 target_terminal::inferior ();
5839 swap_terminal = false;
5842 if (!ignore_event && thread->resumed ())
5844 /* Either there were no unwaited-for children left in the
5845 target at some point, but there are now, or some target
5846 other than the eventing one has unwaited-for children
5847 left. Just ignore. */
5848 infrun_debug_printf ("TARGET_WAITKIND_NO_RESUMED "
5849 "(ignoring: found resumed)");
5851 ignore_event = true;
5854 if (ignore_event && !swap_terminal)
5855 break;
5858 if (ignore_event)
5860 switch_to_inferior_no_thread (curr_inf);
5861 prepare_to_wait (ecs);
5862 return true;
5865 /* Go ahead and report the event. */
5866 return false;
5869 /* Handle a TARGET_WAITKIND_THREAD_EXITED event. Return true if we
5870 handled the event and should continue waiting. Return false if we
5871 should stop and report the event to the user. */
5873 static bool
5874 handle_thread_exited (execution_control_state *ecs)
5876 context_switch (ecs);
5878 /* Clear these so we don't re-start the thread stepping over a
5879 breakpoint/watchpoint. */
5880 ecs->event_thread->stepping_over_breakpoint = 0;
5881 ecs->event_thread->stepping_over_watchpoint = 0;
5883 /* If the thread had an FSM, then abort the command. But only after
5884 finishing the step over, as in non-stop mode, aborting this
5885 thread's command should not interfere with other threads. We
5886 must check this before finish_step_over, however, which may
5887 update the thread list and delete the event thread. */
5888 bool abort_cmd = (ecs->event_thread->thread_fsm () != nullptr);
5890 /* Mark the thread exited right now, because finish_step_over may
5891 update the thread list and that may delete the thread silently
5892 (depending on target), while we always want to emit the "[Thread
5893 ... exited]" notification. Don't actually delete the thread yet,
5894 because we need to pass its pointer down to finish_step_over. */
5895 set_thread_exited (ecs->event_thread);
5897 /* Maybe the thread was doing a step-over, if so release
5898 resources and start any further pending step-overs.
5900 If we are on a non-stop target and the thread was doing an
5901 in-line step, this also restarts the other threads. */
5902 int ret = finish_step_over (ecs);
5904 /* finish_step_over returns true if it moves ecs' wait status
5905 back into the thread, so that we go handle another pending
5906 event before this one. But we know it never does that if
5907 the event thread has exited. */
5908 gdb_assert (ret == 0);
5910 if (abort_cmd)
5912 /* We're stopping for the thread exit event. Switch to the
5913 event thread again, as finish_step_over may have switched
5914 threads. */
5915 switch_to_thread (ecs->event_thread);
5916 ecs->event_thread = nullptr;
5917 return false;
5920 /* If finish_step_over started a new in-line step-over, don't
5921 try to restart anything else. */
5922 if (step_over_info_valid_p ())
5924 delete_thread (ecs->event_thread);
5925 return true;
5928 /* Maybe we are on an all-stop target and we got this event
5929 while doing a step-like command on another thread. If so,
5930 go back to doing that. If this thread was stepping,
5931 switch_back_to_stepped_thread will consider that the thread
5932 was interrupted mid-step and will try to keep stepping it.  We
5933 don't want that, the thread is gone. So clear the proceed
5934 status so it doesn't do that. */
5935 clear_proceed_status_thread (ecs->event_thread);
5936 if (switch_back_to_stepped_thread (ecs))
5938 delete_thread (ecs->event_thread);
5939 return true;
5942 inferior *inf = ecs->event_thread->inf;
5943 bool slock_applies = schedlock_applies (ecs->event_thread);
5945 delete_thread (ecs->event_thread);
5946 ecs->event_thread = nullptr;
5948 /* Continue handling the event as if we had gotten a
5949 TARGET_WAITKIND_NO_RESUMED. */
5950 auto handle_as_no_resumed = [ecs] ()
5952 /* handle_no_resumed doesn't really look at the event kind, but
5953 normal_stop does. */
5954 ecs->ws.set_no_resumed ();
5955 ecs->event_thread = nullptr;
5956 ecs->ptid = minus_one_ptid;
5958 /* Re-record the last target status. */
5959 set_last_target_status (ecs->target, ecs->ptid, ecs->ws);
5961 return handle_no_resumed (ecs);
5964 /* If we are on an all-stop target, the target has stopped all
5965 threads to report the event. We don't actually want to
5966 stop, so restart the threads. */
5967 if (!target_is_non_stop_p ())
5969 if (slock_applies)
5971 /* Since the target is !non-stop, everything is stopped
5972 at this point, and we can't assume we'll get further
5973 events until we resume the target again.  Handle this
5974 event as if it were a TARGET_WAITKIND_NO_RESUMED.  Note
5975 this refreshes the thread list and checks whether there
5976 are other resumed threads before deciding whether to
5977 print "no-unwaited-for left". This is important because
5978 the user could have done:
5980 (gdb) set scheduler-locking on
5981 (gdb) thread 1
5982 (gdb) c&
5983 (gdb) thread 2
5984 (gdb) c
5986 ... and only one of the threads exited. */
5987 return handle_as_no_resumed ();
5989 else
5991 /* Switch to the first non-exited thread we can find, and
5992 resume. */
5993 auto range = inf->non_exited_threads ();
5994 if (range.begin () == range.end ())
5996 /* Looks like the target reported a
5997 TARGET_WAITKIND_THREAD_EXITED for its last known
5998 thread. */
5999 return handle_as_no_resumed ();
6001 thread_info *non_exited_thread = *range.begin ();
6002 switch_to_thread (non_exited_thread);
6003 insert_breakpoints ();
6004 resume (GDB_SIGNAL_0);
6008 prepare_to_wait (ecs);
6009 return true;
6012 /* Given an execution control state that has been freshly filled in by
6013 an event from the inferior, figure out what it means and take
6014 appropriate action.
6016 The alternatives are:
6018 1) stop_waiting and return; to really stop and return to the
6019 debugger.
6021 2) keep_going and return; to wait for the next event (set
6022 ecs->event_thread->stepping_over_breakpoint to 1 to single step
6023 once). */
6025 static void
6026 handle_inferior_event (struct execution_control_state *ecs)
6028 /* Make sure that all temporary struct value objects that were
6029 created during the handling of the event get deleted at the
6030 end. */
6031 scoped_value_mark free_values;
6033 infrun_debug_printf ("%s", ecs->ws.to_string ().c_str ());
6035 if (ecs->ws.kind () == TARGET_WAITKIND_IGNORE)
6037 /* We had an event in the inferior, but we are not interested in
6038 handling it at this level. The lower layers have already
6039 done what needs to be done, if anything.
6041 One of the possible circumstances for this is when the
6042 inferior produces output for the console. The inferior has
6043 not stopped, and we are ignoring the event. Another possible
6044 circumstance is any event which the lower level knows will be
6045 reported multiple times without an intervening resume. */
6046 prepare_to_wait (ecs);
6047 return;
6050 if (ecs->ws.kind () == TARGET_WAITKIND_NO_RESUMED
6051 && handle_no_resumed (ecs))
6052 return;
6054 /* Cache the last target/ptid/waitstatus. */
6055 set_last_target_status (ecs->target, ecs->ptid, ecs->ws);
6057 /* Always clear state belonging to the previous time we stopped. */
6058 stop_stack_dummy = STOP_NONE;
6060 if (ecs->ws.kind () == TARGET_WAITKIND_NO_RESUMED)
6062 /* No unwaited-for children left. IOW, all resumed children
6063 have exited. */
6064 stop_waiting (ecs);
6065 return;
6068 if (ecs->ws.kind () != TARGET_WAITKIND_EXITED
6069 && ecs->ws.kind () != TARGET_WAITKIND_SIGNALLED)
6071 ecs->event_thread = ecs->target->find_thread (ecs->ptid);
6072 /* If it's a new thread, add it to the thread database. */
6073 if (ecs->event_thread == nullptr)
6074 ecs->event_thread = add_thread (ecs->target, ecs->ptid);
6076 /* Disable range stepping. If the next step request could use a
6077 range, this will end up re-enabled then.  */
6078 ecs->event_thread->control.may_range_step = 0;
6081 /* Dependent on valid ECS->EVENT_THREAD. */
6082 adjust_pc_after_break (ecs->event_thread, ecs->ws);
6084 /* Dependent on the current PC value modified by adjust_pc_after_break. */
6085 reinit_frame_cache ();
6087 breakpoint_retire_moribund ();
6089 /* First, distinguish signals caused by the debugger from signals
6090 that have to do with the program's own actions. Note that
6091 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
6092 on the operating system version. Here we detect when a SIGILL or
6093 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
6094 something similar for SIGSEGV, since a SIGSEGV will be generated
6095 when we're trying to execute a breakpoint instruction on a
6096 non-executable stack. This happens for call dummy breakpoints
6097 for architectures like SPARC that place call dummies on the
6098 stack. */
6099 if (ecs->ws.kind () == TARGET_WAITKIND_STOPPED
6100 && (ecs->ws.sig () == GDB_SIGNAL_ILL
6101 || ecs->ws.sig () == GDB_SIGNAL_SEGV
6102 || ecs->ws.sig () == GDB_SIGNAL_EMT))
6104 struct regcache *regcache = get_thread_regcache (ecs->event_thread);
6106 if (breakpoint_inserted_here_p (ecs->event_thread->inf->aspace.get (),
6107 regcache_read_pc (regcache)))
6109 infrun_debug_printf ("Treating signal as SIGTRAP");
6110 ecs->ws.set_stopped (GDB_SIGNAL_TRAP);
6114 mark_non_executing_threads (ecs->target, ecs->ptid, ecs->ws);
6116 switch (ecs->ws.kind ())
6118 case TARGET_WAITKIND_LOADED:
6120 context_switch (ecs);
6121 /* Ignore gracefully during startup of the inferior, as it might
6122 be the shell which has just loaded some objects; otherwise,
6123 add the symbols for the newly loaded objects. Also ignore at
6124 the beginning of an attach or remote session; we will query
6125 the full list of libraries once the connection is
6126 established. */
6128 stop_kind stop_soon = get_inferior_stop_soon (ecs);
6129 if (stop_soon == NO_STOP_QUIETLY)
6131 struct regcache *regcache;
6133 regcache = get_thread_regcache (ecs->event_thread);
6135 handle_solib_event ();
6137 ecs->event_thread->set_stop_pc (regcache_read_pc (regcache));
6138 address_space *aspace = ecs->event_thread->inf->aspace.get ();
6139 ecs->event_thread->control.stop_bpstat
6140 = bpstat_stop_status_nowatch (aspace,
6141 ecs->event_thread->stop_pc (),
6142 ecs->event_thread, ecs->ws);
6144 if (handle_stop_requested (ecs))
6145 return;
6147 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
6149 /* A catchpoint triggered. */
6150 process_event_stop_test (ecs);
6151 return;
6154 /* If requested, stop when the dynamic linker notifies
6155 gdb of events. This allows the user to get control
6156 and place breakpoints in initializer routines for
6157 dynamically loaded objects (among other things). */
6158 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
6159 if (stop_on_solib_events)
6161 /* Make sure we print "Stopped due to solib-event" in
6162 normal_stop. */
6163 stop_print_frame = true;
6165 stop_waiting (ecs);
6166 return;
6170 /* If we are skipping through a shell, or through shared library
6171 loading that we aren't interested in, resume the program. If
6172 we're running the program normally, also resume. */
6173 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
6175 /* Loading of shared libraries might have changed breakpoint
6176 addresses. Make sure new breakpoints are inserted. */
6177 if (stop_soon == NO_STOP_QUIETLY)
6178 insert_breakpoints ();
6179 resume (GDB_SIGNAL_0);
6180 prepare_to_wait (ecs);
6181 return;
6184 /* But stop if we're attaching or setting up a remote
6185 connection. */
6186 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
6187 || stop_soon == STOP_QUIETLY_REMOTE)
6189 infrun_debug_printf ("quietly stopped");
6190 stop_waiting (ecs);
6191 return;
6194 internal_error (_("unhandled stop_soon: %d"), (int) stop_soon);
6197 case TARGET_WAITKIND_SPURIOUS:
6198 if (handle_stop_requested (ecs))
6199 return;
6200 context_switch (ecs);
6201 resume (GDB_SIGNAL_0);
6202 prepare_to_wait (ecs);
6203 return;
6205 case TARGET_WAITKIND_THREAD_CREATED:
6206 if (handle_stop_requested (ecs))
6207 return;
6208 context_switch (ecs);
6209 if (!switch_back_to_stepped_thread (ecs))
6210 keep_going (ecs);
6211 return;
6213 case TARGET_WAITKIND_THREAD_EXITED:
6214 if (handle_thread_exited (ecs))
6215 return;
6216 stop_waiting (ecs);
6217 break;
6219 case TARGET_WAITKIND_EXITED:
6220 case TARGET_WAITKIND_SIGNALLED:
6222 /* Depending on the system, ecs->ptid may point to a thread or
6223 to a process. On some targets, target_mourn_inferior may
6224 need to have access to the just-exited thread. That is the
6225 case of GNU/Linux's "checkpoint" support, for example.
6226 Call the switch_to_xxx routine as appropriate. */
6227 thread_info *thr = ecs->target->find_thread (ecs->ptid);
6228 if (thr != nullptr)
6229 switch_to_thread (thr);
6230 else
6232 inferior *inf = find_inferior_ptid (ecs->target, ecs->ptid);
6233 switch_to_inferior_no_thread (inf);
6236 handle_vfork_child_exec_or_exit (0);
6237 target_terminal::ours (); /* Must do this before mourn anyway. */
6239 /* Clear any previous state of convenience variables.  */
6240 clear_exit_convenience_vars ();
6242 if (ecs->ws.kind () == TARGET_WAITKIND_EXITED)
6244 /* Record the exit code in the convenience variable $_exitcode, so
6245 that the user can inspect this again later. */
6246 set_internalvar_integer (lookup_internalvar ("_exitcode"),
6247 (LONGEST) ecs->ws.exit_status ());
6249 /* Also record this in the inferior itself. */
6250 current_inferior ()->has_exit_code = true;
6251 current_inferior ()->exit_code = (LONGEST) ecs->ws.exit_status ();
6253 /* Support the --return-child-result option. */
6254 return_child_result_value = ecs->ws.exit_status ();
6256 interps_notify_exited (ecs->ws.exit_status ());
6258 else
6260 struct gdbarch *gdbarch = current_inferior ()->arch ();
6262 if (gdbarch_gdb_signal_to_target_p (gdbarch))
6264 /* Set the value of the internal variable $_exitsignal,
6265 which holds the signal uncaught by the inferior. */
6266 set_internalvar_integer (lookup_internalvar ("_exitsignal"),
6267 gdbarch_gdb_signal_to_target (gdbarch,
6268 ecs->ws.sig ()));
6270 else
6272 /* We don't have access to the target's method used for
6273 converting between signal numbers (GDB's internal
6274 representation <-> target's representation).
6275 Therefore, we cannot do a good job at displaying this
6276 information to the user. It's better to just warn
6277 her about it (if infrun debugging is enabled), and
6278 give up. */
6279 infrun_debug_printf ("Cannot fill $_exitsignal with the correct "
6280 "signal number.");
6283 interps_notify_signal_exited (ecs->ws.sig ());
6286 gdb_flush (gdb_stdout);
6287 target_mourn_inferior (inferior_ptid);
6288 stop_print_frame = false;
6289 stop_waiting (ecs);
6290 return;
6292 case TARGET_WAITKIND_FORKED:
6293 case TARGET_WAITKIND_VFORKED:
6294 case TARGET_WAITKIND_THREAD_CLONED:
6296 displaced_step_finish (ecs->event_thread, ecs->ws);
6298 /* Start a new step-over in another thread if there's one that
6299 needs it. */
6300 start_step_over ();
6302 context_switch (ecs);
6304 /* Immediately detach breakpoints from the child before there's
6305 any chance of letting the user delete breakpoints from the
6306 breakpoint lists. If we don't do this early, it's easy to
6307 leave leftover traps in the child, viz: "break foo; catch
6308 fork; c; <fork>; del; c; <child calls foo>". We only follow
6309 the fork on the last `continue', and by that time the
6310 breakpoint at "foo" is long gone from the breakpoint table.
6311 If we vforked, then we don't need to unpatch here, since both
6312 parent and child are sharing the same memory pages; we'll
6313 need to unpatch at follow/detach time instead to be certain
6314 that new breakpoints added between catchpoint hit time and
6315 vfork follow are detached. */
6316 if (ecs->ws.kind () == TARGET_WAITKIND_FORKED)
6318 /* This won't actually modify the breakpoint list, but will
6319 physically remove the breakpoints from the child. */
6320 detach_breakpoints (ecs->ws.child_ptid ());
6323 delete_just_stopped_threads_single_step_breakpoints ();
6325 /* In case the event is caught by a catchpoint, remember that
6326 the event is to be followed at the next resume of the thread,
6327 and not immediately. */
6328 ecs->event_thread->pending_follow = ecs->ws;
6330 ecs->event_thread->set_stop_pc
6331 (regcache_read_pc (get_thread_regcache (ecs->event_thread)));
6333 ecs->event_thread->control.stop_bpstat
6334 = bpstat_stop_status_nowatch (ecs->event_thread->inf->aspace.get (),
6335 ecs->event_thread->stop_pc (),
6336 ecs->event_thread, ecs->ws);
6338 if (handle_stop_requested (ecs))
6339 return;
6341 /* If no catchpoint triggered for this, then keep going. Note
6342 that we're interested in knowing the bpstat actually causes a
6343 stop, not just if it may explain the signal. Software
6344 watchpoints, for example, always appear in the bpstat. */
6345 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
6347 bool follow_child
6348 = (ecs->ws.kind () != TARGET_WAITKIND_THREAD_CLONED
6349 && follow_fork_mode_string == follow_fork_mode_child);
6351 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
6353 process_stratum_target *targ
6354 = ecs->event_thread->inf->process_target ();
6356 bool should_resume;
6357 if (ecs->ws.kind () != TARGET_WAITKIND_THREAD_CLONED)
6358 should_resume = follow_fork ();
6359 else
6361 should_resume = true;
6362 inferior *inf = ecs->event_thread->inf;
6363 inf->top_target ()->follow_clone (ecs->ws.child_ptid ());
6364 ecs->event_thread->pending_follow.set_spurious ();
6367 /* Note that one of these may be an invalid pointer,
6368 depending on detach_fork. */
6369 thread_info *parent = ecs->event_thread;
6370 thread_info *child = targ->find_thread (ecs->ws.child_ptid ());
6372 /* At this point, the parent is marked running, and the
6373 child is marked stopped. */
6375 /* If not resuming the parent, mark it stopped. */
6376 if (ecs->ws.kind () != TARGET_WAITKIND_THREAD_CLONED
6377 && follow_child && !detach_fork && !non_stop && !sched_multi)
6378 parent->set_running (false);
6380 /* If resuming the child, mark it running. */
6381 if ((ecs->ws.kind () == TARGET_WAITKIND_THREAD_CLONED
6382 && !schedlock_applies (ecs->event_thread))
6383 || (ecs->ws.kind () != TARGET_WAITKIND_THREAD_CLONED
6384 && (follow_child
6385 || (!detach_fork && (non_stop || sched_multi)))))
6386 child->set_running (true);
6388 /* In non-stop mode, also resume the other branch. */
6389 if ((ecs->ws.kind () == TARGET_WAITKIND_THREAD_CLONED
6390 && target_is_non_stop_p ()
6391 && !schedlock_applies (ecs->event_thread))
6392 || (ecs->ws.kind () != TARGET_WAITKIND_THREAD_CLONED
6393 && (!detach_fork && (non_stop
6394 || (sched_multi
6395 && target_is_non_stop_p ())))))
6397 if (follow_child)
6398 switch_to_thread (parent);
6399 else
6400 switch_to_thread (child);
6402 ecs->event_thread = inferior_thread ();
6403 ecs->ptid = inferior_ptid;
6404 keep_going (ecs);
6407 if (follow_child)
6408 switch_to_thread (child);
6409 else
6410 switch_to_thread (parent);
6412 ecs->event_thread = inferior_thread ();
6413 ecs->ptid = inferior_ptid;
6415 if (should_resume)
6417 /* Never call switch_back_to_stepped_thread if we are waiting for
6418 vfork-done (waiting for an external vfork child to exec or
6419 exit). We will resume only the vforking thread for the purpose
6420 of collecting the vfork-done event, and we will restart any
6421 step once the critical shared address space window is done. */
6422 if ((!follow_child
6423 && detach_fork
6424 && parent->inf->thread_waiting_for_vfork_done != nullptr)
6425 || !switch_back_to_stepped_thread (ecs))
6426 keep_going (ecs);
6428 else
6429 stop_waiting (ecs);
6430 return;
6432 process_event_stop_test (ecs);
6433 return;
6435 case TARGET_WAITKIND_VFORK_DONE:
6436 /* Done with the shared memory region. Re-insert breakpoints in
6437 the parent, and keep going. */
6439 context_switch (ecs);
6441 handle_vfork_done (ecs->event_thread);
6442 gdb_assert (inferior_thread () == ecs->event_thread);
6444 if (handle_stop_requested (ecs))
6445 return;
6447 if (!switch_back_to_stepped_thread (ecs))
6449 gdb_assert (inferior_thread () == ecs->event_thread);
6450 /* This also takes care of reinserting breakpoints in the
6451 previously locked inferior. */
6452 keep_going (ecs);
6454 return;
6456 case TARGET_WAITKIND_EXECD:
6458 /* Note we can't read registers yet (the stop_pc), because we
6459 don't yet know the inferior's post-exec architecture.
6460 'stop_pc' is explicitly read below instead. */
6461 switch_to_thread_no_regs (ecs->event_thread);
6463 /* Do whatever is necessary to the parent branch of the vfork. */
6464 handle_vfork_child_exec_or_exit (1);
6466 /* This causes the eventpoints and symbol table to be reset.
6467 Must do this now, before trying to determine whether to
6468 stop. */
6469 follow_exec (inferior_ptid, ecs->ws.execd_pathname ());
6471 /* In follow_exec we may have deleted the original thread and
6472 created a new one. Make sure that the event thread is the
6473 execd thread for that case (this is a nop otherwise). */
6474 ecs->event_thread = inferior_thread ();
6476 ecs->event_thread->set_stop_pc
6477 (regcache_read_pc (get_thread_regcache (ecs->event_thread)));
6479 ecs->event_thread->control.stop_bpstat
6480 = bpstat_stop_status_nowatch (ecs->event_thread->inf->aspace.get (),
6481 ecs->event_thread->stop_pc (),
6482 ecs->event_thread, ecs->ws);
6484 if (handle_stop_requested (ecs))
6485 return;
6487 /* If no catchpoint triggered for this, then keep going. */
6488 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
6490 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
6491 keep_going (ecs);
6492 return;
6494 process_event_stop_test (ecs);
6495 return;
6497 /* Be careful not to try to gather much state about a thread
6498 that's in a syscall. It's frequently a losing proposition. */
6499 case TARGET_WAITKIND_SYSCALL_ENTRY:
6500 /* Getting the current syscall number. */
6501 if (handle_syscall_event (ecs) == 0)
6502 process_event_stop_test (ecs);
6503 return;
6505 /* Before examining the threads further, step this thread to
6506 get it entirely out of the syscall. (We get notice of the
6507 event when the thread is just on the verge of exiting a
6508 syscall. Stepping one instruction seems to get it back
6509 into user code.) */
6510 case TARGET_WAITKIND_SYSCALL_RETURN:
6511 if (handle_syscall_event (ecs) == 0)
6512 process_event_stop_test (ecs);
6513 return;
6515 case TARGET_WAITKIND_STOPPED:
6516 handle_signal_stop (ecs);
6517 return;
6519 case TARGET_WAITKIND_NO_HISTORY:
6520 /* Reverse execution: target ran out of history info. */
6522 /* Switch to the stopped thread. */
6523 context_switch (ecs);
6524 infrun_debug_printf ("stopped");
6526 delete_just_stopped_threads_single_step_breakpoints ();
6527 ecs->event_thread->set_stop_pc
6528 (regcache_read_pc (get_thread_regcache (inferior_thread ())));
6530 if (handle_stop_requested (ecs))
6531 return;
6533 interps_notify_no_history ();
6534 stop_waiting (ecs);
6535 return;
6539 /* Restart threads back to what they were trying to do when we
6540 paused them (because of an in-line step-over or vfork, for example).
6541 The EVENT_THREAD thread is ignored (not restarted).
6543 If INF is non-nullptr, only resume threads from INF. */
6545 static void
6546 restart_threads (struct thread_info *event_thread, inferior *inf)
6548 INFRUN_SCOPED_DEBUG_START_END ("event_thread=%s, inf=%d",
6549 event_thread->ptid.to_string ().c_str (),
6550 inf != nullptr ? inf->num : -1);
6552 gdb_assert (!step_over_info_valid_p ());
6554 /* In case the instruction just stepped spawned a new thread. */
6555 update_thread_list ();
6557 for (thread_info *tp : all_non_exited_threads ())
6559 if (inf != nullptr && tp->inf != inf)
6560 continue;
6562 if (tp->inf->detaching)
6564 infrun_debug_printf ("restart threads: [%s] inferior detaching",
6565 tp->ptid.to_string ().c_str ());
6566 continue;
6569 switch_to_thread_no_regs (tp);
6571 if (tp == event_thread)
6573 infrun_debug_printf ("restart threads: [%s] is event thread",
6574 tp->ptid.to_string ().c_str ());
6575 continue;
6578 if (!(tp->state == THREAD_RUNNING || tp->control.in_infcall))
6580 infrun_debug_printf ("restart threads: [%s] not meant to be running",
6581 tp->ptid.to_string ().c_str ());
6582 continue;
6585 if (tp->resumed ())
6587 infrun_debug_printf ("restart threads: [%s] resumed",
6588 tp->ptid.to_string ().c_str ());
6589 gdb_assert (tp->executing () || tp->has_pending_waitstatus ());
6590 continue;
6593 if (thread_is_in_step_over_chain (tp))
6595 infrun_debug_printf ("restart threads: [%s] needs step-over",
6596 tp->ptid.to_string ().c_str ());
6597 gdb_assert (!tp->resumed ());
6598 continue;
6602 if (tp->has_pending_waitstatus ())
6604 infrun_debug_printf ("restart threads: [%s] has pending status",
6605 tp->ptid.to_string ().c_str ());
6606 tp->set_resumed (true);
6607 continue;
6610 gdb_assert (!tp->stop_requested);
6612 /* If some thread needs to start a step-over at this point, it
6613 should still be in the step-over queue, and thus skipped
6614 above. */
6615 if (thread_still_needs_step_over (tp))
6617 internal_error ("thread [%s] needs a step-over, but not in "
6618 "step-over queue\n",
6619 tp->ptid.to_string ().c_str ());
6622 if (currently_stepping (tp))
6624 infrun_debug_printf ("restart threads: [%s] was stepping",
6625 tp->ptid.to_string ().c_str ());
6626 keep_going_stepped_thread (tp);
6628 else
6630 infrun_debug_printf ("restart threads: [%s] continuing",
6631 tp->ptid.to_string ().c_str ());
6632 execution_control_state ecs (tp);
6633 switch_to_thread (tp);
6634 keep_going_pass_signal (&ecs);
6639 /* Callback for iterate_over_threads. Find a resumed thread that has
6640 a pending waitstatus. */
6642 static int
6643 resumed_thread_with_pending_status (struct thread_info *tp,
6644 void *arg)
6646 return tp->resumed () && tp->has_pending_waitstatus ();
6649 /* Called when we get an event that may finish an in-line or
6650 out-of-line (displaced stepping) step-over started previously.
6651 Return true if the event is processed and we should go back to the
6652 event loop; false if the caller should continue processing the
6653 event. */
6655 static int
6656 finish_step_over (struct execution_control_state *ecs)
6658 displaced_step_finish (ecs->event_thread, ecs->ws);
6660 bool had_step_over_info = step_over_info_valid_p ();
6662 if (had_step_over_info)
6664 /* If we're stepping over a breakpoint with all threads locked,
6665 then only the thread that was stepped should be reporting
6666 back an event. */
6667 gdb_assert (ecs->event_thread->control.trap_expected);
6669 update_thread_events_after_step_over (ecs->event_thread, ecs->ws);
6671 clear_step_over_info ();
6674 if (!target_is_non_stop_p ())
6675 return 0;
6677 /* Start a new step-over in another thread if there's one that
6678 needs it. */
6679 start_step_over ();
6681 /* If we were stepping over a breakpoint before, and haven't started
6682 a new in-line step-over sequence, then restart all other threads
6683 (except the event thread). We can't do this in all-stop, as then
6684 e.g., we wouldn't be able to issue any other remote packet until
6685 these other threads stop. */
6686 if (had_step_over_info && !step_over_info_valid_p ())
6688 struct thread_info *pending;
6690 /* If we only have threads with pending statuses, the restart
6691 below won't restart any thread and so nothing re-inserts the
6692 breakpoint we just stepped over. But we need it inserted
6693 when we later process the pending events, otherwise if
6694 another thread has a pending event for this breakpoint too,
6695 we'd discard its event (because the breakpoint that
6696 originally caused the event was no longer inserted). */
6697 context_switch (ecs);
6698 insert_breakpoints ();
6700 restart_threads (ecs->event_thread);
6702 /* If we have events pending, go through handle_inferior_event
6703 again, picking up a pending event at random. This avoids
6704 thread starvation. */
6706 /* But not if we just stepped over a watchpoint in order to let
6707 the instruction execute so we can evaluate its expression.
6708 The set of watchpoints that triggered is recorded in the
6709 breakpoint objects themselves (see bp->watchpoint_triggered).
6710 If we processed another event first, that other event could
6711 clobber this info. */
6712 if (ecs->event_thread->stepping_over_watchpoint)
6713 return 0;
6715 /* The code below is meant to avoid one thread hogging the event
6716 loop by doing constant in-line step overs. If the stepping
6717 thread exited, there's no risk for this to happen, so we can
6718 safely let our caller process the event immediately. */
6719 if (ecs->ws.kind () == TARGET_WAITKIND_THREAD_EXITED)
6720 return 0;
6722 pending = iterate_over_threads (resumed_thread_with_pending_status,
6723 nullptr);
6724 if (pending != nullptr)
6726 struct thread_info *tp = ecs->event_thread;
6727 struct regcache *regcache;
6729 infrun_debug_printf ("found resumed threads with "
6730 "pending events, saving status");
6732 gdb_assert (pending != tp);
6734 /* Record the event thread's event for later. */
6735 save_waitstatus (tp, ecs->ws);
6736 /* This was cleared early, by handle_inferior_event. Set it
6737 so this pending event is considered by
6738 do_target_wait. */
6739 tp->set_resumed (true);
6741 gdb_assert (!tp->executing ());
6743 regcache = get_thread_regcache (tp);
6744 tp->set_stop_pc (regcache_read_pc (regcache));
6746 infrun_debug_printf ("saved stop_pc=%s for %s "
6747 "(currently_stepping=%d)",
6748 paddress (current_inferior ()->arch (),
6749 tp->stop_pc ()),
6750 tp->ptid.to_string ().c_str (),
6751 currently_stepping (tp));
6753 /* This in-line step-over finished; clear this so we won't
6754 start a new one. This is what handle_signal_stop would
6755 do, if we returned false. */
6756 tp->stepping_over_breakpoint = 0;
6758 /* Wake up the event loop again. */
6759 mark_async_event_handler (infrun_async_inferior_event_token);
6761 prepare_to_wait (ecs);
6762 return 1;
6766 return 0;
6769 /* See infrun.h. */
6771 void
6772 notify_signal_received (gdb_signal sig)
6774 interps_notify_signal_received (sig);
6775 gdb::observers::signal_received.notify (sig);
6778 /* See infrun.h. */
6780 void
6781 notify_normal_stop (bpstat *bs, int print_frame)
6783 interps_notify_normal_stop (bs, print_frame);
6784 gdb::observers::normal_stop.notify (bs, print_frame);
6787 /* See infrun.h. */
6789 void notify_user_selected_context_changed (user_selected_what selection)
6791 interps_notify_user_selected_context_changed (selection);
6792 gdb::observers::user_selected_context_changed.notify (selection);
6795 /* Come here when the program has stopped with a signal. */
6797 static void
6798 handle_signal_stop (struct execution_control_state *ecs)
6800 frame_info_ptr frame;
6801 struct gdbarch *gdbarch;
6802 int stopped_by_watchpoint;
6803 enum stop_kind stop_soon;
6804 int random_signal;
6806 gdb_assert (ecs->ws.kind () == TARGET_WAITKIND_STOPPED);
6808 ecs->event_thread->set_stop_signal (ecs->ws.sig ());
6810 /* Do we need to clean up the state of a thread that has
6811 completed a displaced single-step? (Doing so usually affects
6812 the PC, so do it here, before we set stop_pc.) */
6813 if (finish_step_over (ecs))
6814 return;
6816 /* If we either finished a single-step or hit a breakpoint, but
6817 the user wanted this thread to be stopped, pretend we got a
6818 SIG0 (generic unsignaled stop). */
6819 if (ecs->event_thread->stop_requested
6820 && ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP)
6821 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
6823 ecs->event_thread->set_stop_pc
6824 (regcache_read_pc (get_thread_regcache (ecs->event_thread)));
6826 context_switch (ecs);
6828 if (deprecated_context_hook)
6829 deprecated_context_hook (ecs->event_thread->global_num);
6831 if (debug_infrun)
6833 struct regcache *regcache = get_thread_regcache (ecs->event_thread);
6834 struct gdbarch *reg_gdbarch = regcache->arch ();
6836 infrun_debug_printf
6837 ("stop_pc=%s", paddress (reg_gdbarch, ecs->event_thread->stop_pc ()));
6838 if (target_stopped_by_watchpoint ())
6840 CORE_ADDR addr;
6842 infrun_debug_printf ("stopped by watchpoint");
6844 if (target_stopped_data_address (current_inferior ()->top_target (),
6845 &addr))
6846 infrun_debug_printf ("stopped data address=%s",
6847 paddress (reg_gdbarch, addr));
6848 else
6849 infrun_debug_printf ("(no data address available)");
6853 /* This originates from start_remote(), start_inferior() and
6854 shared library hook functions.  */
6855 stop_soon = get_inferior_stop_soon (ecs);
6856 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
6858 infrun_debug_printf ("quietly stopped");
6859 stop_print_frame = true;
6860 stop_waiting (ecs);
6861 return;
6864 /* This originates from attach_command(). We need to overwrite
6865 the stop_signal here, because some kernels don't ignore a
6866 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
6867 See more comments in inferior.h. On the other hand, if we
6868 get a non-SIGSTOP, report it to the user - assume the backend
6869 will handle the SIGSTOP if it should show up later.
6871 Also consider that the attach is complete when we see a
6872 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
6873 target extended-remote report it instead of a SIGSTOP
6874 (e.g. gdbserver). We already rely on SIGTRAP being our
6875 signal, so this is no exception.
6877 Also consider that the attach is complete when we see a
6878 GDB_SIGNAL_0. In non-stop mode, GDB will explicitly tell
6879 the target to stop all threads of the inferior, in case the
6880 low level attach operation doesn't stop them implicitly. If
6881 they weren't stopped implicitly, then the stub will report a
6882 GDB_SIGNAL_0, meaning: stopped for no particular reason
6883 other than GDB's request. */
6884 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
6885 && (ecs->event_thread->stop_signal () == GDB_SIGNAL_STOP
6886 || ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
6887 || ecs->event_thread->stop_signal () == GDB_SIGNAL_0))
6889 stop_print_frame = true;
6890 stop_waiting (ecs);
6891 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
6892 return;
6895 /* At this point, get hold of the now-current thread's frame. */
6896 frame = get_current_frame ();
6897 gdbarch = get_frame_arch (frame);
6899 /* Pull the single step breakpoints out of the target. */
6900 if (ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP)
6902 struct regcache *regcache;
6903 CORE_ADDR pc;
6905 regcache = get_thread_regcache (ecs->event_thread);
6906 const address_space *aspace = ecs->event_thread->inf->aspace.get ();
6908 pc = regcache_read_pc (regcache);
6910 /* However, before doing so, if this single-step breakpoint was
6911 actually for another thread, set this thread up for moving
6912 past it. */
6913 if (!thread_has_single_step_breakpoint_here (ecs->event_thread,
6914 aspace, pc))
6916 if (single_step_breakpoint_inserted_here_p (aspace, pc))
6918 infrun_debug_printf ("[%s] hit another thread's single-step "
6919 "breakpoint",
6920 ecs->ptid.to_string ().c_str ());
6921 ecs->hit_singlestep_breakpoint = 1;
6924 else
6926 infrun_debug_printf ("[%s] hit its single-step breakpoint",
6927 ecs->ptid.to_string ().c_str ());
6930 delete_just_stopped_threads_single_step_breakpoints ();
6932 if (ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
6933 && ecs->event_thread->control.trap_expected
6934 && ecs->event_thread->stepping_over_watchpoint)
6935 stopped_by_watchpoint = 0;
6936 else
6937 stopped_by_watchpoint = watchpoints_triggered (ecs->ws);
6939 /* If necessary, step over this watchpoint. We'll be back to display
6940 it in a moment. */
6941 if (stopped_by_watchpoint
6942 && (target_have_steppable_watchpoint ()
6943 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
6945 /* At this point, we are stopped at an instruction which has
6946 attempted to write to a piece of memory under control of
6947 a watchpoint. The instruction hasn't actually executed
6948 yet. If we were to evaluate the watchpoint expression
6949 now, we would get the old value, and therefore no change
6950 would seem to have occurred.
6952 In order to make watchpoints work `right', we really need
6953 to complete the memory write, and then evaluate the
6954 watchpoint expression. We do this by single-stepping the
6955 target.
6957 It may not be necessary to disable the watchpoint to step over
6958 it. For example, the PA can (with some kernel cooperation)
6959 single step over a watchpoint without disabling the watchpoint.
6961 It is far more common to need to disable a watchpoint to step
6962 the inferior over it. If we have non-steppable watchpoints,
6963 we must disable the current watchpoint; it's simplest to
6964 disable all watchpoints.
6966 Any breakpoint at PC must also be stepped over -- if there's
6967 one, it will have already triggered before the watchpoint
6968 triggered, and we either already reported it to the user, or
6969 it didn't cause a stop and we called keep_going. In either
6970 case, if there was a breakpoint at PC, we must be trying to
6971 step past it. */
6972 ecs->event_thread->stepping_over_watchpoint = 1;
6973 keep_going (ecs);
6974 return;
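/* Reset the per-stop state before working out how to handle this
   event; the stop bpstat and the stop/print decisions are recomputed
   below.  */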
6977 ecs->event_thread->stepping_over_breakpoint = 0;
6978 ecs->event_thread->stepping_over_watchpoint = 0;
6979 bpstat_clear (&ecs->event_thread->control.stop_bpstat);
6980 ecs->event_thread->control.stop_step = 0;
6981 stop_print_frame = true;
6982 stopped_by_random_signal = 0;
6983 bpstat *stop_chain = nullptr;
6985 /* Hide inlined functions starting here, unless we just performed stepi or
6986 nexti. After stepi and nexti, always show the innermost frame (not any
6987 inline function call sites). */
6988 if (ecs->event_thread->control.step_range_end != 1)
6990 const address_space *aspace = ecs->event_thread->inf->aspace.get ();
6992 /* skip_inline_frames is expensive, so we avoid it if we can
6993 determine that the address is one where functions cannot have
6994 been inlined. This improves performance with inferiors that
6995 load a lot of shared libraries, because the solib event
6996 breakpoint is defined as the address of a function (i.e. not
6997 inline). Note that we have to check the previous PC as well
6998 as the current one to catch cases when we have just
6999 single-stepped off a breakpoint prior to reinstating it.
7000 Note that we're assuming that the code we single-step to is
7001 not inline, but that's not definitive: there's nothing
7002 preventing the event breakpoint function from containing
7003 inlined code, and the single-step ending up there. If the
7004 user had set a breakpoint on that inlined code, the missing
7005 skip_inline_frames call would break things. Fortunately
7006 that's an extremely unlikely scenario. */
7007 if (!pc_at_non_inline_function (aspace,
7008 ecs->event_thread->stop_pc (),
7009 ecs->ws)
7010 && !(ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
7011 && ecs->event_thread->control.trap_expected
7012 && pc_at_non_inline_function (aspace,
7013 ecs->event_thread->prev_pc,
7014 ecs->ws)))
7016 stop_chain = build_bpstat_chain (aspace,
7017 ecs->event_thread->stop_pc (),
7018 ecs->ws);
7019 skip_inline_frames (ecs->event_thread, stop_chain);
7021 /* Re-fetch current thread's frame in case that invalidated
7022 the frame cache. */
7023 frame = get_current_frame ();
7024 gdbarch = get_frame_arch (frame);
7028 if (ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
7029 && ecs->event_thread->control.trap_expected
7030 && gdbarch_single_step_through_delay_p (gdbarch)
7031 && currently_stepping (ecs->event_thread))
7033 /* We're trying to step off a breakpoint. Turns out that we're
7034 also on an instruction that needs to be stepped multiple
7035 times before it has fully executed. E.g., architectures
7036 with a delay slot. It needs to be stepped twice, once for
7037 the instruction and once for the delay slot. */
7038 int step_through_delay
7039 = gdbarch_single_step_through_delay (gdbarch, frame);
7041 if (step_through_delay)
7042 infrun_debug_printf ("step through delay");
7044 if (ecs->event_thread->control.step_range_end == 0
7045 && step_through_delay)
7047 /* The user issued a continue when stopped at a breakpoint.
7048 Set up for another trap and get out of here. */
7049 ecs->event_thread->stepping_over_breakpoint = 1;
7050 keep_going (ecs);
7051 return;
7053 else if (step_through_delay)
7055 /* The user issued a step when stopped at a breakpoint.
7056 Maybe we should stop, maybe we should not - the delay
7057 slot *might* correspond to a line of source. In any
7058 case, don't decide that here, just set
7059 ecs->stepping_over_breakpoint, making sure we
7060 single-step again before breakpoints are re-inserted. */
7061 ecs->event_thread->stepping_over_breakpoint = 1;
7065 /* See if there is a breakpoint/watchpoint/catchpoint/etc. that
7066 handles this event. */
7067 ecs->event_thread->control.stop_bpstat
7068 = bpstat_stop_status (ecs->event_thread->inf->aspace.get (),
7069 ecs->event_thread->stop_pc (),
7070 ecs->event_thread, ecs->ws, stop_chain);
7072 /* Set this again in case evaluating a breakpoint condition called a
7073 function. */
7074 stop_print_frame = true;
7076 /* This is where we handle "moribund" watchpoints. Unlike
7077 software breakpoint traps, hardware watchpoint traps are
7078 always distinguishable from random traps. If no high-level
7079 watchpoint is associated with the reported stop data address
7080 anymore, then the bpstat does not explain the signal ---
7081 simply make sure to ignore it if `stopped_by_watchpoint' is
7082 set. */
7084 if (ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
7085 && !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
7086 GDB_SIGNAL_TRAP)
7087 && stopped_by_watchpoint)
7089 infrun_debug_printf ("no user watchpoint explains watchpoint SIGTRAP, "
7090 "ignoring");
7093 /* NOTE: cagney/2003-03-29: These checks for a random signal
7094 at one stage in the past included checks for an inferior
7095 function call's call dummy's return breakpoint. The original
7096 comment, that went with the test, read:
7098 ``End of a stack dummy. Some systems (e.g. Sony news) give
7099 another signal besides SIGTRAP, so check here as well as
7100 above.''
7102 If someone ever tries to get call dummies on a
7103 non-executable stack to work (where the target would stop
7104 with something like a SIGSEGV), then those tests might need
7105 to be re-instated. Given, however, that the tests were only
7106 enabled when momentary breakpoints were not being used, I
7107 suspect that it won't be the case.
7109 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
7110 be necessary for call dummies on a non-executable stack on
7111 SPARC. */
7113 /* See if the breakpoints module can explain the signal. */
7114 random_signal
7115 = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
7116 ecs->event_thread->stop_signal ());
7118 /* Maybe this was a trap for a software breakpoint that has since
7119 been removed. */
7120 if (random_signal && target_stopped_by_sw_breakpoint ())
7122 if (gdbarch_program_breakpoint_here_p (gdbarch,
7123 ecs->event_thread->stop_pc ()))
7125 struct regcache *regcache;
7126 int decr_pc;
7128 /* Re-adjust PC to what the program would see if GDB was not
7129 debugging it. */
7130 regcache = get_thread_regcache (ecs->event_thread);
7131 decr_pc = gdbarch_decr_pc_after_break (gdbarch);
7132 if (decr_pc != 0)
7134 std::optional<scoped_restore_tmpl<int>>
7135 restore_operation_disable;
7137 if (record_full_is_used ())
7138 restore_operation_disable.emplace
7139 (record_full_gdb_operation_disable_set ());
7141 regcache_write_pc (regcache,
7142 ecs->event_thread->stop_pc () + decr_pc);
7145 else
7147 /* A delayed software breakpoint event. Ignore the trap. */
7148 infrun_debug_printf ("delayed software breakpoint trap, ignoring");
7149 random_signal = 0;
7153 /* Maybe this was a trap for a hardware breakpoint/watchpoint that
7154 has since been removed. */
7155 if (random_signal && target_stopped_by_hw_breakpoint ())
7157 /* A delayed hardware breakpoint event. Ignore the trap. */
7158 infrun_debug_printf ("delayed hardware breakpoint/watchpoint "
7159 "trap, ignoring");
7160 random_signal = 0;
7163 /* If not, perhaps stepping/nexting can. */
7164 if (random_signal)
7165 random_signal = !(ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
7166 && currently_stepping (ecs->event_thread));
7168 /* Perhaps the thread hit a single-step breakpoint of _another_
7169 thread. Single-step breakpoints are transparent to the
7170 breakpoints module. */
7171 if (random_signal)
7172 random_signal = !ecs->hit_singlestep_breakpoint;
7174 /* No? Perhaps we got a moribund watchpoint. */
7175 if (random_signal)
7176 random_signal = !stopped_by_watchpoint;
7178 /* Always stop if the user explicitly requested this thread to
7179 remain stopped. */
7180 if (ecs->event_thread->stop_requested)
7182 random_signal = 1;
7183 infrun_debug_printf ("user-requested stop");
7186 /* For the program's own signals, act according to
7187 the signal handling tables. */
7189 if (random_signal)
7191 /* Signal not for debugging purposes. */
7192 enum gdb_signal stop_signal = ecs->event_thread->stop_signal ();
7194 infrun_debug_printf ("random signal (%s)",
7195 gdb_signal_to_symbol_string (stop_signal));
7197 stopped_by_random_signal = 1;
7199 /* Always stop on signals if we're either just gaining control
7200 of the program, or the user explicitly requested this thread
7201 to remain stopped. */
7202 if (stop_soon != NO_STOP_QUIETLY
7203 || ecs->event_thread->stop_requested
7204 || signal_stop_state (ecs->event_thread->stop_signal ()))
7206 stop_waiting (ecs);
7207 return;
7210 /* Notify observers the signal has "handle print" set. Note we
7211 returned early above if stopping; normal_stop handles the
7212 printing in that case. */
7213 if (signal_print[ecs->event_thread->stop_signal ()])
7215 /* The signal table tells us to print about this signal. */
7216 target_terminal::ours_for_output ();
7217 notify_signal_received (ecs->event_thread->stop_signal ());
7218 target_terminal::inferior ();
7221 /* Clear the signal if it should not be passed. */
7222 if (signal_program[ecs->event_thread->stop_signal ()] == 0)
7223 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
7225 if (ecs->event_thread->prev_pc == ecs->event_thread->stop_pc ()
7226 && ecs->event_thread->control.trap_expected
7227 && ecs->event_thread->control.step_resume_breakpoint == nullptr)
7229 /* We were just starting a new sequence, attempting to
7230 single-step off of a breakpoint and expecting a SIGTRAP.
7231 Instead this signal arrives. This signal will take us out
7232 of the stepping range so GDB needs to remember to, when
7233 the signal handler returns, resume stepping off that
7234 breakpoint. */
7235 /* To simplify things, "continue" is forced to use the same
7236 code paths as single-step - set a breakpoint at the
7237 signal return address and then, once hit, step off that
7238 breakpoint. */
7239 infrun_debug_printf ("signal arrived while stepping over breakpoint");
7241 insert_hp_step_resume_breakpoint_at_frame (frame);
7242 ecs->event_thread->step_after_step_resume_breakpoint = 1;
7243 /* Reset trap_expected to ensure breakpoints are re-inserted. */
7244 ecs->event_thread->control.trap_expected = 0;
7246 /* If we were nexting/stepping some other thread, switch to
7247 it, so that we don't continue it, losing control. */
7248 if (!switch_back_to_stepped_thread (ecs))
7249 keep_going (ecs);
7250 return;
7253 if (ecs->event_thread->stop_signal () != GDB_SIGNAL_0
7254 && (pc_in_thread_step_range (ecs->event_thread->stop_pc (),
7255 ecs->event_thread)
7256 || ecs->event_thread->control.step_range_end == 1)
7257 && (get_stack_frame_id (frame)
7258 == ecs->event_thread->control.step_stack_frame_id)
7259 && ecs->event_thread->control.step_resume_breakpoint == nullptr)
7261 /* The inferior is about to take a signal that will take it
7262 out of the single step range. Set a breakpoint at the
7263 current PC (which is presumably where the signal handler
7264 will eventually return) and then allow the inferior to
7265 run free.
7267 Note that this is only needed for a signal delivered
7268 while in the single-step range. Nested signals aren't a
7269 problem as they eventually all return. */
7270 infrun_debug_printf ("signal may take us out of single-step range");
7272 clear_step_over_info ();
7273 insert_hp_step_resume_breakpoint_at_frame (frame);
7274 ecs->event_thread->step_after_step_resume_breakpoint = 1;
7275 /* Reset trap_expected to ensure breakpoints are re-inserted. */
7276 ecs->event_thread->control.trap_expected = 0;
7277 keep_going (ecs);
7278 return;
7281 /* Note: step_resume_breakpoint may be non-NULL. This occurs
7282 when either there's a nested signal, or when there's a
7283 pending signal enabled just as the signal handler returns
7284 (leaving the inferior at the step-resume-breakpoint without
7285 actually executing it). Either way continue until the
7286 breakpoint is really hit. */
7288 if (!switch_back_to_stepped_thread (ecs))
7290 infrun_debug_printf ("random signal, keep going");
7292 keep_going (ecs);
7294 return;
7297 process_event_stop_test (ecs);
7300 /* Return the address for the beginning of the line. */
7302 CORE_ADDR
7303 update_line_range_start (CORE_ADDR pc, struct execution_control_state *ecs)
7305 /* The line table may have multiple entries for the same source code line.
7306 Given the PC, check the line table and return the PC that corresponds
7307 to the line table entry for the source line that PC is in. */
7308 CORE_ADDR start_line_pc = ecs->event_thread->control.step_range_start;
7309 std::optional<CORE_ADDR> real_range_start;
7311 /* Call find_line_range_start to get the smallest address in the
7312 linetable for multiple Line X entries in the line table. */
7313 real_range_start = find_line_range_start (pc);
7315 if (real_range_start.has_value ())
7316 start_line_pc = *real_range_start;
7318 return start_line_pc;
7321 /* Come here when we've got some debug event / signal we can explain
7322 (IOW, not a random signal), and test whether it should cause a
7323 stop, or whether we should resume the inferior (transparently).
7324 E.g., could be a breakpoint whose condition evaluates false; we
7325 could be still stepping within the line; etc. */
7327 static void
7328 process_event_stop_test (struct execution_control_state *ecs)
7330 struct symtab_and_line stop_pc_sal;
7331 frame_info_ptr frame;
7332 struct gdbarch *gdbarch;
7333 CORE_ADDR jmp_buf_pc;
7334 struct bpstat_what what;
7336 /* Handle cases caused by hitting a breakpoint. */
7338 frame = get_current_frame ();
7339 gdbarch = get_frame_arch (frame);
7341 what = bpstat_what (ecs->event_thread->control.stop_bpstat);
7343 if (what.call_dummy)
7345 stop_stack_dummy = what.call_dummy;
7348 /* A few breakpoint types have callbacks associated (e.g.,
7349 bp_jit_event). Run them now. */
7350 bpstat_run_callbacks (ecs->event_thread->control.stop_bpstat);
7352 /* If we hit an internal event that triggers symbol changes, the
7353 current frame will be invalidated within bpstat_what (e.g., if we
7354 hit an internal solib event). Re-fetch it. */
7355 frame = get_current_frame ();
7356 gdbarch = get_frame_arch (frame);
7358 /* Shorthand to make if statements smaller. */
7359 struct frame_id original_frame_id
7360 = ecs->event_thread->control.step_frame_id;
7361 struct frame_id curr_frame_id = get_frame_id (get_current_frame ());
7363 switch (what.main_action)
7365 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
7366 /* If we hit the breakpoint at longjmp while stepping, we
7367 install a momentary breakpoint at the target of the
7368 jmp_buf. */
7370 infrun_debug_printf ("BPSTAT_WHAT_SET_LONGJMP_RESUME");
7372 ecs->event_thread->stepping_over_breakpoint = 1;
7374 if (what.is_longjmp)
7376 struct value *arg_value;
7378 /* If we set the longjmp breakpoint via a SystemTap probe,
7379 then use it to extract the arguments. The destination PC
7380 is the third argument to the probe. */
7381 arg_value = probe_safe_evaluate_at_pc (frame, 2);
7382 if (arg_value)
7384 jmp_buf_pc = value_as_address (arg_value);
7385 jmp_buf_pc = gdbarch_addr_bits_remove (gdbarch, jmp_buf_pc);
7387 else if (!gdbarch_get_longjmp_target_p (gdbarch)
7388 || !gdbarch_get_longjmp_target (gdbarch,
7389 frame, &jmp_buf_pc))
7391 infrun_debug_printf ("BPSTAT_WHAT_SET_LONGJMP_RESUME "
7392 "(!gdbarch_get_longjmp_target)");
7393 keep_going (ecs);
7394 return;
7397 /* Insert a breakpoint at resume address. */
7398 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
7400 else
7401 check_exception_resume (ecs, frame);
7402 keep_going (ecs);
7403 return;
7405 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
7407 frame_info_ptr init_frame;
7409 /* There are several cases to consider.
7411 1. The initiating frame no longer exists. In this case we
7412 must stop, because the exception or longjmp has gone too
7413 far.
7415 2. The initiating frame exists, and is the same as the
7416 current frame. We stop, because the exception or longjmp
7417 has been caught.
7419 3. The initiating frame exists and is different from the
7420 current frame. This means the exception or longjmp has
7421 been caught beneath the initiating frame, so keep going.
7423 4. longjmp breakpoint has been placed just to protect
7424 against stale dummy frames and user is not interested in
7425 stopping around longjmps. */
7427 infrun_debug_printf ("BPSTAT_WHAT_CLEAR_LONGJMP_RESUME");
7429 gdb_assert (ecs->event_thread->control.exception_resume_breakpoint
7430 != nullptr);
7431 delete_exception_resume_breakpoint (ecs->event_thread);
7433 if (what.is_longjmp)
7435 check_longjmp_breakpoint_for_call_dummy (ecs->event_thread);
7437 if (!frame_id_p (ecs->event_thread->initiating_frame))
7439 /* Case 4. */
7440 keep_going (ecs);
7441 return;
7445 init_frame = frame_find_by_id (ecs->event_thread->initiating_frame);
7447 if (init_frame)
7449 if (curr_frame_id == ecs->event_thread->initiating_frame)
7451 /* Case 2. Fall through. */
7453 else
7455 /* Case 3. */
7456 keep_going (ecs);
7457 return;
7461 /* For Cases 1 and 2, remove the step-resume breakpoint, if it
7462 exists. */
7463 delete_step_resume_breakpoint (ecs->event_thread);
7465 end_stepping_range (ecs);
7467 return;
7469 case BPSTAT_WHAT_SINGLE:
7470 infrun_debug_printf ("BPSTAT_WHAT_SINGLE");
7471 ecs->event_thread->stepping_over_breakpoint = 1;
7472 /* Still need to check other stuff, at least the case where we
7473 are stepping and step out of the right range. */
7474 break;
7476 case BPSTAT_WHAT_STEP_RESUME:
7477 infrun_debug_printf ("BPSTAT_WHAT_STEP_RESUME");
7479 delete_step_resume_breakpoint (ecs->event_thread);
7480 if (ecs->event_thread->control.proceed_to_finish
7481 && execution_direction == EXEC_REVERSE)
7483 struct thread_info *tp = ecs->event_thread;
7485 /* We are finishing a function in reverse, and just hit the
7486 step-resume breakpoint at the start address of the
7487 function, and we're almost there -- just need to back up
7488 by one more single-step, which should take us back to the
7489 function call. */
7490 tp->control.step_range_start = tp->control.step_range_end = 1;
7491 keep_going (ecs);
7492 return;
7494 fill_in_stop_func (gdbarch, ecs);
7495 if (ecs->event_thread->stop_pc () == ecs->stop_func_start
7496 && execution_direction == EXEC_REVERSE)
7498 /* We are stepping over a function call in reverse, and just
7499 hit the step-resume breakpoint at the start address of
7500 the function. Go back to single-stepping, which should
7501 take us back to the function call. */
7502 ecs->event_thread->stepping_over_breakpoint = 1;
7503 keep_going (ecs);
7504 return;
7506 break;
7508 case BPSTAT_WHAT_STOP_NOISY:
7509 infrun_debug_printf ("BPSTAT_WHAT_STOP_NOISY");
7510 stop_print_frame = true;
7512 /* Assume the thread stopped for a breakpoint. We'll still check
7513 whether a/the breakpoint is there when the thread is next
7514 resumed. */
7515 ecs->event_thread->stepping_over_breakpoint = 1;
7517 stop_waiting (ecs);
7518 return;
7520 case BPSTAT_WHAT_STOP_SILENT:
7521 infrun_debug_printf ("BPSTAT_WHAT_STOP_SILENT");
7522 stop_print_frame = false;
7524 /* Assume the thread stopped for a breakpoint. We'll still check
7525 whether a/the breakpoint is there when the thread is next
7526 resumed. */
7527 ecs->event_thread->stepping_over_breakpoint = 1;
7528 stop_waiting (ecs);
7529 return;
7531 case BPSTAT_WHAT_HP_STEP_RESUME:
7532 infrun_debug_printf ("BPSTAT_WHAT_HP_STEP_RESUME");
7534 delete_step_resume_breakpoint (ecs->event_thread);
7535 if (ecs->event_thread->step_after_step_resume_breakpoint)
7537 /* Back when the step-resume breakpoint was inserted, we
7538 were trying to single-step off a breakpoint. Go back to
7539 doing that. */
7540 ecs->event_thread->step_after_step_resume_breakpoint = 0;
7541 ecs->event_thread->stepping_over_breakpoint = 1;
7542 keep_going (ecs);
7543 return;
7545 break;
7547 case BPSTAT_WHAT_KEEP_CHECKING:
7548 break;
7551 /* If we stepped a permanent breakpoint and we had a high priority
7552 step-resume breakpoint for the address we stepped, but we didn't
7553 hit it, then we must have stepped into the signal handler. The
7554 step-resume was only necessary to catch the case of _not_
7555 stepping into the handler, so delete it, and fall through to
7556 checking whether the step finished. */
7557 if (ecs->event_thread->stepped_breakpoint)
7559 struct breakpoint *sr_bp
7560 = ecs->event_thread->control.step_resume_breakpoint;
7562 if (sr_bp != nullptr
7563 && sr_bp->first_loc ().permanent
7564 && sr_bp->type == bp_hp_step_resume
7565 && sr_bp->first_loc ().address == ecs->event_thread->prev_pc)
7567 infrun_debug_printf ("stepped permanent breakpoint, stopped in handler");
7568 delete_step_resume_breakpoint (ecs->event_thread);
7569 ecs->event_thread->step_after_step_resume_breakpoint = 0;
7573 /* We come here if we hit a breakpoint but should not stop for it.
7574 Possibly we also were stepping and should stop for that. So fall
7575 through and test for stepping. But, if not stepping, do not
7576 stop. */
7578 /* In all-stop mode, if we're currently stepping but have stopped in
7579 some other thread, we need to switch back to the stepped thread. */
7580 if (switch_back_to_stepped_thread (ecs))
7581 return;
7583 if (ecs->event_thread->control.step_resume_breakpoint)
7585 infrun_debug_printf ("step-resume breakpoint is inserted");
7587 /* Having a step-resume breakpoint overrides anything
7588 else having to do with stepping commands until
7589 that breakpoint is reached. */
7590 keep_going (ecs);
7591 return;
7594 if (ecs->event_thread->control.step_range_end == 0)
7596 infrun_debug_printf ("no stepping, continue");
7597 /* Likewise if we aren't even stepping. */
7598 keep_going (ecs);
7599 return;
7602 /* Re-fetch current thread's frame in case the code above caused
7603 the frame cache to be re-initialized, making our FRAME variable
7604 a dangling pointer. */
7605 frame = get_current_frame ();
7606 gdbarch = get_frame_arch (frame);
7607 fill_in_stop_func (gdbarch, ecs);
7609 /* If stepping through a line, keep going if still within it.
7611 Note that step_range_end is the address of the first instruction
7612 beyond the step range, and NOT the address of the last instruction
7613 within it!
7615 Note also that during reverse execution, we may be stepping
7616 through a function epilogue and therefore must detect when
7617 the current-frame changes in the middle of a line. */
7619 if (pc_in_thread_step_range (ecs->event_thread->stop_pc (),
7620 ecs->event_thread)
7621 && (execution_direction != EXEC_REVERSE
7622 || curr_frame_id == original_frame_id))
7624 infrun_debug_printf
7625 ("stepping inside range [%s-%s]",
7626 paddress (gdbarch, ecs->event_thread->control.step_range_start),
7627 paddress (gdbarch, ecs->event_thread->control.step_range_end));
7629 /* Tentatively re-enable range stepping; `resume' disables it if
7630 necessary (e.g., if we're stepping over a breakpoint or we
7631 have software watchpoints). */
7632 ecs->event_thread->control.may_range_step = 1;
7634 /* When stepping backward, stop at beginning of line range
7635 (unless it's the function entry point, in which case
7636 keep going back to the call point). */
7637 CORE_ADDR stop_pc = ecs->event_thread->stop_pc ();
7638 if (stop_pc == ecs->event_thread->control.step_range_start
7639 && stop_pc != ecs->stop_func_start
7640 && execution_direction == EXEC_REVERSE)
7641 end_stepping_range (ecs);
7642 else
7643 keep_going (ecs);
7645 return;
7648 /* We stepped out of the stepping range. */
7650 /* If we are stepping at the source level and entered the runtime
7651 loader dynamic symbol resolution code...
7653 EXEC_FORWARD: we keep on single stepping until we exit the run
7654 time loader code and reach the callee's address.
7656 EXEC_REVERSE: we've already executed the callee (backward), and
7657 the runtime loader code is handled just like any other
7658 undebuggable function call. Now we need only keep stepping
7659 backward through the trampoline code, and that's handled further
7660 down, so there is nothing for us to do here. */
7662 if (execution_direction != EXEC_REVERSE
7663 && ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
7664 && in_solib_dynsym_resolve_code (ecs->event_thread->stop_pc ())
7665 && (ecs->event_thread->control.step_start_function == nullptr
7666 || !in_solib_dynsym_resolve_code (
7667 ecs->event_thread->control.step_start_function->value_block ()
7668 ->entry_pc ())))
7670 CORE_ADDR pc_after_resolver =
7671 gdbarch_skip_solib_resolver (gdbarch, ecs->event_thread->stop_pc ());
7673 infrun_debug_printf ("stepped into dynsym resolve code");
7675 if (pc_after_resolver)
7677 /* Set up a step-resume breakpoint at the address
7678 indicated by SKIP_SOLIB_RESOLVER. */
7679 symtab_and_line sr_sal;
7680 sr_sal.pc = pc_after_resolver;
7681 sr_sal.pspace = get_frame_program_space (frame);
7683 insert_step_resume_breakpoint_at_sal (gdbarch,
7684 sr_sal, null_frame_id);
7687 keep_going (ecs);
7688 return;
7691 /* Step through an indirect branch thunk. */
7692 if (ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
7693 && gdbarch_in_indirect_branch_thunk (gdbarch,
7694 ecs->event_thread->stop_pc ()))
7696 infrun_debug_printf ("stepped into indirect branch thunk");
7697 keep_going (ecs);
7698 return;
7701 if (ecs->event_thread->control.step_range_end != 1
7702 && (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
7703 || ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
7704 && get_frame_type (frame) == SIGTRAMP_FRAME)
7706 infrun_debug_printf ("stepped into signal trampoline");
7707 /* The inferior, while doing a "step" or "next", has ended up in
7708 a signal trampoline (either by a signal being delivered or by
7709 the signal handler returning). Just single-step until the
7710 inferior leaves the trampoline (either by calling the handler
7711 or returning). */
7712 keep_going (ecs);
7713 return;
7716 /* If we're in the return path from a shared library trampoline,
7717 we want to proceed through the trampoline when stepping. */
7718 /* macro/2012-04-25: This needs to come before the subroutine
7719 call check below as on some targets return trampolines look
7720 like subroutine calls (MIPS16 return thunks). */
7721 if (gdbarch_in_solib_return_trampoline (gdbarch,
7722 ecs->event_thread->stop_pc (),
7723 ecs->stop_func_name)
7724 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
7726 /* Determine where this trampoline returns. */
7727 CORE_ADDR stop_pc = ecs->event_thread->stop_pc ();
7728 CORE_ADDR real_stop_pc
7729 = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
7731 infrun_debug_printf ("stepped into solib return tramp");
7733 /* Only proceed through if we know where it's going. */
7734 if (real_stop_pc)
7736 /* And put the step-breakpoint there and go until there. */
7737 symtab_and_line sr_sal;
7738 sr_sal.pc = real_stop_pc;
7739 sr_sal.section = find_pc_overlay (sr_sal.pc);
7740 sr_sal.pspace = get_frame_program_space (frame);
7742 /* Do not specify what the fp should be when we stop since
7743 on some machines the prologue is where the new fp value
7744 is established. */
7745 insert_step_resume_breakpoint_at_sal (gdbarch,
7746 sr_sal, null_frame_id);
7748 /* Restart without fiddling with the step ranges or
7749 other state. */
7750 keep_going (ecs);
7751 return;
7755 /* Check for subroutine calls. The check for the current frame
7756 equalling the step ID is not necessary - the check of the
7757 previous frame's ID is sufficient - but it is a common case and
7758 cheaper than checking the previous frame's ID.
7760 NOTE: frame_id::operator== will never report two invalid frame IDs as
7761 being equal, so to get into this block, both the current and
7762 previous frame must have valid frame IDs. */
7763 /* The outer_frame_id check is a heuristic to detect stepping
7764 through startup code. If we step over an instruction which
7765 sets the stack pointer from an invalid value to a valid value,
7766 we may detect that as a subroutine call from the mythical
7767 "outermost" function. This could be fixed by marking
7768 outermost frames as !stack_p,code_p,special_p. Then the
7769 initial outermost frame, before sp was valid, would
7770 have code_addr == &_start. See the comment in frame_id::operator==
7771 for more. */
7773 /* We want "nexti" to step into, not over, signal handlers invoked
7774 by the kernel, therefore this subroutine check should not trigger
7775 for a signal handler invocation. On most platforms, this is already
7776 not the case, as the kernel puts a signal trampoline frame onto the
7777 stack to handle proper return after the handler, and therefore at this
7778 point, the current frame is a grandchild of the step frame, not a
7779 child. However, on some platforms, the kernel actually uses a
7780 trampoline to handle *invocation* of the handler. In that case,
7781 when executing the first instruction of the trampoline, this check
7782 would erroneously detect the trampoline invocation as a subroutine
7783 call. Fix this by checking for SIGTRAMP_FRAME. */
7784 if ((get_stack_frame_id (frame)
7785 != ecs->event_thread->control.step_stack_frame_id)
7786 && get_frame_type (frame) != SIGTRAMP_FRAME
7787 && ((frame_unwind_caller_id (get_current_frame ())
7788 == ecs->event_thread->control.step_stack_frame_id)
7789 && ((ecs->event_thread->control.step_stack_frame_id
7790 != outer_frame_id)
7791 || (ecs->event_thread->control.step_start_function
7792 != find_pc_function (ecs->event_thread->stop_pc ())))))
7794 CORE_ADDR stop_pc = ecs->event_thread->stop_pc ();
7795 CORE_ADDR real_stop_pc;
7797 infrun_debug_printf ("stepped into subroutine");
7799 if (ecs->event_thread->control.step_over_calls == STEP_OVER_NONE)
7801 /* I presume that step_over_calls is only 0 when we're
7802 supposed to be stepping at the assembly language level
7803 ("stepi"). Just stop. */
7804 /* And this works the same backward as frontward. MVS */
7805 end_stepping_range (ecs);
7806 return;
7809 /* Reverse stepping through solib trampolines. */
7811 if (execution_direction == EXEC_REVERSE
7812 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
7813 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
7814 || (ecs->stop_func_start == 0
7815 && in_solib_dynsym_resolve_code (stop_pc))))
7817 /* Any solib trampoline code can be handled in reverse
7818 by simply continuing to single-step. We have already
7819 executed the solib function (backwards), and a few
7820 steps will take us back through the trampoline to the
7821 caller. */
7822 keep_going (ecs);
7823 return;
7826 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
7828 /* We're doing a "next".
7830 Normal (forward) execution: set a breakpoint at the
7831 callee's return address (the address at which the caller
7832 will resume).
7834 Reverse (backward) execution: set the step-resume
7835 breakpoint at the start of the function that we just
7836 stepped into (backwards), and continue to there. When we
7837 get there, we'll need to single-step back to the caller. */
7839 if (execution_direction == EXEC_REVERSE)
7841 /* If we're already at the start of the function, we've either
7842 just stepped backward into a single instruction function,
7843 or stepped back out of a signal handler to the first instruction
7844 of the function. Just keep going, which will single-step back
7845 to the caller. */
7846 if (ecs->stop_func_start != stop_pc && ecs->stop_func_start != 0)
7848 /* Normal function call return (static or dynamic). */
7849 symtab_and_line sr_sal;
7850 sr_sal.pc = ecs->stop_func_start;
7851 sr_sal.pspace = get_frame_program_space (frame);
7852 insert_step_resume_breakpoint_at_sal (gdbarch,
7853 sr_sal, get_stack_frame_id (frame));
7856 else
7857 insert_step_resume_breakpoint_at_caller (frame);
7859 keep_going (ecs);
7860 return;
7863 /* If we are in a function call trampoline (a stub between the
7864 calling routine and the real function), locate the real
7865 function. That's what tells us (a) whether we want to step
7866 into it at all, and (b) what prologue we want to run to the
7867 end of, if we do step into it. */
7868 real_stop_pc = skip_language_trampoline (frame, stop_pc);
7869 if (real_stop_pc == 0)
7870 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
7871 if (real_stop_pc != 0)
7872 ecs->stop_func_start = real_stop_pc;
7874 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
7876 symtab_and_line sr_sal;
7877 sr_sal.pc = ecs->stop_func_start;
7878 sr_sal.pspace = get_frame_program_space (frame);
7880 insert_step_resume_breakpoint_at_sal (gdbarch,
7881 sr_sal, null_frame_id);
7882 keep_going (ecs);
7883 return;
7886 /* If we have line number information for the function we are
7887 thinking of stepping into and the function isn't on the skip
7888 list, step into it.
7890 If there are several symtabs at that PC (e.g. with include
7891 files), we just want to know whether *any* of them have line
7892 numbers. find_pc_line handles this. */
7894 struct symtab_and_line tmp_sal;
7896 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
7897 if (tmp_sal.line != 0
7898 && !function_name_is_marked_for_skip (ecs->stop_func_name,
7899 tmp_sal)
7900 && !inline_frame_is_marked_for_skip (true, ecs->event_thread))
7902 if (execution_direction == EXEC_REVERSE)
7903 handle_step_into_function_backward (gdbarch, ecs);
7904 else
7905 handle_step_into_function (gdbarch, ecs);
7906 return;
7910 /* If we have no line number and the step-stop-if-no-debug is
7911 set, we stop the step so that the user has a chance to switch
7912 to assembly mode. */
7913 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
7914 && step_stop_if_no_debug)
7916 end_stepping_range (ecs);
7917 return;
7920 if (execution_direction == EXEC_REVERSE)
7922 /* If we're already at the start of the function, we've either just
7923 stepped backward into a single instruction function without line
7924 number info, or stepped back out of a signal handler to the first
7925 instruction of the function without line number info. Just keep
7926 going, which will single-step back to the caller. */
7927 if (ecs->stop_func_start != stop_pc)
7929 /* Set a breakpoint at callee's start address.
7930 From there we can step once and be back in the caller. */
7931 symtab_and_line sr_sal;
7932 sr_sal.pc = ecs->stop_func_start;
7933 sr_sal.pspace = get_frame_program_space (frame);
7934 insert_step_resume_breakpoint_at_sal (gdbarch,
7935 sr_sal, null_frame_id);
7938 else
7939 /* Set a breakpoint at callee's return address (the address
7940 at which the caller will resume). */
7941 insert_step_resume_breakpoint_at_caller (frame);
7943 keep_going (ecs);
7944 return;
7947 /* Reverse stepping through solib trampolines. */
7949 if (execution_direction == EXEC_REVERSE
7950 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
7952 CORE_ADDR stop_pc = ecs->event_thread->stop_pc ();
7954 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
7955 || (ecs->stop_func_start == 0
7956 && in_solib_dynsym_resolve_code (stop_pc)))
7958 /* Any solib trampoline code can be handled in reverse
7959 by simply continuing to single-step. We have already
7960 executed the solib function (backwards), and a few
7961 steps will take us back through the trampoline to the
7962 caller. */
7963 keep_going (ecs);
7964 return;
7966 else if (in_solib_dynsym_resolve_code (stop_pc))
7968 /* Stepped backward into the solib dynsym resolver.
7969 Set a breakpoint at its start and continue, then
7970 one more step will take us out. */
7971 symtab_and_line sr_sal;
7972 sr_sal.pc = ecs->stop_func_start;
7973 sr_sal.pspace = get_frame_program_space (frame);
7974 insert_step_resume_breakpoint_at_sal (gdbarch,
7975 sr_sal, null_frame_id);
7976 keep_going (ecs);
7977 return;
7981 /* This always returns the sal for the inner-most frame when we are in a
7982 stack of inlined frames, even if GDB actually believes that it is in a
7983 more outer frame. This is checked for below by calls to
7984 inline_skipped_frames. */
7985 stop_pc_sal = find_pc_line (ecs->event_thread->stop_pc (), 0);
7987 /* NOTE: tausq/2004-05-24: This if block used to be done before all
7988 the trampoline processing logic; however, there are some trampolines
7989 that have no names, so we should do trampoline handling first. */
7990 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
7991 && ecs->stop_func_name == nullptr
7992 && stop_pc_sal.line == 0)
7994 infrun_debug_printf ("stepped into undebuggable function");
7996 /* The inferior just stepped into, or returned to, an
7997 undebuggable function (where there is no debugging information
7998 and no line number corresponding to the address where the
7999 inferior stopped). Since we want to skip this kind of code,
8000 we keep going until the inferior returns from this
8001 function - unless the user has asked us not to (via
8002 set step-mode) or we no longer know how to get back
8003 to the call site. */
8004 if (step_stop_if_no_debug
8005 || !frame_id_p (frame_unwind_caller_id (frame)))
8007 /* If we have no line number and the step-stop-if-no-debug
8008 is set, we stop the step so that the user has a chance to
8009 switch to assembly mode. */
8010 end_stepping_range (ecs);
8011 return;
8013 else
8015 /* Set a breakpoint at callee's return address (the address
8016 at which the caller will resume). */
8017 insert_step_resume_breakpoint_at_caller (frame);
8018 keep_going (ecs);
8019 return;
8023 if (execution_direction == EXEC_REVERSE
8024 && ecs->event_thread->control.proceed_to_finish
8025 && ecs->event_thread->stop_pc () >= ecs->stop_func_alt_start
8026 && ecs->event_thread->stop_pc () < ecs->stop_func_start)
8028 /* We are executing the reverse-finish command, and the system
8029 supports multiple entry points. If we are between the entry
8030 points, single-step back to the alternate entry point. If we
8031 are at the alternate entry point, we just need to back up by
8032 one more single-step, which should take us back to the
8033 function call. */
8034 ecs->event_thread->control.step_range_start
8035 = ecs->event_thread->control.step_range_end = 1;
8036 keep_going (ecs);
8037 return;
8041 if (ecs->event_thread->control.step_range_end == 1)
8043 /* It is stepi or nexti. We always want to stop stepping after
8044 one instruction. */
8045 infrun_debug_printf ("stepi/nexti");
8046 end_stepping_range (ecs);
8047 return;
8050 if (stop_pc_sal.line == 0)
8052 /* We have no line number information. That means to stop
8053 stepping (does this always happen right after one instruction,
8054 when we do "s" in a function with no line numbers,
8055 or can this happen as a result of a return or longjmp?). */
8056 infrun_debug_printf ("line number info");
8057 end_stepping_range (ecs);
8058 return;
8061 /* Look for "calls" to inlined functions, part one. If the inline
8062 frame machinery detected some skipped call sites, we have entered
8063 a new inline function. */
8065 if ((curr_frame_id == original_frame_id)
8066 && inline_skipped_frames (ecs->event_thread))
8068 infrun_debug_printf ("stepped into inlined function");
8070 symtab_and_line call_sal = find_frame_sal (get_current_frame ());
8072 if (ecs->event_thread->control.step_over_calls != STEP_OVER_ALL)
8074 /* For "step", we're going to stop. But if the call site
8075 for this inlined function is on the same source line as
8076 we were previously stepping, go down into the function
8077 first. Otherwise stop at the call site. */
8079 if (call_sal.line == ecs->event_thread->current_line
8080 && call_sal.symtab == ecs->event_thread->current_symtab)
8082 step_into_inline_frame (ecs->event_thread);
8083 if (inline_frame_is_marked_for_skip (false, ecs->event_thread))
8085 keep_going (ecs);
8086 return;
8090 end_stepping_range (ecs);
8091 return;
8093 else
8095 /* For "next", we should stop at the call site if it is on a
8096 different source line. Otherwise continue through the
8097 inlined function. */
8098 if (call_sal.line == ecs->event_thread->current_line
8099 && call_sal.symtab == ecs->event_thread->current_symtab)
8100 keep_going (ecs);
8101 else
8102 end_stepping_range (ecs);
8103 return;
8107 /* Look for "calls" to inlined functions, part two. If we are still
8108 in the same real function we were stepping through, but we have
8109 to go further up to find the exact frame ID, we are stepping
8110 through a more inlined call beyond its call site. */
8112 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
8113 && (curr_frame_id != original_frame_id)
8114 && stepped_in_from (get_current_frame (), original_frame_id))
8116 infrun_debug_printf ("stepping through inlined function");
8118 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL
8119 || inline_frame_is_marked_for_skip (false, ecs->event_thread))
8120 keep_going (ecs);
8121 else
8122 end_stepping_range (ecs);
8123 return;
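/* Whether to update the thread's recorded step info (line, symtab,
   frame) at the end; cleared below in the cases where the original
   step info must be preserved.  */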
8126 bool refresh_step_info = true;
8127 if ((ecs->event_thread->stop_pc () == stop_pc_sal.pc)
8128 && (ecs->event_thread->current_line != stop_pc_sal.line
8129 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
8131 /* We are at a different line. */
8133 if (stop_pc_sal.is_stmt)
8135 if (execution_direction == EXEC_REVERSE)
8137 /* We are stepping backwards; make sure we have reached the
8138 beginning of the line. */
8139 CORE_ADDR stop_pc = ecs->event_thread->stop_pc ();
8140 CORE_ADDR start_line_pc
8141 = update_line_range_start (stop_pc, ecs);
8143 if (stop_pc != start_line_pc)
8145 /* Have not reached the beginning of the source code line.
8146 Set a step range. Execution should stop in any function
8147 calls we execute back into before reaching the beginning
8148 of the line. */
8149 ecs->event_thread->control.step_range_start
8150 = start_line_pc;
8151 ecs->event_thread->control.step_range_end = stop_pc;
8152 set_step_info (ecs->event_thread, frame, stop_pc_sal);
8153 keep_going (ecs);
8154 return;
8158 /* We are at the start of a statement.
8160 So stop. Note that we don't stop if we step into the middle of a
8161 statement. That is said to make things like for (;;) statements
8162 work better. */
8163 infrun_debug_printf ("stepped to a different line");
8164 end_stepping_range (ecs);
8165 return;
8167 else if (curr_frame_id == original_frame_id)
8169 /* We are not at the start of a statement, and we have not changed
8170 frame.
8172 We ignore this line table entry, and continue stepping forward,
8173 looking for a better place to stop. */
8174 refresh_step_info = false;
8175 infrun_debug_printf ("stepped to a different line, but "
8176 "it's not the start of a statement");
8178 else
8180 /* We are not at the start of a statement, and we have changed frame.
8182 We ignore this line table entry, and continue stepping forward,
8183 looking for a better place to stop. Keep refresh_step_info at
8184 true to note that the frame has changed, but ignore the line
8185 number to make sure we don't ignore a subsequent entry with the
8186 same line number. */
8187 stop_pc_sal.line = 0;
8188 infrun_debug_printf ("stepped to a different frame, but "
8189 "it's not the start of a statement");
8192 else if (execution_direction == EXEC_REVERSE
8193 && curr_frame_id != original_frame_id
8194 && original_frame_id.code_addr_p && curr_frame_id.code_addr_p
8195 && original_frame_id.code_addr == curr_frame_id.code_addr)
8197 /* If we enter here, we're leaving a recursive function call. In this
8198 situation, we shouldn't refresh the step information, because if we
8199 do, we'll lose the frame_id of when we started stepping, and this
8200 will make GDB not know we need to print frame information. */
8201 refresh_step_info = false;
8202 infrun_debug_printf ("reverse stepping, left a recursive call, don't "
8203 "update step info so we remember we left a frame");
8206 /* We aren't done stepping.
8208 Optimize by setting the stepping range to the line.
8209 (We might not be in the original line, but if we entered a
8210 new line in mid-statement, we continue stepping. This makes
8211 things like for(;;) statements work better.)
8213 If we entered a SAL that indicates a non-statement line table entry,
8214 then we update the stepping range, but we don't update the step info,
8215 which includes things like the line number we are stepping away from.
8216 This means we will stop when we find a line table entry that is marked
8217 as is-statement, even if it matches the non-statement one we just
8218 stepped into. */
8220 ecs->event_thread->control.step_range_start = stop_pc_sal.pc;
8221 ecs->event_thread->control.step_range_end = stop_pc_sal.end;
8222 ecs->event_thread->control.may_range_step = 1;
8223 infrun_debug_printf
8224 ("updated step range, start = %s, end = %s, may_range_step = %d",
8225 paddress (gdbarch, ecs->event_thread->control.step_range_start),
8226 paddress (gdbarch, ecs->event_thread->control.step_range_end),
8227 ecs->event_thread->control.may_range_step);
8228 if (refresh_step_info)
8229 set_step_info (ecs->event_thread, frame, stop_pc_sal);
8231 infrun_debug_printf ("keep going");
8233 if (execution_direction == EXEC_REVERSE)
8235 CORE_ADDR stop_pc = ecs->event_thread->stop_pc ();
8237 /* Make sure the step range starts at the beginning of the line. */
8238 if (stop_pc != ecs->event_thread->control.step_range_start)
8239 ecs->event_thread->control.step_range_start
8240 = update_line_range_start (stop_pc, ecs);
8243 keep_going (ecs);
8246 static bool restart_stepped_thread (process_stratum_target *resume_target,
8247 ptid_t resume_ptid);
8249 /* In all-stop mode, if we're currently stepping but have stopped in
8250 some other thread, we may need to switch back to the stepped
8251 thread. Returns true if we set the inferior running, false if we left
8252 it stopped (and the event needs further processing). */
8254 static bool
8255 switch_back_to_stepped_thread (struct execution_control_state *ecs)
8257 if (!target_is_non_stop_p ())
8259 /* If any thread is blocked on some internal breakpoint, and we
8260 simply need to step over that breakpoint to get it going
8261 again, do that first. */
8263 /* However, if we see an event for the stepping thread, then we
8264 know all other threads have been moved past their breakpoints
8265 already. Let the caller check whether the step is finished,
8266 etc., before deciding to move it past a breakpoint. */
8267 if (ecs->event_thread->control.step_range_end != 0)
8268 return false;
8270 /* Check if the current thread is blocked on an incomplete
8271 step-over, interrupted by a random signal. */
8272 if (ecs->event_thread->control.trap_expected
8273 && ecs->event_thread->stop_signal () != GDB_SIGNAL_TRAP)
8275 infrun_debug_printf
8276 ("need to finish step-over of [%s]",
8277 ecs->event_thread->ptid.to_string ().c_str ());
8278 keep_going (ecs);
8279 return true;
8282 /* Check if the current thread is blocked by a single-step
8283 breakpoint of another thread. */
8284 if (ecs->hit_singlestep_breakpoint)
8286 infrun_debug_printf ("need to step [%s] over single-step breakpoint",
8287 ecs->ptid.to_string ().c_str ());
8288 keep_going (ecs);
8289 return true;
8292 /* If this thread needs yet another step-over (e.g., stepping
8293 through a delay slot), do it first before moving on to
8294 another thread. */
8295 if (thread_still_needs_step_over (ecs->event_thread))
8297 infrun_debug_printf
8298 ("thread [%s] still needs step-over",
8299 ecs->event_thread->ptid.to_string ().c_str ());
8300 keep_going (ecs);
8301 return true;
8304 /* If scheduler locking applies even if not stepping, there's no
8305 need to walk over threads. Above we've checked whether the
8306 current thread is stepping. If some other thread not the
8307 event thread is stepping, then it must be that scheduler
8308 locking is not in effect. */
8309 if (schedlock_applies (ecs->event_thread))
8310 return false;
8312 /* Otherwise, we no longer expect a trap in the current thread.
8313 Clear the trap_expected flag before switching back -- this is
8314 what keep_going does as well, if we call it. */
8315 ecs->event_thread->control.trap_expected = 0;
8317 /* Likewise, clear the signal if it should not be passed. */
8318 if (!signal_program[ecs->event_thread->stop_signal ()])
8319 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
8321 if (restart_stepped_thread (ecs->target, ecs->ptid))
8323 prepare_to_wait (ecs);
8324 return true;
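/* No stepped thread was restarted; stay with the event thread and
   let the caller process the event.  */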
8327 switch_to_thread (ecs->event_thread);
8330 return false;
8333 /* Look for the thread that was stepping, and resume it.
8334 RESUME_TARGET / RESUME_PTID indicate the set of threads the caller
8335 is resuming. Return true if a thread was started, false
8336 otherwise. */
8338 static bool
8339 restart_stepped_thread (process_stratum_target *resume_target,
8340 ptid_t resume_ptid)
8342 /* Do all pending step-overs before actually proceeding with
8343 step/next/etc. */
8344 if (start_step_over ())
8345 return true;
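/* First pass: prefer a thread that was interrupted in the middle of
   a step-over (trap_expected); the second pass below looks for a
   thread that was range-stepping.  */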
8347 for (thread_info *tp : all_threads_safe ())
8349 if (tp->state == THREAD_EXITED)
8350 continue;
8352 if (tp->has_pending_waitstatus ())
8353 continue;
8355 /* Ignore threads of processes the caller is not
8356 resuming. */
8357 if (!sched_multi
8358 && (tp->inf->process_target () != resume_target
8359 || tp->inf->pid != resume_ptid.pid ()))
8360 continue;
8362 if (tp->control.trap_expected)
8364 infrun_debug_printf ("switching back to stepped thread (step-over)");
8366 if (keep_going_stepped_thread (tp))
8367 return true;
8371 for (thread_info *tp : all_threads_safe ())
8373 if (tp->state == THREAD_EXITED)
8374 continue;
8376 if (tp->has_pending_waitstatus ())
8377 continue;
8379 /* Ignore threads of processes the caller is not
8380 resuming. */
8381 if (!sched_multi
8382 && (tp->inf->process_target () != resume_target
8383 || tp->inf->pid != resume_ptid.pid ()))
8384 continue;
8386 /* Did we find the stepping thread? */
8387 if (tp->control.step_range_end)
8389 infrun_debug_printf ("switching back to stepped thread (stepping)");
8391 if (keep_going_stepped_thread (tp))
8392 return true;
8396 return false;
8399 /* See infrun.h. */
8401 void
8402 restart_after_all_stop_detach (process_stratum_target *proc_target)
8404 /* Note we don't check target_is_non_stop_p() here, because the
8405 current inferior may no longer have a process_stratum target
8406 pushed, as we just detached. */
8408 /* See if we have a THREAD_RUNNING thread that needs to be
8409 re-resumed. If we have any thread that is already executing,
8410 then we don't need to resume the target -- it has already been
8411 resumed. With the remote target (in all-stop), it's even
8412 impossible to issue another resumption if the target is already
8413 resumed, until the target reports a stop. */
8414 for (thread_info *thr : all_threads (proc_target))
8416 if (thr->state != THREAD_RUNNING)
8417 continue;
8419 /* If we have any thread that is already executing, then we
8420 don't need to resume the target -- it has already been
8421 resumed. */
8422 if (thr->executing ())
8423 return;
8425 /* If we have a pending event to process, skip resuming the
8426 target and go straight to processing it. */
8427 if (thr->resumed () && thr->has_pending_waitstatus ())
8428 return;
8431 /* Alright, we need to re-resume the target. If a thread was
8432 stepping, we need to restart it stepping. */
8433 if (restart_stepped_thread (proc_target, minus_one_ptid))
8434 return;
8436 /* Otherwise, find the first THREAD_RUNNING thread and resume
8437 it. */
8438 for (thread_info *thr : all_threads (proc_target))
8440 if (thr->state != THREAD_RUNNING)
8441 continue;
8443 execution_control_state ecs (thr);
8444 switch_to_thread (thr);
8445 keep_going (&ecs);
8446 return;
8450 /* Set a previously stepped thread back to stepping. Returns true on
8451 success, false if the resume is not possible (e.g., the thread
8452 vanished). */
8454 static bool
8455 keep_going_stepped_thread (struct thread_info *tp)
8457 frame_info_ptr frame;
8459 /* If the stepping thread exited, then don't try to switch back and
8460 resume it, which could fail in several different ways depending
8461 on the target. Instead, just keep going.
8463 We can find a stepping dead thread in the thread list in two
8464 cases:
8466 - The target supports thread exit events, and when the target
8467 tries to delete the thread from the thread list, inferior_ptid
8468 pointed at the exiting thread. In such a case, calling
8469 delete_thread does not really remove the thread from the list;
8470 instead, the thread is left listed, with 'exited' state.
8472 - The target's debug interface does not support thread exit
8473 events, and so we have no idea whatsoever if the previously
8474 stepping thread is still alive. For that reason, we need to
8475 synchronously query the target now. */
8477 if (tp->state == THREAD_EXITED || !target_thread_alive (tp->ptid))
8479 infrun_debug_printf ("not resuming previously stepped thread, it has "
8480 "vanished");
8482 delete_thread (tp);
8483 return false;
8486 infrun_debug_printf ("resuming previously stepped thread");
8488 execution_control_state ecs (tp);
8489 switch_to_thread (tp);
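/* Re-read the thread's stop PC from the target so it can be compared
   against PREV_PC below.  */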
8491 tp->set_stop_pc (regcache_read_pc (get_thread_regcache (tp)));
8492 frame = get_current_frame ();
8494 /* If the PC of the thread we were trying to single-step has
8495 changed, then that thread has trapped or been signaled, but the
8496 event has not been reported to GDB yet. Re-poll the target
8497 looking for this particular thread's event (i.e. temporarily
8498 enable schedlock) by:
8500 - setting a break at the current PC
8501 - resuming that particular thread, only (by setting trap
8502 expected)
8504 This prevents us from continuously moving the single-step breakpoint
8505 forward, one instruction at a time, overstepping. */
8507 if (tp->stop_pc () != tp->prev_pc)
8509 ptid_t resume_ptid;
8511 infrun_debug_printf ("expected thread advanced also (%s -> %s)",
8512 paddress (current_inferior ()->arch (), tp->prev_pc),
8513 paddress (current_inferior ()->arch (),
8514 tp->stop_pc ()));
8516 /* Clear the info of the previous step-over, as it's no longer
8517 valid (if the thread was trying to step over a breakpoint, it
8518 has already succeeded). It's what keep_going would do too,
8519 if we called it. Do this before trying to insert the sss
8520 breakpoint, otherwise if we were previously trying to step
8521 over this exact address in another thread, the breakpoint is
8522 skipped. */
8523 clear_step_over_info ();
8524 tp->control.trap_expected = 0;
8526 insert_single_step_breakpoint (get_frame_arch (frame),
8527 get_frame_address_space (frame),
8528 tp->stop_pc ());
8530 tp->set_resumed (true);
8531 resume_ptid = internal_resume_ptid (tp->control.stepping_command);
8532 do_target_resume (resume_ptid, false, GDB_SIGNAL_0);
8534 else
8536 infrun_debug_printf ("expected thread still hasn't advanced");
8538 keep_going_pass_signal (&ecs);
8541 return true;
8544 /* Is thread TP in the middle of (software or hardware)
8545 single-stepping? (Note the result of this function must never be
8546 passed directly as target_resume's STEP parameter.) */
8548 static bool
8549 currently_stepping (struct thread_info *tp)
8551 return ((tp->control.step_range_end
8552 && tp->control.step_resume_breakpoint == nullptr)
8553 || tp->control.trap_expected
8554 || tp->stepped_breakpoint
8555 || bpstat_should_step ());
8558 /* Inferior has stepped into a subroutine call with source code that
8559 we should not step over. Step to the first line of code in
8560 it. */
8562 static void
8563 handle_step_into_function (struct gdbarch *gdbarch,
8564 struct execution_control_state *ecs)
8566 fill_in_stop_func (gdbarch, ecs);
8568 compunit_symtab *cust
8569 = find_pc_compunit_symtab (ecs->event_thread->stop_pc ());
8570 if (cust != nullptr && cust->language () != language_asm)
8571 ecs->stop_func_start
8572 = gdbarch_skip_prologue_noexcept (gdbarch, ecs->stop_func_start);
8574 symtab_and_line stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
8575 /* Use the step_resume_break to step until the end of the prologue,
8576 even if that involves jumps (as it seems to on the vax under
8577 4.2). */
8578 /* If the prologue ends in the middle of a source line, continue to
8579 the end of that source line (if it is still within the function).
8580 Otherwise, just go to end of prologue. */
8581 if (stop_func_sal.end
8582 && stop_func_sal.pc != ecs->stop_func_start
8583 && stop_func_sal.end < ecs->stop_func_end)
8584 ecs->stop_func_start = stop_func_sal.end;
8586 /* Architectures which require breakpoint adjustment might not be able
8587 to place a breakpoint at the computed address. If so, the test
8588 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
8589 ecs->stop_func_start to an address at which a breakpoint may be
8590 legitimately placed.
8592 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
8593 made, GDB will enter an infinite loop when stepping through
8594 optimized code consisting of VLIW instructions which contain
8595 subinstructions corresponding to different source lines. On
8596 FR-V, it's not permitted to place a breakpoint on any but the
8597 first subinstruction of a VLIW instruction. When a breakpoint is
8598 set, GDB will adjust the breakpoint address to the beginning of
8599 the VLIW instruction. Thus, we need to make the corresponding
8600 adjustment here when computing the stop address. */
8602 if (gdbarch_adjust_breakpoint_address_p (gdbarch))
8604 ecs->stop_func_start
8605 = gdbarch_adjust_breakpoint_address (gdbarch,
8606 ecs->stop_func_start);
8609 if (ecs->stop_func_start == ecs->event_thread->stop_pc ())
8611 /* We are already there: stop now. */
8612 end_stepping_range (ecs);
8613 return;
8615 else
8617 /* Put the step-breakpoint there and go until there. */
8618 symtab_and_line sr_sal;
8619 sr_sal.pc = ecs->stop_func_start;
8620 sr_sal.section = find_pc_overlay (ecs->stop_func_start);
8621 sr_sal.pspace = get_frame_program_space (get_current_frame ());
8623 /* Do not specify what the fp should be when we stop since on
8624 some machines the prologue is where the new fp value is
8625 established. */
8626 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
8628 /* And make sure stepping stops right away then. */
8629 ecs->event_thread->control.step_range_end
8630 = ecs->event_thread->control.step_range_start;
8632 keep_going (ecs);
8635 /* Inferior has stepped backward into a subroutine call with source
8636 code that we should not step over. Step to the beginning of the
8637 last line of code in it. */
8639 static void
8640 handle_step_into_function_backward (struct gdbarch *gdbarch,
8641 struct execution_control_state *ecs)
8643 struct compunit_symtab *cust;
8644 struct symtab_and_line stop_func_sal;
8646 fill_in_stop_func (gdbarch, ecs);
8648 cust = find_pc_compunit_symtab (ecs->event_thread->stop_pc ());
8649 if (cust != nullptr && cust->language () != language_asm)
8650 ecs->stop_func_start
8651 = gdbarch_skip_prologue_noexcept (gdbarch, ecs->stop_func_start);
8653 stop_func_sal = find_pc_line (ecs->event_thread->stop_pc (), 0);
8655 /* OK, we're just going to keep stepping here. */
8656 if (stop_func_sal.pc == ecs->event_thread->stop_pc ())
8658 /* We're there already. Just stop stepping now. */
8659 end_stepping_range (ecs);
8661 else
8663 /* Else just reset the step range and keep going.
8664 No step-resume breakpoint, they don't work for
8665 epilogues, which can have multiple entry paths. */
8666 ecs->event_thread->control.step_range_start = stop_func_sal.pc;
8667 ecs->event_thread->control.step_range_end = stop_func_sal.end;
8668 keep_going (ecs);
8670 return;
8673 /* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
8674 This is used both to skip over functions and to skip over other stretches of code. */
8676 static void
8677 insert_step_resume_breakpoint_at_sal_1 (struct gdbarch *gdbarch,
8678 struct symtab_and_line sr_sal,
8679 struct frame_id sr_id,
8680 enum bptype sr_type)
8682 /* There should never be more than one step-resume or longjmp-resume
8683 breakpoint per thread, so we should never be setting a new
8684 step_resume_breakpoint when one is already active. */
8685 gdb_assert (inferior_thread ()->control.step_resume_breakpoint == nullptr);
8686 gdb_assert (sr_type == bp_step_resume || sr_type == bp_hp_step_resume);
8688 infrun_debug_printf ("inserting step-resume breakpoint at %s",
8689 paddress (gdbarch, sr_sal.pc));
8691 inferior_thread ()->control.step_resume_breakpoint
8692 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, sr_type).release ();
8695 void
8696 insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
8697 struct symtab_and_line sr_sal,
8698 struct frame_id sr_id)
8700 insert_step_resume_breakpoint_at_sal_1 (gdbarch,
8701 sr_sal, sr_id,
8702 bp_step_resume);
8705 /* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc.
8706 This is used to skip a potential signal handler.
8708 This is called with the interrupted function's frame. The signal
8709 handler, when it returns, will resume the interrupted function at
8710 RETURN_FRAME.pc. */
8712 static void
8713 insert_hp_step_resume_breakpoint_at_frame (frame_info_ptr return_frame)
8715 gdb_assert (return_frame != nullptr);
8717 struct gdbarch *gdbarch = get_frame_arch (return_frame);
8719 symtab_and_line sr_sal;
8720 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
8721 sr_sal.section = find_pc_overlay (sr_sal.pc);
8722 sr_sal.pspace = get_frame_program_space (return_frame);
8724 insert_step_resume_breakpoint_at_sal_1 (gdbarch, sr_sal,
8725 get_stack_frame_id (return_frame),
8726 bp_hp_step_resume);
8729 /* Insert a "step-resume breakpoint" at the previous frame's PC. This
8730 is used to skip a function after stepping into it (for "next" or if
8731 the called function has no debugging information).
8733 The current function has almost always been reached by single
8734 stepping a call or return instruction. NEXT_FRAME belongs to the
8735 current function, and the breakpoint will be set at the caller's
8736 resume address.
8738 This is a separate function rather than reusing
8739 insert_hp_step_resume_breakpoint_at_frame in order to avoid
8740 get_prev_frame, which may stop prematurely (see the implementation
8741 of frame_unwind_caller_id for an example). */
8743 static void
8744 insert_step_resume_breakpoint_at_caller (frame_info_ptr next_frame)
8746 /* We shouldn't have gotten here if we don't know where the call site
8747 is. */
8748 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
8750 struct gdbarch *gdbarch = frame_unwind_caller_arch (next_frame);
8752 symtab_and_line sr_sal;
8753 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
8754 frame_unwind_caller_pc (next_frame));
8755 sr_sal.section = find_pc_overlay (sr_sal.pc);
8756 sr_sal.pspace = frame_unwind_program_space (next_frame);
8758 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
8759 frame_unwind_caller_id (next_frame));
8762 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
8763 new breakpoint at the target of a jmp_buf. The handling of
8764 longjmp-resume uses the same mechanisms used for handling
8765 "step-resume" breakpoints. */
8767 static void
8768 insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
8770 /* There should never be more than one longjmp-resume breakpoint per
8771 thread, so we should never be setting a new
8772 longjmp_resume_breakpoint when one is already active. */
8773 gdb_assert (inferior_thread ()->control.exception_resume_breakpoint == nullptr);
8775 infrun_debug_printf ("inserting longjmp-resume breakpoint at %s",
8776 paddress (gdbarch, pc));
8778 inferior_thread ()->control.exception_resume_breakpoint =
8779 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume).release ();
8782 /* Insert an exception resume breakpoint. TP is the thread throwing
8783 the exception. The block B is the block of the unwinder debug hook
8784 function. FRAME is the frame corresponding to the call to this
8785 function. SYM is the symbol of the function argument holding the
8786 target PC of the exception. */
8788 static void
8789 insert_exception_resume_breakpoint (struct thread_info *tp,
8790 const struct block *b,
8791 frame_info_ptr frame,
8792 struct symbol *sym)
8796 struct block_symbol vsym;
8797 struct value *value;
8798 CORE_ADDR handler;
8799 struct breakpoint *bp;
8801 vsym = lookup_symbol_search_name (sym->search_name (),
8802 b, VAR_DOMAIN);
8803 value = read_var_value (vsym.symbol, vsym.block, frame);
8804 /* If the value was optimized out, revert to the old behavior. */
8805 if (! value->optimized_out ())
8807 handler = value_as_address (value);
8809 infrun_debug_printf ("exception resume at %lx",
8810 (unsigned long) handler);
8812 /* set_momentary_breakpoint_at_pc creates a thread-specific
8813 breakpoint for the current inferior thread. */
8814 gdb_assert (tp == inferior_thread ());
8815 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
8816 handler,
8817 bp_exception_resume).release ();
8819 /* set_momentary_breakpoint_at_pc invalidates FRAME. */
8820 frame = nullptr;
8822 tp->control.exception_resume_breakpoint = bp;
8825 catch (const gdb_exception_error &e)
8827 /* We want to ignore errors here. */
8831 /* A helper for check_exception_resume that sets an
8832 exception-breakpoint based on a SystemTap probe. */
8834 static void
8835 insert_exception_resume_from_probe (struct thread_info *tp,
8836 const struct bound_probe *probe,
8837 frame_info_ptr frame)
8839 struct value *arg_value;
8840 CORE_ADDR handler;
8841 struct breakpoint *bp;
8843 arg_value = probe_safe_evaluate_at_pc (frame, 1);
8844 if (!arg_value)
8845 return;
8847 handler = value_as_address (arg_value);
8849 infrun_debug_printf ("exception resume at %s",
8850 paddress (probe->objfile->arch (), handler));
8852 /* set_momentary_breakpoint_at_pc creates a thread-specific breakpoint
8853 for the current inferior thread. */
8854 gdb_assert (tp == inferior_thread ());
8855 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
8856 handler, bp_exception_resume).release ();
8857 tp->control.exception_resume_breakpoint = bp;
8860 /* This is called when an exception has been intercepted. Check to
8861 see whether the exception's destination is of interest, and if so,
8862 set an exception resume breakpoint there. */
8864 static void
8865 check_exception_resume (struct execution_control_state *ecs,
8866 frame_info_ptr frame)
8868 struct bound_probe probe;
8869 struct symbol *func;
8871 /* First see if this exception unwinding breakpoint was set via a
8872 SystemTap probe point. If so, the probe has two arguments: the
8873 CFA and the HANDLER. We ignore the CFA, extract the handler, and
8874 set a breakpoint there. */
8875 probe = find_probe_by_pc (get_frame_pc (frame));
8876 if (probe.prob)
8878 insert_exception_resume_from_probe (ecs->event_thread, &probe, frame);
8879 return;
8882 func = get_frame_function (frame);
8883 if (!func)
8884 return;
8888 const struct block *b;
8889 int argno = 0;
8891 /* The exception breakpoint is a thread-specific breakpoint on
8892 the unwinder's debug hook, declared as:
8894 void _Unwind_DebugHook (void *cfa, void *handler);
8896 The CFA argument indicates the frame to which control is
8897 about to be transferred. HANDLER is the destination PC.
8899 We ignore the CFA and set a temporary breakpoint at HANDLER.
8900 This is not extremely efficient but it avoids issues in gdb
8901 with computing the DWARF CFA, and it also works even in weird
8902 cases such as throwing an exception from inside a signal
8903 handler. */
8905 b = func->value_block ();
8906 for (struct symbol *sym : block_iterator_range (b))
8908 if (!sym->is_argument ())
8909 continue;
8911 if (argno == 0)
8912 ++argno;
8913 else
8915 insert_exception_resume_breakpoint (ecs->event_thread,
8916 b, frame, sym);
8917 break;
8921 catch (const gdb_exception_error &e)
8926 static void
8927 stop_waiting (struct execution_control_state *ecs)
8929 infrun_debug_printf ("stop_waiting");
8931 /* Let callers know we don't want to wait for the inferior anymore. */
8932 ecs->wait_some_more = 0;
8935 /* Like keep_going, but passes the signal to the inferior, even if the
8936 signal is set to nopass. */
8938 static void
8939 keep_going_pass_signal (struct execution_control_state *ecs)
8941 gdb_assert (ecs->event_thread->ptid == inferior_ptid);
8942 gdb_assert (!ecs->event_thread->resumed ());
8944 /* Save the pc before execution, to compare with pc after stop. */
8945 ecs->event_thread->prev_pc
8946 = regcache_read_pc_protected (get_thread_regcache (ecs->event_thread));
8948 if (ecs->event_thread->control.trap_expected)
8950 struct thread_info *tp = ecs->event_thread;
8952 infrun_debug_printf ("%s has trap_expected set, "
8953 "resuming to collect trap",
8954 tp->ptid.to_string ().c_str ());
8956 /* We haven't yet gotten our trap, and either: intercepted a
8957 non-signal event (e.g., a fork); or took a signal which we
8958 are supposed to pass through to the inferior. Simply
8959 continue. */
8960 resume (ecs->event_thread->stop_signal ());
8962 else if (step_over_info_valid_p ())
8964 /* Another thread is stepping over a breakpoint in-line. If
8965 this thread needs a step-over too, queue the request. In
8966 either case, this resume must be deferred for later. */
8967 struct thread_info *tp = ecs->event_thread;
8969 if (ecs->hit_singlestep_breakpoint
8970 || thread_still_needs_step_over (tp))
8972 infrun_debug_printf ("step-over already in progress: "
8973 "step-over for %s deferred",
8974 tp->ptid.to_string ().c_str ());
8975 global_thread_step_over_chain_enqueue (tp);
8977 else
8978 infrun_debug_printf ("step-over in progress: resume of %s deferred",
8979 tp->ptid.to_string ().c_str ());
8981 else
8983 regcache *regcache = get_thread_regcache (ecs->event_thread);
8984 int remove_bp;
8985 int remove_wps;
8986 step_over_what step_what;
8988 /* Either the trap was not expected, but we are continuing
8989 anyway (if we got a signal, the user asked it be passed to
8990 the child)
8991 -- or --
8992 We got our expected trap, but decided we should resume from it.
8995 We're going to run this baby now!
8997 Note that insert_breakpoints won't try to re-insert
8998 already inserted breakpoints. Therefore, we don't
8999 care if breakpoints were already inserted, or not. */
9001 /* If we need to step over a breakpoint, and we're not using
9002 displaced stepping to do so, insert all breakpoints
9003 (watchpoints, etc.) but the one we're stepping over, step one
9004 instruction, and then re-insert the breakpoint when that step
9005 is finished. */
9007 step_what = thread_still_needs_step_over (ecs->event_thread);
9009 remove_bp = (ecs->hit_singlestep_breakpoint
9010 || (step_what & STEP_OVER_BREAKPOINT));
9011 remove_wps = (step_what & STEP_OVER_WATCHPOINT);
9013 /* We can't use displaced stepping if we need to step past a
9014 watchpoint. The instruction copied to the scratch pad would
9015 still trigger the watchpoint. */
9016 if (remove_bp
9017 && (remove_wps || !use_displaced_stepping (ecs->event_thread)))
9019 set_step_over_info (ecs->event_thread->inf->aspace.get (),
9020 regcache_read_pc (regcache), remove_wps,
9021 ecs->event_thread->global_num);
9023 else if (remove_wps)
9024 set_step_over_info (nullptr, 0, remove_wps, -1);
9026 /* If we now need to do an in-line step-over, we need to stop
9027 all other threads. Note this must be done before
9028 insert_breakpoints below, because that removes the breakpoint
9029 we're about to step over, otherwise other threads could miss
9030 it. */
9031 if (step_over_info_valid_p () && target_is_non_stop_p ())
9032 stop_all_threads ("starting in-line step-over");
9034 /* Stop stepping if inserting breakpoints fails. */
9037 insert_breakpoints ();
9039 catch (const gdb_exception_error &e)
9041 exception_print (gdb_stderr, e);
9042 stop_waiting (ecs);
9043 clear_step_over_info ();
9044 return;
9047 ecs->event_thread->control.trap_expected = (remove_bp || remove_wps);
9049 resume (ecs->event_thread->stop_signal ());
9052 prepare_to_wait (ecs);
9055 /* Called when we should continue running the inferior, because the
9056 current event doesn't cause a user visible stop. This does the
9057 resuming part; waiting for the next event is done elsewhere. */
9059 static void
9060 keep_going (struct execution_control_state *ecs)
9062 if (ecs->event_thread->control.trap_expected
9063 && ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP)
9064 ecs->event_thread->control.trap_expected = 0;
9066 if (!signal_program[ecs->event_thread->stop_signal ()])
9067 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
9068 keep_going_pass_signal (ecs);
9071 /* This function normally comes after a resume, before
9072 handle_inferior_event exits. It takes care of any last bits of
9073 housekeeping, and sets the all-important wait_some_more flag. */
9075 static void
9076 prepare_to_wait (struct execution_control_state *ecs)
9078 infrun_debug_printf ("prepare_to_wait");
9080 ecs->wait_some_more = 1;
9082 /* If the target can't async, emulate it by marking the infrun event
9083 handler such that as soon as we get back to the event-loop, we
9084 immediately end up in fetch_inferior_event again calling
9085 target_wait. */
9086 if (!target_can_async_p ())
9087 mark_infrun_async_event_handler ();
9090 /* We are done with the step range of a step/next/si/ni command.
9091 Called once for each n of a "step n" operation. */
9093 static void
9094 end_stepping_range (struct execution_control_state *ecs)
9096 ecs->event_thread->control.stop_step = 1;
9097 stop_waiting (ecs);
9100 /* Several print_*_reason functions to print why the inferior has stopped.
9101 We always print something when the inferior exits, or receives a signal.
9102 The rest of the cases are dealt with later on in normal_stop and
9103 print_it_typical. Ideally there should be a call to one of these
9104 print_*_reason functions from handle_inferior_event each time
9105 stop_waiting is called.
9107 Note that we don't call these directly, instead we delegate that to
9108 the interpreters, through observers. Interpreters then call these
9109 with whatever uiout is right. */
9111 void
9112 print_signal_exited_reason (struct ui_out *uiout, enum gdb_signal siggnal)
9114 annotate_signalled ();
9115 if (uiout->is_mi_like_p ())
9116 uiout->field_string
9117 ("reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
9118 uiout->text ("\nProgram terminated with signal ");
9119 annotate_signal_name ();
9120 uiout->field_string ("signal-name",
9121 gdb_signal_to_name (siggnal));
9122 annotate_signal_name_end ();
9123 uiout->text (", ");
9124 annotate_signal_string ();
9125 uiout->field_string ("signal-meaning",
9126 gdb_signal_to_string (siggnal));
9127 annotate_signal_string_end ();
9128 uiout->text (".\n");
9129 uiout->text ("The program no longer exists.\n");
9132 void
9133 print_exited_reason (struct ui_out *uiout, int exitstatus)
9135 struct inferior *inf = current_inferior ();
9136 std::string pidstr = target_pid_to_str (ptid_t (inf->pid));
9138 annotate_exited (exitstatus);
9139 if (exitstatus)
9141 if (uiout->is_mi_like_p ())
9142 uiout->field_string ("reason", async_reason_lookup (EXEC_ASYNC_EXITED));
9143 std::string exit_code_str
9144 = string_printf ("0%o", (unsigned int) exitstatus);
9145 uiout->message ("[Inferior %s (%s) exited with code %pF]\n",
9146 plongest (inf->num), pidstr.c_str (),
9147 string_field ("exit-code", exit_code_str.c_str ()));
9149 else
9151 if (uiout->is_mi_like_p ())
9152 uiout->field_string
9153 ("reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
9154 uiout->message ("[Inferior %s (%s) exited normally]\n",
9155 plongest (inf->num), pidstr.c_str ());
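/* For illustration: the exit code above is formatted with "0%o", i.e. in
   octal with a leading zero. Assuming an inferior that calls exit (10),
   and a made-up pid, the CLI output would look roughly like:

       [Inferior 1 (process 4242) exited with code 012]  */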
9159 void
9160 print_signal_received_reason (struct ui_out *uiout, enum gdb_signal siggnal)
9162 struct thread_info *thr = inferior_thread ();
9164 infrun_debug_printf ("signal = %s", gdb_signal_to_string (siggnal));
9166 annotate_signal ();
9168 if (uiout->is_mi_like_p ())
9170 else if (show_thread_that_caused_stop ())
9172 uiout->text ("\nThread ");
9173 uiout->field_string ("thread-id", print_thread_id (thr));
9175 const char *name = thread_name (thr);
9176 if (name != nullptr)
9178 uiout->text (" \"");
9179 uiout->field_string ("name", name);
9180 uiout->text ("\"");
9183 else
9184 uiout->text ("\nProgram");
9186 if (siggnal == GDB_SIGNAL_0 && !uiout->is_mi_like_p ())
9187 uiout->text (" stopped");
9188 else
9190 uiout->text (" received signal ");
9191 annotate_signal_name ();
9192 if (uiout->is_mi_like_p ())
9193 uiout->field_string
9194 ("reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
9195 uiout->field_string ("signal-name", gdb_signal_to_name (siggnal));
9196 annotate_signal_name_end ();
9197 uiout->text (", ");
9198 annotate_signal_string ();
9199 uiout->field_string ("signal-meaning", gdb_signal_to_string (siggnal));
9201 regcache *regcache = get_thread_regcache (thr);
9202 struct gdbarch *gdbarch = regcache->arch ();
9203 if (gdbarch_report_signal_info_p (gdbarch))
9204 gdbarch_report_signal_info (gdbarch, uiout, siggnal);
9206 annotate_signal_string_end ();
9208 uiout->text (".\n");
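/* For illustration, going by the fields emitted above: on a SIGINT stop a
   CLI interpreter prints something along the lines of

       Program received signal SIGINT, Interrupt.

   while an MI interpreter emits the same information as fields, roughly

       *stopped,reason="signal-received",signal-name="SIGINT",signal-meaning="Interrupt",...

   The exact MI record also carries thread and frame fields not shown
   here.  */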
9211 void
9212 print_no_history_reason (struct ui_out *uiout)
9214 if (uiout->is_mi_like_p ())
9215 uiout->field_string ("reason", async_reason_lookup (EXEC_ASYNC_NO_HISTORY));
9216 else
9217 uiout->text ("\nNo more reverse-execution history.\n");
9220 /* Print current location without a level number, if we have changed
9221 functions or hit a breakpoint. Print source line if we have one.
9222 bpstat_print contains the logic deciding in detail what to print,
9223 based on the event(s) that just occurred. */
9225 static void
9226 print_stop_location (const target_waitstatus &ws)
9228 int bpstat_ret;
9229 enum print_what source_flag;
9230 int do_frame_printing = 1;
9231 struct thread_info *tp = inferior_thread ();
9233 bpstat_ret = bpstat_print (tp->control.stop_bpstat, ws.kind ());
9234 switch (bpstat_ret)
9236 case PRINT_UNKNOWN:
9237 /* FIXME: cagney/2002-12-01: Given that a frame ID does (or
9238 should) carry around the function and does (or should) use
9239 that when doing a frame comparison. */
9240 if (tp->control.stop_step
9241 && (tp->control.step_frame_id
9242 == get_frame_id (get_current_frame ()))
9243 && (tp->control.step_start_function
9244 == find_pc_function (tp->stop_pc ())))
9246 /* Finished step, just print source line. */
9247 source_flag = SRC_LINE;
9249 else
9251 /* Print location and source line. */
9252 source_flag = SRC_AND_LOC;
9254 break;
9255 case PRINT_SRC_AND_LOC:
9256 /* Print location and source line. */
9257 source_flag = SRC_AND_LOC;
9258 break;
9259 case PRINT_SRC_ONLY:
9260 source_flag = SRC_LINE;
9261 break;
9262 case PRINT_NOTHING:
9263 /* Something bogus. */
9264 source_flag = SRC_LINE;
9265 do_frame_printing = 0;
9266 break;
9267 default:
9268 internal_error (_("Unknown value."));
9271 /* The behavior of this routine with respect to the source
9272 flag is:
9273 SRC_LINE: Print only source line
9274 LOCATION: Print only location
9275 SRC_AND_LOC: Print location and source line. */
9276 if (do_frame_printing)
9277 print_stack_frame (get_selected_frame (nullptr), 0, source_flag, 1);
9280 /* See infrun.h. */
9282 void
9283 print_stop_event (struct ui_out *uiout, bool displays)
9285 struct target_waitstatus last;
9286 struct thread_info *tp;
9288 get_last_target_status (nullptr, nullptr, &last);
9291 scoped_restore save_uiout = make_scoped_restore (&current_uiout, uiout);
9293 print_stop_location (last);
9295 /* Display the auto-display expressions. */
9296 if (displays)
9297 do_displays ();
9300 tp = inferior_thread ();
9301 if (tp->thread_fsm () != nullptr
9302 && tp->thread_fsm ()->finished_p ())
9304 struct return_value_info *rv;
9306 rv = tp->thread_fsm ()->return_value ();
9307 if (rv != nullptr)
9308 print_return_value (uiout, rv);
9312 /* See infrun.h. */
9314 void
9315 maybe_remove_breakpoints (void)
9317 if (!breakpoints_should_be_inserted_now () && target_has_execution ())
9319 if (remove_breakpoints ())
9321 target_terminal::ours_for_output ();
9322 gdb_printf (_("Cannot remove breakpoints because "
9323 "program is no longer writable.\nFurther "
9324 "execution is probably impossible.\n"));
9329 /* The execution context that just caused a normal stop. */
9331 struct stop_context
9333 stop_context ();
9335 DISABLE_COPY_AND_ASSIGN (stop_context);
9337 bool changed () const;
9339 /* The stop ID. */
9340 ULONGEST stop_id;
9342 /* The event PTID. */
9344 ptid_t ptid;
9346 /* If stopped for a thread event, this is the thread that caused the
9347 stop. */
9348 thread_info_ref thread;
9350 /* The inferior that caused the stop. */
9351 int inf_num;
9354 /* Initializes a new stop context. If stopped for a thread event, this
9355 takes a strong reference to the thread. */
9357 stop_context::stop_context ()
9359 stop_id = get_stop_id ();
9360 ptid = inferior_ptid;
9361 inf_num = current_inferior ()->num;
9363 if (inferior_ptid != null_ptid)
9365 /* Take a strong reference so that the thread can't be deleted
9366 yet. */
9367 thread = thread_info_ref::new_reference (inferior_thread ());
9371 /* Return true if the current context no longer matches the saved stop
9372 context. */
9374 bool
9375 stop_context::changed () const
9377 if (ptid != inferior_ptid)
9378 return true;
9379 if (inf_num != current_inferior ()->num)
9380 return true;
9381 if (thread != nullptr && thread->state != THREAD_STOPPED)
9382 return true;
9383 if (get_stop_id () != stop_id)
9384 return true;
9385 return false;
9388 /* See infrun.h. */
9390 bool
9391 normal_stop ()
9393 struct target_waitstatus last;
9395 get_last_target_status (nullptr, nullptr, &last);
9397 new_stop_id ();
9399 /* If an exception is thrown from this point on, make sure to
9400 propagate GDB's knowledge of the executing state to the
9401 frontend/user running state. A QUIT is an easy exception to see
9402 here, so do this before any filtered output. */
9404 ptid_t finish_ptid = null_ptid;
9406 if (!non_stop)
9407 finish_ptid = minus_one_ptid;
9408 else if (last.kind () == TARGET_WAITKIND_SIGNALLED
9409 || last.kind () == TARGET_WAITKIND_EXITED)
9411 /* On some targets, we may still have live threads in the
9412 inferior when we get a process exit event. E.g., for
9413 "checkpoint", when the current checkpoint/fork exits,
9414 linux-fork.c automatically switches to another fork from
9415 within target_mourn_inferior. */
9416 if (inferior_ptid != null_ptid)
9417 finish_ptid = ptid_t (inferior_ptid.pid ());
9419 else if (last.kind () != TARGET_WAITKIND_NO_RESUMED
9420 && last.kind () != TARGET_WAITKIND_THREAD_EXITED)
9421 finish_ptid = inferior_ptid;
9423 std::optional<scoped_finish_thread_state> maybe_finish_thread_state;
9424 if (finish_ptid != null_ptid)
9426 maybe_finish_thread_state.emplace
9427 (user_visible_resume_target (finish_ptid), finish_ptid);
9430 /* As we're presenting a stop, and potentially removing breakpoints,
9431 update the thread list so we can tell whether there are threads
9432 running on the target. With target remote, for example, we can
9433 only learn about new threads when we explicitly update the thread
9434 list. Do this before notifying the interpreters about signal
9435 stops, end of stepping ranges, etc., so that the "new thread"
9436 output is emitted before e.g., "Program received signal FOO",
9437 instead of after. */
9438 update_thread_list ();
9440 if (last.kind () == TARGET_WAITKIND_STOPPED && stopped_by_random_signal)
9441 notify_signal_received (inferior_thread ()->stop_signal ());
9443 /* As with the notification of thread events, we want to delay
9444 notifying the user that we've switched thread context until
9445 the inferior actually stops.
9447 There's no point in saying anything if the inferior has exited.
9448 Note that SIGNALLED here means "exited with a signal", not
9449 "received a signal".
9451 Also skip saying anything in non-stop mode. In that mode, as we
9452 don't want GDB to switch threads behind the user's back, to avoid
9453 races where the user is typing a command to apply to thread x,
9454 but GDB switches to thread y before the user finishes entering
9455 the command, fetch_inferior_event installs a cleanup to restore
9456 the current thread back to the thread the user had selected right
9457 after this event is handled, so we're not really switching, only
9458 informing of a stop. */
9459 if (!non_stop)
9461 if ((last.kind () != TARGET_WAITKIND_SIGNALLED
9462 && last.kind () != TARGET_WAITKIND_EXITED
9463 && last.kind () != TARGET_WAITKIND_NO_RESUMED
9464 && last.kind () != TARGET_WAITKIND_THREAD_EXITED)
9465 && target_has_execution ()
9466 && previous_thread != inferior_thread ())
9468 SWITCH_THRU_ALL_UIS ()
9470 target_terminal::ours_for_output ();
9471 gdb_printf (_("[Switching to %s]\n"),
9472 target_pid_to_str (inferior_ptid).c_str ());
9473 annotate_thread_changed ();
9477 update_previous_thread ();
9480 if (last.kind () == TARGET_WAITKIND_NO_RESUMED
9481 || last.kind () == TARGET_WAITKIND_THREAD_EXITED)
9483 stop_print_frame = false;
9485 SWITCH_THRU_ALL_UIS ()
9486 if (current_ui->prompt_state == PROMPT_BLOCKED)
9488 target_terminal::ours_for_output ();
9489 if (last.kind () == TARGET_WAITKIND_NO_RESUMED)
9490 gdb_printf (_("No unwaited-for children left.\n"));
9491 else if (last.kind () == TARGET_WAITKIND_THREAD_EXITED)
9492 gdb_printf (_("Command aborted, thread exited.\n"));
9493 else
9494 gdb_assert_not_reached ("unhandled");
9498 /* Note: this depends on the update_thread_list call above. */
9499 maybe_remove_breakpoints ();
9501 /* If an auto-display called a function and that got a signal,
9502 delete that auto-display to avoid an infinite recursion. */
9504 if (stopped_by_random_signal)
9505 disable_current_display ();
9507 SWITCH_THRU_ALL_UIS ()
9509 async_enable_stdin ();
9512 /* Let the user/frontend see the threads as stopped. */
9513 maybe_finish_thread_state.reset ();
9515 /* Select innermost stack frame - i.e., current frame is frame 0,
9516 and current location is based on that. Handle the case where the
9517 dummy call is returning after being stopped. E.g. the dummy call
9518 previously hit a breakpoint. (If the dummy call returns
9519 normally, we won't reach here.) Do this before the stop hook is
9520 run, so that it doesn't get to see the temporary dummy frame,
9521 which is not where we'll present the stop. */
9522 if (has_stack_frames ())
9524 if (stop_stack_dummy == STOP_STACK_DUMMY)
9526 /* Pop the empty frame that contains the stack dummy. This
9527 also restores inferior state prior to the call (struct
9528 infcall_suspend_state). */
9529 frame_info_ptr frame = get_current_frame ();
9531 gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
9532 frame_pop (frame);
9533 /* frame_pop calls reinit_frame_cache as the last thing it
9534 does which means there's now no selected frame. */
9537 select_frame (get_current_frame ());
9539 /* Set the current source location. */
9540 set_current_sal_from_frame (get_current_frame ());
9543 /* Look up the hook_stop and run it (CLI internally handles problem
9544 of stop_command's pre-hook not existing). */
9545 stop_context saved_context;
9549 execute_cmd_pre_hook (stop_command);
9551 catch (const gdb_exception_error &ex)
9553 exception_fprintf (gdb_stderr, ex,
9554 "Error while running hook_stop:\n");
9557 /* If the stop hook resumes the target, then there's no point in
9558 trying to notify about the previous stop; its context is
9559 gone. Likewise if the command switches thread or inferior --
9560 the observers would print a stop for the wrong
9561 thread/inferior. */
9562 if (saved_context.changed ())
9563 return true;
9565 /* Notify observers about the stop. This is where the interpreters
9566 print the stop event. */
9567 notify_normal_stop ((inferior_ptid != null_ptid
9568 ? inferior_thread ()->control.stop_bpstat
9569 : nullptr),
9570 stop_print_frame);
9571 annotate_stopped ();
9573 if (target_has_execution ())
9575 if (last.kind () != TARGET_WAITKIND_SIGNALLED
9576 && last.kind () != TARGET_WAITKIND_EXITED
9577 && last.kind () != TARGET_WAITKIND_NO_RESUMED
9578 && last.kind () != TARGET_WAITKIND_THREAD_EXITED)
9579 /* Delete the breakpoint we stopped at, if it wants to be deleted.
9580 Delete any breakpoint that is to be deleted at the next stop. */
9581 breakpoint_auto_delete (inferior_thread ()->control.stop_bpstat);
9584 return false;
9588 signal_stop_state (int signo)
9590 return signal_stop[signo];
9594 signal_print_state (int signo)
9596 return signal_print[signo];
9600 signal_pass_state (int signo)
9602 return signal_program[signo];
9605 static void
9606 signal_cache_update (int signo)
9608 if (signo == -1)
9610 for (signo = 0; signo < (int) GDB_SIGNAL_LAST; signo++)
9611 signal_cache_update (signo);
9613 return;
9616 signal_pass[signo] = (signal_stop[signo] == 0
9617 && signal_print[signo] == 0
9618 && signal_program[signo] == 1
9619 && signal_catch[signo] == 0);
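/* A worked example of the expression above: after the user runs
   "handle SIGUSR1 nostop noprint pass", signal_stop[SIGUSR1] and
   signal_print[SIGUSR1] are 0 while signal_program[SIGUSR1] is 1, and,
   assuming no signal catchpoint, signal_catch[SIGUSR1] is 0, so
   signal_pass[SIGUSR1] becomes 1 and the target may deliver that signal
   straight to the inferior without reporting a stop to GDB.  */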
9623 signal_stop_update (int signo, int state)
9625 int ret = signal_stop[signo];
9627 signal_stop[signo] = state;
9628 signal_cache_update (signo);
9629 return ret;
9633 signal_print_update (int signo, int state)
9635 int ret = signal_print[signo];
9637 signal_print[signo] = state;
9638 signal_cache_update (signo);
9639 return ret;
9643 signal_pass_update (int signo, int state)
9645 int ret = signal_program[signo];
9647 signal_program[signo] = state;
9648 signal_cache_update (signo);
9649 return ret;
9652 /* Update the global 'signal_catch' from INFO and notify the
9653 target. */
9655 void
9656 signal_catch_update (const unsigned int *info)
9658 int i;
9660 for (i = 0; i < GDB_SIGNAL_LAST; ++i)
9661 signal_catch[i] = info[i] > 0;
9662 signal_cache_update (-1);
9663 target_pass_signals (signal_pass);
9666 static void
9667 sig_print_header (void)
9669 gdb_printf (_("Signal Stop\tPrint\tPass "
9670 "to program\tDescription\n"));
9673 static void
9674 sig_print_info (enum gdb_signal oursig)
9676 const char *name = gdb_signal_to_name (oursig);
9677 int name_padding = 13 - strlen (name);
9679 if (name_padding <= 0)
9680 name_padding = 0;
9682 gdb_printf ("%s", name);
9683 gdb_printf ("%*.*s ", name_padding, name_padding, " ");
9684 gdb_printf ("%s\t", signal_stop[oursig] ? "Yes" : "No");
9685 gdb_printf ("%s\t", signal_print[oursig] ? "Yes" : "No");
9686 gdb_printf ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
9687 gdb_printf ("%s\n", gdb_signal_to_string (oursig));
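/* Given the format strings above (a 13-column name field followed by
   tab-separated Yes/No columns), a typical row of "info signals" output
   looks roughly like this, with the default handle settings:

       Signal        Stop	Print	Pass to program	Description
       SIGHUP        Yes	Yes	Yes		Hangup  */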
9690 /* Specify how various signals in the inferior should be handled. */
9692 static void
9693 handle_command (const char *args, int from_tty)
9695 int digits, wordlen;
9696 int sigfirst, siglast;
9697 enum gdb_signal oursig;
9698 int allsigs;
9700 if (args == nullptr)
9702 error_no_arg (_("signal to handle"));
9705 /* Allocate and zero an array of flags for which signals to handle. */
9707 const size_t nsigs = GDB_SIGNAL_LAST;
9708 unsigned char sigs[nsigs] {};
9710 /* Break the command line up into args. */
9712 gdb_argv built_argv (args);
9714 /* Walk through the args, looking for signal numbers, signal names, and
9715 actions. Signal numbers and signal names may be interspersed with
9716 actions, with the actions being performed for all signals cumulatively
9717 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
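/* A few concrete invocations, matching the grammar described above and the
   parsing below (signal names, internal numbers 1-15, ranges, and
   cumulative actions):

       handle SIGUSR1 nostop noprint pass
       handle 5-9 stop print
       handle all nopass

   Actions apply cumulatively to the signals specified before them, so e.g.
   "handle SIGUSR1 SIGUSR2 nostop pass" applies both actions to both
   signals.  */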
9719 for (char *arg : built_argv)
9721 wordlen = strlen (arg);
9722 for (digits = 0; isdigit (arg[digits]); digits++)
9725 allsigs = 0;
9726 sigfirst = siglast = -1;
9728 if (wordlen >= 1 && !strncmp (arg, "all", wordlen))
9730 /* Apply action to all signals except those used by the
9731 debugger. Silently skip those. */
9732 allsigs = 1;
9733 sigfirst = 0;
9734 siglast = nsigs - 1;
9736 else if (wordlen >= 1 && !strncmp (arg, "stop", wordlen))
9738 SET_SIGS (nsigs, sigs, signal_stop);
9739 SET_SIGS (nsigs, sigs, signal_print);
9741 else if (wordlen >= 1 && !strncmp (arg, "ignore", wordlen))
9743 UNSET_SIGS (nsigs, sigs, signal_program);
9745 else if (wordlen >= 2 && !strncmp (arg, "print", wordlen))
9747 SET_SIGS (nsigs, sigs, signal_print);
9749 else if (wordlen >= 2 && !strncmp (arg, "pass", wordlen))
9751 SET_SIGS (nsigs, sigs, signal_program);
9753 else if (wordlen >= 3 && !strncmp (arg, "nostop", wordlen))
9755 UNSET_SIGS (nsigs, sigs, signal_stop);
9757 else if (wordlen >= 3 && !strncmp (arg, "noignore", wordlen))
9759 SET_SIGS (nsigs, sigs, signal_program);
9761 else if (wordlen >= 4 && !strncmp (arg, "noprint", wordlen))
9763 UNSET_SIGS (nsigs, sigs, signal_print);
9764 UNSET_SIGS (nsigs, sigs, signal_stop);
9766 else if (wordlen >= 4 && !strncmp (arg, "nopass", wordlen))
9768 UNSET_SIGS (nsigs, sigs, signal_program);
9770 else if (digits > 0)
9772 /* It is numeric. The numeric signal refers to our own
9773 internal signal numbering from target.h, not to host/target
9774 signal number. This is a feature; users really should be
9775 using symbolic names anyway, and the common ones like
9776 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
9778 sigfirst = siglast = (int)
9779 gdb_signal_from_command (atoi (arg));
9780 if (arg[digits] == '-')
9782 siglast = (int)
9783 gdb_signal_from_command (atoi (arg + digits + 1));
9785 if (sigfirst > siglast)
9787 /* Bet he didn't figure we'd think of this case... */
9788 std::swap (sigfirst, siglast);
9791 else
9793 oursig = gdb_signal_from_name (arg);
9794 if (oursig != GDB_SIGNAL_UNKNOWN)
9796 sigfirst = siglast = (int) oursig;
9798 else
9800 /* Not a number and not a recognized flag word => complain. */
9801 error (_("Unrecognized or ambiguous flag word: \"%s\"."), arg);
9805 /* If any signal numbers or symbol names were found, set flags for
9806 which signals to apply actions to. */
9808 for (int signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
9810 switch ((enum gdb_signal) signum)
9812 case GDB_SIGNAL_TRAP:
9813 case GDB_SIGNAL_INT:
9814 if (!allsigs && !sigs[signum])
9816 if (query (_("%s is used by the debugger.\n\
9817 Are you sure you want to change it? "),
9818 gdb_signal_to_name ((enum gdb_signal) signum)))
9820 sigs[signum] = 1;
9822 else
9823 gdb_printf (_("Not confirmed, unchanged.\n"));
9825 break;
9826 case GDB_SIGNAL_0:
9827 case GDB_SIGNAL_DEFAULT:
9828 case GDB_SIGNAL_UNKNOWN:
9829 /* Make sure that "all" doesn't print these. */
9830 break;
9831 default:
9832 sigs[signum] = 1;
9833 break;
9838 for (int signum = 0; signum < nsigs; signum++)
9839 if (sigs[signum])
9841 signal_cache_update (-1);
9842 target_pass_signals (signal_pass);
9843 target_program_signals (signal_program);
9845 if (from_tty)
9847 /* Show the results. */
9848 sig_print_header ();
9849 for (; signum < nsigs; signum++)
9850 if (sigs[signum])
9851 sig_print_info ((enum gdb_signal) signum);
9854 break;
9858 /* Complete the "handle" command. */
9860 static void
9861 handle_completer (struct cmd_list_element *ignore,
9862 completion_tracker &tracker,
9863 const char *text, const char *word)
9865 static const char * const keywords[] =
9867 "all",
9868 "stop",
9869 "ignore",
9870 "print",
9871 "pass",
9872 "nostop",
9873 "noignore",
9874 "noprint",
9875 "nopass",
9876 nullptr,
9879 signal_completer (ignore, tracker, text, word);
9880 complete_on_enum (tracker, keywords, word, word);
9883 enum gdb_signal
9884 gdb_signal_from_command (int num)
9886 if (num >= 1 && num <= 15)
9887 return (enum gdb_signal) num;
9888 error (_("Only signals 1-15 are valid as numeric signals.\n\
9889 Use \"info signals\" for a list of symbolic signals."));
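/* Illustration: the 1-15 range is accepted because those enum gdb_signal
   values line up with the traditional Unix numbering, so, assuming an
   unmodified signals.def, "handle 11 nostop" is equivalent to
   "handle SIGSEGV nostop". Anything outside that range must be given by
   name.  */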
9892 /* Print current contents of the tables set by the handle command.
9893 It is possible we should just be printing signals actually used
9894 by the current target (but for things to work right when switching
9895 targets, all signals should be in the signal tables). */
9897 static void
9898 info_signals_command (const char *signum_exp, int from_tty)
9900 enum gdb_signal oursig;
9902 sig_print_header ();
9904 if (signum_exp)
9906 /* First see if this is a symbol name. */
9907 oursig = gdb_signal_from_name (signum_exp);
9908 if (oursig == GDB_SIGNAL_UNKNOWN)
9910 /* No, try numeric. */
9911 oursig =
9912 gdb_signal_from_command (parse_and_eval_long (signum_exp));
9914 sig_print_info (oursig);
9915 return;
9918 gdb_printf ("\n");
9919 /* These ugly casts brought to you by the native VAX compiler. */
9920 for (oursig = GDB_SIGNAL_FIRST;
9921 (int) oursig < (int) GDB_SIGNAL_LAST;
9922 oursig = (enum gdb_signal) ((int) oursig + 1))
9924 QUIT;
9926 if (oursig != GDB_SIGNAL_UNKNOWN
9927 && oursig != GDB_SIGNAL_DEFAULT && oursig != GDB_SIGNAL_0)
9928 sig_print_info (oursig);
9931 gdb_printf (_("\nUse the \"handle\" command "
9932 "to change these tables.\n"));
9935 /* The $_siginfo convenience variable is a bit special. We don't know
9936 for sure the type of the value until we actually have a chance to
9937 fetch the data. The type can change depending on gdbarch, so it is
9938 also dependent on which thread you have selected. We handle this by:
9940 1. making $_siginfo be an internalvar that creates a new value on
9941 access.
9943 2. making the value of $_siginfo be an lval_computed value. */
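/* Typical use from the CLI, assuming a Linux target whose siginfo type
   describes the usual si_signo/si_errno/si_code fields:

       (gdb) print $_siginfo.si_signo
       (gdb) print $_siginfo._sifields._sigfault.si_addr

   Reading fetches the data through siginfo_value_read below; assigning to
   $_siginfo (or to a field of it) goes through siginfo_value_write.  */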
9945 /* This function implements the lval_computed support for reading a
9946 $_siginfo value. */
9948 static void
9949 siginfo_value_read (struct value *v)
9951 LONGEST transferred;
9953 /* If we can access registers, we can also access $_siginfo, and
9954 vice versa. */
9955 validate_registers_access ();
9957 transferred =
9958 target_read (current_inferior ()->top_target (),
9959 TARGET_OBJECT_SIGNAL_INFO,
9960 nullptr,
9961 v->contents_all_raw ().data (),
9962 v->offset (),
9963 v->type ()->length ());
9965 if (transferred != v->type ()->length ())
9966 error (_("Unable to read siginfo"));
9969 /* This function implements the lval_computed support for writing a
9970 $_siginfo value. */
9972 static void
9973 siginfo_value_write (struct value *v, struct value *fromval)
9975 LONGEST transferred;
9977 /* If we can access registers, we can also access $_siginfo, and
9978 vice versa. */
9979 validate_registers_access ();
9981 transferred = target_write (current_inferior ()->top_target (),
9982 TARGET_OBJECT_SIGNAL_INFO,
9983 nullptr,
9984 fromval->contents_all_raw ().data (),
9985 v->offset (),
9986 fromval->type ()->length ());
9988 if (transferred != fromval->type ()->length ())
9989 error (_("Unable to write siginfo"));
9992 static const struct lval_funcs siginfo_value_funcs =
9994 siginfo_value_read,
9995 siginfo_value_write
9998 /* Return a new value with the correct type for the siginfo object of
9999 the current thread using architecture GDBARCH. Return a void value
10000 if there's no object available. */
10002 static struct value *
10003 siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var,
10004 void *ignore)
10006 if (target_has_stack ()
10007 && inferior_ptid != null_ptid
10008 && gdbarch_get_siginfo_type_p (gdbarch))
10010 struct type *type = gdbarch_get_siginfo_type (gdbarch);
10012 return value::allocate_computed (type, &siginfo_value_funcs, nullptr);
10015 return value::allocate (builtin_type (gdbarch)->builtin_void);
10019 /* infcall_suspend_state contains state about the program itself like its
10020 registers and any signal it received when it last stopped.
10021 This state must be restored regardless of how the inferior function call
10022 ends (either successfully, or after it hits a breakpoint or signal)
10023 if the program is to properly continue where it left off. */
10025 class infcall_suspend_state
10027 public:
10028 /* Capture state from GDBARCH, TP, and REGCACHE that must be restored
10029 once the inferior function call has finished. */
10030 infcall_suspend_state (struct gdbarch *gdbarch,
10031 const struct thread_info *tp,
10032 struct regcache *regcache)
10033 : m_registers (new readonly_detached_regcache (*regcache))
10035 tp->save_suspend_to (m_thread_suspend);
10037 gdb::unique_xmalloc_ptr<gdb_byte> siginfo_data;
10039 if (gdbarch_get_siginfo_type_p (gdbarch))
10041 struct type *type = gdbarch_get_siginfo_type (gdbarch);
10042 size_t len = type->length ();
10044 siginfo_data.reset ((gdb_byte *) xmalloc (len));
10046 if (target_read (current_inferior ()->top_target (),
10047 TARGET_OBJECT_SIGNAL_INFO, nullptr,
10048 siginfo_data.get (), 0, len) != len)
10050 /* Errors ignored. */
10051 siginfo_data.reset (nullptr);
10055 if (siginfo_data)
10057 m_siginfo_gdbarch = gdbarch;
10058 m_siginfo_data = std::move (siginfo_data);
10062 /* Return a pointer to the stored register state. */
10064 readonly_detached_regcache *registers () const
10066 return m_registers.get ();
10069 /* Restores the stored state into GDBARCH, TP, and REGCACHE. */
10071 void restore (struct gdbarch *gdbarch,
10072 struct thread_info *tp,
10073 struct regcache *regcache) const
10075 tp->restore_suspend_from (m_thread_suspend);
10077 if (m_siginfo_gdbarch == gdbarch)
10079 struct type *type = gdbarch_get_siginfo_type (gdbarch);
10081 /* Errors ignored. */
10082 target_write (current_inferior ()->top_target (),
10083 TARGET_OBJECT_SIGNAL_INFO, nullptr,
10084 m_siginfo_data.get (), 0, type->length ());
10087 /* The inferior can be gone if the user types "print exit(0)"
10088 (and perhaps other times). */
10089 if (target_has_execution ())
10090 /* NB: The register write goes through to the target. */
10091 regcache->restore (registers ());
10094 private:
10095 /* How the current thread stopped before the inferior function call was
10096 executed. */
10097 struct thread_suspend_state m_thread_suspend;
10099 /* The registers before the inferior function call was executed. */
10100 std::unique_ptr<readonly_detached_regcache> m_registers;
10102 /* Format of SIGINFO_DATA or NULL if it is not present. */
10103 struct gdbarch *m_siginfo_gdbarch = nullptr;
10105 /* The inferior format depends on SIGINFO_GDBARCH and it has a length of
10106 gdbarch_get_siginfo_type ()->length (). For a different gdbarch the
10107 content would be invalid. */
10108 gdb::unique_xmalloc_ptr<gdb_byte> m_siginfo_data;
10111 infcall_suspend_state_up
10112 save_infcall_suspend_state ()
10114 struct thread_info *tp = inferior_thread ();
10115 regcache *regcache = get_thread_regcache (tp);
10116 struct gdbarch *gdbarch = regcache->arch ();
10118 infcall_suspend_state_up inf_state
10119 (new struct infcall_suspend_state (gdbarch, tp, regcache));
10121 /* Having saved the current state, adjust the thread state, discarding
10122 any stop signal information. The stop signal is not useful when
10123 starting an inferior function call, and run_inferior_call will not use
10124 the signal due to its `proceed' call with GDB_SIGNAL_0. */
10125 tp->set_stop_signal (GDB_SIGNAL_0);
10127 return inf_state;
10130 /* Restore inferior session state to INF_STATE. */
10132 void
10133 restore_infcall_suspend_state (struct infcall_suspend_state *inf_state)
10135 struct thread_info *tp = inferior_thread ();
10136 regcache *regcache = get_thread_regcache (inferior_thread ());
10137 struct gdbarch *gdbarch = regcache->arch ();
10139 inf_state->restore (gdbarch, tp, regcache);
10140 discard_infcall_suspend_state (inf_state);
10143 void
10144 discard_infcall_suspend_state (struct infcall_suspend_state *inf_state)
10146 delete inf_state;
10149 readonly_detached_regcache *
10150 get_infcall_suspend_state_regcache (struct infcall_suspend_state *inf_state)
10152 return inf_state->registers ();
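/* A minimal sketch of how the functions above fit together; the real call
   sites live in infcall.c and are more elaborate:

       infcall_suspend_state_up saved = save_infcall_suspend_state ();
       ... set up the dummy frame and let the inferior run ...
       restore_infcall_suspend_state (saved.release ());

   restore_infcall_suspend_state discards (deletes) the state when it is
   done, hence the release () on the owning pointer.  */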
10155 /* infcall_control_state contains state regarding gdb's control of the
10156 inferior itself like stepping control. It also contains session state like
10157 the user's currently selected frame. */
10159 struct infcall_control_state
10161 struct thread_control_state thread_control;
10162 struct inferior_control_state inferior_control;
10164 /* Other fields: */
10165 enum stop_stack_kind stop_stack_dummy = STOP_NONE;
10166 int stopped_by_random_signal = 0;
10168 /* ID and level of the selected frame when the inferior function
10169 call was made. */
10170 struct frame_id selected_frame_id {};
10171 int selected_frame_level = -1;
10174 /* Save all of the information associated with the inferior<==>gdb
10175 connection. */
10177 infcall_control_state_up
10178 save_infcall_control_state ()
10180 infcall_control_state_up inf_status (new struct infcall_control_state);
10181 struct thread_info *tp = inferior_thread ();
10182 struct inferior *inf = current_inferior ();
10184 inf_status->thread_control = tp->control;
10185 inf_status->inferior_control = inf->control;
10187 tp->control.step_resume_breakpoint = nullptr;
10188 tp->control.exception_resume_breakpoint = nullptr;
10190 /* Save original bpstat chain to INF_STATUS; replace it in TP with copy of
10191 chain. If caller's caller is walking the chain, they'll be happier if we
10192 hand them back the original chain when restore_infcall_control_state is
10193 called. */
10194 tp->control.stop_bpstat = bpstat_copy (tp->control.stop_bpstat);
10196 /* Other fields: */
10197 inf_status->stop_stack_dummy = stop_stack_dummy;
10198 inf_status->stopped_by_random_signal = stopped_by_random_signal;
10200 save_selected_frame (&inf_status->selected_frame_id,
10201 &inf_status->selected_frame_level);
10203 return inf_status;
10206 /* Restore inferior session state to INF_STATUS. */
10208 void
10209 restore_infcall_control_state (struct infcall_control_state *inf_status)
10211 struct thread_info *tp = inferior_thread ();
10212 struct inferior *inf = current_inferior ();
10214 if (tp->control.step_resume_breakpoint)
10215 tp->control.step_resume_breakpoint->disposition = disp_del_at_next_stop;
10217 if (tp->control.exception_resume_breakpoint)
10218 tp->control.exception_resume_breakpoint->disposition
10219 = disp_del_at_next_stop;
10221 /* Handle the bpstat_copy of the chain. */
10222 bpstat_clear (&tp->control.stop_bpstat);
10224 tp->control = inf_status->thread_control;
10225 inf->control = inf_status->inferior_control;
10227 /* Other fields: */
10228 stop_stack_dummy = inf_status->stop_stack_dummy;
10229 stopped_by_random_signal = inf_status->stopped_by_random_signal;
10231 if (target_has_stack ())
10233 restore_selected_frame (inf_status->selected_frame_id,
10234 inf_status->selected_frame_level);
10237 delete inf_status;
10240 void
10241 discard_infcall_control_state (struct infcall_control_state *inf_status)
10243 if (inf_status->thread_control.step_resume_breakpoint)
10244 inf_status->thread_control.step_resume_breakpoint->disposition
10245 = disp_del_at_next_stop;
10247 if (inf_status->thread_control.exception_resume_breakpoint)
10248 inf_status->thread_control.exception_resume_breakpoint->disposition
10249 = disp_del_at_next_stop;
10251 /* See save_infcall_control_state for info on stop_bpstat. */
10252 bpstat_clear (&inf_status->thread_control.stop_bpstat);
10254 delete inf_status;
10257 /* See infrun.h. */
10259 void
10260 clear_exit_convenience_vars (void)
10262 clear_internalvar (lookup_internalvar ("_exitsignal"));
10263 clear_internalvar (lookup_internalvar ("_exitcode"));
10267 /* User interface for reverse debugging:
10268 Set exec-direction / show exec-direction commands
10269 (returns error unless target implements to_set_exec_direction method). */
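/* Typical use, assuming a target that can execute in reverse (for example
   the process record target):

       (gdb) record
       (gdb) continue
       ...
       (gdb) set exec-direction reverse
       (gdb) step

   With an ordinary target the setter below rejects "reverse" and resets
   the variable to "forward".  */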
10271 enum exec_direction_kind execution_direction = EXEC_FORWARD;
10272 static const char exec_forward[] = "forward";
10273 static const char exec_reverse[] = "reverse";
10274 static const char *exec_direction = exec_forward;
10275 static const char *const exec_direction_names[] = {
10276 exec_forward,
10277 exec_reverse,
10278 nullptr
10281 static void
10282 set_exec_direction_func (const char *args, int from_tty,
10283 struct cmd_list_element *cmd)
10285 if (target_can_execute_reverse ())
10287 if (!strcmp (exec_direction, exec_forward))
10288 execution_direction = EXEC_FORWARD;
10289 else if (!strcmp (exec_direction, exec_reverse))
10290 execution_direction = EXEC_REVERSE;
10292 else
10294 exec_direction = exec_forward;
10295 error (_("Target does not support this operation."));
10299 static void
10300 show_exec_direction_func (struct ui_file *out, int from_tty,
10301 struct cmd_list_element *cmd, const char *value)
10303 switch (execution_direction) {
10304 case EXEC_FORWARD:
10305 gdb_printf (out, _("Forward.\n"));
10306 break;
10307 case EXEC_REVERSE:
10308 gdb_printf (out, _("Reverse.\n"));
10309 break;
10310 default:
10311 internal_error (_("bogus execution_direction value: %d"),
10312 (int) execution_direction);
10316 static void
10317 show_schedule_multiple (struct ui_file *file, int from_tty,
10318 struct cmd_list_element *c, const char *value)
10320 gdb_printf (file, _("Resuming the execution of threads "
10321 "of all processes is %s.\n"), value);
10324 /* Implementation of `siginfo' variable. */
10326 static const struct internalvar_funcs siginfo_funcs =
10328 siginfo_make_value,
10329 nullptr,
10332 /* Callback for infrun's target events source. This is marked when a
10333 thread has a pending status to process. */
10335 static void
10336 infrun_async_inferior_event_handler (gdb_client_data data)
10338 clear_async_event_handler (infrun_async_inferior_event_token);
10339 inferior_event_handler (INF_REG_EVENT);
10342 #if GDB_SELF_TEST
10343 namespace selftests
10346 /* Verify that when two threads with the same ptid exist (from two different
10347 targets) and one of them changes ptid, we only update inferior_ptid if
10348 it is appropriate. */
10350 static void
10351 infrun_thread_ptid_changed ()
10353 gdbarch *arch = current_inferior ()->arch ();
10355 /* The thread which inferior_ptid represents changes ptid. */
10357 scoped_restore_current_pspace_and_thread restore;
10359 scoped_mock_context<test_target_ops> target1 (arch);
10360 scoped_mock_context<test_target_ops> target2 (arch);
10362 ptid_t old_ptid (111, 222);
10363 ptid_t new_ptid (111, 333);
10365 target1.mock_inferior.pid = old_ptid.pid ();
10366 target1.mock_thread.ptid = old_ptid;
10367 target1.mock_inferior.ptid_thread_map.clear ();
10368 target1.mock_inferior.ptid_thread_map[old_ptid] = &target1.mock_thread;
10370 target2.mock_inferior.pid = old_ptid.pid ();
10371 target2.mock_thread.ptid = old_ptid;
10372 target2.mock_inferior.ptid_thread_map.clear ();
10373 target2.mock_inferior.ptid_thread_map[old_ptid] = &target2.mock_thread;
10375 auto restore_inferior_ptid = make_scoped_restore (&inferior_ptid, old_ptid);
10376 set_current_inferior (&target1.mock_inferior);
10378 thread_change_ptid (&target1.mock_target, old_ptid, new_ptid);
10380 gdb_assert (inferior_ptid == new_ptid);
10383 /* A thread with the same ptid as inferior_ptid, but from another target,
10384 changes ptid. */
10386 scoped_restore_current_pspace_and_thread restore;
10388 scoped_mock_context<test_target_ops> target1 (arch);
10389 scoped_mock_context<test_target_ops> target2 (arch);
10391 ptid_t old_ptid (111, 222);
10392 ptid_t new_ptid (111, 333);
10394 target1.mock_inferior.pid = old_ptid.pid ();
10395 target1.mock_thread.ptid = old_ptid;
10396 target1.mock_inferior.ptid_thread_map.clear ();
10397 target1.mock_inferior.ptid_thread_map[old_ptid] = &target1.mock_thread;
10399 target2.mock_inferior.pid = old_ptid.pid ();
10400 target2.mock_thread.ptid = old_ptid;
10401 target2.mock_inferior.ptid_thread_map.clear ();
10402 target2.mock_inferior.ptid_thread_map[old_ptid] = &target2.mock_thread;
10404 auto restore_inferior_ptid = make_scoped_restore (&inferior_ptid, old_ptid);
10405 set_current_inferior (&target2.mock_inferior);
10407 thread_change_ptid (&target1.mock_target, old_ptid, new_ptid);
10409 gdb_assert (inferior_ptid == old_ptid);
10413 } /* namespace selftests */
10415 #endif /* GDB_SELF_TEST */
10417 void _initialize_infrun ();
10418 void
10419 _initialize_infrun ()
10421 struct cmd_list_element *c;
10423 /* Register extra event sources in the event loop. */
10424 infrun_async_inferior_event_token
10425 = create_async_event_handler (infrun_async_inferior_event_handler, nullptr,
10426 "infrun");
  cmd_list_element *info_signals_cmd
    = add_info ("signals", info_signals_command, _("\
What debugger does when program gets various signals.\n\
Specify a signal as argument to print info on that signal only."));
  add_info_alias ("handle", info_signals_cmd, 0);

  c = add_com ("handle", class_run, handle_command, _("\
Specify how to handle signals.\n\
Usage: handle SIGNAL [ACTIONS]\n\
Args are signals and actions to apply to those signals.\n\
If no actions are specified, the current settings for the specified signals\n\
will be displayed instead.\n\
\n\
Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
from 1-15 are allowed for compatibility with old versions of GDB.\n\
Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
The special arg \"all\" is recognized to mean all signals except those\n\
used by the debugger, typically SIGTRAP and SIGINT.\n\
\n\
Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
\"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
Stop means reenter debugger if this signal happens (implies print).\n\
Print means print a message if this signal happens.\n\
Pass means let program see this signal; otherwise program doesn't know.\n\
Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
Pass and Stop may be combined.\n\
\n\
Multiple signals may be specified.  Signal numbers and signal names\n\
may be interspersed with actions, with the actions being performed for\n\
all signals cumulatively specified."));
  set_cmd_completer (c, handle_completer);

  stop_command = add_cmd ("stop", class_obscure,
                          not_just_help_class_command, _("\
There is no `stop' command, but you can set a hook on `stop'.\n\
This allows you to set a list of commands to be run each time execution\n\
of the program stops."), &cmdlist);

  add_setshow_boolean_cmd
    ("infrun", class_maintenance, &debug_infrun,
     _("Set inferior debugging."),
     _("Show inferior debugging."),
     _("When non-zero, inferior specific debugging is enabled."),
     nullptr, show_debug_infrun, &setdebuglist, &showdebuglist);

  add_setshow_boolean_cmd ("non-stop", no_class,
                           &non_stop_1, _("\
Set whether gdb controls the inferior in non-stop mode."), _("\
Show whether gdb controls the inferior in non-stop mode."), _("\
When debugging a multi-threaded program and this setting is\n\
off (the default, also called all-stop mode), when one thread stops\n\
(for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
all other threads in the program while you interact with the thread of\n\
interest.  When you continue or step a thread, you can allow the other\n\
threads to run, or have them remain stopped, but while you inspect any\n\
thread's state, all threads stop.\n\
\n\
In non-stop mode, when one thread stops, other threads can continue\n\
to run freely.  You'll be able to step each thread independently,\n\
leave it stopped or free to run as needed."),
                           set_non_stop,
                           show_non_stop,
                           &setlist,
                           &showlist);

  for (size_t i = 0; i < GDB_SIGNAL_LAST; i++)
    {
      signal_stop[i] = 1;
      signal_print[i] = 1;
      signal_program[i] = 1;
      signal_catch[i] = 0;
    }

  /* Signals caused by debugger's own actions should not be given to
     the program afterwards.

     Do not deliver GDB_SIGNAL_TRAP by default, except when the user
     explicitly specifies that it should be delivered to the target
     program.  Typically, that would occur when a user is debugging a
     target monitor on a simulator: the target monitor sets a
     breakpoint; the simulator encounters this breakpoint and halts
     the simulation handing control to GDB; GDB, noting that the stop
     address doesn't map to any known breakpoint, returns control back
     to the simulator; the simulator then delivers the hardware
     equivalent of a GDB_SIGNAL_TRAP to the program being
     debugged.  */
  signal_program[GDB_SIGNAL_TRAP] = 0;
  signal_program[GDB_SIGNAL_INT] = 0;

  /* Signals that are not errors should not normally enter the debugger.  */
  signal_stop[GDB_SIGNAL_ALRM] = 0;
  signal_print[GDB_SIGNAL_ALRM] = 0;
  signal_stop[GDB_SIGNAL_VTALRM] = 0;
  signal_print[GDB_SIGNAL_VTALRM] = 0;
  signal_stop[GDB_SIGNAL_PROF] = 0;
  signal_print[GDB_SIGNAL_PROF] = 0;
  signal_stop[GDB_SIGNAL_CHLD] = 0;
  signal_print[GDB_SIGNAL_CHLD] = 0;
  signal_stop[GDB_SIGNAL_IO] = 0;
  signal_print[GDB_SIGNAL_IO] = 0;
  signal_stop[GDB_SIGNAL_POLL] = 0;
  signal_print[GDB_SIGNAL_POLL] = 0;
  signal_stop[GDB_SIGNAL_URG] = 0;
  signal_print[GDB_SIGNAL_URG] = 0;
  signal_stop[GDB_SIGNAL_WINCH] = 0;
  signal_print[GDB_SIGNAL_WINCH] = 0;
  signal_stop[GDB_SIGNAL_PRIO] = 0;
  signal_print[GDB_SIGNAL_PRIO] = 0;

  /* These signals are used internally by user-level thread
     implementations.  (See signal(5) on Solaris.)  Like the above
     signals, a healthy program receives and handles them as part of
     its normal operation.  */
  signal_stop[GDB_SIGNAL_LWP] = 0;
  signal_print[GDB_SIGNAL_LWP] = 0;
  signal_stop[GDB_SIGNAL_WAITING] = 0;
  signal_print[GDB_SIGNAL_WAITING] = 0;
  signal_stop[GDB_SIGNAL_CANCEL] = 0;
  signal_print[GDB_SIGNAL_CANCEL] = 0;
  signal_stop[GDB_SIGNAL_LIBRT] = 0;
  signal_print[GDB_SIGNAL_LIBRT] = 0;

  /* Update cached state.  */
  signal_cache_update (-1);

  add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
                            &stop_on_solib_events, _("\
Set stopping for shared library events."), _("\
Show stopping for shared library events."), _("\
If nonzero, gdb will give control to the user when the dynamic linker\n\
notifies gdb of shared library events.  The most common event of interest\n\
to the user would be loading/unloading of a new library."),
                            set_stop_on_solib_events,
                            show_stop_on_solib_events,
                            &setlist, &showlist);

  add_setshow_enum_cmd ("follow-fork-mode", class_run,
                        follow_fork_mode_kind_names,
                        &follow_fork_mode_string, _("\
Set debugger response to a program call of fork or vfork."), _("\
Show debugger response to a program call of fork or vfork."), _("\
A fork or vfork creates a new process.  follow-fork-mode can be:\n\
  parent  - the original process is debugged after a fork\n\
  child   - the new process is debugged after a fork\n\
The unfollowed process will continue to run.\n\
By default, the debugger will follow the parent process."),
                        nullptr,
                        show_follow_fork_mode_string,
                        &setlist, &showlist);

  add_setshow_enum_cmd ("follow-exec-mode", class_run,
                        follow_exec_mode_names,
                        &follow_exec_mode_string, _("\
Set debugger response to a program call of exec."), _("\
Show debugger response to a program call of exec."), _("\
An exec call replaces the program image of a process.\n\
\n\
follow-exec-mode can be:\n\
\n\
  new - the debugger creates a new inferior and rebinds the process\n\
to this new inferior.  The program the process was running before\n\
the exec call can be restarted afterwards by restarting the original\n\
inferior.\n\
\n\
  same - the debugger keeps the process bound to the same inferior.\n\
The new executable image replaces the previous executable loaded in\n\
the inferior.  Restarting the inferior after the exec call restarts\n\
the executable the process was running after the exec call.\n\
\n\
By default, the debugger will use the same inferior."),
                        nullptr,
                        show_follow_exec_mode_string,
                        &setlist, &showlist);

  add_setshow_enum_cmd ("scheduler-locking", class_run,
                        scheduler_enums, &scheduler_mode, _("\
Set mode for locking scheduler during execution."), _("\
Show mode for locking scheduler during execution."), _("\
off    == no locking (threads may preempt at any time)\n\
on     == full locking (no thread except the current thread may run)\n\
          This applies to both normal execution and replay mode.\n\
step   == scheduler locked during stepping commands (step, next, stepi, nexti).\n\
          In this mode, other threads may run during other commands.\n\
          This applies to both normal execution and replay mode.\n\
replay == scheduler locked in replay mode and unlocked during normal execution."),
                        set_schedlock_func,	/* traps on target vector */
                        show_scheduler_mode,
                        &setlist, &showlist);

  add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
Set mode for resuming threads of all processes."), _("\
Show mode for resuming threads of all processes."), _("\
When on, execution commands (such as 'continue' or 'next') resume all\n\
threads of all processes.  When off (which is the default), execution\n\
commands only resume the threads of the current process.  The set of\n\
threads that are resumed is further refined by the scheduler-locking\n\
mode (see help set scheduler-locking)."),
                           nullptr,
                           show_schedule_multiple,
                           &setlist, &showlist);

  add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
Set mode of the step operation."), _("\
Show mode of the step operation."), _("\
When set, doing a step over a function without debug line information\n\
will stop at the first instruction of that function.  Otherwise, the\n\
function is skipped and the step command stops at a different source line."),
                           nullptr,
                           show_step_stop_if_no_debug,
                           &setlist, &showlist);

  add_setshow_auto_boolean_cmd ("displaced-stepping", class_run,
                                &can_use_displaced_stepping, _("\
Set debugger's willingness to use displaced stepping."), _("\
Show debugger's willingness to use displaced stepping."), _("\
If on, gdb will use displaced stepping to step over breakpoints if it is\n\
supported by the target architecture.  If off, gdb will not use displaced\n\
stepping to step over breakpoints, even if such is supported by the target\n\
architecture.  If auto (which is the default), gdb will use displaced stepping\n\
if the target architecture supports it and non-stop mode is active, but will not\n\
use it in all-stop mode (see help set non-stop)."),
                                nullptr,
                                show_can_use_displaced_stepping,
                                &setlist, &showlist);

  add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
                        &exec_direction, _("Set direction of execution.\n\
Options are 'forward' or 'reverse'."),
                        _("Show direction of execution (forward/reverse)."),
                        _("Tells gdb whether to execute forward or backward."),
                        set_exec_direction_func, show_exec_direction_func,
                        &setlist, &showlist);

  /* Set/show detach-on-fork: user-settable mode.  */

  add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
Set whether gdb will detach the child of a fork."), _("\
Show whether gdb will detach the child of a fork."), _("\
Tells gdb whether to detach the child of a fork."),
                           nullptr, nullptr, &setlist, &showlist);

  /* Set/show disable address space randomization mode.  */

  add_setshow_boolean_cmd ("disable-randomization", class_support,
                           &disable_randomization, _("\
Set disabling of debuggee's virtual address space randomization."), _("\
Show disabling of debuggee's virtual address space randomization."), _("\
When this mode is on (which is the default), randomization of the virtual\n\
address space is disabled.  Standalone programs run with the randomization\n\
enabled by default on some platforms."),
                           &set_disable_randomization,
                           &show_disable_randomization,
                           &setlist, &showlist);

  /* ptid initializations */
  inferior_ptid = null_ptid;
  target_last_wait_ptid = minus_one_ptid;

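  /* Let infrun react to thread ptid changes, stop requests, and
     inferior exit and exec events.  */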
  gdb::observers::thread_ptid_changed.attach (infrun_thread_ptid_changed,
                                              "infrun");
  gdb::observers::thread_stop_requested.attach (infrun_thread_stop_requested,
                                                "infrun");
  gdb::observers::inferior_exit.attach (infrun_inferior_exit, "infrun");
  gdb::observers::inferior_execd.attach (infrun_inferior_execd, "infrun");

  /* Explicitly create without lookup, since that tries to create a
     value with a void typed value, and when we get here, gdbarch
     isn't initialized yet.  At this point, we're quite sure there
     isn't another convenience variable of the same name.  */
  create_internalvar_type_lazy ("_siginfo", &siginfo_funcs, nullptr);

  add_setshow_boolean_cmd ("observer", no_class,
                           &observer_mode_1, _("\
Set whether gdb controls the inferior in observer mode."), _("\
Show whether gdb controls the inferior in observer mode."), _("\
In observer mode, GDB can get data from the inferior, but not\n\
affect its execution.  Registers and memory may not be changed,\n\
breakpoints may not be set, and the program cannot be interrupted\n\
or signalled."),
                           set_observer_mode,
                           show_observer_mode,
                           &setlist,
                           &showlist);

#if GDB_SELF_TEST
  selftests::register_test ("infrun_thread_ptid_changed",
                            selftests::infrun_thread_ptid_changed);
#endif