1 /* Target-struct-independent code to start (run) and stop an inferior
4 Copyright (C) 1986-2024 Free Software Foundation, Inc.
6 This file is part of GDB.
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
22 #include "displaced-stepping.h"
28 #include "breakpoint.h"
32 #include "target-connection.h"
33 #include "gdbthread.h"
41 #include "observable.h"
46 #include "mi/mi-common.h"
47 #include "event-top.h"
49 #include "record-full.h"
50 #include "inline-frame.h"
52 #include "tracepoint.h"
56 #include "completer.h"
57 #include "target-descriptions.h"
58 #include "target-dcache.h"
61 #include "gdbsupport/event-loop.h"
62 #include "thread-fsm.h"
63 #include "gdbsupport/enum-flags.h"
64 #include "progspace-and-thread.h"
66 #include "arch-utils.h"
67 #include "gdbsupport/scope-exit.h"
68 #include "gdbsupport/forward-scope-exit.h"
69 #include "gdbsupport/gdb_select.h"
70 #include <unordered_map>
71 #include "async-event.h"
72 #include "gdbsupport/selftest.h"
73 #include "scoped-mock-context.h"
74 #include "test-target.h"
75 #include "gdbsupport/common-debug.h"
76 #include "gdbsupport/buildargv.h"
77 #include "extension.h"
81 /* Prototypes for local functions */
83 static void sig_print_info (enum gdb_signal
);
85 static void sig_print_header (void);
87 static void follow_inferior_reset_breakpoints (void);
89 static bool currently_stepping (struct thread_info
*tp
);
91 static void insert_hp_step_resume_breakpoint_at_frame (const frame_info_ptr
&);
93 static void insert_step_resume_breakpoint_at_caller (const frame_info_ptr
&);
95 static void insert_longjmp_resume_breakpoint (struct gdbarch
*, CORE_ADDR
);
97 static bool maybe_software_singlestep (struct gdbarch
*gdbarch
);
99 static void resume (gdb_signal sig
);
101 static void wait_for_inferior (inferior
*inf
);
103 static void restart_threads (struct thread_info
*event_thread
,
104 inferior
*inf
= nullptr);
106 static bool start_step_over (void);
108 static bool step_over_info_valid_p (void);
110 static bool schedlock_applies (struct thread_info
*tp
);
112 /* Asynchronous signal handler registered as event loop source for
113 when we have pending events ready to be passed to the core. */
114 static struct async_event_handler
*infrun_async_inferior_event_token
;
116 /* Stores whether infrun_async was previously enabled or disabled.
117 Starts off as -1, indicating "never enabled/disabled". */
118 static int infrun_is_async
= -1;
119 static CORE_ADDR
update_line_range_start (CORE_ADDR pc
,
120 struct execution_control_state
*ecs
);
125 infrun_async (int enable
)
127 if (infrun_is_async
!= enable
)
129 infrun_is_async
= enable
;
131 infrun_debug_printf ("enable=%d", enable
);
134 mark_async_event_handler (infrun_async_inferior_event_token
);
136 clear_async_event_handler (infrun_async_inferior_event_token
);
143 mark_infrun_async_event_handler (void)
145 mark_async_event_handler (infrun_async_inferior_event_token
);
148 /* When set, stop the 'step' command if we enter a function which has
149 no line number information. The normal behavior is that we step
150 over such function. */
151 bool step_stop_if_no_debug
= false;
153 show_step_stop_if_no_debug (struct ui_file
*file
, int from_tty
,
154 struct cmd_list_element
*c
, const char *value
)
156 gdb_printf (file
, _("Mode of the step operation is %s.\n"), value
);
159 /* proceed and normal_stop use this to notify the user when the
160 inferior stopped in a different thread than it had been running in.
161 It can also be used to find for which thread normal_stop last
163 static thread_info_ref previous_thread
;
168 update_previous_thread ()
170 if (inferior_ptid
== null_ptid
)
171 previous_thread
= nullptr;
173 previous_thread
= thread_info_ref::new_reference (inferior_thread ());
179 get_previous_thread ()
181 return previous_thread
.get ();
184 /* If set (default for legacy reasons), when following a fork, GDB
185 will detach from one of the fork branches, child or parent.
186 Exactly which branch is detached depends on 'set follow-fork-mode'
189 static bool detach_fork
= true;
191 bool debug_infrun
= false;
193 show_debug_infrun (struct ui_file
*file
, int from_tty
,
194 struct cmd_list_element
*c
, const char *value
)
196 gdb_printf (file
, _("Inferior debugging is %s.\n"), value
);
199 /* Support for disabling address space randomization. */
201 bool disable_randomization
= true;
204 show_disable_randomization (struct ui_file
*file
, int from_tty
,
205 struct cmd_list_element
*c
, const char *value
)
207 if (target_supports_disable_randomization ())
209 _("Disabling randomization of debuggee's "
210 "virtual address space is %s.\n"),
213 gdb_puts (_("Disabling randomization of debuggee's "
214 "virtual address space is unsupported on\n"
215 "this platform.\n"), file
);
219 set_disable_randomization (const char *args
, int from_tty
,
220 struct cmd_list_element
*c
)
222 if (!target_supports_disable_randomization ())
223 error (_("Disabling randomization of debuggee's "
224 "virtual address space is unsupported on\n"
228 /* User interface for non-stop mode. */
230 bool non_stop
= false;
231 static bool non_stop_1
= false;
234 set_non_stop (const char *args
, int from_tty
,
235 struct cmd_list_element
*c
)
237 if (target_has_execution ())
239 non_stop_1
= non_stop
;
240 error (_("Cannot change this setting while the inferior is running."));
243 non_stop
= non_stop_1
;
247 show_non_stop (struct ui_file
*file
, int from_tty
,
248 struct cmd_list_element
*c
, const char *value
)
251 _("Controlling the inferior in non-stop mode is %s.\n"),
255 /* "Observer mode" is somewhat like a more extreme version of
256 non-stop, in which all GDB operations that might affect the
257 target's execution have been disabled. */
259 static bool observer_mode
= false;
260 static bool observer_mode_1
= false;
263 set_observer_mode (const char *args
, int from_tty
,
264 struct cmd_list_element
*c
)
266 if (target_has_execution ())
268 observer_mode_1
= observer_mode
;
269 error (_("Cannot change this setting while the inferior is running."));
272 observer_mode
= observer_mode_1
;
274 may_write_registers
= !observer_mode
;
275 may_write_memory
= !observer_mode
;
276 may_insert_breakpoints
= !observer_mode
;
277 may_insert_tracepoints
= !observer_mode
;
278 /* We can insert fast tracepoints in or out of observer mode,
279 but enable them if we're going into this mode. */
281 may_insert_fast_tracepoints
= true;
282 may_stop
= !observer_mode
;
283 update_target_permissions ();
285 /* Going *into* observer mode we must force non-stop, then
286 going out we leave it that way. */
289 pagination_enabled
= false;
290 non_stop
= non_stop_1
= true;
294 gdb_printf (_("Observer mode is now %s.\n"),
295 (observer_mode
? "on" : "off"));
299 show_observer_mode (struct ui_file
*file
, int from_tty
,
300 struct cmd_list_element
*c
, const char *value
)
302 gdb_printf (file
, _("Observer mode is %s.\n"), value
);
305 /* This updates the value of observer mode based on changes in
306 permissions. Note that we are deliberately ignoring the values of
307 may-write-registers and may-write-memory, since the user may have
308 reason to enable these during a session, for instance to turn on a
309 debugging-related global. */
312 update_observer_mode (void)
314 bool newval
= (!may_insert_breakpoints
315 && !may_insert_tracepoints
316 && may_insert_fast_tracepoints
320 /* Let the user know if things change. */
321 if (newval
!= observer_mode
)
322 gdb_printf (_("Observer mode is now %s.\n"),
323 (newval
? "on" : "off"));
325 observer_mode
= observer_mode_1
= newval
;
328 /* Tables of how to react to signals; the user sets them. */
330 static unsigned char signal_stop
[GDB_SIGNAL_LAST
];
331 static unsigned char signal_print
[GDB_SIGNAL_LAST
];
332 static unsigned char signal_program
[GDB_SIGNAL_LAST
];
334 /* Table of signals that are registered with "catch signal". A
335 non-zero entry indicates that the signal is caught by some "catch
337 static unsigned char signal_catch
[GDB_SIGNAL_LAST
];
339 /* Table of signals that the target may silently handle.
340 This is automatically determined from the flags above,
341 and simply cached here. */
342 static unsigned char signal_pass
[GDB_SIGNAL_LAST
];
344 #define SET_SIGS(nsigs,sigs,flags) \
346 int signum = (nsigs); \
347 while (signum-- > 0) \
348 if ((sigs)[signum]) \
349 (flags)[signum] = 1; \
352 #define UNSET_SIGS(nsigs,sigs,flags) \
354 int signum = (nsigs); \
355 while (signum-- > 0) \
356 if ((sigs)[signum]) \
357 (flags)[signum] = 0; \
360 /* Update the target's copy of SIGNAL_PROGRAM. The sole purpose of
361 this function is to avoid exporting `signal_program'. */
364 update_signals_program_target (void)
366 target_program_signals (signal_program
);
369 /* Value to pass to target_resume() to cause all threads to resume. */
371 #define RESUME_ALL minus_one_ptid
373 /* Command list pointer for the "stop" placeholder. */
375 static struct cmd_list_element
*stop_command
;
377 /* Nonzero if we want to give control to the user when we're notified
378 of shared library events by the dynamic linker. */
379 int stop_on_solib_events
;
381 /* Enable or disable optional shared library event breakpoints
382 as appropriate when the above flag is changed. */
385 set_stop_on_solib_events (const char *args
,
386 int from_tty
, struct cmd_list_element
*c
)
388 update_solib_breakpoints ();
392 show_stop_on_solib_events (struct ui_file
*file
, int from_tty
,
393 struct cmd_list_element
*c
, const char *value
)
395 gdb_printf (file
, _("Stopping for shared library events is %s.\n"),
399 /* True after stop if current stack frame should be printed. */
401 static bool stop_print_frame
;
403 /* This is a cached copy of the target/ptid/waitstatus of the last
404 event returned by target_wait().
405 This information is returned by get_last_target_status(). */
406 static process_stratum_target
*target_last_proc_target
;
407 static ptid_t target_last_wait_ptid
;
408 static struct target_waitstatus target_last_waitstatus
;
410 void init_thread_stepping_state (struct thread_info
*tss
);
412 static const char follow_fork_mode_child
[] = "child";
413 static const char follow_fork_mode_parent
[] = "parent";
415 static const char *const follow_fork_mode_kind_names
[] = {
416 follow_fork_mode_child
,
417 follow_fork_mode_parent
,
421 static const char *follow_fork_mode_string
= follow_fork_mode_parent
;
423 show_follow_fork_mode_string (struct ui_file
*file
, int from_tty
,
424 struct cmd_list_element
*c
, const char *value
)
427 _("Debugger response to a program "
428 "call of fork or vfork is \"%s\".\n"),
433 /* Handle changes to the inferior list based on the type of fork,
434 which process is being followed, and whether the other process
435 should be detached. On entry inferior_ptid must be the ptid of
436 the fork parent. At return inferior_ptid is the ptid of the
437 followed inferior. */
440 follow_fork_inferior (bool follow_child
, bool detach_fork
)
442 INFRUN_SCOPED_DEBUG_ENTER_EXIT
;
444 infrun_debug_printf ("follow_child = %d, detach_fork = %d",
445 follow_child
, detach_fork
);
447 target_waitkind fork_kind
= inferior_thread ()->pending_follow
.kind ();
448 gdb_assert (fork_kind
== TARGET_WAITKIND_FORKED
449 || fork_kind
== TARGET_WAITKIND_VFORKED
);
450 bool has_vforked
= fork_kind
== TARGET_WAITKIND_VFORKED
;
451 ptid_t parent_ptid
= inferior_ptid
;
452 ptid_t child_ptid
= inferior_thread ()->pending_follow
.child_ptid ();
455 && !non_stop
/* Non-stop always resumes both branches. */
456 && current_ui
->prompt_state
== PROMPT_BLOCKED
457 && !(follow_child
|| detach_fork
|| sched_multi
))
459 /* The parent stays blocked inside the vfork syscall until the
460 child execs or exits. If we don't let the child run, then
461 the parent stays blocked. If we're telling the parent to run
462 in the foreground, the user will not be able to ctrl-c to get
463 back the terminal, effectively hanging the debug session. */
464 gdb_printf (gdb_stderr
, _("\
465 Can not resume the parent process over vfork in the foreground while\n\
466 holding the child stopped. Try \"set detach-on-fork\" or \
467 \"set schedule-multiple\".\n"));
471 inferior
*parent_inf
= current_inferior ();
472 inferior
*child_inf
= nullptr;
474 gdb_assert (parent_inf
->thread_waiting_for_vfork_done
== nullptr);
478 /* Detach new forked process? */
481 /* Before detaching from the child, remove all breakpoints
482 from it. If we forked, then this has already been taken
483 care of by infrun.c. If we vforked however, any
484 breakpoint inserted in the parent is visible in the
485 child, even those added while stopped in a vfork
486 catchpoint. This will remove the breakpoints from the
487 parent also, but they'll be reinserted below. */
490 /* Keep breakpoints list in sync. */
491 remove_breakpoints_inf (current_inferior ());
494 if (print_inferior_events
)
496 /* Ensure that we have a process ptid. */
497 ptid_t process_ptid
= ptid_t (child_ptid
.pid ());
499 target_terminal::ours_for_output ();
500 gdb_printf (_("[Detaching after %s from child %s]\n"),
501 has_vforked
? "vfork" : "fork",
502 target_pid_to_str (process_ptid
).c_str ());
507 /* Add process to GDB's tables. */
508 child_inf
= add_inferior (child_ptid
.pid ());
510 child_inf
->attach_flag
= parent_inf
->attach_flag
;
511 copy_terminal_info (child_inf
, parent_inf
);
512 child_inf
->set_arch (parent_inf
->arch ());
513 child_inf
->tdesc_info
= parent_inf
->tdesc_info
;
515 child_inf
->symfile_flags
= SYMFILE_NO_READ
;
517 /* If this is a vfork child, then the address-space is
518 shared with the parent. */
521 child_inf
->pspace
= parent_inf
->pspace
;
522 child_inf
->aspace
= parent_inf
->aspace
;
524 exec_on_vfork (child_inf
);
526 /* The parent will be frozen until the child is done
527 with the shared region. Keep track of the
529 child_inf
->vfork_parent
= parent_inf
;
530 child_inf
->pending_detach
= false;
531 parent_inf
->vfork_child
= child_inf
;
532 parent_inf
->pending_detach
= false;
536 child_inf
->pspace
= new program_space (new_address_space ());
537 child_inf
->aspace
= child_inf
->pspace
->aspace
;
538 child_inf
->removable
= true;
539 clone_program_space (child_inf
->pspace
, parent_inf
->pspace
);
545 /* If we detached from the child, then we have to be careful
546 to not insert breakpoints in the parent until the child
547 is done with the shared memory region. However, if we're
548 staying attached to the child, then we can and should
549 insert breakpoints, so that we can debug it. A
550 subsequent child exec or exit is enough to know when does
551 the child stops using the parent's address space. */
552 parent_inf
->thread_waiting_for_vfork_done
553 = detach_fork
? inferior_thread () : nullptr;
554 parent_inf
->pspace
->breakpoints_not_allowed
= detach_fork
;
557 ("parent_inf->thread_waiting_for_vfork_done == %s",
558 (parent_inf
->thread_waiting_for_vfork_done
== nullptr
560 : (parent_inf
->thread_waiting_for_vfork_done
561 ->ptid
.to_string ().c_str ())));
566 /* Follow the child. */
568 if (print_inferior_events
)
570 std::string parent_pid
= target_pid_to_str (parent_ptid
);
571 std::string child_pid
= target_pid_to_str (child_ptid
);
573 target_terminal::ours_for_output ();
574 gdb_printf (_("[Attaching after %s %s to child %s]\n"),
576 has_vforked
? "vfork" : "fork",
580 /* Add the new inferior first, so that the target_detach below
581 doesn't unpush the target. */
583 child_inf
= add_inferior (child_ptid
.pid ());
585 child_inf
->attach_flag
= parent_inf
->attach_flag
;
586 copy_terminal_info (child_inf
, parent_inf
);
587 child_inf
->set_arch (parent_inf
->arch ());
588 child_inf
->tdesc_info
= parent_inf
->tdesc_info
;
592 /* If this is a vfork child, then the address-space is shared
594 child_inf
->aspace
= parent_inf
->aspace
;
595 child_inf
->pspace
= parent_inf
->pspace
;
597 exec_on_vfork (child_inf
);
599 else if (detach_fork
)
601 /* We follow the child and detach from the parent: move the parent's
602 program space to the child. This simplifies some things, like
603 doing "next" over fork() and landing on the expected line in the
604 child (note, that is broken with "set detach-on-fork off").
606 Before assigning brand new spaces for the parent, remove
607 breakpoints from it: because the new pspace won't match
608 currently inserted locations, the normal detach procedure
609 wouldn't remove them, and we would leave them inserted when
611 remove_breakpoints_inf (parent_inf
);
613 child_inf
->aspace
= parent_inf
->aspace
;
614 child_inf
->pspace
= parent_inf
->pspace
;
615 parent_inf
->pspace
= new program_space (new_address_space ());
616 parent_inf
->aspace
= parent_inf
->pspace
->aspace
;
617 clone_program_space (parent_inf
->pspace
, child_inf
->pspace
);
619 /* The parent inferior is still the current one, so keep things
621 set_current_program_space (parent_inf
->pspace
);
625 child_inf
->pspace
= new program_space (new_address_space ());
626 child_inf
->aspace
= child_inf
->pspace
->aspace
;
627 child_inf
->removable
= true;
628 child_inf
->symfile_flags
= SYMFILE_NO_READ
;
629 clone_program_space (child_inf
->pspace
, parent_inf
->pspace
);
633 gdb_assert (current_inferior () == parent_inf
);
635 /* If we are setting up an inferior for the child, target_follow_fork is
636 responsible for pushing the appropriate targets on the new inferior's
637 target stack and adding the initial thread (with ptid CHILD_PTID).
639 If we are not setting up an inferior for the child (because following
640 the parent and detach_fork is true), it is responsible for detaching
642 target_follow_fork (child_inf
, child_ptid
, fork_kind
, follow_child
,
645 gdb::observers::inferior_forked
.notify (parent_inf
, child_inf
, fork_kind
);
647 /* target_follow_fork must leave the parent as the current inferior. If we
648 want to follow the child, we make it the current one below. */
649 gdb_assert (current_inferior () == parent_inf
);
651 /* If there is a child inferior, target_follow_fork must have created a thread
653 if (child_inf
!= nullptr)
654 gdb_assert (!child_inf
->thread_list
.empty ());
656 /* Clear the parent thread's pending follow field. Do this before calling
657 target_detach, so that the target can differentiate the two following
660 - We continue past a fork with "follow-fork-mode == child" &&
661 "detach-on-fork on", and therefore detach the parent. In that
662 case the target should not detach the fork child.
663 - We run to a fork catchpoint and the user types "detach". In that
664 case, the target should detach the fork child in addition to the
667 The former case will have pending_follow cleared, the later will have
668 pending_follow set. */
669 thread_info
*parent_thread
= parent_inf
->find_thread (parent_ptid
);
670 gdb_assert (parent_thread
!= nullptr);
671 parent_thread
->pending_follow
.set_spurious ();
673 /* Detach the parent if needed. */
676 /* If we're vforking, we want to hold on to the parent until
677 the child exits or execs. At child exec or exit time we
678 can remove the old breakpoints from the parent and detach
679 or resume debugging it. Otherwise, detach the parent now;
680 we'll want to reuse it's program/address spaces, but we
681 can't set them to the child before removing breakpoints
682 from the parent, otherwise, the breakpoints module could
683 decide to remove breakpoints from the wrong process (since
684 they'd be assigned to the same address space). */
688 gdb_assert (child_inf
->vfork_parent
== nullptr);
689 gdb_assert (parent_inf
->vfork_child
== nullptr);
690 child_inf
->vfork_parent
= parent_inf
;
691 child_inf
->pending_detach
= false;
692 parent_inf
->vfork_child
= child_inf
;
693 parent_inf
->pending_detach
= detach_fork
;
695 else if (detach_fork
)
697 if (print_inferior_events
)
699 /* Ensure that we have a process ptid. */
700 ptid_t process_ptid
= ptid_t (parent_ptid
.pid ());
702 target_terminal::ours_for_output ();
703 gdb_printf (_("[Detaching after fork from "
705 target_pid_to_str (process_ptid
).c_str ());
708 target_detach (parent_inf
, 0);
712 /* If we ended up creating a new inferior, call post_create_inferior to inform
713 the various subcomponents. */
714 if (child_inf
!= nullptr)
716 /* If FOLLOW_CHILD, we leave CHILD_INF as the current inferior
717 (do not restore the parent as the current inferior). */
718 std::optional
<scoped_restore_current_thread
> maybe_restore
;
720 if (!follow_child
&& !sched_multi
)
721 maybe_restore
.emplace ();
723 switch_to_thread (*child_inf
->threads ().begin ());
724 post_create_inferior (0);
730 /* Set the last target status as TP having stopped. */
733 set_last_target_status_stopped (thread_info
*tp
)
735 set_last_target_status (tp
->inf
->process_target (), tp
->ptid
,
736 target_waitstatus
{}.set_stopped (GDB_SIGNAL_0
));
739 /* Tell the target to follow the fork we're stopped at. Returns true
740 if the inferior should be resumed; false, if the target for some
741 reason decided it's best not to resume. */
746 INFRUN_SCOPED_DEBUG_ENTER_EXIT
;
748 bool follow_child
= (follow_fork_mode_string
== follow_fork_mode_child
);
749 bool should_resume
= true;
751 /* Copy user stepping state to the new inferior thread. FIXME: the
752 followed fork child thread should have a copy of most of the
753 parent thread structure's run control related fields, not just these.
754 Initialized to avoid "may be used uninitialized" warnings from gcc. */
755 struct breakpoint
*step_resume_breakpoint
= nullptr;
756 struct breakpoint
*exception_resume_breakpoint
= nullptr;
757 CORE_ADDR step_range_start
= 0;
758 CORE_ADDR step_range_end
= 0;
759 int current_line
= 0;
760 symtab
*current_symtab
= nullptr;
761 struct frame_id step_frame_id
= { 0 };
765 thread_info
*cur_thr
= inferior_thread ();
768 = user_visible_resume_ptid (cur_thr
->control
.stepping_command
);
769 process_stratum_target
*resume_target
770 = user_visible_resume_target (resume_ptid
);
772 /* Check if there's a thread that we're about to resume, other
773 than the current, with an unfollowed fork/vfork. If so,
774 switch back to it, to tell the target to follow it (in either
775 direction). We'll afterwards refuse to resume, and inform
776 the user what happened. */
777 for (thread_info
*tp
: all_non_exited_threads (resume_target
,
783 /* follow_fork_inferior clears tp->pending_follow, and below
784 we'll need the value after the follow_fork_inferior
786 target_waitkind kind
= tp
->pending_follow
.kind ();
788 if (kind
!= TARGET_WAITKIND_SPURIOUS
)
790 infrun_debug_printf ("need to follow-fork [%s] first",
791 tp
->ptid
.to_string ().c_str ());
793 switch_to_thread (tp
);
795 /* Set up inferior(s) as specified by the caller, and
796 tell the target to do whatever is necessary to follow
797 either parent or child. */
800 /* The thread that started the execution command
801 won't exist in the child. Abort the command and
802 immediately stop in this thread, in the child,
804 should_resume
= false;
808 /* Following the parent, so let the thread fork its
809 child freely, it won't influence the current
810 execution command. */
811 if (follow_fork_inferior (follow_child
, detach_fork
))
813 /* Target refused to follow, or there's some
814 other reason we shouldn't resume. */
815 switch_to_thread (cur_thr
);
816 set_last_target_status_stopped (cur_thr
);
820 /* If we're following a vfork, when we need to leave
821 the just-forked thread as selected, as we need to
822 solo-resume it to collect the VFORK_DONE event.
823 If we're following a fork, however, switch back
824 to the original thread that we continue stepping
826 if (kind
!= TARGET_WAITKIND_VFORKED
)
828 gdb_assert (kind
== TARGET_WAITKIND_FORKED
);
829 switch_to_thread (cur_thr
);
838 thread_info
*tp
= inferior_thread ();
840 /* If there were any forks/vforks that were caught and are now to be
841 followed, then do so now. */
842 switch (tp
->pending_follow
.kind ())
844 case TARGET_WAITKIND_FORKED
:
845 case TARGET_WAITKIND_VFORKED
:
847 ptid_t parent
, child
;
848 std::unique_ptr
<struct thread_fsm
> thread_fsm
;
850 /* If the user did a next/step, etc, over a fork call,
851 preserve the stepping state in the fork child. */
852 if (follow_child
&& should_resume
)
854 step_resume_breakpoint
= clone_momentary_breakpoint
855 (tp
->control
.step_resume_breakpoint
);
856 step_range_start
= tp
->control
.step_range_start
;
857 step_range_end
= tp
->control
.step_range_end
;
858 current_line
= tp
->current_line
;
859 current_symtab
= tp
->current_symtab
;
860 step_frame_id
= tp
->control
.step_frame_id
;
861 exception_resume_breakpoint
862 = clone_momentary_breakpoint (tp
->control
.exception_resume_breakpoint
);
863 thread_fsm
= tp
->release_thread_fsm ();
865 /* For now, delete the parent's sr breakpoint, otherwise,
866 parent/child sr breakpoints are considered duplicates,
867 and the child version will not be installed. Remove
868 this when the breakpoints module becomes aware of
869 inferiors and address spaces. */
870 delete_step_resume_breakpoint (tp
);
871 tp
->control
.step_range_start
= 0;
872 tp
->control
.step_range_end
= 0;
873 tp
->control
.step_frame_id
= null_frame_id
;
874 delete_exception_resume_breakpoint (tp
);
877 parent
= inferior_ptid
;
878 child
= tp
->pending_follow
.child_ptid ();
880 /* If handling a vfork, stop all the inferior's threads, they will be
881 restarted when the vfork shared region is complete. */
882 if (tp
->pending_follow
.kind () == TARGET_WAITKIND_VFORKED
883 && target_is_non_stop_p ())
884 stop_all_threads ("handling vfork", tp
->inf
);
886 process_stratum_target
*parent_targ
= tp
->inf
->process_target ();
887 /* Set up inferior(s) as specified by the caller, and tell the
888 target to do whatever is necessary to follow either parent
890 if (follow_fork_inferior (follow_child
, detach_fork
))
892 /* Target refused to follow, or there's some other reason
893 we shouldn't resume. */
898 /* If we followed the child, switch to it... */
901 tp
= parent_targ
->find_thread (child
);
902 switch_to_thread (tp
);
904 /* ... and preserve the stepping state, in case the
905 user was stepping over the fork call. */
908 tp
->control
.step_resume_breakpoint
909 = step_resume_breakpoint
;
910 tp
->control
.step_range_start
= step_range_start
;
911 tp
->control
.step_range_end
= step_range_end
;
912 tp
->current_line
= current_line
;
913 tp
->current_symtab
= current_symtab
;
914 tp
->control
.step_frame_id
= step_frame_id
;
915 tp
->control
.exception_resume_breakpoint
916 = exception_resume_breakpoint
;
917 tp
->set_thread_fsm (std::move (thread_fsm
));
921 /* If we get here, it was because we're trying to
922 resume from a fork catchpoint, but, the user
923 has switched threads away from the thread that
924 forked. In that case, the resume command
925 issued is most likely not applicable to the
926 child, so just warn, and refuse to resume. */
927 warning (_("Not resuming: switched threads "
928 "before following fork child."));
931 /* Reset breakpoints in the child as appropriate. */
932 follow_inferior_reset_breakpoints ();
937 case TARGET_WAITKIND_SPURIOUS
:
938 /* Nothing to follow. */
941 internal_error ("Unexpected pending_follow.kind %d\n",
942 tp
->pending_follow
.kind ());
947 set_last_target_status_stopped (tp
);
948 return should_resume
;
952 follow_inferior_reset_breakpoints (void)
954 struct thread_info
*tp
= inferior_thread ();
956 /* Was there a step_resume breakpoint? (There was if the user
957 did a "next" at the fork() call.) If so, explicitly reset its
958 thread number. Cloned step_resume breakpoints are disabled on
959 creation, so enable it here now that it is associated with the
962 step_resumes are a form of bp that are made to be per-thread.
963 Since we created the step_resume bp when the parent process
964 was being debugged, and now are switching to the child process,
965 from the breakpoint package's viewpoint, that's a switch of
966 "threads". We must update the bp's notion of which thread
967 it is for, or it'll be ignored when it triggers. */
969 if (tp
->control
.step_resume_breakpoint
)
971 breakpoint_re_set_thread (tp
->control
.step_resume_breakpoint
);
972 tp
->control
.step_resume_breakpoint
->first_loc ().enabled
= 1;
975 /* Treat exception_resume breakpoints like step_resume breakpoints. */
976 if (tp
->control
.exception_resume_breakpoint
)
978 breakpoint_re_set_thread (tp
->control
.exception_resume_breakpoint
);
979 tp
->control
.exception_resume_breakpoint
->first_loc ().enabled
= 1;
982 /* Reinsert all breakpoints in the child. The user may have set
983 breakpoints after catching the fork, in which case those
984 were never set in the child, but only in the parent. This makes
985 sure the inserted breakpoints match the breakpoint list. */
987 breakpoint_re_set ();
988 insert_breakpoints ();
991 /* The child has exited or execed: resume THREAD, a thread of the parent,
992 if it was meant to be executing. */
995 proceed_after_vfork_done (thread_info
*thread
)
997 if (thread
->state
== THREAD_RUNNING
998 && !thread
->executing ()
999 && !thread
->stop_requested
1000 && thread
->stop_signal () == GDB_SIGNAL_0
)
1002 infrun_debug_printf ("resuming vfork parent thread %s",
1003 thread
->ptid
.to_string ().c_str ());
1005 switch_to_thread (thread
);
1006 clear_proceed_status (0);
1007 proceed ((CORE_ADDR
) -1, GDB_SIGNAL_DEFAULT
);
1011 /* Called whenever we notice an exec or exit event, to handle
1012 detaching or resuming a vfork parent. */
1015 handle_vfork_child_exec_or_exit (int exec
)
1017 INFRUN_SCOPED_DEBUG_ENTER_EXIT
;
1019 struct inferior
*inf
= current_inferior ();
1021 if (inf
->vfork_parent
)
1023 inferior
*resume_parent
= nullptr;
1025 /* This exec or exit marks the end of the shared memory region
1026 between the parent and the child. Break the bonds. */
1027 inferior
*vfork_parent
= inf
->vfork_parent
;
1028 inf
->vfork_parent
->vfork_child
= nullptr;
1029 inf
->vfork_parent
= nullptr;
1031 /* If the user wanted to detach from the parent, now is the
1033 if (vfork_parent
->pending_detach
)
1035 struct program_space
*pspace
;
1037 /* follow-fork child, detach-on-fork on. */
1039 vfork_parent
->pending_detach
= false;
1041 scoped_restore_current_pspace_and_thread restore_thread
;
1043 /* We're letting loose of the parent. */
1044 thread_info
*tp
= any_live_thread_of_inferior (vfork_parent
);
1045 switch_to_thread (tp
);
1047 /* We're about to detach from the parent, which implicitly
1048 removes breakpoints from its address space. There's a
1049 catch here: we want to reuse the spaces for the child,
1050 but, parent/child are still sharing the pspace at this
1051 point, although the exec in reality makes the kernel give
1052 the child a fresh set of new pages. The problem here is
1053 that the breakpoints module being unaware of this, would
1054 likely chose the child process to write to the parent
1055 address space. Swapping the child temporarily away from
1056 the spaces has the desired effect. Yes, this is "sort
1059 pspace
= inf
->pspace
;
1060 inf
->pspace
= nullptr;
1061 address_space_ref_ptr aspace
= std::move (inf
->aspace
);
1063 if (print_inferior_events
)
1066 = target_pid_to_str (ptid_t (vfork_parent
->pid
));
1068 target_terminal::ours_for_output ();
1072 gdb_printf (_("[Detaching vfork parent %s "
1073 "after child exec]\n"), pidstr
.c_str ());
1077 gdb_printf (_("[Detaching vfork parent %s "
1078 "after child exit]\n"), pidstr
.c_str ());
1082 target_detach (vfork_parent
, 0);
1085 inf
->pspace
= pspace
;
1086 inf
->aspace
= aspace
;
1090 /* We're staying attached to the parent, so, really give the
1091 child a new address space. */
1092 inf
->pspace
= new program_space (maybe_new_address_space ());
1093 inf
->aspace
= inf
->pspace
->aspace
;
1094 inf
->removable
= true;
1095 set_current_program_space (inf
->pspace
);
1097 resume_parent
= vfork_parent
;
1101 /* If this is a vfork child exiting, then the pspace and
1102 aspaces were shared with the parent. Since we're
1103 reporting the process exit, we'll be mourning all that is
1104 found in the address space, and switching to null_ptid,
1105 preparing to start a new inferior. But, since we don't
1106 want to clobber the parent's address/program spaces, we
1107 go ahead and create a new one for this exiting
1110 scoped_restore_current_thread restore_thread
;
1112 /* Temporarily switch to the vfork parent, to facilitate ptrace
1113 calls done during maybe_new_address_space. */
1114 switch_to_thread (any_live_thread_of_inferior (vfork_parent
));
1115 address_space_ref_ptr aspace
= maybe_new_address_space ();
1117 /* Switch back to the vfork child inferior. Switch to no-thread
1118 while running clone_program_space, so that clone_program_space
1119 doesn't want to read the selected frame of a dead process. */
1120 switch_to_inferior_no_thread (inf
);
1122 inf
->pspace
= new program_space (std::move (aspace
));
1123 inf
->aspace
= inf
->pspace
->aspace
;
1124 set_current_program_space (inf
->pspace
);
1125 inf
->removable
= true;
1126 inf
->symfile_flags
= SYMFILE_NO_READ
;
1127 clone_program_space (inf
->pspace
, vfork_parent
->pspace
);
1129 resume_parent
= vfork_parent
;
1132 gdb_assert (current_program_space
== inf
->pspace
);
1134 if (non_stop
&& resume_parent
!= nullptr)
1136 /* If the user wanted the parent to be running, let it go
1138 scoped_restore_current_thread restore_thread
;
1140 infrun_debug_printf ("resuming vfork parent process %d",
1141 resume_parent
->pid
);
1143 for (thread_info
*thread
: resume_parent
->threads ())
1144 proceed_after_vfork_done (thread
);
1149 /* Handle TARGET_WAITKIND_VFORK_DONE. */
1152 handle_vfork_done (thread_info
*event_thread
)
1154 INFRUN_SCOPED_DEBUG_ENTER_EXIT
;
1156 /* We only care about this event if inferior::thread_waiting_for_vfork_done is
1157 set, that is if we are waiting for a vfork child not under our control
1158 (because we detached it) to exec or exit.
1160 If an inferior has vforked and we are debugging the child, we don't use
1161 the vfork-done event to get notified about the end of the shared address
1162 space window. We rely instead on the child's exec or exit event, and the
1163 inferior::vfork_{parent,child} fields are used instead. See
1164 handle_vfork_child_exec_or_exit for that. */
1165 if (event_thread
->inf
->thread_waiting_for_vfork_done
== nullptr)
1167 infrun_debug_printf ("not waiting for a vfork-done event");
1171 /* We stopped all threads (other than the vforking thread) of the inferior in
1172 follow_fork and kept them stopped until now. It should therefore not be
1173 possible for another thread to have reported a vfork during that window.
1174 If THREAD_WAITING_FOR_VFORK_DONE is set, it has to be the same thread whose
1175 vfork-done we are handling right now. */
1176 gdb_assert (event_thread
->inf
->thread_waiting_for_vfork_done
== event_thread
);
1178 event_thread
->inf
->thread_waiting_for_vfork_done
= nullptr;
1179 event_thread
->inf
->pspace
->breakpoints_not_allowed
= 0;
1181 /* On non-stop targets, we stopped all the inferior's threads in follow_fork,
1182 resume them now. On all-stop targets, everything that needs to be resumed
1183 will be when we resume the event thread. */
1184 if (target_is_non_stop_p ())
1186 /* restart_threads and start_step_over may change the current thread, make
1187 sure we leave the event thread as the current thread. */
1188 scoped_restore_current_thread restore_thread
;
1190 insert_breakpoints ();
1193 if (!step_over_info_valid_p ())
1194 restart_threads (event_thread
, event_thread
->inf
);
/* Enum strings for "set|show follow-exec-mode".  */

static const char follow_exec_mode_new[] = "new";
static const char follow_exec_mode_same[] = "same";
static const char *const follow_exec_mode_names[] =
{
  follow_exec_mode_new,
  follow_exec_mode_same,
  nullptr,
};

/* The currently selected follow-exec mode; defaults to "same".  */
static const char *follow_exec_mode_string = follow_exec_mode_same;
1211 show_follow_exec_mode_string (struct ui_file
*file
, int from_tty
,
1212 struct cmd_list_element
*c
, const char *value
)
1214 gdb_printf (file
, _("Follow exec mode is \"%s\".\n"), value
);
1217 /* EXEC_FILE_TARGET is assumed to be non-NULL. */
1220 follow_exec (ptid_t ptid
, const char *exec_file_target
)
1222 int pid
= ptid
.pid ();
1223 ptid_t process_ptid
;
1225 /* Switch terminal for any messages produced e.g. by
1226 breakpoint_re_set. */
1227 target_terminal::ours_for_output ();
1229 /* This is an exec event that we actually wish to pay attention to.
1230 Refresh our symbol table to the newly exec'd program, remove any
1231 momentary bp's, etc.
1233 If there are breakpoints, they aren't really inserted now,
1234 since the exec() transformed our inferior into a fresh set
1237 We want to preserve symbolic breakpoints on the list, since
1238 we have hopes that they can be reset after the new a.out's
1239 symbol table is read.
1241 However, any "raw" breakpoints must be removed from the list
1242 (e.g., the solib bp's), since their address is probably invalid
1245 And, we DON'T want to call delete_breakpoints() here, since
1246 that may write the bp's "shadow contents" (the instruction
1247 value that was overwritten with a TRAP instruction). Since
1248 we now have a new a.out, those shadow contents aren't valid. */
1250 mark_breakpoints_out (current_program_space
);
1252 /* The target reports the exec event to the main thread, even if
1253 some other thread does the exec, and even if the main thread was
1254 stopped or already gone. We may still have non-leader threads of
1255 the process on our list. E.g., on targets that don't have thread
1256 exit events (like remote) and nothing forces an update of the
1257 thread list up to here. When debugging remotely, it's best to
1258 avoid extra traffic, when possible, so avoid syncing the thread
1259 list with the target, and instead go ahead and delete all threads
1260 of the process but the one that reported the event. Note this must
1261 be done before calling update_breakpoints_after_exec, as
1262 otherwise clearing the threads' resources would reference stale
1263 thread breakpoints -- it may have been one of these threads that
1264 stepped across the exec. We could just clear their stepping
1265 states, but as long as we're iterating, might as well delete
1266 them. Deleting them now rather than at the next user-visible
1267 stop provides a nicer sequence of events for user and MI
1269 for (thread_info
*th
: all_threads_safe ())
1270 if (th
->ptid
.pid () == pid
&& th
->ptid
!= ptid
)
1273 /* We also need to clear any left over stale state for the
1274 leader/event thread. E.g., if there was any step-resume
1275 breakpoint or similar, it's gone now. We cannot truly
1276 step-to-next statement through an exec(). */
1277 thread_info
*th
= inferior_thread ();
1278 th
->control
.step_resume_breakpoint
= nullptr;
1279 th
->control
.exception_resume_breakpoint
= nullptr;
1280 th
->control
.single_step_breakpoints
= nullptr;
1281 th
->control
.step_range_start
= 0;
1282 th
->control
.step_range_end
= 0;
1284 /* The user may have had the main thread held stopped in the
1285 previous image (e.g., schedlock on, or non-stop). Release
1287 th
->stop_requested
= 0;
1289 update_breakpoints_after_exec ();
1291 /* What is this a.out's name? */
1292 process_ptid
= ptid_t (pid
);
1293 gdb_printf (_("%s is executing new program: %s\n"),
1294 target_pid_to_str (process_ptid
).c_str (),
1297 /* We've followed the inferior through an exec. Therefore, the
1298 inferior has essentially been killed & reborn. */
1300 breakpoint_init_inferior (current_inferior (), inf_execd
);
1302 gdb::unique_xmalloc_ptr
<char> exec_file_host
1303 = exec_file_find (exec_file_target
, nullptr);
1305 /* If we were unable to map the executable target pathname onto a host
1306 pathname, tell the user that. Otherwise GDB's subsequent behavior
1307 is confusing. Maybe it would even be better to stop at this point
1308 so that the user can specify a file manually before continuing. */
1309 if (exec_file_host
== nullptr)
1310 warning (_("Could not load symbols for executable %s.\n"
1311 "Do you need \"set sysroot\"?"),
1314 /* Reset the shared library package. This ensures that we get a
1315 shlib event when the child reaches "_start", at which point the
1316 dld will have had a chance to initialize the child. */
1317 /* Also, loading a symbol file below may trigger symbol lookups, and
1318 we don't want those to be satisfied by the libraries of the
1319 previous incarnation of this process. */
1320 no_shared_libraries (nullptr, 0);
1322 inferior
*execing_inferior
= current_inferior ();
1323 inferior
*following_inferior
;
1325 if (follow_exec_mode_string
== follow_exec_mode_new
)
1327 /* The user wants to keep the old inferior and program spaces
1328 around. Create a new fresh one, and switch to it. */
1330 /* Do exit processing for the original inferior before setting the new
1331 inferior's pid. Having two inferiors with the same pid would confuse
1332 find_inferior_p(t)id. Transfer the terminal state and info from the
1333 old to the new inferior. */
1334 following_inferior
= add_inferior_with_spaces ();
1336 swap_terminal_info (following_inferior
, execing_inferior
);
1337 exit_inferior (execing_inferior
);
1339 following_inferior
->pid
= pid
;
1343 /* follow-exec-mode is "same", we continue execution in the execing
1345 following_inferior
= execing_inferior
;
1347 /* The old description may no longer be fit for the new image.
1348 E.g, a 64-bit process exec'ed a 32-bit process. Clear the
1349 old description; we'll read a new one below. No need to do
1350 this on "follow-exec-mode new", as the old inferior stays
1351 around (its description is later cleared/refetched on
1353 target_clear_description ();
1356 target_follow_exec (following_inferior
, ptid
, exec_file_target
);
1358 gdb_assert (current_inferior () == following_inferior
);
1359 gdb_assert (current_program_space
== following_inferior
->pspace
);
1361 /* Attempt to open the exec file. SYMFILE_DEFER_BP_RESET is used
1362 because the proper displacement for a PIE (Position Independent
1363 Executable) main symbol file will only be computed by
1364 solib_create_inferior_hook below. breakpoint_re_set would fail
1365 to insert the breakpoints with the zero displacement. */
1366 try_open_exec_file (exec_file_host
.get (), following_inferior
,
1367 SYMFILE_DEFER_BP_RESET
);
1369 /* If the target can specify a description, read it. Must do this
1370 after flipping to the new executable (because the target supplied
1371 description must be compatible with the executable's
1372 architecture, and the old executable may e.g., be 32-bit, while
1373 the new one 64-bit), and before anything involving memory or
1375 target_find_description ();
1377 gdb::observers::inferior_execd
.notify (execing_inferior
, following_inferior
);
1379 breakpoint_re_set ();
1381 /* Reinsert all breakpoints. (Those which were symbolic have
1382 been reset to the proper address in the new a.out, thanks
1383 to symbol_file_command...). */
1384 insert_breakpoints ();
1386 /* The next resume of this inferior should bring it to the shlib
1387 startup breakpoints. (If the user had also set bp's on
1388 "main" from the old (parent) process, then they'll auto-
1389 matically get reset there in the new process.). */
1392 /* The chain of threads that need to do a step-over operation to get
1393 past e.g., a breakpoint. What technique is used to step over the
1394 breakpoint/watchpoint does not matter -- all threads end up in the
1395 same queue, to maintain rough temporal order of execution, in order
1396 to avoid starvation, otherwise, we could e.g., find ourselves
1397 constantly stepping the same couple threads past their breakpoints
1398 over and over, if the single-step finish fast enough. */
1399 thread_step_over_list global_thread_step_over_list
;
1401 /* Bit flags indicating what the thread needs to step over. */
1403 enum step_over_what_flag
1405 /* Step over a breakpoint. */
1406 STEP_OVER_BREAKPOINT
= 1,
1408 /* Step past a non-continuable watchpoint, in order to let the
1409 instruction execute so we can evaluate the watchpoint
1411 STEP_OVER_WATCHPOINT
= 2
1413 DEF_ENUM_FLAGS_TYPE (enum step_over_what_flag
, step_over_what
);
1415 /* Info about an instruction that is being stepped over. */
1417 struct step_over_info
1419 /* If we're stepping past a breakpoint, this is the address space
1420 and address of the instruction the breakpoint is set at. We'll
1421 skip inserting all breakpoints here. Valid iff ASPACE is
1423 const address_space
*aspace
= nullptr;
1424 CORE_ADDR address
= 0;
1426 /* The instruction being stepped over triggers a nonsteppable
1427 watchpoint. If true, we'll skip inserting watchpoints. */
1428 int nonsteppable_watchpoint_p
= 0;
1430 /* The thread's global number. */
1434 /* The step-over info of the location that is being stepped over.
1436 Note that with async/breakpoint always-inserted mode, a user might
1437 set a new breakpoint/watchpoint/etc. exactly while a breakpoint is
1438 being stepped over. As setting a new breakpoint inserts all
1439 breakpoints, we need to make sure the breakpoint being stepped over
1440 isn't inserted then. We do that by only clearing the step-over
1441 info when the step-over is actually finished (or aborted).
1443 Presently GDB can only step over one breakpoint at any given time.
1444 Given threads that can't run code in the same address space as the
1445 breakpoint's can't really miss the breakpoint, GDB could be taught
1446 to step-over at most one breakpoint per address space (so this info
1447 could move to the address space object if/when GDB is extended).
1448 The set of breakpoints being stepped over will normally be much
1449 smaller than the set of all breakpoints, so a flag in the
1450 breakpoint location structure would be wasteful. A separate list
1451 also saves complexity and run-time, as otherwise we'd have to go
1452 through all breakpoint locations clearing their flag whenever we
1453 start a new sequence. Similar considerations weigh against storing
1454 this info in the thread object. Plus, not all step overs actually
1455 have breakpoint locations -- e.g., stepping past a single-step
1456 breakpoint, or stepping to complete a non-continuable
1458 static struct step_over_info step_over_info
;
1460 /* Record the address of the breakpoint/instruction we're currently
1462 N.B. We record the aspace and address now, instead of say just the thread,
1463 because when we need the info later the thread may be running. */
1466 set_step_over_info (const address_space
*aspace
, CORE_ADDR address
,
1467 int nonsteppable_watchpoint_p
,
1470 step_over_info
.aspace
= aspace
;
1471 step_over_info
.address
= address
;
1472 step_over_info
.nonsteppable_watchpoint_p
= nonsteppable_watchpoint_p
;
1473 step_over_info
.thread
= thread
;
1476 /* Called when we're not longer stepping over a breakpoint / an
1477 instruction, so all breakpoints are free to be (re)inserted. */
1480 clear_step_over_info (void)
1482 infrun_debug_printf ("clearing step over info");
1483 step_over_info
.aspace
= nullptr;
1484 step_over_info
.address
= 0;
1485 step_over_info
.nonsteppable_watchpoint_p
= 0;
1486 step_over_info
.thread
= -1;
1492 stepping_past_instruction_at (struct address_space
*aspace
,
1495 return (step_over_info
.aspace
!= nullptr
1496 && breakpoint_address_match (aspace
, address
,
1497 step_over_info
.aspace
,
1498 step_over_info
.address
));
1504 thread_is_stepping_over_breakpoint (int thread
)
1506 return (step_over_info
.thread
!= -1
1507 && thread
== step_over_info
.thread
);
1513 stepping_past_nonsteppable_watchpoint (void)
1515 return step_over_info
.nonsteppable_watchpoint_p
;
1518 /* Returns true if step-over info is valid. */
1521 step_over_info_valid_p (void)
1523 return (step_over_info
.aspace
!= nullptr
1524 || stepping_past_nonsteppable_watchpoint ());
1528 /* Displaced stepping. */
1530 /* In non-stop debugging mode, we must take special care to manage
1531 breakpoints properly; in particular, the traditional strategy for
1532 stepping a thread past a breakpoint it has hit is unsuitable.
1533 'Displaced stepping' is a tactic for stepping one thread past a
1534 breakpoint it has hit while ensuring that other threads running
1535 concurrently will hit the breakpoint as they should.
1537 The traditional way to step a thread T off a breakpoint in a
1538 multi-threaded program in all-stop mode is as follows:
1540 a0) Initially, all threads are stopped, and breakpoints are not
1542 a1) We single-step T, leaving breakpoints uninserted.
1543 a2) We insert breakpoints, and resume all threads.
1545 In non-stop debugging, however, this strategy is unsuitable: we
1546 don't want to have to stop all threads in the system in order to
1547 continue or step T past a breakpoint. Instead, we use displaced
1550 n0) Initially, T is stopped, other threads are running, and
1551 breakpoints are inserted.
1552 n1) We copy the instruction "under" the breakpoint to a separate
1553 location, outside the main code stream, making any adjustments
1554 to the instruction, register, and memory state as directed by
1556 n2) We single-step T over the instruction at its new location.
1557 n3) We adjust the resulting register and memory state as directed
1558 by T's architecture. This includes resetting T's PC to point
1559 back into the main instruction stream.
1562 This approach depends on the following gdbarch methods:
1564 - gdbarch_max_insn_length and gdbarch_displaced_step_location
1565 indicate where to copy the instruction, and how much space must
1566 be reserved there. We use these in step n1.
   - gdbarch_displaced_step_copy_insn copies an instruction to a new
1569 address, and makes any necessary adjustments to the instruction,
1570 register contents, and memory. We use this in step n1.
1572 - gdbarch_displaced_step_fixup adjusts registers and memory after
1573 we have successfully single-stepped the instruction, to yield the
1574 same effect the instruction would have had if we had executed it
1575 at its original address. We use this in step n3.
1577 The gdbarch_displaced_step_copy_insn and
1578 gdbarch_displaced_step_fixup functions must be written so that
1579 copying an instruction with gdbarch_displaced_step_copy_insn,
1580 single-stepping across the copied instruction, and then applying
1581 gdbarch_displaced_insn_fixup should have the same effects on the
1582 thread's memory and registers as stepping the instruction in place
1583 would have. Exactly which responsibilities fall to the copy and
1584 which fall to the fixup is up to the author of those functions.
1586 See the comments in gdbarch.sh for details.
1588 Note that displaced stepping and software single-step cannot
1589 currently be used in combination, although with some care I think
1590 they could be made to. Software single-step works by placing
1591 breakpoints on all possible subsequent instructions; if the
1592 displaced instruction is a PC-relative jump, those breakpoints
1593 could fall in very strange places --- on pages that aren't
1594 executable, or at addresses that are not proper instruction
1595 boundaries. (We do generally let other threads run while we wait
1596 to hit the software single-step breakpoint, and they might
1597 encounter such a corrupted instruction.) One way to work around
1598 this would be to have gdbarch_displaced_step_copy_insn fully
1599 simulate the effect of PC-relative instructions (and return NULL)
1600 on architectures that use software single-stepping.
1602 In non-stop mode, we can have independent and simultaneous step
1603 requests, so more than one thread may need to simultaneously step
1604 over a breakpoint. The current implementation assumes there is
1605 only one scratch space per process. In this case, we have to
1606 serialize access to the scratch space. If thread A wants to step
1607 over a breakpoint, but we are currently waiting for some other
1608 thread to complete a displaced step, we leave thread A stopped and
1609 place it in the displaced_step_request_queue. Whenever a displaced
1610 step finishes, we pick the next thread in the queue and start a new
1611 displaced step operation on it. See displaced_step_prepare and
1612 displaced_step_finish for details. */
1614 /* Return true if THREAD is doing a displaced step. */
1617 displaced_step_in_progress_thread (thread_info
*thread
)
1619 gdb_assert (thread
!= nullptr);
1621 return thread
->displaced_step_state
.in_progress ();
1624 /* Return true if INF has a thread doing a displaced step. */
1627 displaced_step_in_progress (inferior
*inf
)
1629 return inf
->displaced_step_state
.in_progress_count
> 0;
1632 /* Return true if any thread is doing a displaced step. */
1635 displaced_step_in_progress_any_thread ()
1637 for (inferior
*inf
: all_non_exited_inferiors ())
1639 if (displaced_step_in_progress (inf
))
1647 infrun_inferior_exit (struct inferior
*inf
)
1649 inf
->displaced_step_state
.reset ();
1650 inf
->thread_waiting_for_vfork_done
= nullptr;
1654 infrun_inferior_execd (inferior
*exec_inf
, inferior
*follow_inf
)
1656 /* If some threads where was doing a displaced step in this inferior at the
1657 moment of the exec, they no longer exist. Even if the exec'ing thread
1658 doing a displaced step, we don't want to to any fixup nor restore displaced
1659 stepping buffer bytes. */
1660 follow_inf
->displaced_step_state
.reset ();
1662 for (thread_info
*thread
: follow_inf
->threads ())
1663 thread
->displaced_step_state
.reset ();
1665 /* Since an in-line step is done with everything else stopped, if there was
1666 one in progress at the time of the exec, it must have been the exec'ing
1668 clear_step_over_info ();
1670 follow_inf
->thread_waiting_for_vfork_done
= nullptr;
1673 /* If ON, and the architecture supports it, GDB will use displaced
1674 stepping to step over breakpoints. If OFF, or if the architecture
1675 doesn't support it, GDB will instead use the traditional
1676 hold-and-step approach. If AUTO (which is the default), GDB will
1677 decide which technique to use to step over breakpoints depending on
1678 whether the target works in a non-stop way (see use_displaced_stepping). */
1680 static enum auto_boolean can_use_displaced_stepping
= AUTO_BOOLEAN_AUTO
;
1683 show_can_use_displaced_stepping (struct ui_file
*file
, int from_tty
,
1684 struct cmd_list_element
*c
,
1687 if (can_use_displaced_stepping
== AUTO_BOOLEAN_AUTO
)
1689 _("Debugger's willingness to use displaced stepping "
1690 "to step over breakpoints is %s (currently %s).\n"),
1691 value
, target_is_non_stop_p () ? "on" : "off");
1694 _("Debugger's willingness to use displaced stepping "
1695 "to step over breakpoints is %s.\n"), value
);
1698 /* Return true if the gdbarch implements the required methods to use
1699 displaced stepping. */
1702 gdbarch_supports_displaced_stepping (gdbarch
*arch
)
1704 /* Only check for the presence of `prepare`. The gdbarch verification ensures
1705 that if `prepare` is provided, so is `finish`. */
1706 return gdbarch_displaced_step_prepare_p (arch
);
1709 /* Return non-zero if displaced stepping can/should be used to step
1710 over breakpoints of thread TP. */
1713 use_displaced_stepping (thread_info
*tp
)
1715 /* If the user disabled it explicitly, don't use displaced stepping. */
1716 if (can_use_displaced_stepping
== AUTO_BOOLEAN_FALSE
)
1719 /* If "auto", only use displaced stepping if the target operates in a non-stop
1721 if (can_use_displaced_stepping
== AUTO_BOOLEAN_AUTO
1722 && !target_is_non_stop_p ())
1725 gdbarch
*gdbarch
= get_thread_regcache (tp
)->arch ();
1727 /* If the architecture doesn't implement displaced stepping, don't use
1729 if (!gdbarch_supports_displaced_stepping (gdbarch
))
1732 /* If recording, don't use displaced stepping. */
1733 if (find_record_target () != nullptr)
1736 /* If displaced stepping failed before for this inferior, don't bother trying
1738 if (tp
->inf
->displaced_step_state
.failed_before
)
1744 /* Simple function wrapper around displaced_step_thread_state::reset. */
1747 displaced_step_reset (displaced_step_thread_state
*displaced
)
1749 displaced
->reset ();
1752 /* A cleanup that wraps displaced_step_reset. We use this instead of, say,
1753 SCOPE_EXIT, because it needs to be discardable with "cleanup.release ()". */
1755 using displaced_step_reset_cleanup
= FORWARD_SCOPE_EXIT (displaced_step_reset
);
1757 /* Prepare to single-step, using displaced stepping.
1759 Note that we cannot use displaced stepping when we have a signal to
1760 deliver. If we have a signal to deliver and an instruction to step
1761 over, then after the step, there will be no indication from the
1762 target whether the thread entered a signal handler or ignored the
1763 signal and stepped over the instruction successfully --- both cases
1764 result in a simple SIGTRAP. In the first case we mustn't do a
1765 fixup, and in the second case we must --- but we can't tell which.
1766 Comments in the code for 'random signals' in handle_inferior_event
1767 explain how we handle this case instead.
1769 Returns DISPLACED_STEP_PREPARE_STATUS_OK if preparing was successful -- this
1770 thread is going to be stepped now; DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE
1771 if displaced stepping this thread got queued; or
1772 DISPLACED_STEP_PREPARE_STATUS_CANT if this instruction can't be displaced
1775 static displaced_step_prepare_status
1776 displaced_step_prepare_throw (thread_info
*tp
)
1778 regcache
*regcache
= get_thread_regcache (tp
);
1779 struct gdbarch
*gdbarch
= regcache
->arch ();
1780 displaced_step_thread_state
&disp_step_thread_state
1781 = tp
->displaced_step_state
;
1783 /* We should never reach this function if the architecture does not
1784 support displaced stepping. */
1785 gdb_assert (gdbarch_supports_displaced_stepping (gdbarch
));
1787 /* Nor if the thread isn't meant to step over a breakpoint. */
1788 gdb_assert (tp
->control
.trap_expected
);
1790 /* Disable range stepping while executing in the scratch pad. We
1791 want a single-step even if executing the displaced instruction in
1792 the scratch buffer lands within the stepping range (e.g., a
1794 tp
->control
.may_range_step
= 0;
1796 /* We are about to start a displaced step for this thread. If one is already
1797 in progress, something's wrong. */
1798 gdb_assert (!disp_step_thread_state
.in_progress ());
1800 if (tp
->inf
->displaced_step_state
.unavailable
)
1802 /* The gdbarch tells us it's not worth asking to try a prepare because
1803 it is likely that it will return unavailable, so don't bother asking. */
1805 displaced_debug_printf ("deferring step of %s",
1806 tp
->ptid
.to_string ().c_str ());
1808 global_thread_step_over_chain_enqueue (tp
);
1809 return DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE
;
1812 displaced_debug_printf ("displaced-stepping %s now",
1813 tp
->ptid
.to_string ().c_str ());
1815 scoped_restore_current_thread restore_thread
;
1817 switch_to_thread (tp
);
1819 CORE_ADDR original_pc
= regcache_read_pc (regcache
);
1820 CORE_ADDR displaced_pc
;
1822 /* Display the instruction we are going to displaced step. */
1823 if (debug_displaced
)
1825 string_file tmp_stream
;
1826 int dislen
= gdb_print_insn (gdbarch
, original_pc
, &tmp_stream
,
1831 gdb::byte_vector
insn_buf (dislen
);
1832 read_memory (original_pc
, insn_buf
.data (), insn_buf
.size ());
1834 std::string insn_bytes
= bytes_to_string (insn_buf
);
1836 displaced_debug_printf ("original insn %s: %s \t %s",
1837 paddress (gdbarch
, original_pc
),
1838 insn_bytes
.c_str (),
1839 tmp_stream
.string ().c_str ());
1842 displaced_debug_printf ("original insn %s: invalid length: %d",
1843 paddress (gdbarch
, original_pc
), dislen
);
1846 displaced_step_prepare_status status
1847 = gdbarch_displaced_step_prepare (gdbarch
, tp
, displaced_pc
);
1849 if (status
== DISPLACED_STEP_PREPARE_STATUS_CANT
)
1851 displaced_debug_printf ("failed to prepare (%s)",
1852 tp
->ptid
.to_string ().c_str ());
1854 return DISPLACED_STEP_PREPARE_STATUS_CANT
;
1856 else if (status
== DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE
)
1858 /* Not enough displaced stepping resources available, defer this
1859 request by placing it the queue. */
1861 displaced_debug_printf ("not enough resources available, "
1862 "deferring step of %s",
1863 tp
->ptid
.to_string ().c_str ());
1865 global_thread_step_over_chain_enqueue (tp
);
1867 return DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE
;
1870 gdb_assert (status
== DISPLACED_STEP_PREPARE_STATUS_OK
);
1872 /* Save the information we need to fix things up if the step
1874 disp_step_thread_state
.set (gdbarch
);
1876 tp
->inf
->displaced_step_state
.in_progress_count
++;
1878 displaced_debug_printf ("prepared successfully thread=%s, "
1879 "original_pc=%s, displaced_pc=%s",
1880 tp
->ptid
.to_string ().c_str (),
1881 paddress (gdbarch
, original_pc
),
1882 paddress (gdbarch
, displaced_pc
));
1884 /* Display the new displaced instruction(s). */
1885 if (debug_displaced
)
1887 string_file tmp_stream
;
1888 CORE_ADDR addr
= displaced_pc
;
1890 /* If displaced stepping is going to use h/w single step then we know
1891 that the replacement instruction can only be a single instruction,
1892 in that case set the end address at the next byte.
1894 Otherwise the displaced stepping copy instruction routine could
1895 have generated multiple instructions, and all we know is that they
1896 must fit within the LEN bytes of the buffer. */
1898 = addr
+ (gdbarch_displaced_step_hw_singlestep (gdbarch
)
1899 ? 1 : gdbarch_displaced_step_buffer_length (gdbarch
));
1903 int dislen
= gdb_print_insn (gdbarch
, addr
, &tmp_stream
, nullptr);
1906 displaced_debug_printf
1907 ("replacement insn %s: invalid length: %d",
1908 paddress (gdbarch
, addr
), dislen
);
1912 gdb::byte_vector
insn_buf (dislen
);
1913 read_memory (addr
, insn_buf
.data (), insn_buf
.size ());
1915 std::string insn_bytes
= bytes_to_string (insn_buf
);
1916 std::string insn_str
= tmp_stream
.release ();
1917 displaced_debug_printf ("replacement insn %s: %s \t %s",
1918 paddress (gdbarch
, addr
),
1919 insn_bytes
.c_str (),
1925 return DISPLACED_STEP_PREPARE_STATUS_OK
;
1928 /* Wrapper for displaced_step_prepare_throw that disabled further
1929 attempts at displaced stepping if we get a memory error. */
1931 static displaced_step_prepare_status
1932 displaced_step_prepare (thread_info
*thread
)
1934 displaced_step_prepare_status status
1935 = DISPLACED_STEP_PREPARE_STATUS_CANT
;
1939 status
= displaced_step_prepare_throw (thread
);
1941 catch (const gdb_exception_error
&ex
)
1943 if (ex
.error
!= MEMORY_ERROR
1944 && ex
.error
!= NOT_SUPPORTED_ERROR
)
1947 infrun_debug_printf ("caught exception, disabling displaced stepping: %s",
1950 /* Be verbose if "set displaced-stepping" is "on", silent if
1952 if (can_use_displaced_stepping
== AUTO_BOOLEAN_TRUE
)
1954 warning (_("disabling displaced stepping: %s"),
1958 /* Disable further displaced stepping attempts. */
1959 thread
->inf
->displaced_step_state
.failed_before
= 1;
1965 /* True if any thread of TARGET that matches RESUME_PTID requires
1966 target_thread_events enabled. This assumes TARGET does not support
1967 target thread options. */
1970 any_thread_needs_target_thread_events (process_stratum_target
*target
,
1973 for (thread_info
*tp
: all_non_exited_threads (target
, resume_ptid
))
1974 if (displaced_step_in_progress_thread (tp
)
1975 || schedlock_applies (tp
)
1976 || tp
->thread_fsm () != nullptr)
1981 /* Maybe disable thread-{cloned,created,exited} event reporting after
1982 a step-over (either in-line or displaced) finishes. */
/* NOTE(review): corrupted listing -- braces and some lines are
   missing; verify against upstream gdb/infrun.c.  */
1985 update_thread_events_after_step_over (thread_info
*event_thread
,
1986 const target_waitstatus
&event_status
)
/* Under scheduler locking, keep event reporting as-is.  */
1988 if (schedlock_applies (event_thread
))
1990 /* If scheduler-locking applies, continue reporting
1991 thread-created/thread-cloned events. */
1994 else if (target_supports_set_thread_options (0))
1996 /* We can control per-thread options. Disable events for the
1997 event thread, unless the thread is gone. */
1998 if (event_status
.kind () != TARGET_WAITKIND_THREAD_EXITED
)
1999 event_thread
->set_thread_options (0);
2003 /* We can only control the target-wide target_thread_events
2004 setting. Disable it, but only if other threads in the target
2005 don't need it enabled. */
2006 process_stratum_target
*target
= event_thread
->inf
->process_target ();
2007 if (!any_thread_needs_target_thread_events (target
, minus_one_ptid
))
2008 target_thread_events (false);
2012 /* If we displaced stepped an instruction successfully, adjust registers and
2013 memory to yield the same effect the instruction would have had if we had
2014 executed it at its original address, and return
2015 DISPLACED_STEP_FINISH_STATUS_OK. If the instruction didn't complete,
2016 relocate the PC and return DISPLACED_STEP_FINISH_STATUS_NOT_EXECUTED.
2018 If the thread wasn't displaced stepping, return
2019 DISPLACED_STEP_FINISH_STATUS_OK as well. */
/* NOTE(review): corrupted listing -- embedded integers are original
   file line numbers; braces and several lines (including the final
   `return status;', presumably) are missing.  Verify against upstream
   gdb/infrun.c.  */
2021 static displaced_step_finish_status
2022 displaced_step_finish (thread_info
*event_thread
,
2023 const target_waitstatus
&event_status
)
2025 /* Check whether the parent is displaced stepping. */
2026 inferior
*parent_inf
= event_thread
->inf
;
2028 /* If this was a fork/vfork/clone, this event indicates that the
2029 displaced stepping of the syscall instruction has been done, so
2030 we perform cleanup for parent here. Also note that this
2031 operation also cleans up the child for vfork, because their pages
2034 /* If this is a fork (child gets its own address space copy) and
2035 some displaced step buffers were in use at the time of the fork,
2036 restore the displaced step buffer bytes in the child process.
2038 Architectures which support displaced stepping and fork events
2039 must supply an implementation of
2040 gdbarch_displaced_step_restore_all_in_ptid. This is not enforced
2041 during gdbarch validation to support architectures which support
2042 displaced stepping but not forks. */
2043 if (event_status
.kind () == TARGET_WAITKIND_FORKED
)
2045 struct regcache
*parent_regcache
= get_thread_regcache (event_thread
);
2046 struct gdbarch
*gdbarch
= parent_regcache
->arch ();
2048 if (gdbarch_supports_displaced_stepping (gdbarch
))
2049 gdbarch_displaced_step_restore_all_in_ptid
2050 (gdbarch
, parent_inf
, event_status
.child_ptid ());
2053 displaced_step_thread_state
*displaced
= &event_thread
->displaced_step_state
;
2055 /* Was this thread performing a displaced step? */
2056 if (!displaced
->in_progress ())
2057 return DISPLACED_STEP_FINISH_STATUS_OK
;
2059 update_thread_events_after_step_over (event_thread
, event_status
);
/* Bookkeeping: one fewer displaced step in flight for this inferior.  */
2061 gdb_assert (event_thread
->inf
->displaced_step_state
.in_progress_count
> 0);
2062 event_thread
->inf
->displaced_step_state
.in_progress_count
--;
2064 /* Fixup may need to read memory/registers. Switch to the thread
2065 that we're fixing up. Also, target_stopped_by_watchpoint checks
2066 the current thread, and displaced_step_restore performs ptid-dependent
2067 memory accesses using current_inferior(). */
2068 switch_to_thread (event_thread
);
2070 displaced_step_reset_cleanup
cleanup (displaced
);
2072 /* Do the fixup, and release the resources acquired to do the displaced
   [...]  */
2074 displaced_step_finish_status status
2075 = gdbarch_displaced_step_finish (displaced
->get_original_gdbarch (),
2076 event_thread
, event_status
);
2078 if (event_status
.kind () == TARGET_WAITKIND_FORKED
2079 || event_status
.kind () == TARGET_WAITKIND_VFORKED
2080 || event_status
.kind () == TARGET_WAITKIND_THREAD_CLONED
)
2082 /* Since the vfork/fork/clone syscall instruction was executed
2083 in the scratchpad, the child's PC is also within the
2084 scratchpad. Set the child's PC to the parent's PC value,
2085 which has already been fixed up. Note: we use the parent's
2086 aspace here, although we're touching the child, because the
2087 child hasn't been added to the inferior list yet at this
   [...]  */
2090 struct regcache
*parent_regcache
= get_thread_regcache (event_thread
);
2091 struct gdbarch
*gdbarch
= parent_regcache
->arch ();
2092 struct regcache
*child_regcache
2093 = get_thread_arch_regcache (parent_inf
, event_status
.child_ptid (),
2095 /* Read PC value of parent. */
2096 CORE_ADDR parent_pc
= regcache_read_pc (parent_regcache
);
2098 displaced_debug_printf ("write child pc from %s to %s",
2100 regcache_read_pc (child_regcache
)),
2101 paddress (gdbarch
, parent_pc
));
2103 regcache_write_pc (child_regcache
, parent_pc
);
2109 /* Data to be passed around while handling an event. This data is
2110 discarded between events. */
/* NOTE(review): corrupted listing -- embedded integers are original
   file line numbers; some member declarations, initializer-list lines
   and comments are missing.  Verify against upstream gdb/infrun.c.  */
2111 struct execution_control_state
2113 explicit execution_control_state (thread_info
*thr
= nullptr)
2114 : ptid (thr
== nullptr ? null_ptid
: thr
->ptid
),
/* Presumably the target that reported the event; the original comment
   lines for this member are missing -- confirm upstream.  */
2119 process_stratum_target
*target
= nullptr;
2121 /* The thread that got the event, if this was a thread event; NULL
   [...]  */
2123 struct thread_info
*event_thread
;
/* The wait status for the event being handled.  */
2125 struct target_waitstatus ws
;
/* Cached info about the function the stop PC falls in; filled in
   lazily (stop_func_filled_in flags it).  */
2126 int stop_func_filled_in
= 0;
2127 CORE_ADDR stop_func_alt_start
= 0;
2128 CORE_ADDR stop_func_start
= 0;
2129 CORE_ADDR stop_func_end
= 0;
2130 const char *stop_func_name
= nullptr;
2131 int wait_some_more
= 0;
2133 /* True if the event thread hit the single-step breakpoint of
2134 another thread. Thus the event doesn't cause a stop, the thread
2135 needs to be single-stepped past the single-step breakpoint before
2136 we can switch back to the original stepping thread. */
2137 int hit_singlestep_breakpoint
= 0;
2140 static void keep_going_pass_signal (struct execution_control_state
*ecs
);
2141 static void prepare_to_wait (struct execution_control_state
*ecs
);
2142 static bool keep_going_stepped_thread (struct thread_info
*tp
);
2143 static step_over_what
thread_still_needs_step_over (struct thread_info
*tp
);
2145 /* Are there any pending step-over requests? If so, run all we can
2146 now and return true. Otherwise, return false. */
/* NOTE(review): corrupted listing -- the return type, braces, `return'
   statements and the SCOPE_EXIT wrapper lines are missing.  Verify
   against upstream gdb/infrun.c.  */
2149 start_step_over (void)
2151 INFRUN_SCOPED_DEBUG_ENTER_EXIT
;
2153 /* Don't start a new step-over if we already have an in-line
2154 step-over operation ongoing. */
2155 if (step_over_info_valid_p ())
2158 /* Steal the global thread step over chain. As we try to initiate displaced
2159 steps, threads will be enqueued in the global chain if no buffers are
2160 available. If we iterated on the global chain directly, we might iterate
   [...]  */
2162 thread_step_over_list threads_to_step
2163 = std::move (global_thread_step_over_list
);
2165 infrun_debug_printf ("stealing global queue of threads to step, length = %d",
2166 thread_step_over_chain_length (threads_to_step
));
2168 bool started
= false;
2170 /* On scope exit (whatever the reason, return or exception), if there are
2171 threads left in the THREADS_TO_STEP chain, put back these threads in the
   [...]  */
2175 if (threads_to_step
.empty ())
2176 infrun_debug_printf ("step-over queue now empty");
2179 infrun_debug_printf ("putting back %d threads to step in global queue",
2180 thread_step_over_chain_length (threads_to_step
));
2182 global_thread_step_over_chain_enqueue_chain
2183 (std::move (threads_to_step
));
/* A "safe" range is used because threads are erased from the list
   while iterating (see the erase below).  */
2187 thread_step_over_list_safe_range range
2188 = make_thread_step_over_list_safe_range (threads_to_step
);
2190 for (thread_info
*tp
: range
)
2192 step_over_what step_what
;
2193 int must_be_in_line
;
2195 gdb_assert (!tp
->stop_requested
);
2197 if (tp
->inf
->displaced_step_state
.unavailable
)
2199 /* The arch told us to not even try preparing another displaced step
2200 for this inferior. Just leave the thread in THREADS_TO_STEP, it
2201 will get moved to the global chain on scope exit. */
2205 if (tp
->inf
->thread_waiting_for_vfork_done
!= nullptr)
2207 /* When we stop all threads, handling a vfork, any thread in the step
2208 over chain remains there. A user could also try to continue a
2209 thread stopped at a breakpoint while another thread is waiting for
2210 a vfork-done event. In any case, we don't want to start a step
2215 /* Remove thread from the THREADS_TO_STEP chain. If anything goes wrong
2216 while we try to prepare the displaced step, we don't add it back to
2217 the global step over chain. This is to avoid a thread staying in the
2218 step over chain indefinitely if something goes wrong when resuming it
2219 If the error is intermittent and it still needs a step over, it will
2220 get enqueued again when we try to resume it normally. */
2221 threads_to_step
.erase (threads_to_step
.iterator_to (*tp
));
2223 step_what
= thread_still_needs_step_over (tp
);
2224 must_be_in_line
= ((step_what
& STEP_OVER_WATCHPOINT
)
2225 || ((step_what
& STEP_OVER_BREAKPOINT
)
2226 && !use_displaced_stepping (tp
)));
2228 /* We currently stop all threads of all processes to step-over
2229 in-line. If we need to start a new in-line step-over, let
2230 any pending displaced steps finish first. */
2231 if (must_be_in_line
&& displaced_step_in_progress_any_thread ())
2233 global_thread_step_over_chain_enqueue (tp
);
2237 if (tp
->control
.trap_expected
2239 || tp
->executing ())
2241 internal_error ("[%s] has inconsistent state: "
2242 "trap_expected=%d, resumed=%d, executing=%d\n",
2243 tp
->ptid
.to_string ().c_str (),
2244 tp
->control
.trap_expected
,
2249 infrun_debug_printf ("resuming [%s] for step-over",
2250 tp
->ptid
.to_string ().c_str ());
2252 /* keep_going_pass_signal skips the step-over if the breakpoint
2253 is no longer inserted. In all-stop, we want to keep looking
2254 for a thread that needs a step-over instead of resuming TP,
2255 because we wouldn't be able to resume anything else until the
2256 target stops again. In non-stop, the resume always resumes
2257 only TP, so it's OK to let the thread resume freely. */
2258 if (!target_is_non_stop_p () && !step_what
)
2261 switch_to_thread (tp
);
2262 execution_control_state
ecs (tp
);
2263 keep_going_pass_signal (&ecs
);
2265 if (!ecs
.wait_some_more
)
2266 error (_("Command aborted."));
2268 /* If the thread's step over could not be initiated because no buffers
2269 were available, it was re-added to the global step over chain. */
2272 infrun_debug_printf ("[%s] was resumed.",
2273 tp
->ptid
.to_string ().c_str ());
2274 gdb_assert (!thread_is_in_step_over_chain (tp
));
2278 infrun_debug_printf ("[%s] was NOT resumed.",
2279 tp
->ptid
.to_string ().c_str ());
2280 gdb_assert (thread_is_in_step_over_chain (tp
));
2283 /* If we started a new in-line step-over, we're done. */
2284 if (step_over_info_valid_p ())
2286 gdb_assert (tp
->control
.trap_expected
);
2291 if (!target_is_non_stop_p ())
2293 /* On all-stop, shouldn't have resumed unless we needed a
   [...]  */
2295 gdb_assert (tp
->control
.trap_expected
2296 || tp
->step_after_step_resume_breakpoint
);
2298 /* With remote targets (at least), in all-stop, we can't
2299 issue any further remote commands until the program stops
2305 /* Either the thread no longer needed a step-over, or a new
2306 displaced stepping sequence started. Even in the latter
2307 case, continue looking. Maybe we can also start another
2308 displaced step on a thread of other process. */
2314 /* Update global variables holding ptids to hold NEW_PTID if they were
2315 holding OLD_PTID. */
/* NOTE(review): corrupted listing -- the return type line and braces
   are missing; verify against upstream gdb/infrun.c.  */
2317 infrun_thread_ptid_changed (process_stratum_target
*target
,
2318 ptid_t old_ptid
, ptid_t new_ptid
)
/* Only update inferior_ptid if it belonged to TARGET.  */
2320 if (inferior_ptid
== old_ptid
2321 && current_inferior ()->process_target () == target
)
2322 inferior_ptid
= new_ptid
;
/* The valid values for the scheduler-locking mode, the table holding
   them (scheduler_enums), and the current mode (scheduler_mode, which
   defaults to schedlock_replay here).  NOTE(review): the initializer
   list of scheduler_enums is missing from this corrupted listing.  */
2327 static const char schedlock_off
[] = "off";
2328 static const char schedlock_on
[] = "on";
2329 static const char schedlock_step
[] = "step";
2330 static const char schedlock_replay
[] = "replay";
2331 static const char *const scheduler_enums
[] = {
2338 static const char *scheduler_mode
= schedlock_replay
;
/* Show-hook printing the current scheduler locking mode.
   NOTE(review): the return type, braces and the head of the print
   call are missing from this corrupted listing.  */
2340 show_scheduler_mode (struct ui_file
*file
, int from_tty
,
2341 struct cmd_list_element
*c
, const char *value
)
2344 _("Mode for locking scheduler "
2345 "during execution is \"%s\".\n"),
/* Set-hook for the scheduler locking mode: if the target cannot lock
   the scheduler, reset the mode to "off" and report an error.
   NOTE(review): the return type and braces are missing from this
   corrupted listing.  */
2350 set_schedlock_func (const char *args
, int from_tty
, struct cmd_list_element
*c
)
2352 if (!target_can_lock_scheduler ())
2354 scheduler_mode
= schedlock_off
;
2355 error (_("Target '%s' cannot support this command."),
2356 target_shortname ());
2360 /* True if execution commands resume all threads of all processes by
2361 default; otherwise, resume only threads of the current inferior
   [...]  */
2363 bool sched_multi
= false;
2365 /* Try to setup for software single stepping. Return true if target_resume()
2366 should use hardware single step.
2368 GDBARCH the current gdbarch. */
/* NOTE(review): the return type, braces and the final `return
   hw_step;' are missing from this corrupted listing.  */
2371 maybe_software_singlestep (struct gdbarch
*gdbarch
)
2373 bool hw_step
= true;
/* Software single-step only applies when executing forward and the
   arch implements gdbarch_software_single_step.  */
2375 if (execution_direction
== EXEC_FORWARD
2376 && gdbarch_software_single_step_p (gdbarch
))
2377 hw_step
= !insert_single_step_breakpoints (gdbarch
);
/* Compute the set of threads a user-visible resume applies to, based
   on non-stop mode, the scheduler-locking mode and sched_multi.
   NOTE(review): corrupted listing -- the function comment, return
   type, braces and the final `return resume_ptid;' are missing;
   verify against upstream gdb/infrun.c.  */
2385 user_visible_resume_ptid (int step
)
2391 /* With non-stop mode on, threads are always handled
   [...]  */
2393 resume_ptid
= inferior_ptid
;
2395 else if ((scheduler_mode
== schedlock_on
)
2396 || (scheduler_mode
== schedlock_step
&& step
))
2398 /* User-settable 'scheduler' mode requires solo thread
   [...]  */
2400 resume_ptid
= inferior_ptid
;
2402 else if ((scheduler_mode
== schedlock_replay
)
2403 && target_record_will_replay (minus_one_ptid
, execution_direction
))
2405 /* User-settable 'scheduler' mode requires solo thread resume in replay
   [...]  */
2407 resume_ptid
= inferior_ptid
;
2409 else if (!sched_multi
&& target_supports_multi_process ())
2411 /* Resume all threads of the current process (and none of other
   [...]  */
2413 resume_ptid
= ptid_t (inferior_ptid
.pid ());
2417 /* Resume all threads of all processes. */
2418 resume_ptid
= RESUME_ALL
;
/* Map RESUME_PTID to the process target it applies to: for a wildcard
   resume with sched_multi the true-branch value is missing from this
   corrupted listing (presumably nullptr, meaning "all targets" --
   confirm upstream); otherwise the current inferior's process
   target.  */
2426 process_stratum_target
*
2427 user_visible_resume_target (ptid_t resume_ptid
)
2429 return (resume_ptid
== minus_one_ptid
&& sched_multi
2431 : current_inferior ()->process_target ());
2434 /* Find a thread from the inferiors that we'll resume that is waiting
2435 for a vfork-done event. */
/* NOTE(review): corrupted listing -- braces and several lines are
   missing, including whatever guards the choice between the
   all-inferiors scan and the current-inferior check, and the final
   return (presumably nullptr).  Confirm against upstream
   gdb/infrun.c.  */
2437 static thread_info
*
2438 find_thread_waiting_for_vfork_done ()
2440 gdb_assert (!target_is_non_stop_p ());
2444 for (inferior
*inf
: all_non_exited_inferiors ())
2445 if (inf
->thread_waiting_for_vfork_done
!= nullptr)
2446 return inf
->thread_waiting_for_vfork_done
;
2450 inferior
*cur_inf
= current_inferior ();
2451 if (cur_inf
->thread_waiting_for_vfork_done
!= nullptr)
2452 return cur_inf
->thread_waiting_for_vfork_done
;
2457 /* Return a ptid representing the set of threads that we will resume,
2458 in the perspective of the target, assuming run control handling
2459 does not require leaving some threads stopped (e.g., stepping past
2460 breakpoint). USER_STEP indicates whether we're about to start the
2461 target for a stepping command. */
/* NOTE(review): corrupted listing -- the return type, braces and some
   lines (e.g. a null-check guard around the asserts and an early
   return for THR, presumably) are missing; verify against upstream
   gdb/infrun.c.  */
2464 internal_resume_ptid (int user_step
)
2466 /* In non-stop, we always control threads individually. Note that
2467 the target may always work in non-stop mode even with "set
2468 non-stop off", in which case user_visible_resume_ptid could
2469 return a wildcard ptid. */
2470 if (target_is_non_stop_p ())
2471 return inferior_ptid
;
2473 /* The rest of the function assumes non-stop==off and
2474 target-non-stop==off.
2476 If a thread is waiting for a vfork-done event, it means breakpoints are out
2477 for this inferior (well, program space in fact). We don't want to resume
2478 any thread other than the one waiting for vfork done, otherwise these other
2479 threads could miss breakpoints. So if a thread in the resumption set is
2480 waiting for a vfork-done event, resume only that thread.
2482 The resumption set width depends on whether schedule-multiple is on or off.
2484 Note that if the target_resume interface was more flexible, we could be
2485 smarter here when schedule-multiple is on. For example, imagine 3
2486 inferiors with 2 threads each (1.1, 1.2, 2.1, 2.2, 3.1 and 3.2). Threads
2487 2.1 and 3.2 are both waiting for a vfork-done event. Then we could ask the
2488 target(s) to resume:
2490 - All threads of inferior 1
2494 Since we don't have that flexibility (we can only pass one ptid), just
2495 resume the first thread waiting for a vfork-done event we find (e.g. thread
   [...]  */
2497 thread_info
*thr
= find_thread_waiting_for_vfork_done ();
2500 /* If we have a thread that is waiting for a vfork-done event,
2501 then we should have switched to it earlier. Calling
2502 target_resume with thread scope is only possible when the
2503 current thread matches the thread scope. */
2504 gdb_assert (thr
->ptid
== inferior_ptid
);
2505 gdb_assert (thr
->inf
->process_target ()
2506 == inferior_thread ()->inf
->process_target ());
2510 return user_visible_resume_ptid (user_step
);
2513 /* Wrapper for target_resume, that handles infrun-specific
   [...]  */
/* NOTE(review): corrupted listing -- embedded integers are original
   file line numbers; the return type, braces and a number of lines
   are missing.  Verify against upstream gdb/infrun.c.  */
2517 do_target_resume (ptid_t resume_ptid
, bool step
, enum gdb_signal sig
)
2519 struct thread_info
*tp
= inferior_thread ();
2521 gdb_assert (!tp
->stop_requested
);
2523 /* Install inferior's terminal modes. */
2524 target_terminal::inferior ();
2526 /* Avoid confusing the next resume, if the next stop/resume
2527 happens to apply to another thread. */
2528 tp
->set_stop_signal (GDB_SIGNAL_0
);
2530 /* Advise target which signals may be handled silently.
2532 If we have removed breakpoints because we are stepping over one
2533 in-line (in any thread), we need to receive all signals to avoid
2534 accidentally skipping a breakpoint during execution of a signal
2537 Likewise if we're displaced stepping, otherwise a trap for a
2538 breakpoint in a signal handler might be confused with the
2539 displaced step finishing. We don't make the displaced_step_finish
2540 step distinguish the cases instead, because:
2542 - a backtrace while stopped in the signal handler would show the
2543 scratch pad as frame older than the signal handler, instead of
2544 the real mainline code.
2546 - when the thread is later resumed, the signal handler would
2547 return to the scratch pad area, which would no longer be
   [...]  */
2549 if (step_over_info_valid_p ()
2550 || displaced_step_in_progress (tp
->inf
))
2551 target_pass_signals ({});
2553 target_pass_signals (signal_pass
);
2555 /* Request that the target report thread-{created,cloned,exited}
2556 events in the following situations:
2558 - If we are performing an in-line step-over-breakpoint, then we
2559 will remove a breakpoint from the target and only run the
2560 current thread. We don't want any new thread (spawned by the
2561 step) to start running, as it might miss the breakpoint. We
2562 need to clear the step-over state if the stepped thread exits,
2563 so we also enable thread-exit events.
2565 - If we are stepping over a breakpoint out of line (displaced
2566 stepping) then we won't remove a breakpoint from the target,
2567 but, if the step spawns a new clone thread, then we will need
2568 to fixup the $pc address in the clone child too, so we need it
2569 to start stopped. We need to release the displaced stepping
2570 buffer if the stepped thread exits, so we also enable
2573 - If scheduler-locking applies, threads that the current thread
2574 spawns should remain halted. It's not strictly necessary to
2575 enable thread-exit events in this case, but it doesn't hurt.
   [...]  */
2577 if (step_over_info_valid_p ()
2578 || displaced_step_in_progress_thread (tp
)
2579 || schedlock_applies (tp
))
2581 gdb_thread_options options
2582 = GDB_THREAD_OPTION_CLONE
| GDB_THREAD_OPTION_EXIT
;
2583 if (target_supports_set_thread_options (options
))
2584 tp
->set_thread_options (options
);
2586 target_thread_events (true);
2588 else if (tp
->thread_fsm () != nullptr)
2590 gdb_thread_options options
= GDB_THREAD_OPTION_EXIT
;
2591 if (target_supports_set_thread_options (options
))
2592 tp
->set_thread_options (options
);
2594 target_thread_events (true);
2598 if (target_supports_set_thread_options (0))
2599 tp
->set_thread_options (0);
2602 process_stratum_target
*resume_target
= tp
->inf
->process_target ();
2603 if (!any_thread_needs_target_thread_events (resume_target
,
2605 target_thread_events (false);
2609 /* If we're resuming more than one thread simultaneously, then any
2610 thread other than the leader is being set to run free. Clear any
2611 previous thread option for those threads. */
2612 if (resume_ptid
!= inferior_ptid
 && target_supports_set_thread_options (0))
2614 process_stratum_target
*resume_target
= tp
->inf
->process_target ();
2615 for (thread_info
*thr_iter
: all_non_exited_threads (resume_target
,
2618 thr_iter
->set_thread_options (0);
2621 infrun_debug_printf ("resume_ptid=%s, step=%d, sig=%s",
2622 resume_ptid
.to_string ().c_str (),
2623 step
, gdb_signal_to_symbol_string (sig
));
2625 target_resume (resume_ptid
, step
, sig
);
2628 /* Resume the inferior. SIG is the signal to give the inferior
2629 (GDB_SIGNAL_0 for none). Note: don't call this directly; instead
2630 call 'resume', which handles exceptions. */
2633 resume_1 (enum gdb_signal sig
)
2635 struct thread_info
*tp
= inferior_thread ();
2636 regcache
*regcache
= get_thread_regcache (tp
);
2637 struct gdbarch
*gdbarch
= regcache
->arch ();
2639 /* This represents the user's step vs continue request. When
2640 deciding whether "set scheduler-locking step" applies, it's the
2641 user's intention that counts. */
2642 const int user_step
= tp
->control
.stepping_command
;
2643 /* This represents what we'll actually request the target to do.
2644 This can decay from a step to a continue, if e.g., we need to
2645 implement single-stepping with breakpoints (software
2649 gdb_assert (!tp
->stop_requested
);
2650 gdb_assert (!thread_is_in_step_over_chain (tp
));
2652 if (tp
->has_pending_waitstatus ())
2655 ("thread %s has pending wait "
2656 "status %s (currently_stepping=%d).",
2657 tp
->ptid
.to_string ().c_str (),
2658 tp
->pending_waitstatus ().to_string ().c_str (),
2659 currently_stepping (tp
));
2661 tp
->inf
->process_target ()->threads_executing
= true;
2662 tp
->set_resumed (true);
2664 /* FIXME: What should we do if we are supposed to resume this
2665 thread with a signal? Maybe we should maintain a queue of
2666 pending signals to deliver. */
2667 if (sig
!= GDB_SIGNAL_0
)
2669 warning (_("Couldn't deliver signal %s to %s."),
2670 gdb_signal_to_name (sig
),
2671 tp
->ptid
.to_string ().c_str ());
2674 tp
->set_stop_signal (GDB_SIGNAL_0
);
2676 if (target_can_async_p ())
2678 target_async (true);
2679 /* Tell the event loop we have an event to process. */
2680 mark_async_event_handler (infrun_async_inferior_event_token
);
2685 tp
->stepped_breakpoint
= 0;
2687 /* Depends on stepped_breakpoint. */
2688 step
= currently_stepping (tp
);
2690 if (current_inferior ()->thread_waiting_for_vfork_done
!= nullptr)
2692 /* Don't try to single-step a vfork parent that is waiting for
2693 the child to get out of the shared memory region (by exec'ing
2694 or exiting). This is particularly important on software
2695 single-step archs, as the child process would trip on the
2696 software single step breakpoint inserted for the parent
2697 process. Since the parent will not actually execute any
2698 instruction until the child is out of the shared region (such
2699 are vfork's semantics), it is safe to simply continue it.
2700 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
2701 the parent, and tell it to `keep_going', which automatically
2702 re-sets it stepping. */
2703 infrun_debug_printf ("resume : clear step");
2707 CORE_ADDR pc
= regcache_read_pc (regcache
);
2709 infrun_debug_printf ("step=%d, signal=%s, trap_expected=%d, "
2710 "current thread [%s] at %s",
2711 step
, gdb_signal_to_symbol_string (sig
),
2712 tp
->control
.trap_expected
,
2713 inferior_ptid
.to_string ().c_str (),
2714 paddress (gdbarch
, pc
));
2716 const address_space
*aspace
= tp
->inf
->aspace
.get ();
2718 /* Normally, by the time we reach `resume', the breakpoints are either
2719 removed or inserted, as appropriate. The exception is if we're sitting
2720 at a permanent breakpoint; we need to step over it, but permanent
2721 breakpoints can't be removed. So we have to test for it here. */
2722 if (breakpoint_here_p (aspace
, pc
) == permanent_breakpoint_here
)
2724 if (sig
!= GDB_SIGNAL_0
)
2726 /* We have a signal to pass to the inferior. The resume
2727 may, or may not take us to the signal handler. If this
2728 is a step, we'll need to stop in the signal handler, if
2729 there's one, (if the target supports stepping into
2730 handlers), or in the next mainline instruction, if
2731 there's no handler. If this is a continue, we need to be
2732 sure to run the handler with all breakpoints inserted.
2733 In all cases, set a breakpoint at the current address
2734 (where the handler returns to), and once that breakpoint
2735 is hit, resume skipping the permanent breakpoint. If
2736 that breakpoint isn't hit, then we've stepped into the
2737 signal handler (or hit some other event). We'll delete
2738 the step-resume breakpoint then. */
2740 infrun_debug_printf ("resume: skipping permanent breakpoint, "
2741 "deliver signal first");
2743 clear_step_over_info ();
2744 tp
->control
.trap_expected
= 0;
2746 if (tp
->control
.step_resume_breakpoint
== nullptr)
2748 /* Set a "high-priority" step-resume, as we don't want
2749 user breakpoints at PC to trigger (again) when this
2751 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
2752 gdb_assert (tp
->control
.step_resume_breakpoint
->first_loc ()
2755 tp
->step_after_step_resume_breakpoint
= step
;
2758 insert_breakpoints ();
2762 /* There's no signal to pass, we can go ahead and skip the
2763 permanent breakpoint manually. */
2764 infrun_debug_printf ("skipping permanent breakpoint");
2765 gdbarch_skip_permanent_breakpoint (gdbarch
, regcache
);
2766 /* Update pc to reflect the new address from which we will
2767 execute instructions. */
2768 pc
= regcache_read_pc (regcache
);
2772 /* We've already advanced the PC, so the stepping part
2773 is done. Now we need to arrange for a trap to be
2774 reported to handle_inferior_event. Set a breakpoint
2775 at the current PC, and run to it. Don't update
2776 prev_pc, because if we end in
2777 switch_back_to_stepped_thread, we want the "expected
2778 thread advanced also" branch to be taken. IOW, we
2779 don't want this thread to step further from PC
2781 gdb_assert (!step_over_info_valid_p ());
2782 insert_single_step_breakpoint (gdbarch
, aspace
, pc
);
2783 insert_breakpoints ();
2785 resume_ptid
= internal_resume_ptid (user_step
);
2786 do_target_resume (resume_ptid
, false, GDB_SIGNAL_0
);
2787 tp
->set_resumed (true);
2793 /* If we have a breakpoint to step over, make sure to do a single
2794 step only. Same if we have software watchpoints. */
2795 if (tp
->control
.trap_expected
|| bpstat_should_step ())
2796 tp
->control
.may_range_step
= 0;
2798 /* If displaced stepping is enabled, step over breakpoints by executing a
2799 copy of the instruction at a different address.
2801 We can't use displaced stepping when we have a signal to deliver;
2802 the comments for displaced_step_prepare explain why. The
2803 comments in the handle_inferior event for dealing with 'random
2804 signals' explain what we do instead.
2806 We can't use displaced stepping when we are waiting for vfork_done
2807 event, displaced stepping breaks the vfork child similarly as single
2808 step software breakpoint. */
2809 if (tp
->control
.trap_expected
2810 && use_displaced_stepping (tp
)
2811 && !step_over_info_valid_p ()
2812 && sig
== GDB_SIGNAL_0
2813 && current_inferior ()->thread_waiting_for_vfork_done
== nullptr)
2815 displaced_step_prepare_status prepare_status
2816 = displaced_step_prepare (tp
);
2818 if (prepare_status
== DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE
)
2820 infrun_debug_printf ("Got placed in step-over queue");
2822 tp
->control
.trap_expected
= 0;
2825 else if (prepare_status
== DISPLACED_STEP_PREPARE_STATUS_CANT
)
2827 /* Fallback to stepping over the breakpoint in-line. */
2829 if (target_is_non_stop_p ())
2830 stop_all_threads ("displaced stepping falling back on inline stepping");
2832 set_step_over_info (aspace
, regcache_read_pc (regcache
), 0,
2835 step
= maybe_software_singlestep (gdbarch
);
2837 insert_breakpoints ();
2839 else if (prepare_status
== DISPLACED_STEP_PREPARE_STATUS_OK
)
2841 /* Update pc to reflect the new address from which we will
2842 execute instructions due to displaced stepping. */
2843 pc
= regcache_read_pc (get_thread_regcache (tp
));
2845 step
= gdbarch_displaced_step_hw_singlestep (gdbarch
);
2848 gdb_assert_not_reached ("Invalid displaced_step_prepare_status "
2852 /* Do we need to do it the hard way, w/temp breakpoints? */
2854 step
= maybe_software_singlestep (gdbarch
);
2856 /* Currently, our software single-step implementation leads to different
2857 results than hardware single-stepping in one situation: when stepping
2858 into delivering a signal which has an associated signal handler,
2859 hardware single-step will stop at the first instruction of the handler,
2860 while software single-step will simply skip execution of the handler.
2862 For now, this difference in behavior is accepted since there is no
2863 easy way to actually implement single-stepping into a signal handler
2864 without kernel support.
2866 However, there is one scenario where this difference leads to follow-on
2867 problems: if we're stepping off a breakpoint by removing all breakpoints
2868 and then single-stepping. In this case, the software single-step
2869 behavior means that even if there is a *breakpoint* in the signal
2870 handler, GDB still would not stop.
2872 Fortunately, we can at least fix this particular issue. We detect
2873 here the case where we are about to deliver a signal while software
2874 single-stepping with breakpoints removed. In this situation, we
2875 revert the decisions to remove all breakpoints and insert single-
2876 step breakpoints, and instead we install a step-resume breakpoint
2877 at the current address, deliver the signal without stepping, and
2878 once we arrive back at the step-resume breakpoint, actually step
2879 over the breakpoint we originally wanted to step over. */
2880 if (thread_has_single_step_breakpoints_set (tp
)
2881 && sig
!= GDB_SIGNAL_0
2882 && step_over_info_valid_p ())
2884 /* If we have nested signals or a pending signal is delivered
2885 immediately after a handler returns, might already have
2886 a step-resume breakpoint set on the earlier handler. We cannot
2887 set another step-resume breakpoint; just continue on until the
2888 original breakpoint is hit. */
2889 if (tp
->control
.step_resume_breakpoint
== nullptr)
2891 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
2892 tp
->step_after_step_resume_breakpoint
= 1;
2895 delete_single_step_breakpoints (tp
);
2897 clear_step_over_info ();
2898 tp
->control
.trap_expected
= 0;
2900 insert_breakpoints ();
2903 /* If STEP is set, it's a request to use hardware stepping
2904 facilities. But in that case, we should never
2905 use singlestep breakpoint. */
2906 gdb_assert (!(thread_has_single_step_breakpoints_set (tp
) && step
));
2908 /* Decide the set of threads to ask the target to resume. */
2909 if (tp
->control
.trap_expected
)
2911 /* We're allowing a thread to run past a breakpoint it has
2912 hit, either by single-stepping the thread with the breakpoint
2913 removed, or by displaced stepping, with the breakpoint inserted.
2914 In the former case, we need to single-step only this thread,
2915 and keep others stopped, as they can miss this breakpoint if
2916 allowed to run. That's not really a problem for displaced
2917 stepping, but, we still keep other threads stopped, in case
2918 another thread is also stopped for a breakpoint waiting for
2919 its turn in the displaced stepping queue. */
2920 resume_ptid
= inferior_ptid
;
2923 resume_ptid
= internal_resume_ptid (user_step
);
2925 if (execution_direction
!= EXEC_REVERSE
2926 && step
&& breakpoint_inserted_here_p (aspace
, pc
))
2928 /* There are two cases where we currently need to step a
2929 breakpoint instruction when we have a signal to deliver:
2931 - See handle_signal_stop where we handle random signals that
2932 could take out us out of the stepping range. Normally, in
2933 that case we end up continuing (instead of stepping) over the
2934 signal handler with a breakpoint at PC, but there are cases
2935 where we should _always_ single-step, even if we have a
2936 step-resume breakpoint, like when a software watchpoint is
2937 set. Assuming single-stepping and delivering a signal at the
2938 same time would takes us to the signal handler, then we could
2939 have removed the breakpoint at PC to step over it. However,
2940 some hardware step targets (like e.g., Mac OS) can't step
2941 into signal handlers, and for those, we need to leave the
2942 breakpoint at PC inserted, as otherwise if the handler
2943 recurses and executes PC again, it'll miss the breakpoint.
2944 So we leave the breakpoint inserted anyway, but we need to
2945 record that we tried to step a breakpoint instruction, so
2946 that adjust_pc_after_break doesn't end up confused.
2948 - In non-stop if we insert a breakpoint (e.g., a step-resume)
2949 in one thread after another thread that was stepping had been
2950 momentarily paused for a step-over. When we re-resume the
2951 stepping thread, it may be resumed from that address with a
2952 breakpoint that hasn't trapped yet. Seen with
2953 gdb.threads/non-stop-fair-events.exp, on targets that don't
2954 do displaced stepping. */
2956 infrun_debug_printf ("resume: [%s] stepped breakpoint",
2957 tp
->ptid
.to_string ().c_str ());
2959 tp
->stepped_breakpoint
= 1;
2961 /* Most targets can step a breakpoint instruction, thus
2962 executing it normally. But if this one cannot, just
2963 continue and we will hit it anyway. */
2964 if (gdbarch_cannot_step_breakpoint (gdbarch
))
2968 if (tp
->control
.may_range_step
)
2970 /* If we're resuming a thread with the PC out of the step
2971 range, then we're doing some nested/finer run control
2972 operation, like stepping the thread out of the dynamic
2973 linker or the displaced stepping scratch pad. We
2974 shouldn't have allowed a range step then. */
2975 gdb_assert (pc_in_thread_step_range (pc
, tp
));
2978 do_target_resume (resume_ptid
, step
, sig
);
2979 tp
->set_resumed (true);
/* Resume the inferior.  SIG is the signal to give the inferior
   (GDB_SIGNAL_0 for none).  This is a wrapper around 'resume_1' that
   rolls back state on error.  */

static void
resume (gdb_signal sig)
{
  try
    {
      resume_1 (sig);
    }
  catch (const gdb_exception &ex)
    {
      /* If resuming is being aborted for any reason, delete any
	 single-step breakpoint resume_1 may have created, to avoid
	 confusing the following resumption, and to avoid leaving
	 single-step breakpoints perturbing other threads, in case
	 we're running in non-stop mode.  */
      if (inferior_ptid != null_ptid)
	delete_single_step_breakpoints (inferior_thread ());
      /* Re-raise so the caller still sees the original failure.  */
      throw;
    }
}
/* Counter that tracks number of user visible stops.  This can be used
   to tell whether a command has proceeded the inferior past the
   current location.  This allows e.g., inferior function calls in
   breakpoint commands to not interrupt the command list.  When the
   call finishes successfully, the inferior is standing at the same
   breakpoint as if nothing happened (and so the stop is not reported
   as a new user-visible stop).  */
static ULONGEST current_stop_id;

/* See infrun.h.  Returns the current user-visible stop counter.  */

ULONGEST
get_stop_id (void)
{
  return current_stop_id;
}
3028 /* Called when we report a user visible stop. */
/* Clear out all variables saying what to do when inferior is continued.
   First do this, then set the ones you want, then call `proceed'.  */

static void
clear_proceed_status_thread (struct thread_info *tp)
{
  infrun_debug_printf ("%s", tp->ptid.to_string ().c_str ());

  /* If we're starting a new sequence, then the previous finished
     single-step is no longer relevant.  */
  if (tp->has_pending_waitstatus ())
    {
      if (tp->stop_reason () == TARGET_STOPPED_BY_SINGLE_STEP)
	{
	  infrun_debug_printf ("pending event of %s was a finished step. "
			       "Discarding.",
			       tp->ptid.to_string ().c_str ());

	  tp->clear_pending_waitstatus ();
	  tp->set_stop_reason (TARGET_STOPPED_BY_NO_REASON);
	}
      else
	{
	  /* Keep any other pending event; just log it for debugging.  */
	  infrun_debug_printf
	    ("thread %s has pending wait status %s (currently_stepping=%d).",
	     tp->ptid.to_string ().c_str (),
	     tp->pending_waitstatus ().to_string ().c_str (),
	     currently_stepping (tp));
	}
    }

  /* If this signal should not be seen by program, give it zero.
     Used for debugging signals.  */
  if (!signal_pass_state (tp->stop_signal ()))
    tp->set_stop_signal (GDB_SIGNAL_0);

  /* Drop any thread finite-state machine driving this thread.  */
  tp->release_thread_fsm ();

  /* Reset all run-control state to "just continue".  */
  tp->control.trap_expected = 0;
  tp->control.step_range_start = 0;
  tp->control.step_range_end = 0;
  tp->control.may_range_step = 0;
  tp->control.step_frame_id = null_frame_id;
  tp->control.step_stack_frame_id = null_frame_id;
  tp->control.step_over_calls = STEP_OVER_UNDEBUGGABLE;
  tp->control.step_start_function = nullptr;
  tp->stop_requested = 0;

  tp->control.stop_step = 0;

  tp->control.proceed_to_finish = 0;

  tp->control.stepping_command = 0;

  /* Discard any remaining commands or status from previous stop.  */
  bpstat_clear (&tp->control.stop_bpstat);
}
/* Notify the current interpreter and observers that the target is about to
   proceed.  */

static void
notify_about_to_proceed ()
{
  top_level_interpreter ()->on_about_to_proceed ();
  gdb::observers::about_to_proceed.notify ();
}
/* See infrun.h.  Clear the proceed status of every thread that is about
   to be resumed (all threads in all-stop, the current thread in
   non-stop), then notify that we are about to proceed.  STEP indicates a
   step/next-like command.  */

void
clear_proceed_status (int step)
{
  /* With scheduler-locking replay, stop replaying other threads if we're
     not replaying the user-visible resume ptid.

     This is a convenience feature to not require the user to explicitly
     stop replaying the other threads.  We're assuming that the user's
     intent is to resume tracing the recorded process.  */
  if (!non_stop && scheduler_mode == schedlock_replay
      && target_record_is_replaying (minus_one_ptid)
      && !target_record_will_replay (user_visible_resume_ptid (step),
				     execution_direction))
    target_record_stop_replaying ();

  if (!non_stop && inferior_ptid != null_ptid)
    {
      ptid_t resume_ptid = user_visible_resume_ptid (step);
      process_stratum_target *resume_target
	= user_visible_resume_target (resume_ptid);

      /* In all-stop mode, delete the per-thread status of all threads
	 we're about to resume, implicitly and explicitly.  */
      for (thread_info *tp : all_non_exited_threads (resume_target,
						     resume_ptid))
	clear_proceed_status_thread (tp);
    }

  if (inferior_ptid != null_ptid)
    {
      struct inferior *inferior;

      if (non_stop)
	{
	  /* If in non-stop mode, only delete the per-thread status of
	     the current thread.  */
	  clear_proceed_status_thread (inferior_thread ());
	}

      inferior = current_inferior ();
      inferior->control.stop_soon = NO_STOP_QUIETLY;
    }

  notify_about_to_proceed ();
}
/* Returns true if TP is still stopped at a breakpoint that needs
   stepping-over in order to make progress.  If the breakpoint is gone
   meanwhile, we can skip the whole step-over dance.  */

static bool
thread_still_needs_step_over_bp (struct thread_info *tp)
{
  if (tp->stepping_over_breakpoint)
    {
      struct regcache *regcache = get_thread_regcache (tp);

      /* Only an ordinary breakpoint at the thread's current PC still
	 requires a step-over.  */
      if (breakpoint_here_p (tp->inf->aspace.get (),
			     regcache_read_pc (regcache))
	  == ordinary_breakpoint_here)
	return true;

      /* Breakpoint is gone; clear the stale flag.  */
      tp->stepping_over_breakpoint = 0;
    }

  return false;
}
/* Check whether thread TP still needs to start a step-over in order
   to make progress when resumed.  Returns an bitwise or of enum
   step_over_what bits, indicating what needs to be stepped over.  */

static step_over_what
thread_still_needs_step_over (struct thread_info *tp)
{
  step_over_what what = 0;

  if (thread_still_needs_step_over_bp (tp))
    what |= STEP_OVER_BREAKPOINT;

  /* A watchpoint only needs a software step-over when the target
     cannot step over it in hardware.  */
  if (tp->stepping_over_watchpoint
      && !target_have_steppable_watchpoint ())
    what |= STEP_OVER_WATCHPOINT;

  return what;
}
/* Returns true if scheduler locking applies.  STEP indicates whether
   we're about to do a step/next-like command to a thread.  */

static bool
schedlock_applies (struct thread_info *tp)
{
  return (scheduler_mode == schedlock_on
	  || (scheduler_mode == schedlock_step
	      && tp->control.stepping_command)
	  || (scheduler_mode == schedlock_replay
	      && target_record_will_replay (minus_one_ptid,
					    execution_direction)));
}
/* Set process_stratum_target::COMMIT_RESUMED_STATE in all target
   stacks that have threads executing and don't have threads with
   pending events to report.  */

static void
maybe_set_commit_resumed_all_targets ()
{
  scoped_restore_current_thread restore_thread;

  for (inferior *inf : all_non_exited_inferiors ())
    {
      process_stratum_target *proc_target = inf->process_target ();

      if (proc_target->commit_resumed_state)
	{
	  /* We already set this in a previous iteration, via another
	     inferior sharing the process_stratum target.  */
	  continue;
	}

      /* If the target has no resumed threads, it would be useless to
	 ask it to commit the resumed threads.  */
      if (!proc_target->threads_executing)
	{
	  infrun_debug_printf ("not requesting commit-resumed for target "
			       "%s, no resumed threads",
			       proc_target->shortname ());
	  continue;
	}

      /* As an optimization, if a thread from this target has some
	 status to report, handle it before requiring the target to
	 commit its resumed threads: handling the status might lead to
	 resuming more threads.  */
      if (proc_target->has_resumed_with_pending_wait_status ())
	{
	  infrun_debug_printf ("not requesting commit-resumed for target %s, a"
			       " thread has a pending waitstatus",
			       proc_target->shortname ());
	  continue;
	}

      switch_to_inferior_no_thread (inf);

      if (target_has_pending_events ())
	{
	  infrun_debug_printf ("not requesting commit-resumed for target %s, "
			       "target has pending events",
			       proc_target->shortname ());
	  continue;
	}

      infrun_debug_printf ("enabling commit-resumed for target %s",
			   proc_target->shortname ());

      proc_target->commit_resumed_state = true;
    }
}
/* Call the commit_resumed method on all target stacks that have
   COMMIT_RESUMED_STATE set, so the targets actually start the threads
   GDB has marked as resumed.  */

static void
maybe_call_commit_resumed_all_targets ()
{
  scoped_restore_current_thread restore_thread;

  for (inferior *inf : all_non_exited_inferiors ())
    {
      process_stratum_target *proc_target = inf->process_target ();

      if (!proc_target->commit_resumed_state)
	continue;

      switch_to_inferior_no_thread (inf);

      infrun_debug_printf ("calling commit_resumed for target %s",
			   proc_target->shortname());

      target_commit_resumed ();
    }
}
/* To track nesting of scoped_disable_commit_resumed objects, ensuring
   that only the outermost one attempts to re-enable
   commit-resumed.  */
static bool enable_commit_resumed = true;
/* See infrun.h.  Disable commit-resumed for the duration of this
   object's lifetime; the outermost instance clears COMMIT_RESUMED_STATE
   on every target.  */

scoped_disable_commit_resumed::scoped_disable_commit_resumed
  (const char *reason)
  : m_reason (reason),
    m_prev_enable_commit_resumed (enable_commit_resumed)
{
  infrun_debug_printf ("reason=%s", m_reason);

  enable_commit_resumed = false;

  for (inferior *inf : all_non_exited_inferiors ())
    {
      process_stratum_target *proc_target = inf->process_target ();

      if (m_prev_enable_commit_resumed)
	{
	  /* This is the outermost instance: force all
	     COMMIT_RESUMED_STATE to false.  */
	  proc_target->commit_resumed_state = false;
	}
      else
	{
	  /* This is not the outermost instance, we expect
	     COMMIT_RESUMED_STATE to have been cleared by the
	     outermost instance.  */
	  gdb_assert (!proc_target->commit_resumed_state);
	}
    }
}
/* See infrun.h.  Undo the disable; idempotent, so it is safe to call
   both explicitly (reset_and_commit) and from the destructor.  */

void
scoped_disable_commit_resumed::reset ()
{
  /* NOTE(review): the m_reset guard lines were dropped by the extraction;
     reconstructed from the upstream source -- confirm against infrun.h.  */
  if (m_reset)
    return;
  m_reset = true;

  infrun_debug_printf ("reason=%s", m_reason);

  gdb_assert (!enable_commit_resumed);

  enable_commit_resumed = m_prev_enable_commit_resumed;

  if (m_prev_enable_commit_resumed)
    {
      /* This is the outermost instance, re-enable
	 COMMIT_RESUMED_STATE on the targets where it's possible.  */
      maybe_set_commit_resumed_all_targets ();
    }
  else
    {
      /* This is not the outermost instance, we expect
	 COMMIT_RESUMED_STATE to still be false.  */
      for (inferior *inf : all_non_exited_inferiors ())
	{
	  process_stratum_target *proc_target = inf->process_target ();
	  gdb_assert (!proc_target->commit_resumed_state);
	}
    }
}
3357 scoped_disable_commit_resumed::~scoped_disable_commit_resumed ()
/* See infrun.h.  Undo the disable now and immediately commit resumed
   threads on all targets that want it.  */

void
scoped_disable_commit_resumed::reset_and_commit ()
{
  reset ();
  maybe_call_commit_resumed_all_targets ();
}
/* See infrun.h.  Temporarily force commit-resumed on, committing any
   already-resumed threads right away.  */

scoped_enable_commit_resumed::scoped_enable_commit_resumed
  (const char *reason)
  : m_reason (reason),
    m_prev_enable_commit_resumed (enable_commit_resumed)
{
  infrun_debug_printf ("reason=%s", m_reason);

  if (!enable_commit_resumed)
    {
      enable_commit_resumed = true;

      /* Re-enable COMMIT_RESUMED_STATE on the targets where it's
	 possible.  */
      maybe_set_commit_resumed_all_targets ();

      maybe_call_commit_resumed_all_targets ();
    }
}
/* Destructor: restore the previous commit-resumed setting; if that
   setting was "disabled", clear COMMIT_RESUMED_STATE everywhere.  */

scoped_enable_commit_resumed::~scoped_enable_commit_resumed ()
{
  infrun_debug_printf ("reason=%s", m_reason);

  gdb_assert (enable_commit_resumed);

  enable_commit_resumed = m_prev_enable_commit_resumed;

  if (!enable_commit_resumed)
    {
      /* Force all COMMIT_RESUMED_STATE back to false.  */
      for (inferior *inf : all_non_exited_inferiors ())
	{
	  process_stratum_target *proc_target = inf->process_target ();
	  proc_target->commit_resumed_state = false;
	}
    }
}
/* Check that all the targets we're about to resume are in non-stop
   mode.  Ideally, we'd only care whether all targets support
   target-async, but we're not there yet.  E.g., stop_all_threads
   doesn't know how to handle all-stop targets.  Also, the remote
   protocol in all-stop mode is synchronous, irrespective of
   target-async, which means that things like a breakpoint re-set
   triggered by one target would try to read memory from all targets
   and fail.  Errors out if the check fails.  */

static void
check_multi_target_resumption (process_stratum_target *resume_target)
{
  if (!non_stop && resume_target == nullptr)
    {
      scoped_restore_current_thread restore_thread;

      /* This is used to track whether we're resuming more than one
	 target.  */
      process_stratum_target *first_connection = nullptr;

      /* The first inferior we see with a target that does not work in
	 always-non-stop mode.  */
      inferior *first_not_non_stop = nullptr;

      for (inferior *inf : all_non_exited_inferiors ())
	{
	  switch_to_inferior_no_thread (inf);

	  /* Inferiors without execution can't be resumed; skip them.  */
	  if (!target_has_execution ())
	    continue;

	  process_stratum_target *proc_target
	    = current_inferior ()->process_target();

	  if (!target_is_non_stop_p ())
	    first_not_non_stop = inf;

	  if (first_connection == nullptr)
	    first_connection = proc_target;
	  else if (first_connection != proc_target
		   && first_not_non_stop != nullptr)
	    {
	      /* More than one connection, and at least one all-stop
		 target: report the offending connection.  */
	      switch_to_inferior_no_thread (first_not_non_stop);

	      proc_target = current_inferior ()->process_target();

	      error (_("Connection %d (%s) does not support "
		       "multi-target resumption."),
		     proc_target->connection_number,
		     make_target_connection_string (proc_target).c_str ());
	    }
	}
    }
}
/* Helper function for `proceed`.  Check if thread TP is suitable for
   resuming, and, if it is, switch to the thread and call
   `keep_going_pass_signal`.  If TP is not suitable for resuming then this
   function will just return without switching threads.  */

static void
proceed_resume_thread_checked (thread_info *tp)
{
  if (!tp->inf->has_execution ())
    {
      infrun_debug_printf ("[%s] target has no execution",
			   tp->ptid.to_string ().c_str ());
      return;
    }

  if (tp->resumed ())
    {
      /* Already resumed; nothing to do.  */
      infrun_debug_printf ("[%s] resumed",
			   tp->ptid.to_string ().c_str ());
      gdb_assert (tp->executing () || tp->has_pending_waitstatus ());
      return;
    }

  if (thread_is_in_step_over_chain (tp))
    {
      /* Queued for a step-over; start_step_over will resume it.  */
      infrun_debug_printf ("[%s] needs step-over",
			   tp->ptid.to_string ().c_str ());
      return;
    }

  /* When handling a vfork GDB removes all breakpoints from the program
     space in which the vfork is being handled.  If we are following the
     parent then GDB will set the thread_waiting_for_vfork_done member of
     the parent inferior.  In this case we should take care to only resume
     the vfork parent thread, the kernel will hold this thread suspended
     until the vfork child has exited or execd, at which point the parent
     will be resumed and a VFORK_DONE event sent to GDB.  */
  if (tp->inf->thread_waiting_for_vfork_done != nullptr)
    {
      if (target_is_non_stop_p ())
	{
	  /* For non-stop targets, regardless of whether GDB is using
	     all-stop or non-stop mode, threads are controlled
	     individually.

	     When a thread is handling a vfork, breakpoints are removed
	     from the inferior (well, program space in fact), so it is
	     critical that we don't try to resume any thread other than the
	     vfork parent.  */
	  if (tp != tp->inf->thread_waiting_for_vfork_done)
	    {
	      infrun_debug_printf ("[%s] thread %s of this inferior is "
				   "waiting for vfork-done",
				   tp->ptid.to_string ().c_str (),
				   tp->inf->thread_waiting_for_vfork_done
				     ->ptid.to_string ().c_str ());
	      return;
	    }
	}
      else
	{
	  /* For all-stop targets, when we attempt to resume the inferior,
	     we will only resume the vfork parent thread, this is handled
	     in internal_resume_ptid.

	     Additionally, we will always be called with the vfork parent
	     thread as the current thread (TP) thanks to follow_fork, as
	     such the following assertion should hold.

	     Beyond this there is nothing more that needs to be done
	     here.  */
	  gdb_assert (tp == tp->inf->thread_waiting_for_vfork_done);
	}
    }

  /* When handling a vfork GDB removes all breakpoints from the program
     space in which the vfork is being handled.  If we are following the
     child then GDB will set vfork_child member of the vfork parent
     inferior.  Once the child has either exited or execd then GDB will
     detach from the parent process.  Until that point GDB should not
     resume any thread in the parent process.  */
  if (tp->inf->vfork_child != nullptr)
    {
      infrun_debug_printf ("[%s] thread is part of a vfork parent, child is %d",
			   tp->ptid.to_string ().c_str (),
			   tp->inf->vfork_child->pid);
      return;
    }

  infrun_debug_printf ("resuming %s",
		       tp->ptid.to_string ().c_str ());

  execution_control_state ecs (tp);
  switch_to_thread (tp);
  keep_going_pass_signal (&ecs);
  if (!ecs.wait_some_more)
    error (_("Command aborted."));
}
/* Basic routine for continuing the program in various fashions.

   ADDR is the address to resume at, or -1 for resume where stopped.
   SIGGNAL is the signal to give it, or GDB_SIGNAL_0 for none,
   or GDB_SIGNAL_DEFAULT for act according to how it stopped.

   You should call clear_proceed_status before calling proceed.  */

void
proceed (CORE_ADDR addr, enum gdb_signal siggnal)
{
  INFRUN_SCOPED_DEBUG_ENTER_EXIT;

  struct gdbarch *gdbarch;
  CORE_ADDR pc;

  /* If we're stopped at a fork/vfork, switch to either the parent or child
     thread as defined by the "set follow-fork-mode" command, or, if both
     the parent and child are controlled by GDB, and schedule-multiple is
     on, follow the child.  If none of the above apply then we just proceed
     resuming the current thread.  */
  if (!follow_fork ())
    {
      /* The target for some reason decided not to resume.  */
      normal_stop ();
      if (target_can_async_p ())
	inferior_event_handler (INF_EXEC_COMPLETE);
      return;
    }

  /* We'll update this if & when we switch to a new thread.  */
  update_previous_thread ();

  thread_info *cur_thr = inferior_thread ();
  infrun_debug_printf ("cur_thr = %s", cur_thr->ptid.to_string ().c_str ());

  regcache *regcache = get_thread_regcache (cur_thr);
  gdbarch = regcache->arch ();
  pc = regcache_read_pc_protected (regcache);

  /* Fill in with reasonable starting values.  */
  init_thread_stepping_state (cur_thr);

  gdb_assert (!thread_is_in_step_over_chain (cur_thr));

  ptid_t resume_ptid
    = user_visible_resume_ptid (cur_thr->control.stepping_command);
  process_stratum_target *resume_target
    = user_visible_resume_target (resume_ptid);

  check_multi_target_resumption (resume_target);

  if (addr == (CORE_ADDR) -1)
    {
      const address_space *aspace = cur_thr->inf->aspace.get ();

      if (cur_thr->stop_pc_p ()
	  && pc == cur_thr->stop_pc ()
	  && breakpoint_here_p (aspace, pc) == ordinary_breakpoint_here
	  && execution_direction != EXEC_REVERSE)
	/* There is a breakpoint at the address we will resume at,
	   step one instruction before inserting breakpoints so that
	   we do not stop right away (and report a second hit at this
	   breakpoint).

	   Note, we don't do this in reverse, because we won't
	   actually be executing the breakpoint insn anyway.
	   We'll be (un-)executing the previous instruction.  */
	cur_thr->stepping_over_breakpoint = 1;
      else if (gdbarch_single_step_through_delay_p (gdbarch)
	       && gdbarch_single_step_through_delay (gdbarch,
						     get_current_frame ()))
	/* We stepped onto an instruction that needs to be stepped
	   again before re-inserting the breakpoint, do so.  */
	cur_thr->stepping_over_breakpoint = 1;
    }
  else
    {
      regcache_write_pc (regcache, addr);
    }

  if (siggnal != GDB_SIGNAL_DEFAULT)
    cur_thr->set_stop_signal (siggnal);

  /* If an exception is thrown from this point on, make sure to
     propagate GDB's knowledge of the executing state to the
     frontend/user running state.  */
  scoped_finish_thread_state finish_state (resume_target, resume_ptid);

  /* Even if RESUME_PTID is a wildcard, and we end up resuming fewer
     threads (e.g., we might need to set threads stepping over
     breakpoints first), from the user/frontend's point of view, all
     threads in RESUME_PTID are now running.  Unless we're calling an
     inferior function, as in that case we pretend the inferior
     doesn't run at all.  */
  if (!cur_thr->control.in_infcall)
    set_running (resume_target, resume_ptid, true);

  infrun_debug_printf ("addr=%s, signal=%s, resume_ptid=%s",
		       paddress (gdbarch, addr),
		       gdb_signal_to_symbol_string (siggnal),
		       resume_ptid.to_string ().c_str ());

  annotate_starting ();

  /* Make sure that output from GDB appears before output from the
     inferior.  */
  gdb_flush (gdb_stdout);

  /* Since we've marked the inferior running, give it the terminal.  A
     QUIT/Ctrl-C from here on is forwarded to the target (which can
     still detect attempts to unblock a stuck connection with repeated
     Ctrl-C from within target_pass_ctrlc).  */
  target_terminal::inferior ();

  /* In a multi-threaded task we may select another thread and
     then continue or step.

     But if a thread that we're resuming had stopped at a breakpoint,
     it will immediately cause another breakpoint stop without any
     execution (i.e. it will report a breakpoint hit incorrectly).  So
     we must step over it first.

     Look for threads other than the current (TP) that reported a
     breakpoint hit and haven't been resumed yet since.  */

  /* If scheduler locking applies, we can avoid iterating over all
     threads.  */
  if (!non_stop && !schedlock_applies (cur_thr))
    {
      for (thread_info *tp : all_non_exited_threads (resume_target,
						     resume_ptid))
	{
	  switch_to_thread_no_regs (tp);

	  /* Ignore the current thread here.  It's handled
	     afterwards.  */
	  if (tp == cur_thr)
	    continue;

	  if (!thread_still_needs_step_over (tp))
	    continue;

	  gdb_assert (!thread_is_in_step_over_chain (tp));

	  infrun_debug_printf ("need to step-over [%s] first",
			       tp->ptid.to_string ().c_str ());

	  global_thread_step_over_chain_enqueue (tp);
	}

      switch_to_thread (cur_thr);
    }

  /* Enqueue the current thread last, so that we move all other
     threads over their breakpoints first.  */
  if (cur_thr->stepping_over_breakpoint)
    global_thread_step_over_chain_enqueue (cur_thr);

  /* If the thread isn't started, we'll still need to set its prev_pc,
     so that switch_back_to_stepped_thread knows the thread hasn't
     advanced.  Must do this before resuming any thread, as in
     all-stop/remote, once we resume we can't send any other packet
     until the target stops again.  */
  cur_thr->prev_pc = regcache_read_pc_protected (regcache);

  {
    /* Batch target resumptions until we've decided the full set of
       threads to run.  */
    scoped_disable_commit_resumed disable_commit_resumed ("proceeding");
    bool step_over_started = start_step_over ();

    if (step_over_info_valid_p ())
      {
	/* Either this thread started a new in-line step over, or some
	   other thread was already doing one.  In either case, don't
	   resume anything else until the step-over is finished.  */
      }
    else if (step_over_started && !target_is_non_stop_p ())
      {
	/* A new displaced stepping sequence was started.  In all-stop,
	   we can't talk to the target anymore until it next stops.  */
      }
    else if (!non_stop && target_is_non_stop_p ())
      {
	INFRUN_SCOPED_DEBUG_START_END
	  ("resuming threads, all-stop-on-top-of-non-stop");

	/* In all-stop, but the target is always in non-stop mode.
	   Start all other threads that are implicitly resumed too.  */
	for (thread_info *tp : all_non_exited_threads (resume_target,
						       resume_ptid))
	  {
	    switch_to_thread_no_regs (tp);
	    proceed_resume_thread_checked (tp);
	  }
      }
    else
      proceed_resume_thread_checked (cur_thr);

    disable_commit_resumed.reset_and_commit ();
  }

  finish_state.release ();

  /* If we've switched threads above, switch back to the previously
     current thread.  We don't want the user to see a different
     selected thread.  */
  switch_to_thread (cur_thr);

  /* Tell the event loop to wait for it to stop.  If the target
     supports asynchronous execution, it'll do this from within
     target_resume.  */
  if (!target_can_async_p ())
    mark_async_event_handler (infrun_async_inferior_event_token);
}
/* Start remote-debugging of a machine over a serial link.  */

void
start_remote (int from_tty)
{
  inferior *inf = current_inferior ();
  inf->control.stop_soon = STOP_QUIETLY_REMOTE;

  /* Always go on waiting for the target, regardless of the mode.  */
  /* FIXME: cagney/1999-09-23: At present it isn't possible to
     indicate to wait_for_inferior that a target should timeout if
     nothing is returned (instead of just blocking).  Because of this,
     targets expecting an immediate response need to, internally, set
     things up so that the target_wait() is forced to eventually
     timeout.  */
  /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
     differentiate to its caller what the state of the target is after
     the initial open has been performed.  Here we're assuming that
     the target has stopped.  It should be possible to eventually have
     target_open() return to the caller an indication that the target
     is currently running and GDB state should be set to the same as
     for an async run.  */
  wait_for_inferior (inf);

  /* Now that the inferior has stopped, do any bookkeeping like
     loading shared libraries.  We want to do this before normal_stop,
     so that the displayed frame is up to date.  */
  post_create_inferior (from_tty);

  normal_stop ();
}
/* Initialize static vars when a new inferior begins.  */

void
init_wait_for_inferior (void)
{
  /* These are meaningless until the first time through wait_for_inferior.  */

  breakpoint_init_inferior (current_inferior (), inf_starting);

  clear_proceed_status (0);

  nullify_last_target_wait_ptid ();

  update_previous_thread ();
}
3833 static void handle_inferior_event (struct execution_control_state
*ecs
);
3835 static void handle_step_into_function (struct gdbarch
*gdbarch
,
3836 struct execution_control_state
*ecs
);
3837 static void handle_step_into_function_backward (struct gdbarch
*gdbarch
,
3838 struct execution_control_state
*ecs
);
3839 static void handle_signal_stop (struct execution_control_state
*ecs
);
3840 static void check_exception_resume (struct execution_control_state
*,
3841 const frame_info_ptr
&);
3843 static void end_stepping_range (struct execution_control_state
*ecs
);
3844 static void stop_waiting (struct execution_control_state
*ecs
);
3845 static void keep_going (struct execution_control_state
*ecs
);
3846 static void process_event_stop_test (struct execution_control_state
*ecs
);
3847 static bool switch_back_to_stepped_thread (struct execution_control_state
*ecs
);
/* This function is attached as a "thread_stop_requested" observer.
   Cleanup local state that assumed the PTID was to be resumed, and
   report the stop to the frontend.  */

void
infrun_thread_stop_requested (ptid_t ptid)
{
  process_stratum_target *curr_target = current_inferior ()->process_target ();

  /* PTID was requested to stop.  If the thread was already stopped,
     but the user/frontend doesn't know about that yet (e.g., the
     thread had been temporarily paused for some step-over), set up
     for reporting the stop now.  */
  for (thread_info *tp : all_threads (curr_target, ptid))
    {
      /* Skip threads the frontend already sees as stopped.  */
      if (tp->state != THREAD_RUNNING)
	continue;
      /* Skip threads that are really running on the target.  */
      if (tp->executing ())
	continue;

      /* Remove matching threads from the step-over queue, so
	 start_step_over doesn't try to resume them
	 automatically.  */
      if (thread_is_in_step_over_chain (tp))
	global_thread_step_over_chain_remove (tp);

      /* If the thread is stopped, but the user/frontend doesn't
	 know about that yet, queue a pending event, as if the
	 thread had just stopped now.  Unless the thread already had
	 a pending event.  */
      if (!tp->has_pending_waitstatus ())
	{
	  target_waitstatus ws;
	  ws.set_stopped (GDB_SIGNAL_0);
	  tp->set_pending_waitstatus (ws);
	}

      /* Clear the inline-frame state, since we're re-processing the
	 stop.  */
      clear_inline_frame_state (tp);

      /* If this thread was paused because some other thread was
	 doing an inline-step over, let that finish first.  Once
	 that happens, we'll restart all threads and consume pending
	 stop events then.  */
      if (step_over_info_valid_p ())
	continue;

      /* Otherwise we can process the (new) pending event now.  Set
	 it so this pending event is considered by
	 do_target_wait.  */
      tp->set_resumed (true);
    }
}
/* Delete the step resume, single-step and longjmp/exception resume
   breakpoints of TP.  */

static void
delete_thread_infrun_breakpoints (struct thread_info *tp)
{
  delete_step_resume_breakpoint (tp);
  delete_exception_resume_breakpoint (tp);
  delete_single_step_breakpoints (tp);
}
/* If the target still has execution, call FUNC for each thread that
   just stopped.  In all-stop, that's all the non-exited threads; in
   non-stop, that's the current thread, only.  */

typedef void (*for_each_just_stopped_thread_callback_func)
  (struct thread_info *tp);

static void
for_each_just_stopped_thread (for_each_just_stopped_thread_callback_func func)
{
  if (!target_has_execution () || inferior_ptid == null_ptid)
    return;

  if (target_is_non_stop_p ())
    {
      /* If in non-stop mode, only the current thread stopped.  */
      func (inferior_thread ());
    }
  else
    {
      /* In all-stop mode, all threads have stopped.  */
      for (thread_info *tp : all_non_exited_threads ())
	func (tp);
    }
}
/* Delete the step resume and longjmp/exception resume breakpoints of
   the threads that just stopped.  */

static void
delete_just_stopped_threads_infrun_breakpoints (void)
{
  for_each_just_stopped_thread (delete_thread_infrun_breakpoints);
}
/* Delete the single-step breakpoints of the threads that just
   stopped.  */

static void
delete_just_stopped_threads_single_step_breakpoints (void)
{
  for_each_just_stopped_thread (delete_single_step_breakpoints);
}
/* Log (via infrun debug output) the ptid waited on, the ptid that
   reported an event, and the wait status, for one target_wait call.  */

static void
print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
			   const struct target_waitstatus &ws)
{
  infrun_debug_printf ("target_wait (%s [%s], status) =",
		       waiton_ptid.to_string ().c_str (),
		       target_pid_to_str (waiton_ptid).c_str ());
  infrun_debug_printf ("  %s [%s],",
		       result_ptid.to_string ().c_str (),
		       target_pid_to_str (result_ptid).c_str ());
  infrun_debug_printf ("  %s", ws.to_string ().c_str ());
}
/* Select a thread at random, out of those which are resumed and have
   a pending status to report, belonging to INF and matching
   WAITON_PTID.  Returns nullptr if there is no such thread.  */

static struct thread_info *
random_pending_event_thread (inferior *inf, ptid_t waiton_ptid)
{
  process_stratum_target *proc_target = inf->process_target ();
  thread_info *thread
    = proc_target->random_resumed_with_pending_wait_status (inf, waiton_ptid);

  if (thread == nullptr)
    {
      infrun_debug_printf ("None found.");
      return nullptr;
    }

  infrun_debug_printf ("Found %s.", thread->ptid.to_string ().c_str ());
  gdb_assert (thread->resumed ());
  gdb_assert (thread->has_pending_waitstatus ());

  return thread;
}
3997 /* Wrapper for target_wait that first checks whether threads have
3998 pending statuses to report before actually asking the target for
3999 more events. INF is the inferior we're using to call target_wait
4003 do_target_wait_1 (inferior
*inf
, ptid_t ptid
,
4004 target_waitstatus
*status
, target_wait_flags options
)
4006 struct thread_info
*tp
;
4008 /* We know that we are looking for an event in the target of inferior
4009 INF, but we don't know which thread the event might come from. As
4010 such we want to make sure that INFERIOR_PTID is reset so that none of
4011 the wait code relies on it - doing so is always a mistake. */
4012 switch_to_inferior_no_thread (inf
);
4014 /* First check if there is a resumed thread with a wait status
4016 if (ptid
== minus_one_ptid
|| ptid
.is_pid ())
4018 tp
= random_pending_event_thread (inf
, ptid
);
4022 infrun_debug_printf ("Waiting for specific thread %s.",
4023 ptid
.to_string ().c_str ());
4025 /* We have a specific thread to check. */
4026 tp
= inf
->find_thread (ptid
);
4027 gdb_assert (tp
!= nullptr);
4028 if (!tp
->has_pending_waitstatus ())
4033 && (tp
->stop_reason () == TARGET_STOPPED_BY_SW_BREAKPOINT
4034 || tp
->stop_reason () == TARGET_STOPPED_BY_HW_BREAKPOINT
))
4036 struct regcache
*regcache
= get_thread_regcache (tp
);
4037 struct gdbarch
*gdbarch
= regcache
->arch ();
4041 pc
= regcache_read_pc (regcache
);
4043 if (pc
!= tp
->stop_pc ())
4045 infrun_debug_printf ("PC of %s changed. was=%s, now=%s",
4046 tp
->ptid
.to_string ().c_str (),
4047 paddress (gdbarch
, tp
->stop_pc ()),
4048 paddress (gdbarch
, pc
));
4051 else if (!breakpoint_inserted_here_p (tp
->inf
->aspace
.get (), pc
))
4053 infrun_debug_printf ("previous breakpoint of %s, at %s gone",
4054 tp
->ptid
.to_string ().c_str (),
4055 paddress (gdbarch
, pc
));
4062 infrun_debug_printf ("pending event of %s cancelled.",
4063 tp
->ptid
.to_string ().c_str ());
4065 tp
->clear_pending_waitstatus ();
4066 target_waitstatus ws
;
4068 tp
->set_pending_waitstatus (ws
);
4069 tp
->set_stop_reason (TARGET_STOPPED_BY_NO_REASON
);
4075 infrun_debug_printf ("Using pending wait status %s for %s.",
4076 tp
->pending_waitstatus ().to_string ().c_str (),
4077 tp
->ptid
.to_string ().c_str ());
4079 /* Now that we've selected our final event LWP, un-adjust its PC
4080 if it was a software breakpoint (and the target doesn't
4081 always adjust the PC itself). */
4082 if (tp
->stop_reason () == TARGET_STOPPED_BY_SW_BREAKPOINT
4083 && !target_supports_stopped_by_sw_breakpoint ())
4085 struct regcache
*regcache
;
4086 struct gdbarch
*gdbarch
;
4089 regcache
= get_thread_regcache (tp
);
4090 gdbarch
= regcache
->arch ();
4092 decr_pc
= gdbarch_decr_pc_after_break (gdbarch
);
4097 pc
= regcache_read_pc (regcache
);
4098 regcache_write_pc (regcache
, pc
+ decr_pc
);
4102 tp
->set_stop_reason (TARGET_STOPPED_BY_NO_REASON
);
4103 *status
= tp
->pending_waitstatus ();
4104 tp
->clear_pending_waitstatus ();
4106 /* Wake up the event loop again, until all pending events are
4108 if (target_is_async_p ())
4109 mark_async_event_handler (infrun_async_inferior_event_token
);
4113 /* But if we don't find one, we'll have to wait. */
4115 /* We can't ask a non-async target to do a non-blocking wait, so this will be
4117 if (!target_can_async_p ())
4118 options
&= ~TARGET_WNOHANG
;
4120 return target_wait (ptid
, status
, options
);
4123 /* Wrapper for target_wait that first checks whether threads have
4124 pending statuses to report before actually asking the target for
4125 more events. Polls for events from all inferiors/targets. */
4128 do_target_wait (execution_control_state
*ecs
, target_wait_flags options
)
4130 int num_inferiors
= 0;
4131 int random_selector
;
4133 /* For fairness, we pick the first inferior/target to poll at random
4134 out of all inferiors that may report events, and then continue
4135 polling the rest of the inferior list starting from that one in a
4136 circular fashion until the whole list is polled once. */
4138 auto inferior_matches
= [] (inferior
*inf
)
4140 return inf
->process_target () != nullptr;
4143 /* First see how many matching inferiors we have. */
4144 for (inferior
*inf
: all_inferiors ())
4145 if (inferior_matches (inf
))
4148 if (num_inferiors
== 0)
4150 ecs
->ws
.set_ignore ();
4154 /* Now randomly pick an inferior out of those that matched. */
4155 random_selector
= (int)
4156 ((num_inferiors
* (double) rand ()) / (RAND_MAX
+ 1.0));
4158 if (num_inferiors
> 1)
4159 infrun_debug_printf ("Found %d inferiors, starting at #%d",
4160 num_inferiors
, random_selector
);
4162 /* Select the Nth inferior that matched. */
4164 inferior
*selected
= nullptr;
4166 for (inferior
*inf
: all_inferiors ())
4167 if (inferior_matches (inf
))
4168 if (random_selector
-- == 0)
4174 /* Now poll for events out of each of the matching inferior's
4175 targets, starting from the selected one. */
4177 auto do_wait
= [&] (inferior
*inf
)
4179 ecs
->ptid
= do_target_wait_1 (inf
, minus_one_ptid
, &ecs
->ws
, options
);
4180 ecs
->target
= inf
->process_target ();
4181 return (ecs
->ws
.kind () != TARGET_WAITKIND_IGNORE
);
4184 /* Needed in 'all-stop + target-non-stop' mode, because we end up
4185 here spuriously after the target is all stopped and we've already
4186 reported the stop to the user, polling for events. */
4187 scoped_restore_current_thread restore_thread
;
4189 intrusive_list_iterator
<inferior
> start
4190 = inferior_list
.iterator_to (*selected
);
4192 for (intrusive_list_iterator
<inferior
> it
= start
;
4193 it
!= inferior_list
.end ();
4196 inferior
*inf
= &*it
;
4198 if (inferior_matches (inf
) && do_wait (inf
))
4202 for (intrusive_list_iterator
<inferior
> it
= inferior_list
.begin ();
4206 inferior
*inf
= &*it
;
4208 if (inferior_matches (inf
) && do_wait (inf
))
4212 ecs
->ws
.set_ignore ();
4216 /* An event reported by wait_one. */
4218 struct wait_one_event
4220 /* The target the event came out of. */
4221 process_stratum_target
*target
;
4223 /* The PTID the event was for. */
4226 /* The waitstatus. */
4227 target_waitstatus ws
;
4230 static bool handle_one (const wait_one_event
&event
);
4231 static int finish_step_over (struct execution_control_state
*ecs
);
4233 /* Prepare and stabilize the inferior for detaching it. E.g.,
4234 detaching while a thread is displaced stepping is a recipe for
4235 crashing it, as nothing would readjust the PC out of the scratch
4239 prepare_for_detach (void)
4241 struct inferior
*inf
= current_inferior ();
4242 ptid_t pid_ptid
= ptid_t (inf
->pid
);
4243 scoped_restore_current_thread restore_thread
;
4245 scoped_restore restore_detaching
= make_scoped_restore (&inf
->detaching
, true);
4247 /* Remove all threads of INF from the global step-over chain. We
4248 want to stop any ongoing step-over, not start any new one. */
4249 thread_step_over_list_safe_range range
4250 = make_thread_step_over_list_safe_range (global_thread_step_over_list
);
4252 for (thread_info
*tp
: range
)
4255 infrun_debug_printf ("removing thread %s from global step over chain",
4256 tp
->ptid
.to_string ().c_str ());
4257 global_thread_step_over_chain_remove (tp
);
4260 /* If we were already in the middle of an inline step-over, and the
4261 thread stepping belongs to the inferior we're detaching, we need
4262 to restart the threads of other inferiors. */
4263 if (step_over_info
.thread
!= -1)
4265 infrun_debug_printf ("inline step-over in-process while detaching");
4267 thread_info
*thr
= find_thread_global_id (step_over_info
.thread
);
4268 if (thr
->inf
== inf
)
4270 /* Since we removed threads of INF from the step-over chain,
4271 we know this won't start a step-over for INF. */
4272 clear_step_over_info ();
4274 if (target_is_non_stop_p ())
4276 /* Start a new step-over in another thread if there's
4277 one that needs it. */
4280 /* Restart all other threads (except the
4281 previously-stepping thread, since that one is still
4283 if (!step_over_info_valid_p ())
4284 restart_threads (thr
);
4289 if (displaced_step_in_progress (inf
))
4291 infrun_debug_printf ("displaced-stepping in-process while detaching");
4293 /* Stop threads currently displaced stepping, aborting it. */
4295 for (thread_info
*thr
: inf
->non_exited_threads ())
4297 if (thr
->displaced_step_state
.in_progress ())
4299 if (thr
->executing ())
4301 if (!thr
->stop_requested
)
4303 target_stop (thr
->ptid
);
4304 thr
->stop_requested
= true;
4308 thr
->set_resumed (false);
4312 while (displaced_step_in_progress (inf
))
4314 wait_one_event event
;
4316 event
.target
= inf
->process_target ();
4317 event
.ptid
= do_target_wait_1 (inf
, pid_ptid
, &event
.ws
, 0);
4320 print_target_wait_results (pid_ptid
, event
.ptid
, event
.ws
);
4325 /* It's OK to leave some of the threads of INF stopped, since
4326 they'll be detached shortly. */
4330 /* If all-stop, but there exists a non-stop target, stop all threads
4331 now that we're presenting the stop to the user. */
4334 stop_all_threads_if_all_stop_mode ()
4336 if (!non_stop
&& exists_non_stop_target ())
4337 stop_all_threads ("presenting stop to user in all-stop");
4340 /* Wait for control to return from inferior to debugger.
4342 If inferior gets a signal, we may decide to start it up again
4343 instead of returning. That is why there is a loop in this function.
4344 When this function actually returns it means the inferior
4345 should be left stopped and GDB should read more commands. */
4348 wait_for_inferior (inferior
*inf
)
4350 infrun_debug_printf ("wait_for_inferior ()");
4352 SCOPE_EXIT
{ delete_just_stopped_threads_infrun_breakpoints (); };
4354 /* If an error happens while handling the event, propagate GDB's
4355 knowledge of the executing state to the frontend/user running
4357 scoped_finish_thread_state finish_state
4358 (inf
->process_target (), minus_one_ptid
);
4362 execution_control_state ecs
;
4364 overlay_cache_invalid
= 1;
4366 /* Flush target cache before starting to handle each event.
4367 Target was running and cache could be stale. This is just a
4368 heuristic. Running threads may modify target memory, but we
4369 don't get any event. */
4370 target_dcache_invalidate (current_program_space
->aspace
);
4372 ecs
.ptid
= do_target_wait_1 (inf
, minus_one_ptid
, &ecs
.ws
, 0);
4373 ecs
.target
= inf
->process_target ();
4376 print_target_wait_results (minus_one_ptid
, ecs
.ptid
, ecs
.ws
);
4378 /* Now figure out what to do with the result of the result. */
4379 handle_inferior_event (&ecs
);
4381 if (!ecs
.wait_some_more
)
4385 stop_all_threads_if_all_stop_mode ();
4387 /* No error, don't finish the state yet. */
4388 finish_state
.release ();
4391 /* Cleanup that reinstalls the readline callback handler, if the
4392 target is running in the background. If while handling the target
4393 event something triggered a secondary prompt, like e.g., a
4394 pagination prompt, we'll have removed the callback handler (see
4395 gdb_readline_wrapper_line). Need to do this as we go back to the
4396 event loop, ready to process further input. Note this has no
4397 effect if the handler hasn't actually been removed, because calling
4398 rl_callback_handler_install resets the line buffer, thus losing
4402 reinstall_readline_callback_handler_cleanup ()
4404 struct ui
*ui
= current_ui
;
4408 /* We're not going back to the top level event loop yet. Don't
4409 install the readline callback, as it'd prep the terminal,
4410 readline-style (raw, noecho) (e.g., --batch). We'll install
4411 it the next time the prompt is displayed, when we're ready
4416 if (ui
->command_editing
&& ui
->prompt_state
!= PROMPT_BLOCKED
)
4417 gdb_rl_callback_handler_reinstall ();
4420 /* Clean up the FSMs of threads that are now stopped. In non-stop,
4421 that's just the event thread. In all-stop, that's all threads. In
4422 all-stop, threads that had a pending exit no longer have a reason
4423 to be around, as their FSMs/commands are canceled, so we delete
4424 them. This avoids "info threads" listing such threads as if they
4425 were alive (and failing to read their registers), the user being
4426 able to select and resume them (and that failing), etc. */
4429 clean_up_just_stopped_threads_fsms (struct execution_control_state
*ecs
)
4431 /* The first clean_up call below assumes the event thread is the current
4433 if (ecs
->event_thread
!= nullptr)
4434 gdb_assert (ecs
->event_thread
== inferior_thread ());
4436 if (ecs
->event_thread
!= nullptr
4437 && ecs
->event_thread
->thread_fsm () != nullptr)
4438 ecs
->event_thread
->thread_fsm ()->clean_up (ecs
->event_thread
);
4442 scoped_restore_current_thread restore_thread
;
4444 for (thread_info
*thr
: all_threads_safe ())
4446 if (thr
->state
== THREAD_EXITED
)
4449 if (thr
== ecs
->event_thread
)
4452 if (thr
->thread_fsm () != nullptr)
4454 switch_to_thread (thr
);
4455 thr
->thread_fsm ()->clean_up (thr
);
4458 /* As we are cancelling the command/FSM of this thread,
4459 whatever was the reason we needed to report a thread
4460 exited event to the user, that reason is gone. Delete
4461 the thread, so that the user doesn't see it in the thread
4462 list, the next proceed doesn't try to resume it, etc. */
4463 if (thr
->has_pending_waitstatus ()
4464 && (thr
->pending_waitstatus ().kind ()
4465 == TARGET_WAITKIND_THREAD_EXITED
))
4466 delete_thread (thr
);
4471 /* Helper for all_uis_check_sync_execution_done that works on the
4475 check_curr_ui_sync_execution_done (void)
4477 struct ui
*ui
= current_ui
;
4479 if (ui
->prompt_state
== PROMPT_NEEDED
4481 && !gdb_in_secondary_prompt_p (ui
))
4483 target_terminal::ours ();
4484 top_level_interpreter ()->on_sync_execution_done ();
4485 ui
->register_file_handler ();
4492 all_uis_check_sync_execution_done (void)
4494 SWITCH_THRU_ALL_UIS ()
4496 check_curr_ui_sync_execution_done ();
4503 all_uis_on_sync_execution_starting (void)
4505 SWITCH_THRU_ALL_UIS ()
4507 if (current_ui
->prompt_state
== PROMPT_NEEDED
)
4508 async_disable_stdin ();
4512 /* A quit_handler callback installed while we're handling inferior
4516 infrun_quit_handler ()
4518 if (target_terminal::is_ours ())
4522 default_quit_handler would throw a quit in this case, but if
4523 we're handling an event while we have the terminal, it means
4524 the target is running a background execution command, and
4525 thus when users press Ctrl-C, they're wanting to interrupt
4526 whatever command they were executing in the command line.
4530 (gdb) foo bar whatever<ctrl-c>
4532 That Ctrl-C should clear the input line, not interrupt event
4533 handling if it happens that the user types Ctrl-C at just the
4536 It's as-if background event handling was handled by a
4537 separate background thread.
4539 To be clear, the Ctrl-C is not lost -- it will be processed
4540 by the next QUIT call once we're out of fetch_inferior_event
4545 if (check_quit_flag ())
4546 target_pass_ctrlc ();
4550 /* Asynchronous version of wait_for_inferior. It is called by the
4551 event loop whenever a change of state is detected on the file
4552 descriptor corresponding to the target. It can be called more than
4553 once to complete a single execution command. In such cases we need
4554 to keep the state in a global variable ECSS. If it is the last time
4555 that this function is called for a single execution command, then
4556 report to the user that the inferior has stopped, and do the
4557 necessary cleanups. */
4560 fetch_inferior_event ()
4562 INFRUN_SCOPED_DEBUG_ENTER_EXIT
;
4564 execution_control_state ecs
;
4567 /* Events are always processed with the main UI as current UI. This
4568 way, warnings, debug output, etc. are always consistently sent to
4569 the main console. */
4570 scoped_restore save_ui
= make_scoped_restore (¤t_ui
, main_ui
);
4572 /* Temporarily disable pagination. Otherwise, the user would be
4573 given an option to press 'q' to quit, which would cause an early
4574 exit and could leave GDB in a half-baked state. */
4575 scoped_restore save_pagination
4576 = make_scoped_restore (&pagination_enabled
, false);
4578 /* Install a quit handler that does nothing if we have the terminal
4579 (meaning the target is running a background execution command),
4580 so that Ctrl-C never interrupts GDB before the event is fully
4582 scoped_restore restore_quit_handler
4583 = make_scoped_restore (&quit_handler
, infrun_quit_handler
);
4585 /* Make sure a SIGINT does not interrupt an extension language while
4586 we're handling an event. That could interrupt a Python unwinder
4587 or a Python observer or some such. A Ctrl-C should either be
4588 forwarded to the inferior if the inferior has the terminal, or,
4589 if GDB has the terminal, should interrupt the command the user is
4590 typing in the CLI. */
4591 scoped_disable_cooperative_sigint_handling restore_coop_sigint
;
4593 /* End up with readline processing input, if necessary. */
4595 SCOPE_EXIT
{ reinstall_readline_callback_handler_cleanup (); };
4597 /* We're handling a live event, so make sure we're doing live
4598 debugging. If we're looking at traceframes while the target is
4599 running, we're going to need to get back to that mode after
4600 handling the event. */
4601 std::optional
<scoped_restore_current_traceframe
> maybe_restore_traceframe
;
4604 maybe_restore_traceframe
.emplace ();
4605 set_current_traceframe (-1);
4608 /* The user/frontend should not notice a thread switch due to
4609 internal events. Make sure we revert to the user selected
4610 thread and frame after handling the event and running any
4611 breakpoint commands. */
4612 scoped_restore_current_thread restore_thread
;
4614 overlay_cache_invalid
= 1;
4615 /* Flush target cache before starting to handle each event. Target
4616 was running and cache could be stale. This is just a heuristic.
4617 Running threads may modify target memory, but we don't get any
4619 target_dcache_invalidate (current_program_space
->aspace
);
4621 scoped_restore save_exec_dir
4622 = make_scoped_restore (&execution_direction
,
4623 target_execution_direction ());
4625 /* Allow targets to pause their resumed threads while we handle
4627 scoped_disable_commit_resumed
disable_commit_resumed ("handling event");
4629 if (!do_target_wait (&ecs
, TARGET_WNOHANG
))
4631 infrun_debug_printf ("do_target_wait returned no event");
4632 disable_commit_resumed
.reset_and_commit ();
4636 gdb_assert (ecs
.ws
.kind () != TARGET_WAITKIND_IGNORE
);
4638 /* Switch to the inferior that generated the event, so we can do
4639 target calls. If the event was not associated to a ptid, */
4640 if (ecs
.ptid
!= null_ptid
4641 && ecs
.ptid
!= minus_one_ptid
)
4642 switch_to_inferior_no_thread (find_inferior_ptid (ecs
.target
, ecs
.ptid
));
4644 switch_to_target_no_thread (ecs
.target
);
4647 print_target_wait_results (minus_one_ptid
, ecs
.ptid
, ecs
.ws
);
4649 /* If an error happens while handling the event, propagate GDB's
4650 knowledge of the executing state to the frontend/user running
4652 ptid_t finish_ptid
= !target_is_non_stop_p () ? minus_one_ptid
: ecs
.ptid
;
4653 scoped_finish_thread_state
finish_state (ecs
.target
, finish_ptid
);
4655 /* Get executed before scoped_restore_current_thread above to apply
4656 still for the thread which has thrown the exception. */
4657 auto defer_bpstat_clear
4658 = make_scope_exit (bpstat_clear_actions
);
4659 auto defer_delete_threads
4660 = make_scope_exit (delete_just_stopped_threads_infrun_breakpoints
);
4662 int stop_id
= get_stop_id ();
4664 /* Now figure out what to do with the result of the result. */
4665 handle_inferior_event (&ecs
);
4667 if (!ecs
.wait_some_more
)
4669 struct inferior
*inf
= find_inferior_ptid (ecs
.target
, ecs
.ptid
);
4670 bool should_stop
= true;
4671 struct thread_info
*thr
= ecs
.event_thread
;
4673 delete_just_stopped_threads_infrun_breakpoints ();
4675 if (thr
!= nullptr && thr
->thread_fsm () != nullptr)
4676 should_stop
= thr
->thread_fsm ()->should_stop (thr
);
4684 bool should_notify_stop
= true;
4685 bool proceeded
= false;
4687 stop_all_threads_if_all_stop_mode ();
4689 clean_up_just_stopped_threads_fsms (&ecs
);
4691 if (stop_id
!= get_stop_id ())
4693 /* If the stop-id has changed then a stop has already been
4694 presented to the user in handle_inferior_event, this is
4695 likely a failed inferior call. As the stop has already
4696 been announced then we should not notify again.
4698 Also, if the prompt state is not PROMPT_NEEDED then GDB
4699 will not be ready for user input after this function. */
4700 should_notify_stop
= false;
4701 gdb_assert (current_ui
->prompt_state
== PROMPT_NEEDED
);
4703 else if (thr
!= nullptr && thr
->thread_fsm () != nullptr)
4705 = thr
->thread_fsm ()->should_notify_stop ();
4707 if (should_notify_stop
)
4709 /* We may not find an inferior if this was a process exit. */
4710 if (inf
== nullptr || inf
->control
.stop_soon
== NO_STOP_QUIETLY
)
4711 proceeded
= normal_stop ();
4716 inferior_event_handler (INF_EXEC_COMPLETE
);
4720 /* If we got a TARGET_WAITKIND_NO_RESUMED event, then the
4721 previously selected thread is gone. We have two
4722 choices - switch to no thread selected, or restore the
4723 previously selected thread (now exited). We chose the
4724 later, just because that's what GDB used to do. After
4725 this, "info threads" says "The current thread <Thread
4726 ID 2> has terminated." instead of "No thread
4730 && ecs
.ws
.kind () != TARGET_WAITKIND_NO_RESUMED
)
4731 restore_thread
.dont_restore ();
4735 defer_delete_threads
.release ();
4736 defer_bpstat_clear
.release ();
4738 /* No error, don't finish the thread states yet. */
4739 finish_state
.release ();
4741 disable_commit_resumed
.reset_and_commit ();
4743 /* This scope is used to ensure that readline callbacks are
4744 reinstalled here. */
4747 /* Handling this event might have caused some inferiors to become prunable.
4748 For example, the exit of an inferior that was automatically added. Try
4749 to get rid of them. Keeping those around slows down things linearly.
4751 Note that this never removes the current inferior. Therefore, call this
4752 after RESTORE_THREAD went out of scope, in case the event inferior (which was
4753 temporarily made the current inferior) is meant to be deleted.
4755 Call this before all_uis_check_sync_execution_done, so that notifications about
4756 removed inferiors appear before the prompt. */
4759 /* If a UI was in sync execution mode, and now isn't, restore its
4760 prompt (a synchronous execution command has finished, and we're
4761 ready for input). */
4762 all_uis_check_sync_execution_done ();
4765 && exec_done_display_p
4766 && (inferior_ptid
== null_ptid
4767 || inferior_thread ()->state
!= THREAD_RUNNING
))
4768 gdb_printf (_("completed.\n"));
4774 set_step_info (thread_info
*tp
, const frame_info_ptr
&frame
,
4775 struct symtab_and_line sal
)
4777 /* This can be removed once this function no longer implicitly relies on the
4778 inferior_ptid value. */
4779 gdb_assert (inferior_ptid
== tp
->ptid
);
4781 tp
->control
.step_frame_id
= get_frame_id (frame
);
4782 tp
->control
.step_stack_frame_id
= get_stack_frame_id (frame
);
4784 tp
->current_symtab
= sal
.symtab
;
4785 tp
->current_line
= sal
.line
;
4788 ("symtab = %s, line = %d, step_frame_id = %s, step_stack_frame_id = %s",
4789 tp
->current_symtab
!= nullptr ? tp
->current_symtab
->filename
: "<null>",
4791 tp
->control
.step_frame_id
.to_string ().c_str (),
4792 tp
->control
.step_stack_frame_id
.to_string ().c_str ());
4795 /* Clear context switchable stepping state. */
4798 init_thread_stepping_state (struct thread_info
*tss
)
4800 tss
->stepped_breakpoint
= 0;
4801 tss
->stepping_over_breakpoint
= 0;
4802 tss
->stepping_over_watchpoint
= 0;
4803 tss
->step_after_step_resume_breakpoint
= 0;
4809 set_last_target_status (process_stratum_target
*target
, ptid_t ptid
,
4810 const target_waitstatus
&status
)
4812 target_last_proc_target
= target
;
4813 target_last_wait_ptid
= ptid
;
4814 target_last_waitstatus
= status
;
4820 get_last_target_status (process_stratum_target
**target
, ptid_t
*ptid
,
4821 target_waitstatus
*status
)
4823 if (target
!= nullptr)
4824 *target
= target_last_proc_target
;
4825 if (ptid
!= nullptr)
4826 *ptid
= target_last_wait_ptid
;
4827 if (status
!= nullptr)
4828 *status
= target_last_waitstatus
;
4834 nullify_last_target_wait_ptid (void)
4836 target_last_proc_target
= nullptr;
4837 target_last_wait_ptid
= minus_one_ptid
;
4838 target_last_waitstatus
= {};
4841 /* Switch thread contexts. */
4844 context_switch (execution_control_state
*ecs
)
4846 if (ecs
->ptid
!= inferior_ptid
4847 && (inferior_ptid
== null_ptid
4848 || ecs
->event_thread
!= inferior_thread ()))
4850 infrun_debug_printf ("Switching context from %s to %s",
4851 inferior_ptid
.to_string ().c_str (),
4852 ecs
->ptid
.to_string ().c_str ());
4855 switch_to_thread (ecs
->event_thread
);
4858 /* If the target can't tell whether we've hit breakpoints
4859 (target_supports_stopped_by_sw_breakpoint), and we got a SIGTRAP,
4860 check whether that could have been caused by a breakpoint. If so,
4861 adjust the PC, per gdbarch_decr_pc_after_break. */
4864 adjust_pc_after_break (struct thread_info
*thread
,
4865 const target_waitstatus
&ws
)
4867 struct regcache
*regcache
;
4868 struct gdbarch
*gdbarch
;
4869 CORE_ADDR breakpoint_pc
, decr_pc
;
4871 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
4872 we aren't, just return.
4874 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
4875 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
4876 implemented by software breakpoints should be handled through the normal
4879 NOTE drow/2004-01-31: On some targets, breakpoints may generate
4880 different signals (SIGILL or SIGEMT for instance), but it is less
4881 clear where the PC is pointing afterwards. It may not match
4882 gdbarch_decr_pc_after_break. I don't know any specific target that
4883 generates these signals at breakpoints (the code has been in GDB since at
4884 least 1992) so I can not guess how to handle them here.
4886 In earlier versions of GDB, a target with
4887 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
4888 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
4889 target with both of these set in GDB history, and it seems unlikely to be
4890 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
4892 if (ws
.kind () != TARGET_WAITKIND_STOPPED
)
4895 if (ws
.sig () != GDB_SIGNAL_TRAP
)
4898 /* In reverse execution, when a breakpoint is hit, the instruction
4899 under it has already been de-executed. The reported PC always
4900 points at the breakpoint address, so adjusting it further would
4901 be wrong. E.g., consider this case on a decr_pc_after_break == 1
4904 B1 0x08000000 : INSN1
4905 B2 0x08000001 : INSN2
4907 PC -> 0x08000003 : INSN4
4909 Say you're stopped at 0x08000003 as above. Reverse continuing
4910 from that point should hit B2 as below. Reading the PC when the
4911 SIGTRAP is reported should read 0x08000001 and INSN2 should have
4912 been de-executed already.
4914 B1 0x08000000 : INSN1
4915 B2 PC -> 0x08000001 : INSN2
4919 We can't apply the same logic as for forward execution, because
4920 we would wrongly adjust the PC to 0x08000000, since there's a
4921 breakpoint at PC - 1. We'd then report a hit on B1, although
4922 INSN1 hadn't been de-executed yet. Doing nothing is the correct
4924 if (execution_direction
== EXEC_REVERSE
)
4927 /* If the target can tell whether the thread hit a SW breakpoint,
4928 trust it. Targets that can tell also adjust the PC
4930 if (target_supports_stopped_by_sw_breakpoint ())
4933 /* Note that relying on whether a breakpoint is planted in memory to
4934 determine this can fail. E.g,. the breakpoint could have been
4935 removed since. Or the thread could have been told to step an
4936 instruction the size of a breakpoint instruction, and only
4937 _after_ was a breakpoint inserted at its address. */
4939 /* If this target does not decrement the PC after breakpoints, then
4940 we have nothing to do. */
4941 regcache
= get_thread_regcache (thread
);
4942 gdbarch
= regcache
->arch ();
4944 decr_pc
= gdbarch_decr_pc_after_break (gdbarch
);
4948 const address_space
*aspace
= thread
->inf
->aspace
.get ();
4950 /* Find the location where (if we've hit a breakpoint) the
4951 breakpoint would be. */
4952 breakpoint_pc
= regcache_read_pc (regcache
) - decr_pc
;
4954 /* If the target can't tell whether a software breakpoint triggered,
4955 fallback to figuring it out based on breakpoints we think were
4956 inserted in the target, and on whether the thread was stepped or
4959 /* Check whether there actually is a software breakpoint inserted at
4962 If in non-stop mode, a race condition is possible where we've
4963 removed a breakpoint, but stop events for that breakpoint were
4964 already queued and arrive later. To suppress those spurious
4965 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
4966 and retire them after a number of stop events are reported. Note
4967 this is an heuristic and can thus get confused. The real fix is
4968 to get the "stopped by SW BP and needs adjustment" info out of
4969 the target/kernel (and thus never reach here; see above). */
4970 if (software_breakpoint_inserted_here_p (aspace
, breakpoint_pc
)
4971 || (target_is_non_stop_p ()
4972 && moribund_breakpoint_here_p (aspace
, breakpoint_pc
)))
4974 std::optional
<scoped_restore_tmpl
<int>> restore_operation_disable
;
4976 if (record_full_is_used ())
4977 restore_operation_disable
.emplace
4978 (record_full_gdb_operation_disable_set ());
4980 /* When using hardware single-step, a SIGTRAP is reported for both
4981 a completed single-step and a software breakpoint. Need to
4982 differentiate between the two, as the latter needs adjusting
4983 but the former does not.
4985 The SIGTRAP can be due to a completed hardware single-step only if
4986 - we didn't insert software single-step breakpoints
4987 - this thread is currently being stepped
4989 If any of these events did not occur, we must have stopped due
4990 to hitting a software breakpoint, and have to back up to the
4993 As a special case, we could have hardware single-stepped a
4994 software breakpoint. In this case (prev_pc == breakpoint_pc),
4995 we also need to back up to the breakpoint address. */
4997 if (thread_has_single_step_breakpoints_set (thread
)
4998 || !currently_stepping (thread
)
4999 || (thread
->stepped_breakpoint
5000 && thread
->prev_pc
== breakpoint_pc
))
5001 regcache_write_pc (regcache
, breakpoint_pc
);
5006 stepped_in_from (const frame_info_ptr
&initial_frame
, frame_id step_frame_id
)
5008 frame_info_ptr frame
= initial_frame
;
5010 for (frame
= get_prev_frame (frame
);
5012 frame
= get_prev_frame (frame
))
5014 if (get_frame_id (frame
) == step_frame_id
)
5017 if (get_frame_type (frame
) != INLINE_FRAME
)
5024 /* Look for an inline frame that is marked for skip.
5025 If PREV_FRAME is TRUE start at the previous frame,
5026 otherwise start at the current frame. Stop at the
5027 first non-inline frame, or at the frame where the
5031 inline_frame_is_marked_for_skip (bool prev_frame
, struct thread_info
*tp
)
5033 frame_info_ptr frame
= get_current_frame ();
5036 frame
= get_prev_frame (frame
);
5038 for (; frame
!= nullptr; frame
= get_prev_frame (frame
))
5040 const char *fn
= nullptr;
5041 symtab_and_line sal
;
5044 if (get_frame_id (frame
) == tp
->control
.step_frame_id
)
5046 if (get_frame_type (frame
) != INLINE_FRAME
)
5049 sal
= find_frame_sal (frame
);
5050 sym
= get_frame_function (frame
);
5053 fn
= sym
->print_name ();
5056 && function_name_is_marked_for_skip (fn
, sal
))
5063 /* If the event thread has the stop requested flag set, pretend it
5064 stopped for a GDB_SIGNAL_0 (i.e., as if it stopped due to
5068 handle_stop_requested (struct execution_control_state
*ecs
)
5070 if (ecs
->event_thread
->stop_requested
)
5072 ecs
->ws
.set_stopped (GDB_SIGNAL_0
);
5073 handle_signal_stop (ecs
);
5079 /* Auxiliary function that handles syscall entry/return events.
5080 It returns true if the inferior should keep going (and GDB
5081 should ignore the event), or false if the event deserves to be
5085 handle_syscall_event (struct execution_control_state
*ecs
)
5087 struct regcache
*regcache
;
5090 context_switch (ecs
);
5092 regcache
= get_thread_regcache (ecs
->event_thread
);
5093 syscall_number
= ecs
->ws
.syscall_number ();
5094 ecs
->event_thread
->set_stop_pc (regcache_read_pc (regcache
));
5096 if (catch_syscall_enabled ()
5097 && catching_syscall_number (syscall_number
))
5099 infrun_debug_printf ("syscall number=%d", syscall_number
);
5101 ecs
->event_thread
->control
.stop_bpstat
5102 = bpstat_stop_status_nowatch (ecs
->event_thread
->inf
->aspace
.get (),
5103 ecs
->event_thread
->stop_pc (),
5104 ecs
->event_thread
, ecs
->ws
);
5106 if (handle_stop_requested (ecs
))
5109 if (bpstat_causes_stop (ecs
->event_thread
->control
.stop_bpstat
))
5111 /* Catchpoint hit. */
5116 if (handle_stop_requested (ecs
))
5119 /* If no catchpoint triggered for this, then keep going. */
5125 /* Lazily fill in the execution_control_state's stop_func_* fields. */
5128 fill_in_stop_func (struct gdbarch
*gdbarch
,
5129 struct execution_control_state
*ecs
)
5131 if (!ecs
->stop_func_filled_in
)
5134 const general_symbol_info
*gsi
;
5136 /* Don't care about return value; stop_func_start and stop_func_name
5137 will both be 0 if it doesn't work. */
5138 find_pc_partial_function_sym (ecs
->event_thread
->stop_pc (),
5140 &ecs
->stop_func_start
,
5141 &ecs
->stop_func_end
,
5143 ecs
->stop_func_name
= gsi
== nullptr ? nullptr : gsi
->print_name ();
5145 /* The call to find_pc_partial_function, above, will set
5146 stop_func_start and stop_func_end to the start and end
5147 of the range containing the stop pc. If this range
5148 contains the entry pc for the block (which is always the
5149 case for contiguous blocks), advance stop_func_start past
5150 the function's start offset and entrypoint. Note that
5151 stop_func_start is NOT advanced when in a range of a
5152 non-contiguous block that does not contain the entry pc. */
5153 if (block
!= nullptr
5154 && ecs
->stop_func_start
<= block
->entry_pc ()
5155 && block
->entry_pc () < ecs
->stop_func_end
)
5157 ecs
->stop_func_start
5158 += gdbarch_deprecated_function_start_offset (gdbarch
);
5160 /* PowerPC functions have a Local Entry Point (LEP) and a Global
5161 Entry Point (GEP). There is only one Entry Point (GEP = LEP) for
5162 other architectures. */
5163 ecs
->stop_func_alt_start
= ecs
->stop_func_start
;
5165 if (gdbarch_skip_entrypoint_p (gdbarch
))
5166 ecs
->stop_func_start
5167 = gdbarch_skip_entrypoint (gdbarch
, ecs
->stop_func_start
);
5170 ecs
->stop_func_filled_in
= 1;
5175 /* Return the STOP_SOON field of the inferior pointed at by ECS. */
5177 static enum stop_kind
5178 get_inferior_stop_soon (execution_control_state
*ecs
)
5180 struct inferior
*inf
= find_inferior_ptid (ecs
->target
, ecs
->ptid
);
5182 gdb_assert (inf
!= nullptr);
5183 return inf
->control
.stop_soon
;
5186 /* Poll for one event out of the current target. Store the resulting
5187 waitstatus in WS, and return the event ptid. Does not block. */
5190 poll_one_curr_target (struct target_waitstatus
*ws
)
5194 overlay_cache_invalid
= 1;
5196 /* Flush target cache before starting to handle each event.
5197 Target was running and cache could be stale. This is just a
5198 heuristic. Running threads may modify target memory, but we
5199 don't get any event. */
5200 target_dcache_invalidate (current_program_space
->aspace
);
5202 event_ptid
= target_wait (minus_one_ptid
, ws
, TARGET_WNOHANG
);
5205 print_target_wait_results (minus_one_ptid
, event_ptid
, *ws
);
5210 /* Wait for one event out of any target. */
5212 static wait_one_event
5217 for (inferior
*inf
: all_inferiors ())
5219 process_stratum_target
*target
= inf
->process_target ();
5220 if (target
== nullptr
5221 || !target
->is_async_p ()
5222 || !target
->threads_executing
)
5225 switch_to_inferior_no_thread (inf
);
5227 wait_one_event event
;
5228 event
.target
= target
;
5229 event
.ptid
= poll_one_curr_target (&event
.ws
);
5231 if (event
.ws
.kind () == TARGET_WAITKIND_NO_RESUMED
)
5233 /* If nothing is resumed, remove the target from the
5235 target_async (false);
5237 else if (event
.ws
.kind () != TARGET_WAITKIND_IGNORE
)
5241 /* Block waiting for some event. */
5248 for (inferior
*inf
: all_inferiors ())
5250 process_stratum_target
*target
= inf
->process_target ();
5251 if (target
== nullptr
5252 || !target
->is_async_p ()
5253 || !target
->threads_executing
)
5256 int fd
= target
->async_wait_fd ();
5257 FD_SET (fd
, &readfds
);
5264 /* No waitable targets left. All must be stopped. */
5265 infrun_debug_printf ("no waitable targets left");
5267 target_waitstatus ws
;
5268 ws
.set_no_resumed ();
5269 return {nullptr, minus_one_ptid
, std::move (ws
)};
5274 int numfds
= interruptible_select (nfds
, &readfds
, 0, nullptr, 0);
5280 perror_with_name ("interruptible_select");
5285 /* Save the thread's event and stop reason to process it later. */
5288 save_waitstatus (struct thread_info
*tp
, const target_waitstatus
&ws
)
5290 infrun_debug_printf ("saving status %s for %s",
5291 ws
.to_string ().c_str (),
5292 tp
->ptid
.to_string ().c_str ());
5294 /* Record for later. */
5295 tp
->set_pending_waitstatus (ws
);
5297 if (ws
.kind () == TARGET_WAITKIND_STOPPED
5298 && ws
.sig () == GDB_SIGNAL_TRAP
)
5300 struct regcache
*regcache
= get_thread_regcache (tp
);
5301 const address_space
*aspace
= tp
->inf
->aspace
.get ();
5302 CORE_ADDR pc
= regcache_read_pc (regcache
);
5304 adjust_pc_after_break (tp
, tp
->pending_waitstatus ());
5306 scoped_restore_current_thread restore_thread
;
5307 switch_to_thread (tp
);
5309 if (target_stopped_by_watchpoint ())
5310 tp
->set_stop_reason (TARGET_STOPPED_BY_WATCHPOINT
);
5311 else if (target_supports_stopped_by_sw_breakpoint ()
5312 && target_stopped_by_sw_breakpoint ())
5313 tp
->set_stop_reason (TARGET_STOPPED_BY_SW_BREAKPOINT
);
5314 else if (target_supports_stopped_by_hw_breakpoint ()
5315 && target_stopped_by_hw_breakpoint ())
5316 tp
->set_stop_reason (TARGET_STOPPED_BY_HW_BREAKPOINT
);
5317 else if (!target_supports_stopped_by_hw_breakpoint ()
5318 && hardware_breakpoint_inserted_here_p (aspace
, pc
))
5319 tp
->set_stop_reason (TARGET_STOPPED_BY_HW_BREAKPOINT
);
5320 else if (!target_supports_stopped_by_sw_breakpoint ()
5321 && software_breakpoint_inserted_here_p (aspace
, pc
))
5322 tp
->set_stop_reason (TARGET_STOPPED_BY_SW_BREAKPOINT
);
5323 else if (!thread_has_single_step_breakpoints_set (tp
)
5324 && currently_stepping (tp
))
5325 tp
->set_stop_reason (TARGET_STOPPED_BY_SINGLE_STEP
);
5329 /* Mark the non-executing threads accordingly. In all-stop, all
5330 threads of all processes are stopped when we get any event
5331 reported. In non-stop mode, only the event thread stops. */
5334 mark_non_executing_threads (process_stratum_target
*target
,
5336 const target_waitstatus
&ws
)
5340 if (!target_is_non_stop_p ())
5341 mark_ptid
= minus_one_ptid
;
5342 else if (ws
.kind () == TARGET_WAITKIND_SIGNALLED
5343 || ws
.kind () == TARGET_WAITKIND_EXITED
)
5345 /* If we're handling a process exit in non-stop mode, even
5346 though threads haven't been deleted yet, one would think
5347 that there is nothing to do, as threads of the dead process
5348 will be soon deleted, and threads of any other process were
5349 left running. However, on some targets, threads survive a
5350 process exit event. E.g., for the "checkpoint" command,
5351 when the current checkpoint/fork exits, linux-fork.c
5352 automatically switches to another fork from within
5353 target_mourn_inferior, by associating the same
5354 inferior/thread to another fork. We haven't mourned yet at
5355 this point, but we must mark any threads left in the
5356 process as not-executing so that finish_thread_state marks
5357 them stopped (in the user's perspective) if/when we present
5358 the stop to the user. */
5359 mark_ptid
= ptid_t (event_ptid
.pid ());
5362 mark_ptid
= event_ptid
;
5364 set_executing (target
, mark_ptid
, false);
5366 /* Likewise the resumed flag. */
5367 set_resumed (target
, mark_ptid
, false);
5370 /* Handle one event after stopping threads. If the eventing thread
5371 reports back any interesting event, we leave it pending. If the
5372 eventing thread was in the middle of a displaced step, we
5373 cancel/finish it, and unless the thread's inferior is being
5374 detached, put the thread back in the step-over chain. Returns true
5375 if there are no resumed threads left in the target (thus there's no
5376 point in waiting further), false otherwise. */
5379 handle_one (const wait_one_event
&event
)
5382 ("%s %s", event
.ws
.to_string ().c_str (),
5383 event
.ptid
.to_string ().c_str ());
5385 if (event
.ws
.kind () == TARGET_WAITKIND_NO_RESUMED
)
5387 /* All resumed threads exited. */
5390 else if (event
.ws
.kind () == TARGET_WAITKIND_THREAD_EXITED
5391 || event
.ws
.kind () == TARGET_WAITKIND_EXITED
5392 || event
.ws
.kind () == TARGET_WAITKIND_SIGNALLED
)
5394 /* One thread/process exited/signalled. */
5396 thread_info
*t
= nullptr;
5398 /* The target may have reported just a pid. If so, try
5399 the first non-exited thread. */
5400 if (event
.ptid
.is_pid ())
5402 int pid
= event
.ptid
.pid ();
5403 inferior
*inf
= find_inferior_pid (event
.target
, pid
);
5404 for (thread_info
*tp
: inf
->non_exited_threads ())
5410 /* If there is no available thread, the event would
5411 have to be appended to a per-inferior event list,
5412 which does not exist (and if it did, we'd have
5413 to adjust run control command to be able to
5414 resume such an inferior). We assert here instead
5415 of going into an infinite loop. */
5416 gdb_assert (t
!= nullptr);
5419 ("using %s", t
->ptid
.to_string ().c_str ());
5423 t
= event
.target
->find_thread (event
.ptid
);
5424 /* Check if this is the first time we see this thread.
5425 Don't bother adding if it individually exited. */
5427 && event
.ws
.kind () != TARGET_WAITKIND_THREAD_EXITED
)
5428 t
= add_thread (event
.target
, event
.ptid
);
5433 /* Set the threads as non-executing to avoid
5434 another stop attempt on them. */
5435 switch_to_thread_no_regs (t
);
5436 mark_non_executing_threads (event
.target
, event
.ptid
,
5438 save_waitstatus (t
, event
.ws
);
5439 t
->stop_requested
= false;
5441 if (event
.ws
.kind () == TARGET_WAITKIND_THREAD_EXITED
)
5443 if (displaced_step_finish (t
, event
.ws
)
5444 != DISPLACED_STEP_FINISH_STATUS_OK
)
5446 gdb_assert_not_reached ("displaced_step_finish on "
5447 "exited thread failed");
5454 thread_info
*t
= event
.target
->find_thread (event
.ptid
);
5456 t
= add_thread (event
.target
, event
.ptid
);
5458 t
->stop_requested
= 0;
5459 t
->set_executing (false);
5460 t
->set_resumed (false);
5461 t
->control
.may_range_step
= 0;
5463 /* This may be the first time we see the inferior report
5465 if (t
->inf
->needs_setup
)
5467 switch_to_thread_no_regs (t
);
5471 if (event
.ws
.kind () == TARGET_WAITKIND_STOPPED
5472 && event
.ws
.sig () == GDB_SIGNAL_0
)
5474 /* We caught the event that we intended to catch, so
5475 there's no event to save as pending. */
5477 if (displaced_step_finish (t
, event
.ws
)
5478 == DISPLACED_STEP_FINISH_STATUS_NOT_EXECUTED
)
5480 /* Add it back to the step-over queue. */
5482 ("displaced-step of %s canceled",
5483 t
->ptid
.to_string ().c_str ());
5485 t
->control
.trap_expected
= 0;
5486 if (!t
->inf
->detaching
)
5487 global_thread_step_over_chain_enqueue (t
);
5492 struct regcache
*regcache
;
5495 ("target_wait %s, saving status for %s",
5496 event
.ws
.to_string ().c_str (),
5497 t
->ptid
.to_string ().c_str ());
5499 /* Record for later. */
5500 save_waitstatus (t
, event
.ws
);
5502 if (displaced_step_finish (t
, event
.ws
)
5503 == DISPLACED_STEP_FINISH_STATUS_NOT_EXECUTED
)
5505 /* Add it back to the step-over queue. */
5506 t
->control
.trap_expected
= 0;
5507 if (!t
->inf
->detaching
)
5508 global_thread_step_over_chain_enqueue (t
);
5511 regcache
= get_thread_regcache (t
);
5512 t
->set_stop_pc (regcache_read_pc (regcache
));
5514 infrun_debug_printf ("saved stop_pc=%s for %s "
5515 "(currently_stepping=%d)",
5516 paddress (current_inferior ()->arch (),
5518 t
->ptid
.to_string ().c_str (),
5519 currently_stepping (t
));
5526 /* Helper for stop_all_threads. wait_one waits for events until it
5527 sees a TARGET_WAITKIND_NO_RESUMED event. When it sees one, it
5528 disables target_async for the target to stop waiting for events
5529 from it. TARGET_WAITKIND_NO_RESUMED can be delayed though,
5530 consider, debugging against gdbserver:
5532 #1 - Threads 1-5 are running, and thread 1 hits a breakpoint.
5534 #2 - gdb processes the breakpoint hit for thread 1, stops all
5535 threads, and steps thread 1 over the breakpoint. while
5536 stopping threads, some other threads reported interesting
5537 events, which were left pending in the thread's objects
5540 #2 - Thread 1 exits (it stepped an exit syscall), and gdbserver
5541 reports the thread exit for thread 1. The event ends up in
5542 remote's stop reply queue.
5544 #3 - That was the last resumed thread, so gdbserver reports
5545 no-resumed, and that event also ends up in remote's stop
5546 reply queue, queued after the thread exit from #2.
5548 #4 - gdb processes the thread exit event, which finishes the
5549 step-over, and so gdb restarts all threads (threads with
5550 pending events are left marked resumed, but aren't set
5551 executing). The no-resumed event is still left pending in
5552 the remote stop reply queue.
5554 #5 - Since there are now resumed threads with pending breakpoint
5555 hits, gdb picks one at random to process next.
5557 #5 - gdb picks the breakpoint hit for thread 2 this time, and that
5558 breakpoint also needs to be stepped over, so gdb stops all
5561 #6 - stop_all_threads counts number of expected stops and calls
5562 wait_one once for each.
5564 #7 - The first wait_one call collects the no-resumed event from #3
5567 #9 - Seeing the no-resumed event, wait_one disables target async
5568 for the remote target, to stop waiting for events from it.
5569 wait_one from here on always return no-resumed directly
5570 without reaching the target.
5572 #10 - stop_all_threads still hasn't seen all the stops it expects,
5573 so it does another pass.
5575 #11 - Since the remote target is not async (disabled in #9),
5576 wait_one doesn't wait on it, so it won't see the expected
5577 stops, and instead returns no-resumed directly.
5579 #12 - stop_all_threads still haven't seen all the stops, so it
5580 does another pass. goto #11, looping forever.
5582 To handle this, we explicitly (re-)enable target async on all
5583 targets that can async every time stop_all_threads goes wait for
5584 the expected stops. */
5587 reenable_target_async ()
5589 for (inferior
*inf
: all_inferiors ())
5591 process_stratum_target
*target
= inf
->process_target ();
5592 if (target
!= nullptr
5593 && target
->threads_executing
5594 && target
->can_async_p ()
5595 && !target
->is_async_p ())
5597 switch_to_inferior_no_thread (inf
);
5606 stop_all_threads (const char *reason
, inferior
*inf
)
5608 /* We may need multiple passes to discover all threads. */
5612 gdb_assert (exists_non_stop_target ());
5614 INFRUN_SCOPED_DEBUG_START_END ("reason=%s, inf=%d", reason
,
5615 inf
!= nullptr ? inf
->num
: -1);
5617 infrun_debug_show_threads ("non-exited threads",
5618 all_non_exited_threads ());
5620 scoped_restore_current_thread restore_thread
;
5622 /* Enable thread events on relevant targets. */
5623 for (auto *target
: all_non_exited_process_targets ())
5625 if (inf
!= nullptr && inf
->process_target () != target
)
5628 switch_to_target_no_thread (target
);
5629 target_thread_events (true);
5634 /* Disable thread events on relevant targets. */
5635 for (auto *target
: all_non_exited_process_targets ())
5637 if (inf
!= nullptr && inf
->process_target () != target
)
5640 switch_to_target_no_thread (target
);
5641 target_thread_events (false);
5644 /* Use debug_prefixed_printf directly to get a meaningful function
5647 debug_prefixed_printf ("infrun", "stop_all_threads", "done");
5650 /* Request threads to stop, and then wait for the stops. Because
5651 threads we already know about can spawn more threads while we're
5652 trying to stop them, and we only learn about new threads when we
5653 update the thread list, do this in a loop, and keep iterating
5654 until two passes find no threads that need to be stopped. */
5655 for (pass
= 0; pass
< 2; pass
++, iterations
++)
5657 infrun_debug_printf ("pass=%d, iterations=%d", pass
, iterations
);
5660 int waits_needed
= 0;
5662 for (auto *target
: all_non_exited_process_targets ())
5664 if (inf
!= nullptr && inf
->process_target () != target
)
5667 switch_to_target_no_thread (target
);
5668 update_thread_list ();
5671 /* Go through all threads looking for threads that we need
5672 to tell the target to stop. */
5673 for (thread_info
*t
: all_non_exited_threads ())
5675 if (inf
!= nullptr && t
->inf
!= inf
)
5678 /* For a single-target setting with an all-stop target,
5679 we would not even arrive here. For a multi-target
5680 setting, until GDB is able to handle a mixture of
5681 all-stop and non-stop targets, simply skip all-stop
5682 targets' threads. This should be fine due to the
5683 protection of 'check_multi_target_resumption'. */
5685 switch_to_thread_no_regs (t
);
5686 if (!target_is_non_stop_p ())
5689 if (t
->executing ())
5691 /* If already stopping, don't request a stop again.
5692 We just haven't seen the notification yet. */
5693 if (!t
->stop_requested
)
5695 infrun_debug_printf (" %s executing, need stop",
5696 t
->ptid
.to_string ().c_str ());
5697 target_stop (t
->ptid
);
5698 t
->stop_requested
= 1;
5702 infrun_debug_printf (" %s executing, already stopping",
5703 t
->ptid
.to_string ().c_str ());
5706 if (t
->stop_requested
)
5711 infrun_debug_printf (" %s not executing",
5712 t
->ptid
.to_string ().c_str ());
5714 /* The thread may be not executing, but still be
5715 resumed with a pending status to process. */
5716 t
->set_resumed (false);
5720 if (waits_needed
== 0)
5723 /* If we find new threads on the second iteration, restart
5724 over. We want to see two iterations in a row with all
5729 reenable_target_async ();
5731 for (int i
= 0; i
< waits_needed
; i
++)
5733 wait_one_event event
= wait_one ();
5734 if (handle_one (event
))
5741 /* Handle a TARGET_WAITKIND_NO_RESUMED event. Return true if we
5742 handled the event and should continue waiting. Return false if we
5743 should stop and report the event to the user. */
5746 handle_no_resumed (struct execution_control_state
*ecs
)
5748 if (target_can_async_p ())
5750 bool any_sync
= false;
5752 for (ui
*ui
: all_uis ())
5754 if (ui
->prompt_state
== PROMPT_BLOCKED
)
5762 /* There were no unwaited-for children left in the target, but,
5763 we're not synchronously waiting for events either. Just
5766 infrun_debug_printf ("TARGET_WAITKIND_NO_RESUMED (ignoring: bg)");
5767 prepare_to_wait (ecs
);
5772 /* Otherwise, if we were running a synchronous execution command, we
5773 may need to cancel it and give the user back the terminal.
5775 In non-stop mode, the target can't tell whether we've already
5776 consumed previous stop events, so it can end up sending us a
5777 no-resumed event like so:
5779 #0 - thread 1 is left stopped
5781 #1 - thread 2 is resumed and hits breakpoint
5782 -> TARGET_WAITKIND_STOPPED
5784 #2 - thread 3 is resumed and exits
5785 this is the last resumed thread, so
5786 -> TARGET_WAITKIND_NO_RESUMED
5788 #3 - gdb processes stop for thread 2 and decides to re-resume
5791 #4 - gdb processes the TARGET_WAITKIND_NO_RESUMED event.
5792 thread 2 is now resumed, so the event should be ignored.
5794 IOW, if the stop for thread 2 doesn't end a foreground command,
5795 then we need to ignore the following TARGET_WAITKIND_NO_RESUMED
5796 event. But it could be that the event meant that thread 2 itself
5797 (or whatever other thread was the last resumed thread) exited.
5799 To address this we refresh the thread list and check whether we
5800 have resumed threads _now_. In the example above, this removes
5801 thread 3 from the thread list. If thread 2 was re-resumed, we
5802 ignore this event. If we find no thread resumed, then we cancel
5803 the synchronous command and show "no unwaited-for " to the
5806 inferior
*curr_inf
= current_inferior ();
5808 scoped_restore_current_thread restore_thread
;
5809 update_thread_list ();
5813 - the current target has no thread executing, and
5814 - the current inferior is native, and
5815 - the current inferior is the one which has the terminal, and
5818 then a Ctrl-C from this point on would remain stuck in the
5819 kernel, until a thread resumes and dequeues it. That would
5820 result in the GDB CLI not reacting to Ctrl-C, not able to
5821 interrupt the program. To address this, if the current inferior
5822 no longer has any thread executing, we give the terminal to some
5823 other inferior that has at least one thread executing. */
5824 bool swap_terminal
= true;
5826 /* Whether to ignore this TARGET_WAITKIND_NO_RESUMED event, or
5827 whether to report it to the user. */
5828 bool ignore_event
= false;
5830 for (thread_info
*thread
: all_non_exited_threads ())
5832 if (swap_terminal
&& thread
->executing ())
5834 if (thread
->inf
!= curr_inf
)
5836 target_terminal::ours ();
5838 switch_to_thread (thread
);
5839 target_terminal::inferior ();
5841 swap_terminal
= false;
5844 if (!ignore_event
&& thread
->resumed ())
5846 /* Either there were no unwaited-for children left in the
5847 target at some point, but there are now, or some target
5848 other than the eventing one has unwaited-for children
5849 left. Just ignore. */
5850 infrun_debug_printf ("TARGET_WAITKIND_NO_RESUMED "
5851 "(ignoring: found resumed)");
5853 ignore_event
= true;
5856 if (ignore_event
&& !swap_terminal
)
5862 switch_to_inferior_no_thread (curr_inf
);
5863 prepare_to_wait (ecs
);
5867 /* Go ahead and report the event. */
5871 /* Handle a TARGET_WAITKIND_THREAD_EXITED event. Return true if we
5872 handled the event and should continue waiting. Return false if we
5873 should stop and report the event to the user. */
5876 handle_thread_exited (execution_control_state
*ecs
)
5878 context_switch (ecs
);
5880 /* Clear these so we don't re-start the thread stepping over a
5881 breakpoint/watchpoint. */
5882 ecs
->event_thread
->stepping_over_breakpoint
= 0;
5883 ecs
->event_thread
->stepping_over_watchpoint
= 0;
5885 /* If the thread had an FSM, then abort the command. But only after
5886 finishing the step over, as in non-stop mode, aborting this
5887 thread's command should not interfere with other threads. We
5888 must check this before finish_step over, however, which may
5889 update the thread list and delete the event thread. */
5890 bool abort_cmd
= (ecs
->event_thread
->thread_fsm () != nullptr);
5892 /* Mark the thread exited right now, because finish_step_over may
5893 update the thread list and that may delete the thread silently
5894 (depending on target), while we always want to emit the "[Thread
5895 ... exited]" notification. Don't actually delete the thread yet,
5896 because we need to pass its pointer down to finish_step_over. */
5897 set_thread_exited (ecs
->event_thread
);
5899 /* Maybe the thread was doing a step-over, if so release
5900 resources and start any further pending step-overs.
5902 If we are on a non-stop target and the thread was doing an
5903 in-line step, this also restarts the other threads. */
5904 int ret
= finish_step_over (ecs
);
5906 /* finish_step_over returns true if it moves ecs' wait status
5907 back into the thread, so that we go handle another pending
5908 event before this one. But we know it never does that if
5909 the event thread has exited. */
5910 gdb_assert (ret
== 0);
5914 /* We're stopping for the thread exit event. Switch to the
5915 event thread again, as finish_step_over may have switched
5917 switch_to_thread (ecs
->event_thread
);
5918 ecs
->event_thread
= nullptr;
5922 /* If finish_step_over started a new in-line step-over, don't
5923 try to restart anything else. */
5924 if (step_over_info_valid_p ())
5926 delete_thread (ecs
->event_thread
);
5930 /* Maybe we are on an all-stop target and we got this event
5931 while doing a step-like command on another thread. If so,
5932 go back to doing that. If this thread was stepping,
5933 switch_back_to_stepped_thread will consider that the thread
5934 was interrupted mid-step and will try keep stepping it. We
5935 don't want that, the thread is gone. So clear the proceed
5936 status so it doesn't do that. */
5937 clear_proceed_status_thread (ecs
->event_thread
);
5938 if (switch_back_to_stepped_thread (ecs
))
5940 delete_thread (ecs
->event_thread
);
5944 inferior
*inf
= ecs
->event_thread
->inf
;
5945 bool slock_applies
= schedlock_applies (ecs
->event_thread
);
5947 delete_thread (ecs
->event_thread
);
5948 ecs
->event_thread
= nullptr;
5950 /* Continue handling the event as if we had gotten a
5951 TARGET_WAITKIND_NO_RESUMED. */
5952 auto handle_as_no_resumed
= [ecs
] ()
5954 /* handle_no_resumed doesn't really look at the event kind, but
5955 normal_stop does. */
5956 ecs
->ws
.set_no_resumed ();
5957 ecs
->event_thread
= nullptr;
5958 ecs
->ptid
= minus_one_ptid
;
5960 /* Re-record the last target status. */
5961 set_last_target_status (ecs
->target
, ecs
->ptid
, ecs
->ws
);
5963 return handle_no_resumed (ecs
);
5966 /* If we are on an all-stop target, the target has stopped all
5967 threads to report the event. We don't actually want to
5968 stop, so restart the threads. */
5969 if (!target_is_non_stop_p ())
5973 /* Since the target is !non-stop, then everything is stopped
5974 at this point, and we can't assume we'll get further
5975 events until we resume the target again. Handle this
5976 event like if it were a TARGET_WAITKIND_NO_RESUMED. Note
5977 this refreshes the thread list and checks whether there
5978 are other resumed threads before deciding whether to
5979 print "no-unwaited-for left". This is important because
5980 the user could have done:
5982 (gdb) set scheduler-locking on
5988 ... and only one of the threads exited. */
5989 return handle_as_no_resumed ();
5993 /* Switch to the first non-exited thread we can find, and
5995 auto range
= inf
->non_exited_threads ();
5996 if (range
.begin () == range
.end ())
5998 /* Looks like the target reported a
5999 TARGET_WAITKIND_THREAD_EXITED for its last known
6001 return handle_as_no_resumed ();
6003 thread_info
*non_exited_thread
= *range
.begin ();
6004 switch_to_thread (non_exited_thread
);
6005 insert_breakpoints ();
6006 resume (GDB_SIGNAL_0
);
6010 prepare_to_wait (ecs
);
6014 /* Given an execution control state that has been freshly filled in by
6015 an event from the inferior, figure out what it means and take
6018 The alternatives are:
6020 1) stop_waiting and return; to really stop and return to the
6023 2) keep_going and return; to wait for the next event (set
6024 ecs->event_thread->stepping_over_breakpoint to 1 to single step
6028 handle_inferior_event (struct execution_control_state
*ecs
)
6030 /* Make sure that all temporary struct value objects that were
6031 created during the handling of the event get deleted at the
6033 scoped_value_mark free_values
;
6035 infrun_debug_printf ("%s", ecs
->ws
.to_string ().c_str ());
6037 if (ecs
->ws
.kind () == TARGET_WAITKIND_IGNORE
)
6039 /* We had an event in the inferior, but we are not interested in
6040 handling it at this level. The lower layers have already
6041 done what needs to be done, if anything.
6043 One of the possible circumstances for this is when the
6044 inferior produces output for the console. The inferior has
6045 not stopped, and we are ignoring the event. Another possible
6046 circumstance is any event which the lower level knows will be
6047 reported multiple times without an intervening resume. */
6048 prepare_to_wait (ecs
);
6052 if (ecs
->ws
.kind () == TARGET_WAITKIND_NO_RESUMED
6053 && handle_no_resumed (ecs
))
6056 /* Cache the last target/ptid/waitstatus. */
6057 set_last_target_status (ecs
->target
, ecs
->ptid
, ecs
->ws
);
6059 /* Always clear state belonging to the previous time we stopped. */
6060 stop_stack_dummy
= STOP_NONE
;
6062 if (ecs
->ws
.kind () == TARGET_WAITKIND_NO_RESUMED
)
6064 /* No unwaited-for children left. IOW, all resumed children
6070 if (ecs
->ws
.kind () != TARGET_WAITKIND_EXITED
6071 && ecs
->ws
.kind () != TARGET_WAITKIND_SIGNALLED
)
6073 ecs
->event_thread
= ecs
->target
->find_thread (ecs
->ptid
);
6074 /* If it's a new thread, add it to the thread database. */
6075 if (ecs
->event_thread
== nullptr)
6076 ecs
->event_thread
= add_thread (ecs
->target
, ecs
->ptid
);
6078 /* Disable range stepping. If the next step request could use a
6079 range, this will be end up re-enabled then. */
6080 ecs
->event_thread
->control
.may_range_step
= 0;
6083 /* Dependent on valid ECS->EVENT_THREAD. */
6084 adjust_pc_after_break (ecs
->event_thread
, ecs
->ws
);
6086 /* Dependent on the current PC value modified by adjust_pc_after_break. */
6087 reinit_frame_cache ();
6089 breakpoint_retire_moribund ();
6091 /* First, distinguish signals caused by the debugger from signals
6092 that have to do with the program's own actions. Note that
6093 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
6094 on the operating system version. Here we detect when a SIGILL or
6095 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
6096 something similar for SIGSEGV, since a SIGSEGV will be generated
6097 when we're trying to execute a breakpoint instruction on a
6098 non-executable stack. This happens for call dummy breakpoints
6099 for architectures like SPARC that place call dummies on the
6101 if (ecs
->ws
.kind () == TARGET_WAITKIND_STOPPED
6102 && (ecs
->ws
.sig () == GDB_SIGNAL_ILL
6103 || ecs
->ws
.sig () == GDB_SIGNAL_SEGV
6104 || ecs
->ws
.sig () == GDB_SIGNAL_EMT
))
6106 struct regcache
*regcache
= get_thread_regcache (ecs
->event_thread
);
6108 if (breakpoint_inserted_here_p (ecs
->event_thread
->inf
->aspace
.get (),
6109 regcache_read_pc (regcache
)))
6111 infrun_debug_printf ("Treating signal as SIGTRAP");
6112 ecs
->ws
.set_stopped (GDB_SIGNAL_TRAP
);
6116 mark_non_executing_threads (ecs
->target
, ecs
->ptid
, ecs
->ws
);
6118 switch (ecs
->ws
.kind ())
6120 case TARGET_WAITKIND_LOADED
:
6122 context_switch (ecs
);
6123 /* Ignore gracefully during startup of the inferior, as it might
6124 be the shell which has just loaded some objects, otherwise
6125 add the symbols for the newly loaded objects. Also ignore at
6126 the beginning of an attach or remote session; we will query
6127 the full list of libraries once the connection is
6130 stop_kind stop_soon
= get_inferior_stop_soon (ecs
);
6131 if (stop_soon
== NO_STOP_QUIETLY
)
6133 struct regcache
*regcache
;
6135 regcache
= get_thread_regcache (ecs
->event_thread
);
6137 handle_solib_event ();
6139 ecs
->event_thread
->set_stop_pc (regcache_read_pc (regcache
));
6140 address_space
*aspace
= ecs
->event_thread
->inf
->aspace
.get ();
6141 ecs
->event_thread
->control
.stop_bpstat
6142 = bpstat_stop_status_nowatch (aspace
,
6143 ecs
->event_thread
->stop_pc (),
6144 ecs
->event_thread
, ecs
->ws
);
6146 if (handle_stop_requested (ecs
))
6149 if (bpstat_causes_stop (ecs
->event_thread
->control
.stop_bpstat
))
6151 /* A catchpoint triggered. */
6152 process_event_stop_test (ecs
);
6156 /* If requested, stop when the dynamic linker notifies
6157 gdb of events. This allows the user to get control
6158 and place breakpoints in initializer routines for
6159 dynamically loaded objects (among other things). */
6160 ecs
->event_thread
->set_stop_signal (GDB_SIGNAL_0
);
6161 if (stop_on_solib_events
)
6163 /* Make sure we print "Stopped due to solib-event" in
6165 stop_print_frame
= true;
6172 /* If we are skipping through a shell, or through shared library
6173 loading that we aren't interested in, resume the program. If
6174 we're running the program normally, also resume. */
6175 if (stop_soon
== STOP_QUIETLY
|| stop_soon
== NO_STOP_QUIETLY
)
6177 /* Loading of shared libraries might have changed breakpoint
6178 addresses. Make sure new breakpoints are inserted. */
6179 if (stop_soon
== NO_STOP_QUIETLY
)
6180 insert_breakpoints ();
6181 resume (GDB_SIGNAL_0
);
6182 prepare_to_wait (ecs
);
6186 /* But stop if we're attaching or setting up a remote
6188 if (stop_soon
== STOP_QUIETLY_NO_SIGSTOP
6189 || stop_soon
== STOP_QUIETLY_REMOTE
)
6191 infrun_debug_printf ("quietly stopped");
6196 internal_error (_("unhandled stop_soon: %d"), (int) stop_soon
);
6199 case TARGET_WAITKIND_SPURIOUS
:
6200 if (handle_stop_requested (ecs
))
6202 context_switch (ecs
);
6203 resume (GDB_SIGNAL_0
);
6204 prepare_to_wait (ecs
);
6207 case TARGET_WAITKIND_THREAD_CREATED
:
6208 if (handle_stop_requested (ecs
))
6210 context_switch (ecs
);
6211 if (!switch_back_to_stepped_thread (ecs
))
6215 case TARGET_WAITKIND_THREAD_EXITED
:
6216 if (handle_thread_exited (ecs
))
6221 case TARGET_WAITKIND_EXITED
:
6222 case TARGET_WAITKIND_SIGNALLED
:
6224 /* Depending on the system, ecs->ptid may point to a thread or
6225 to a process. On some targets, target_mourn_inferior may
6226 need to have access to the just-exited thread. That is the
6227 case of GNU/Linux's "checkpoint" support, for example.
6228 Call the switch_to_xxx routine as appropriate. */
6229 thread_info
*thr
= ecs
->target
->find_thread (ecs
->ptid
);
6231 switch_to_thread (thr
);
6234 inferior
*inf
= find_inferior_ptid (ecs
->target
, ecs
->ptid
);
6235 switch_to_inferior_no_thread (inf
);
6238 handle_vfork_child_exec_or_exit (0);
6239 target_terminal::ours (); /* Must do this before mourn anyway. */
6241 /* Clearing any previous state of convenience variables. */
6242 clear_exit_convenience_vars ();
6244 if (ecs
->ws
.kind () == TARGET_WAITKIND_EXITED
)
6246 /* Record the exit code in the convenience variable $_exitcode, so
6247 that the user can inspect this again later. */
6248 set_internalvar_integer (lookup_internalvar ("_exitcode"),
6249 (LONGEST
) ecs
->ws
.exit_status ());
6251 /* Also record this in the inferior itself. */
6252 current_inferior ()->has_exit_code
= true;
6253 current_inferior ()->exit_code
= (LONGEST
) ecs
->ws
.exit_status ();
6255 /* Support the --return-child-result option. */
6256 return_child_result_value
= ecs
->ws
.exit_status ();
6258 interps_notify_exited (ecs
->ws
.exit_status ());
6262 struct gdbarch
*gdbarch
= current_inferior ()->arch ();
6264 if (gdbarch_gdb_signal_to_target_p (gdbarch
))
6266 /* Set the value of the internal variable $_exitsignal,
6267 which holds the signal uncaught by the inferior. */
6268 set_internalvar_integer (lookup_internalvar ("_exitsignal"),
6269 gdbarch_gdb_signal_to_target (gdbarch
,
6274 /* We don't have access to the target's method used for
6275 converting between signal numbers (GDB's internal
6276 representation <-> target's representation).
6277 Therefore, we cannot do a good job at displaying this
6278 information to the user. It's better to just warn
6279 her about it (if infrun debugging is enabled), and
6281 infrun_debug_printf ("Cannot fill $_exitsignal with the correct "
6285 interps_notify_signal_exited (ecs
->ws
.sig ());
6288 gdb_flush (gdb_stdout
);
6289 target_mourn_inferior (inferior_ptid
);
6290 stop_print_frame
= false;
6294 case TARGET_WAITKIND_FORKED
:
6295 case TARGET_WAITKIND_VFORKED
:
6296 case TARGET_WAITKIND_THREAD_CLONED
:
6298 displaced_step_finish (ecs
->event_thread
, ecs
->ws
);
6300 /* Start a new step-over in another thread if there's one that
6304 context_switch (ecs
);
6306 /* Immediately detach breakpoints from the child before there's
6307 any chance of letting the user delete breakpoints from the
6308 breakpoint lists. If we don't do this early, it's easy to
6309 leave left over traps in the child, vis: "break foo; catch
6310 fork; c; <fork>; del; c; <child calls foo>". We only follow
6311 the fork on the last `continue', and by that time the
6312 breakpoint at "foo" is long gone from the breakpoint table.
6313 If we vforked, then we don't need to unpatch here, since both
6314 parent and child are sharing the same memory pages; we'll
6315 need to unpatch at follow/detach time instead to be certain
6316 that new breakpoints added between catchpoint hit time and
6317 vfork follow are detached. */
6318 if (ecs
->ws
.kind () == TARGET_WAITKIND_FORKED
)
6320 /* This won't actually modify the breakpoint list, but will
6321 physically remove the breakpoints from the child. */
6322 detach_breakpoints (ecs
->ws
.child_ptid ());
6325 delete_just_stopped_threads_single_step_breakpoints ();
6327 /* In case the event is caught by a catchpoint, remember that
6328 the event is to be followed at the next resume of the thread,
6329 and not immediately. */
6330 ecs
->event_thread
->pending_follow
= ecs
->ws
;
6332 ecs
->event_thread
->set_stop_pc
6333 (regcache_read_pc (get_thread_regcache (ecs
->event_thread
)));
6335 ecs
->event_thread
->control
.stop_bpstat
6336 = bpstat_stop_status_nowatch (ecs
->event_thread
->inf
->aspace
.get (),
6337 ecs
->event_thread
->stop_pc (),
6338 ecs
->event_thread
, ecs
->ws
);
6340 if (handle_stop_requested (ecs
))
6343 /* If no catchpoint triggered for this, then keep going. Note
6344 that we're interested in knowing the bpstat actually causes a
6345 stop, not just if it may explain the signal. Software
6346 watchpoints, for example, always appear in the bpstat. */
6347 if (!bpstat_causes_stop (ecs
->event_thread
->control
.stop_bpstat
))
6350 = (ecs
->ws
.kind () != TARGET_WAITKIND_THREAD_CLONED
6351 && follow_fork_mode_string
== follow_fork_mode_child
);
6353 ecs
->event_thread
->set_stop_signal (GDB_SIGNAL_0
);
6355 process_stratum_target
*targ
6356 = ecs
->event_thread
->inf
->process_target ();
6359 if (ecs
->ws
.kind () != TARGET_WAITKIND_THREAD_CLONED
)
6360 should_resume
= follow_fork ();
6363 should_resume
= true;
6364 inferior
*inf
= ecs
->event_thread
->inf
;
6365 inf
->top_target ()->follow_clone (ecs
->ws
.child_ptid ());
6366 ecs
->event_thread
->pending_follow
.set_spurious ();
6369 /* Note that one of these may be an invalid pointer,
6370 depending on detach_fork. */
6371 thread_info
*parent
= ecs
->event_thread
;
6372 thread_info
*child
= targ
->find_thread (ecs
->ws
.child_ptid ());
6374 /* At this point, the parent is marked running, and the
6375 child is marked stopped. */
6377 /* If not resuming the parent, mark it stopped. */
6378 if (ecs
->ws
.kind () != TARGET_WAITKIND_THREAD_CLONED
6379 && follow_child
&& !detach_fork
&& !non_stop
&& !sched_multi
)
6380 parent
->set_running (false);
6382 /* If resuming the child, mark it running. */
6383 if ((ecs
->ws
.kind () == TARGET_WAITKIND_THREAD_CLONED
6384 && !schedlock_applies (ecs
->event_thread
))
6385 || (ecs
->ws
.kind () != TARGET_WAITKIND_THREAD_CLONED
6387 || (!detach_fork
&& (non_stop
|| sched_multi
)))))
6388 child
->set_running (true);
6390 /* In non-stop mode, also resume the other branch. */
6391 if ((ecs
->ws
.kind () == TARGET_WAITKIND_THREAD_CLONED
6392 && target_is_non_stop_p ()
6393 && !schedlock_applies (ecs
->event_thread
))
6394 || (ecs
->ws
.kind () != TARGET_WAITKIND_THREAD_CLONED
6395 && (!detach_fork
&& (non_stop
6397 && target_is_non_stop_p ())))))
6400 switch_to_thread (parent
);
6402 switch_to_thread (child
);
6404 ecs
->event_thread
= inferior_thread ();
6405 ecs
->ptid
= inferior_ptid
;
6410 switch_to_thread (child
);
6412 switch_to_thread (parent
);
6414 ecs
->event_thread
= inferior_thread ();
6415 ecs
->ptid
= inferior_ptid
;
6419 /* Never call switch_back_to_stepped_thread if we are waiting for
6420 vfork-done (waiting for an external vfork child to exec or
6421 exit). We will resume only the vforking thread for the purpose
6422 of collecting the vfork-done event, and we will restart any
6423 step once the critical shared address space window is done. */
6426 && parent
->inf
->thread_waiting_for_vfork_done
!= nullptr)
6427 || !switch_back_to_stepped_thread (ecs
))
6434 process_event_stop_test (ecs
);
6437 case TARGET_WAITKIND_VFORK_DONE
:
6438 /* Done with the shared memory region. Re-insert breakpoints in
6439 the parent, and keep going. */
6441 context_switch (ecs
);
6443 handle_vfork_done (ecs
->event_thread
);
6444 gdb_assert (inferior_thread () == ecs
->event_thread
);
6446 if (handle_stop_requested (ecs
))
6449 if (!switch_back_to_stepped_thread (ecs
))
6451 gdb_assert (inferior_thread () == ecs
->event_thread
);
6452 /* This also takes care of reinserting breakpoints in the
6453 previously locked inferior. */
6458 case TARGET_WAITKIND_EXECD
:
6460 /* Note we can't read registers yet (the stop_pc), because we
6461 don't yet know the inferior's post-exec architecture.
6462 'stop_pc' is explicitly read below instead. */
6463 switch_to_thread_no_regs (ecs
->event_thread
);
6465 /* Do whatever is necessary to the parent branch of the vfork. */
6466 handle_vfork_child_exec_or_exit (1);
6468 /* This causes the eventpoints and symbol table to be reset.
6469 Must do this now, before trying to determine whether to
6471 follow_exec (inferior_ptid
, ecs
->ws
.execd_pathname ());
6473 /* In follow_exec we may have deleted the original thread and
6474 created a new one. Make sure that the event thread is the
6475 execd thread for that case (this is a nop otherwise). */
6476 ecs
->event_thread
= inferior_thread ();
6478 ecs
->event_thread
->set_stop_pc
6479 (regcache_read_pc (get_thread_regcache (ecs
->event_thread
)));
6481 ecs
->event_thread
->control
.stop_bpstat
6482 = bpstat_stop_status_nowatch (ecs
->event_thread
->inf
->aspace
.get (),
6483 ecs
->event_thread
->stop_pc (),
6484 ecs
->event_thread
, ecs
->ws
);
6486 if (handle_stop_requested (ecs
))
6489 /* If no catchpoint triggered for this, then keep going. */
6490 if (!bpstat_causes_stop (ecs
->event_thread
->control
.stop_bpstat
))
6492 ecs
->event_thread
->set_stop_signal (GDB_SIGNAL_0
);
6496 process_event_stop_test (ecs
);
6499 /* Be careful not to try to gather much state about a thread
6500 that's in a syscall. It's frequently a losing proposition. */
6501 case TARGET_WAITKIND_SYSCALL_ENTRY
:
6502 /* Getting the current syscall number. */
6503 if (handle_syscall_event (ecs
) == 0)
6504 process_event_stop_test (ecs
);
6507 /* Before examining the threads further, step this thread to
6508 get it entirely out of the syscall. (We get notice of the
6509 event when the thread is just on the verge of exiting a
6510 syscall. Stepping one instruction seems to get it back
6512 case TARGET_WAITKIND_SYSCALL_RETURN
:
6513 if (handle_syscall_event (ecs
) == 0)
6514 process_event_stop_test (ecs
);
6517 case TARGET_WAITKIND_STOPPED
:
6518 handle_signal_stop (ecs
);
6521 case TARGET_WAITKIND_NO_HISTORY
:
6522 /* Reverse execution: target ran out of history info. */
6524 /* Switch to the stopped thread. */
6525 context_switch (ecs
);
6526 infrun_debug_printf ("stopped");
6528 delete_just_stopped_threads_single_step_breakpoints ();
6529 ecs
->event_thread
->set_stop_pc
6530 (regcache_read_pc (get_thread_regcache (inferior_thread ())));
6532 if (handle_stop_requested (ecs
))
6535 interps_notify_no_history ();
6541 /* Restart threads back to what they were trying to do back when we
6542 paused them (because of an in-line step-over or vfork, for example).
6543 The EVENT_THREAD thread is ignored (not restarted).
6545 If INF is non-nullptr, only resume threads from INF. */
/* NOTE(review): this chunk's extraction dropped physical lines (braces,
   `continue;` statements, the `static void` return-type line).  All code
   tokens below are kept verbatim; only comments are added.  */
6548 restart_threads (struct thread_info
*event_thread
, inferior
*inf
)
6550 INFRUN_SCOPED_DEBUG_START_END ("event_thread=%s, inf=%d",
6551 event_thread
->ptid
.to_string ().c_str (),
6552 inf
!= nullptr ? inf
->num
: -1);
/* Threads must not be restarted while a step-over is in flight.  */
6554 gdb_assert (!step_over_info_valid_p ());
6556 /* In case the instruction just stepped spawned a new thread. */
6557 update_thread_list ();
6559 for (thread_info
*tp
: all_non_exited_threads ())
/* When INF was given, only consider threads of that inferior.  */
6561 if (inf
!= nullptr && tp
->inf
!= inf
)
/* A detaching inferior's threads are left alone (see debug message).  */
6564 if (tp
->inf
->detaching
)
6566 infrun_debug_printf ("restart threads: [%s] inferior detaching",
6567 tp
->ptid
.to_string ().c_str ());
6571 switch_to_thread_no_regs (tp
)
;
/* The event thread itself is explicitly not restarted.  */
6573 if (tp
== event_thread
)
6575 infrun_debug_printf ("restart threads: [%s] is event thread",
6576 tp
->ptid
.to_string ().c_str ());
/* Only threads the user meant to be running (or that are inside an
   inferior function call) are candidates for restart.  */
6580 if (!(tp
->state
== THREAD_RUNNING
|| tp
->control
.in_infcall
))
6582 infrun_debug_printf ("restart threads: [%s] not meant to be running",
6583 tp
->ptid
.to_string ().c_str ());
/* Presumably an already-resumed thread is skipped here — the dropped
   condition/`continue` is not visible in this extraction; TODO confirm
   against upstream infrun.c.  */
6589 infrun_debug_printf ("restart threads: [%s] resumed",
6590 tp
->ptid
.to_string ().c_str ());
6591 gdb_assert (tp
->executing () || tp
->has_pending_waitstatus ());
/* A thread queued for its own step-over is left for the step-over
   machinery to resume; it must not be resumed here.  */
6595 if (thread_is_in_step_over_chain (tp
))
6597 infrun_debug_printf ("restart threads: [%s] needs step-over",
6598 tp
->ptid
.to_string ().c_str ());
6599 gdb_assert (!tp
->resumed ());
/* A thread with an event already queued is just marked resumed so the
   pending event is considered later, instead of being re-run.  */
6604 if (tp
->has_pending_waitstatus ())
6606 infrun_debug_printf ("restart threads: [%s] has pending status",
6607 tp
->ptid
.to_string ().c_str ());
6608 tp
->set_resumed (true);
6612 gdb_assert (!tp
->stop_requested
);
6614 /* If some thread needs to start a step-over at this point, it
6615 should still be in the step-over queue, and thus skipped
6617 if (thread_still_needs_step_over (tp
))
6619 internal_error ("thread [%s] needs a step-over, but not in "
6620 "step-over queue\n",
6621 tp
->ptid
.to_string ().c_str ());
/* A thread that was mid-step is resumed via the stepped-thread path;
   anything else is simply continued below.  */
6624 if (currently_stepping (tp
))
6626 infrun_debug_printf ("restart threads: [%s] was stepping",
6627 tp
->ptid
.to_string ().c_str ());
6628 keep_going_stepped_thread (tp
);
6632 infrun_debug_printf ("restart threads: [%s] continuing",
6633 tp
->ptid
.to_string ().c_str ());
6634 execution_control_state
ecs (tp
);
6635 switch_to_thread (tp
);
6636 keep_going_pass_signal (&ecs
);
6641 /* Callback for iterate_over_threads. Find a resumed thread that has
6642 a pending waitstatus. */
/* NOTE(review): the extraction dropped this function's return-type line
   and (presumably) a second `void *` callback-data parameter required by
   iterate_over_threads — TODO confirm against upstream.  Predicate:
   true iff TP is marked resumed and has a saved waitstatus.  */
6645 resumed_thread_with_pending_status (struct thread_info
*tp
,
6648 return tp
->resumed () && tp
->has_pending_waitstatus ();
6651 /* Called when we get an event that may finish an in-line or
6652 out-of-line (displaced stepping) step-over started previously.
6653 Return true if the event is processed and we should go back to the
6654 event loop; false if the caller should continue processing the
/* NOTE(review): the extraction dropped physical lines here, including
   the `static bool` return-type line and several early `return` lines;
   code tokens below are verbatim, comments only are added.  */
6658 finish_step_over (struct execution_control_state
*ecs
)
/* Finish any displaced (out-of-line) step for the event thread first;
   this may adjust the PC, so it happens before stop_pc is read.  */
6660 displaced_step_finish (ecs
->event_thread
, ecs
->ws
);
6662 bool had_step_over_info
= step_over_info_valid_p ();
6664 if (had_step_over_info
)
6666 /* If we're stepping over a breakpoint with all threads locked,
6667 then only the thread that was stepped should be reporting
6669 gdb_assert (ecs
->event_thread
->control
.trap_expected
);
6671 update_thread_events_after_step_over (ecs
->event_thread
, ecs
->ws
);
6673 clear_step_over_info ();
/* The remainder only applies to non-stop targets (early exit here in
   the dropped lines, per the comment below).  */
6676 if (!target_is_non_stop_p ())
6679 /* Start a new step-over in another thread if there's one that
6683 /* If we were stepping over a breakpoint before, and haven't started
6684 a new in-line step-over sequence, then restart all other threads
6685 (except the event thread). We can't do this in all-stop, as then
6686 e.g., we wouldn't be able to issue any other remote packet until
6687 these other threads stop. */
6688 if (had_step_over_info
&& !step_over_info_valid_p ())
6690 struct thread_info
*pending
;
6692 /* If we only have threads with pending statuses, the restart
6693 below won't restart any thread and so nothing re-inserts the
6694 breakpoint we just stepped over. But we need it inserted
6695 when we later process the pending events, otherwise if
6696 another thread has a pending event for this breakpoint too,
6697 we'd discard its event (because the breakpoint that
6698 originally caused the event was no longer inserted). */
6699 context_switch (ecs
);
6700 insert_breakpoints ();
6702 restart_threads (ecs
->event_thread
);
6704 /* If we have events pending, go through handle_inferior_event
6705 again, picking up a pending event at random. This avoids
6706 thread starvation. */
6708 /* But not if we just stepped over a watchpoint in order to let
6709 the instruction execute so we can evaluate its expression.
6710 The set of watchpoints that triggered is recorded in the
6711 breakpoint objects themselves (see bp->watchpoint_triggered).
6712 If we processed another event first, that other event could
6713 clobber this info. */
6714 if (ecs
->event_thread
->stepping_over_watchpoint
)
6717 /* The code below is meant to avoid one thread hogging the event
6718 loop by doing constant in-line step overs. If the stepping
6719 thread exited, there's no risk for this to happen, so we can
6720 safely let our caller process the event immediately. */
6721 if (ecs
->ws
.kind () == TARGET_WAITKIND_THREAD_EXITED
)
/* Look for some other resumed thread with an event already queued.  */
6724 pending
= iterate_over_threads (resumed_thread_with_pending_status
,
6726 if (pending
!= nullptr)
6728 struct thread_info
*tp
= ecs
->event_thread
;
6729 struct regcache
*regcache
;
6731 infrun_debug_printf ("found resumed threads with "
6732 "pending events, saving status");
6734 gdb_assert (pending
!= tp
);
6736 /* Record the event thread's event for later. */
6737 save_waitstatus (tp
, ecs
->ws
);
6738 /* This was cleared early, by handle_inferior_event. Set it
6739 so this pending event is considered by
6741 tp
->set_resumed (true);
6743 gdb_assert (!tp
->executing ());
6745 regcache
= get_thread_regcache (tp
);
6746 tp
->set_stop_pc (regcache_read_pc (regcache
));
6748 infrun_debug_printf ("saved stop_pc=%s for %s "
6749 "(currently_stepping=%d)",
6750 paddress (current_inferior ()->arch (),
6752 tp
->ptid
.to_string ().c_str (),
6753 currently_stepping (tp
));
6755 /* This in-line step-over finished; clear this so we won't
6756 start a new one. This is what handle_signal_stop would
6757 do, if we returned false. */
6758 tp
->stepping_over_breakpoint
= 0;
6760 /* Wake up the event loop again. */
6761 mark_async_event_handler (infrun_async_inferior_event_token
);
6763 prepare_to_wait (ecs
);
/* Notify that signal SIG was received by the inferior: forwards to the
   interpreters and then to all registered signal_received observers.
   (The return-type line was dropped by this chunk's extraction.)  */
6774 notify_signal_received (gdb_signal sig
)
6776 interps_notify_signal_received (sig
);
6777 gdb::observers::signal_received
.notify (sig
);
/* Notify a normal stop: forwards BS (the stop bpstat) and PRINT_FRAME
   to the interpreters, then to all registered normal_stop observers.
   (The return-type line was dropped by this chunk's extraction.)  */
6783 notify_normal_stop (bpstat
*bs
, int print_frame
)
6785 interps_notify_normal_stop (bs
, print_frame
);
6786 gdb::observers::normal_stop
.notify (bs
, print_frame
);
/* Notify that the user-selected context (thread/frame/inferior, per
   SELECTION flags) changed: forwards to the interpreters and then to
   all registered user_selected_context_changed observers.  */
6791 void notify_user_selected_context_changed (user_selected_what selection
)
6793 interps_notify_user_selected_context_changed (selection
);
6794 gdb::observers::user_selected_context_changed
.notify (selection
);
6797 /* Come here when the program has stopped with a signal. */
6800 handle_signal_stop (struct execution_control_state
*ecs
)
6802 frame_info_ptr frame
;
6803 struct gdbarch
*gdbarch
;
6804 int stopped_by_watchpoint
;
6805 enum stop_kind stop_soon
;
6808 gdb_assert (ecs
->ws
.kind () == TARGET_WAITKIND_STOPPED
);
6810 ecs
->event_thread
->set_stop_signal (ecs
->ws
.sig ());
6812 /* Do we need to clean up the state of a thread that has
6813 completed a displaced single-step? (Doing so usually affects
6814 the PC, so do it here, before we set stop_pc.) */
6815 if (finish_step_over (ecs
))
6818 /* If we either finished a single-step or hit a breakpoint, but
6819 the user wanted this thread to be stopped, pretend we got a
6820 SIG0 (generic unsignaled stop). */
6821 if (ecs
->event_thread
->stop_requested
6822 && ecs
->event_thread
->stop_signal () == GDB_SIGNAL_TRAP
)
6823 ecs
->event_thread
->set_stop_signal (GDB_SIGNAL_0
);
6825 ecs
->event_thread
->set_stop_pc
6826 (regcache_read_pc (get_thread_regcache (ecs
->event_thread
)));
6828 context_switch (ecs
);
6830 if (deprecated_context_hook
)
6831 deprecated_context_hook (ecs
->event_thread
->global_num
);
6835 struct regcache
*regcache
= get_thread_regcache (ecs
->event_thread
);
6836 struct gdbarch
*reg_gdbarch
= regcache
->arch ();
6839 ("stop_pc=%s", paddress (reg_gdbarch
, ecs
->event_thread
->stop_pc ()));
6840 if (target_stopped_by_watchpoint ())
6844 infrun_debug_printf ("stopped by watchpoint");
6846 if (target_stopped_data_address (current_inferior ()->top_target (),
6848 infrun_debug_printf ("stopped data address=%s",
6849 paddress (reg_gdbarch
, addr
));
6851 infrun_debug_printf ("(no data address available)");
6855 /* This is originated from start_remote(), start_inferior() and
6856 shared libraries hook functions. */
6857 stop_soon
= get_inferior_stop_soon (ecs
);
6858 if (stop_soon
== STOP_QUIETLY
|| stop_soon
== STOP_QUIETLY_REMOTE
)
6860 infrun_debug_printf ("quietly stopped");
6861 stop_print_frame
= true;
6866 /* This originates from attach_command(). We need to overwrite
6867 the stop_signal here, because some kernels don't ignore a
6868 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
6869 See more comments in inferior.h. On the other hand, if we
6870 get a non-SIGSTOP, report it to the user - assume the backend
6871 will handle the SIGSTOP if it should show up later.
6873 Also consider that the attach is complete when we see a
6874 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
6875 target extended-remote report it instead of a SIGSTOP
6876 (e.g. gdbserver). We already rely on SIGTRAP being our
6877 signal, so this is no exception.
6879 Also consider that the attach is complete when we see a
6880 GDB_SIGNAL_0. In non-stop mode, GDB will explicitly tell
6881 the target to stop all threads of the inferior, in case the
6882 low level attach operation doesn't stop them implicitly. If
6883 they weren't stopped implicitly, then the stub will report a
6884 GDB_SIGNAL_0, meaning: stopped for no particular reason
6885 other than GDB's request. */
6886 if (stop_soon
== STOP_QUIETLY_NO_SIGSTOP
6887 && (ecs
->event_thread
->stop_signal () == GDB_SIGNAL_STOP
6888 || ecs
->event_thread
->stop_signal () == GDB_SIGNAL_TRAP
6889 || ecs
->event_thread
->stop_signal () == GDB_SIGNAL_0
))
6891 stop_print_frame
= true;
6893 ecs
->event_thread
->set_stop_signal (GDB_SIGNAL_0
);
6897 /* At this point, get hold of the now-current thread's frame. */
6898 frame
= get_current_frame ();
6899 gdbarch
= get_frame_arch (frame
);
6901 /* Pull the single step breakpoints out of the target. */
6902 if (ecs
->event_thread
->stop_signal () == GDB_SIGNAL_TRAP
)
6904 struct regcache
*regcache
;
6907 regcache
= get_thread_regcache (ecs
->event_thread
);
6908 const address_space
*aspace
= ecs
->event_thread
->inf
->aspace
.get ();
6910 pc
= regcache_read_pc (regcache
);
6912 /* However, before doing so, if this single-step breakpoint was
6913 actually for another thread, set this thread up for moving
6915 if (!thread_has_single_step_breakpoint_here (ecs
->event_thread
,
6918 if (single_step_breakpoint_inserted_here_p (aspace
, pc
))
6920 infrun_debug_printf ("[%s] hit another thread's single-step "
6922 ecs
->ptid
.to_string ().c_str ());
6923 ecs
->hit_singlestep_breakpoint
= 1;
6928 infrun_debug_printf ("[%s] hit its single-step breakpoint",
6929 ecs
->ptid
.to_string ().c_str ());
6932 delete_just_stopped_threads_single_step_breakpoints ();
6934 if (ecs
->event_thread
->stop_signal () == GDB_SIGNAL_TRAP
6935 && ecs
->event_thread
->control
.trap_expected
6936 && ecs
->event_thread
->stepping_over_watchpoint
)
6937 stopped_by_watchpoint
= 0;
6939 stopped_by_watchpoint
= watchpoints_triggered (ecs
->ws
);
6941 /* If necessary, step over this watchpoint. We'll be back to display
6943 if (stopped_by_watchpoint
6944 && (target_have_steppable_watchpoint ()
6945 || gdbarch_have_nonsteppable_watchpoint (gdbarch
)))
6947 /* At this point, we are stopped at an instruction which has
6948 attempted to write to a piece of memory under control of
6949 a watchpoint. The instruction hasn't actually executed
6950 yet. If we were to evaluate the watchpoint expression
6951 now, we would get the old value, and therefore no change
6952 would seem to have occurred.
6954 In order to make watchpoints work `right', we really need
6955 to complete the memory write, and then evaluate the
6956 watchpoint expression. We do this by single-stepping the
6959 It may not be necessary to disable the watchpoint to step over
6960 it. For example, the PA can (with some kernel cooperation)
6961 single step over a watchpoint without disabling the watchpoint.
6963 It is far more common to need to disable a watchpoint to step
6964 the inferior over it. If we have non-steppable watchpoints,
6965 we must disable the current watchpoint; it's simplest to
6966 disable all watchpoints.
6968 Any breakpoint at PC must also be stepped over -- if there's
6969 one, it will have already triggered before the watchpoint
6970 triggered, and we either already reported it to the user, or
6971 it didn't cause a stop and we called keep_going. In either
6972 case, if there was a breakpoint at PC, we must be trying to
6974 ecs
->event_thread
->stepping_over_watchpoint
= 1;
6979 ecs
->event_thread
->stepping_over_breakpoint
= 0;
6980 ecs
->event_thread
->stepping_over_watchpoint
= 0;
6981 bpstat_clear (&ecs
->event_thread
->control
.stop_bpstat
);
6982 ecs
->event_thread
->control
.stop_step
= 0;
6983 stop_print_frame
= true;
6984 stopped_by_random_signal
= 0;
6985 bpstat
*stop_chain
= nullptr;
6987 /* Hide inlined functions starting here, unless we just performed stepi or
6988 nexti. After stepi and nexti, always show the innermost frame (not any
6989 inline function call sites). */
6990 if (ecs
->event_thread
->control
.step_range_end
!= 1)
6992 const address_space
*aspace
= ecs
->event_thread
->inf
->aspace
.get ();
6994 /* skip_inline_frames is expensive, so we avoid it if we can
6995 determine that the address is one where functions cannot have
6996 been inlined. This improves performance with inferiors that
6997 load a lot of shared libraries, because the solib event
6998 breakpoint is defined as the address of a function (i.e. not
6999 inline). Note that we have to check the previous PC as well
7000 as the current one to catch cases when we have just
7001 single-stepped off a breakpoint prior to reinstating it.
7002 Note that we're assuming that the code we single-step to is
7003 not inline, but that's not definitive: there's nothing
7004 preventing the event breakpoint function from containing
7005 inlined code, and the single-step ending up there. If the
7006 user had set a breakpoint on that inlined code, the missing
7007 skip_inline_frames call would break things. Fortunately
7008 that's an extremely unlikely scenario. */
7009 if (!pc_at_non_inline_function (aspace
,
7010 ecs
->event_thread
->stop_pc (),
7012 && !(ecs
->event_thread
->stop_signal () == GDB_SIGNAL_TRAP
7013 && ecs
->event_thread
->control
.trap_expected
7014 && pc_at_non_inline_function (aspace
,
7015 ecs
->event_thread
->prev_pc
,
7018 stop_chain
= build_bpstat_chain (aspace
,
7019 ecs
->event_thread
->stop_pc (),
7021 skip_inline_frames (ecs
->event_thread
, stop_chain
);
7023 /* Re-fetch current thread's frame in case that invalidated
7025 frame
= get_current_frame ();
7026 gdbarch
= get_frame_arch (frame
);
7030 if (ecs
->event_thread
->stop_signal () == GDB_SIGNAL_TRAP
7031 && ecs
->event_thread
->control
.trap_expected
7032 && gdbarch_single_step_through_delay_p (gdbarch
)
7033 && currently_stepping (ecs
->event_thread
))
7035 /* We're trying to step off a breakpoint. Turns out that we're
7036 also on an instruction that needs to be stepped multiple
7037 times before it's been fully executing. E.g., architectures
7038 with a delay slot. It needs to be stepped twice, once for
7039 the instruction and once for the delay slot. */
7040 int step_through_delay
7041 = gdbarch_single_step_through_delay (gdbarch
, frame
);
7043 if (step_through_delay
)
7044 infrun_debug_printf ("step through delay");
7046 if (ecs
->event_thread
->control
.step_range_end
== 0
7047 && step_through_delay
)
7049 /* The user issued a continue when stopped at a breakpoint.
7050 Set up for another trap and get out of here. */
7051 ecs
->event_thread
->stepping_over_breakpoint
= 1;
7055 else if (step_through_delay
)
7057 /* The user issued a step when stopped at a breakpoint.
7058 Maybe we should stop, maybe we should not - the delay
7059 slot *might* correspond to a line of source. In any
7060 case, don't decide that here, just set
7061 ecs->stepping_over_breakpoint, making sure we
7062 single-step again before breakpoints are re-inserted. */
7063 ecs
->event_thread
->stepping_over_breakpoint
= 1;
7067 /* See if there is a breakpoint/watchpoint/catchpoint/etc. that
7068 handles this event. */
7069 ecs
->event_thread
->control
.stop_bpstat
7070 = bpstat_stop_status (ecs
->event_thread
->inf
->aspace
.get (),
7071 ecs
->event_thread
->stop_pc (),
7072 ecs
->event_thread
, ecs
->ws
, stop_chain
);
7074 /* Following in case break condition called a
7076 stop_print_frame
= true;
7078 /* This is where we handle "moribund" watchpoints. Unlike
7079 software breakpoints traps, hardware watchpoint traps are
7080 always distinguishable from random traps. If no high-level
7081 watchpoint is associated with the reported stop data address
7082 anymore, then the bpstat does not explain the signal ---
7083 simply make sure to ignore it if `stopped_by_watchpoint' is
7086 if (ecs
->event_thread
->stop_signal () == GDB_SIGNAL_TRAP
7087 && !bpstat_explains_signal (ecs
->event_thread
->control
.stop_bpstat
,
7089 && stopped_by_watchpoint
)
7091 infrun_debug_printf ("no user watchpoint explains watchpoint SIGTRAP, "
7095 /* NOTE: cagney/2003-03-29: These checks for a random signal
7096 at one stage in the past included checks for an inferior
7097 function call's call dummy's return breakpoint. The original
7098 comment, that went with the test, read:
7100 ``End of a stack dummy. Some systems (e.g. Sony news) give
7101 another signal besides SIGTRAP, so check here as well as
7104 If someone ever tries to get call dummys on a
7105 non-executable stack to work (where the target would stop
7106 with something like a SIGSEGV), then those tests might need
7107 to be re-instated. Given, however, that the tests were only
7108 enabled when momentary breakpoints were not being used, I
7109 suspect that it won't be the case.
7111 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
7112 be necessary for call dummies on a non-executable stack on
7115 /* See if the breakpoints module can explain the signal. */
7117 = !bpstat_explains_signal (ecs
->event_thread
->control
.stop_bpstat
,
7118 ecs
->event_thread
->stop_signal ());
7120 /* Maybe this was a trap for a software breakpoint that has since
7122 if (random_signal
&& target_stopped_by_sw_breakpoint ())
7124 if (gdbarch_program_breakpoint_here_p (gdbarch
,
7125 ecs
->event_thread
->stop_pc ()))
7127 struct regcache
*regcache
;
7130 /* Re-adjust PC to what the program would see if GDB was not
7132 regcache
= get_thread_regcache (ecs
->event_thread
);
7133 decr_pc
= gdbarch_decr_pc_after_break (gdbarch
);
7136 std::optional
<scoped_restore_tmpl
<int>>
7137 restore_operation_disable
;
7139 if (record_full_is_used ())
7140 restore_operation_disable
.emplace
7141 (record_full_gdb_operation_disable_set ());
7143 regcache_write_pc (regcache
,
7144 ecs
->event_thread
->stop_pc () + decr_pc
);
7149 /* A delayed software breakpoint event. Ignore the trap. */
7150 infrun_debug_printf ("delayed software breakpoint trap, ignoring");
7155 /* Maybe this was a trap for a hardware breakpoint/watchpoint that
7156 has since been removed. */
7157 if (random_signal
&& target_stopped_by_hw_breakpoint ())
7159 /* A delayed hardware breakpoint event. Ignore the trap. */
7160 infrun_debug_printf ("delayed hardware breakpoint/watchpoint "
7165 /* If not, perhaps stepping/nexting can. */
7167 random_signal
= !(ecs
->event_thread
->stop_signal () == GDB_SIGNAL_TRAP
7168 && currently_stepping (ecs
->event_thread
));
7170 /* Perhaps the thread hit a single-step breakpoint of _another_
7171 thread. Single-step breakpoints are transparent to the
7172 breakpoints module. */
7174 random_signal
= !ecs
->hit_singlestep_breakpoint
;
7176 /* No? Perhaps we got a moribund watchpoint. */
7178 random_signal
= !stopped_by_watchpoint
;
7180 /* Always stop if the user explicitly requested this thread to
7182 if (ecs
->event_thread
->stop_requested
)
7185 infrun_debug_printf ("user-requested stop");
7188 /* For the program's own signals, act according to
7189 the signal handling tables. */
7193 /* Signal not for debugging purposes. */
7194 enum gdb_signal stop_signal
= ecs
->event_thread
->stop_signal ();
7196 infrun_debug_printf ("random signal (%s)",
7197 gdb_signal_to_symbol_string (stop_signal
));
7199 stopped_by_random_signal
= 1;
7201 /* Always stop on signals if we're either just gaining control
7202 of the program, or the user explicitly requested this thread
7203 to remain stopped. */
7204 if (stop_soon
!= NO_STOP_QUIETLY
7205 || ecs
->event_thread
->stop_requested
7206 || signal_stop_state (ecs
->event_thread
->stop_signal ()))
7212 /* Notify observers the signal has "handle print" set. Note we
7213 returned early above if stopping; normal_stop handles the
7214 printing in that case. */
7215 if (signal_print
[ecs
->event_thread
->stop_signal ()])
7217 /* The signal table tells us to print about this signal. */
7218 target_terminal::ours_for_output ();
7219 notify_signal_received (ecs
->event_thread
->stop_signal ());
7220 target_terminal::inferior ();
7223 /* Clear the signal if it should not be passed. */
7224 if (signal_program
[ecs
->event_thread
->stop_signal ()] == 0)
7225 ecs
->event_thread
->set_stop_signal (GDB_SIGNAL_0
);
7227 if (ecs
->event_thread
->prev_pc
== ecs
->event_thread
->stop_pc ()
7228 && ecs
->event_thread
->control
.trap_expected
7229 && ecs
->event_thread
->control
.step_resume_breakpoint
== nullptr)
7231 /* We were just starting a new sequence, attempting to
7232 single-step off of a breakpoint and expecting a SIGTRAP.
7233 Instead this signal arrives. This signal will take us out
7234 of the stepping range so GDB needs to remember to, when
7235 the signal handler returns, resume stepping off that
7237 /* To simplify things, "continue" is forced to use the same
7238 code paths as single-step - set a breakpoint at the
7239 signal return address and then, once hit, step off that
7241 infrun_debug_printf ("signal arrived while stepping over breakpoint");
7243 insert_hp_step_resume_breakpoint_at_frame (frame
);
7244 ecs
->event_thread
->step_after_step_resume_breakpoint
= 1;
7245 /* Reset trap_expected to ensure breakpoints are re-inserted. */
7246 ecs
->event_thread
->control
.trap_expected
= 0;
7248 /* If we were nexting/stepping some other thread, switch to
7249 it, so that we don't continue it, losing control. */
7250 if (!switch_back_to_stepped_thread (ecs
))
7255 if (ecs
->event_thread
->stop_signal () != GDB_SIGNAL_0
7256 && (pc_in_thread_step_range (ecs
->event_thread
->stop_pc (),
7258 || ecs
->event_thread
->control
.step_range_end
== 1)
7259 && (get_stack_frame_id (frame
)
7260 == ecs
->event_thread
->control
.step_stack_frame_id
)
7261 && ecs
->event_thread
->control
.step_resume_breakpoint
== nullptr)
7263 /* The inferior is about to take a signal that will take it
7264 out of the single step range. Set a breakpoint at the
7265 current PC (which is presumably where the signal handler
7266 will eventually return) and then allow the inferior to
7269 Note that this is only needed for a signal delivered
7270 while in the single-step range. Nested signals aren't a
7271 problem as they eventually all return. */
7272 infrun_debug_printf ("signal may take us out of single-step range");
7274 clear_step_over_info ();
7275 insert_hp_step_resume_breakpoint_at_frame (frame
);
7276 ecs
->event_thread
->step_after_step_resume_breakpoint
= 1;
7277 /* Reset trap_expected to ensure breakpoints are re-inserted. */
7278 ecs
->event_thread
->control
.trap_expected
= 0;
7283 /* Note: step_resume_breakpoint may be non-NULL. This occurs
7284 when either there's a nested signal, or when there's a
7285 pending signal enabled just as the signal handler returns
7286 (leaving the inferior at the step-resume-breakpoint without
7287 actually executing it). Either way continue until the
7288 breakpoint is really hit. */
7290 if (!switch_back_to_stepped_thread (ecs
))
7292 infrun_debug_printf ("random signal, keep going");
7299 process_event_stop_test (ecs
);
7302 /* Return the address for the beginning of the line. */
7305 update_line_range_start (CORE_ADDR pc
, struct execution_control_state
*ecs
)
7307 /* The line table may have multiple entries for the same source code line.
7308 Given the PC, check the line table and return the PC that corresponds
7309 to the line table entry for the source line that PC is in. */
7310 CORE_ADDR start_line_pc
= ecs
->event_thread
->control
.step_range_start
;
7311 std::optional
<CORE_ADDR
> real_range_start
;
7313 /* Call find_line_range_start to get the smallest address in the
7314 linetable for multiple Line X entries in the line table. */
7315 real_range_start
= find_line_range_start (pc
);
7317 if (real_range_start
.has_value ())
7318 start_line_pc
= *real_range_start
;
7320 return start_line_pc
;
/* Helper class for process_event_stop_test implementing lazy evaluation.  */
template<typename T>
class lazy_loader
{
  using fetcher_t = std::function<T ()>;

public:
  /* F computes the value on first access; it is invoked at most once.  */
  explicit lazy_loader (fetcher_t &&f) : m_loader (std::move (f))
  { }

  /* Dereference: compute (on first use) and return the cached value.  */
  T &operator* ()
  {
    return fetch ();
  }

  /* Member access through the lazily-computed value.  */
  T *operator-> ()
  {
    return &fetch ();
  }

private:
  /* Run the loader the first time through; afterwards hand back the
     cached result without re-evaluating.  */
  T &fetch ()
  {
    if (!m_value.has_value ())
      m_value.emplace (m_loader ());
    return m_value.value ();
  }

  fetcher_t m_loader;
  std::optional<T> m_value;
};
7354 /* Come here when we've got some debug event / signal we can explain
7355 (IOW, not a random signal), and test whether it should cause a
7356 stop, or whether we should resume the inferior (transparently).
7357 E.g., could be a breakpoint whose condition evaluates false; we
7358 could be still stepping within the line; etc. */
7361 process_event_stop_test (struct execution_control_state
*ecs
)
7363 struct symtab_and_line stop_pc_sal
;
7364 frame_info_ptr frame
;
7365 struct gdbarch
*gdbarch
;
7366 CORE_ADDR jmp_buf_pc
;
7367 struct bpstat_what what
;
7369 /* Handle cases caused by hitting a breakpoint. */
7371 frame
= get_current_frame ();
7372 gdbarch
= get_frame_arch (frame
);
7374 what
= bpstat_what (ecs
->event_thread
->control
.stop_bpstat
);
7376 if (what
.call_dummy
)
7378 stop_stack_dummy
= what
.call_dummy
;
7381 /* A few breakpoint types have callbacks associated (e.g.,
7382 bp_jit_event). Run them now. */
7383 bpstat_run_callbacks (ecs
->event_thread
->control
.stop_bpstat
);
7385 /* If we hit an internal event that triggers symbol changes, the
7386 current frame will be invalidated within bpstat_what (e.g., if we
7387 hit an internal solib event). Re-fetch it. */
7388 frame
= get_current_frame ();
7389 gdbarch
= get_frame_arch (frame
);
7391 /* Shorthand to make if statements smaller. */
7392 struct frame_id original_frame_id
7393 = ecs
->event_thread
->control
.step_frame_id
;
7394 lazy_loader
<frame_id
> curr_frame_id
7395 ([] () { return get_frame_id (get_current_frame ()); });
7397 switch (what
.main_action
)
7399 case BPSTAT_WHAT_SET_LONGJMP_RESUME
:
7400 /* If we hit the breakpoint at longjmp while stepping, we
7401 install a momentary breakpoint at the target of the
7404 infrun_debug_printf ("BPSTAT_WHAT_SET_LONGJMP_RESUME");
7406 ecs
->event_thread
->stepping_over_breakpoint
= 1;
7408 if (what
.is_longjmp
)
7410 struct value
*arg_value
;
7412 /* If we set the longjmp breakpoint via a SystemTap probe,
7413 then use it to extract the arguments. The destination PC
7414 is the third argument to the probe. */
7415 arg_value
= probe_safe_evaluate_at_pc (frame
, 2);
7418 jmp_buf_pc
= value_as_address (arg_value
);
7419 jmp_buf_pc
= gdbarch_addr_bits_remove (gdbarch
, jmp_buf_pc
);
7421 else if (!gdbarch_get_longjmp_target_p (gdbarch
)
7422 || !gdbarch_get_longjmp_target (gdbarch
,
7423 frame
, &jmp_buf_pc
))
7425 infrun_debug_printf ("BPSTAT_WHAT_SET_LONGJMP_RESUME "
7426 "(!gdbarch_get_longjmp_target)");
7431 /* Insert a breakpoint at resume address. */
7432 insert_longjmp_resume_breakpoint (gdbarch
, jmp_buf_pc
);
7435 check_exception_resume (ecs
, frame
);
7439 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME
:
7441 frame_info_ptr init_frame
;
7443 /* There are several cases to consider.
7445 1. The initiating frame no longer exists. In this case we
7446 must stop, because the exception or longjmp has gone too
7449 2. The initiating frame exists, and is the same as the
7450 current frame. We stop, because the exception or longjmp
7453 3. The initiating frame exists and is different from the
7454 current frame. This means the exception or longjmp has
7455 been caught beneath the initiating frame, so keep going.
7457 4. longjmp breakpoint has been placed just to protect
7458 against stale dummy frames and user is not interested in
7459 stopping around longjmps. */
7461 infrun_debug_printf ("BPSTAT_WHAT_CLEAR_LONGJMP_RESUME");
7463 gdb_assert (ecs
->event_thread
->control
.exception_resume_breakpoint
7465 delete_exception_resume_breakpoint (ecs
->event_thread
);
7467 if (what
.is_longjmp
)
7469 check_longjmp_breakpoint_for_call_dummy (ecs
->event_thread
);
7471 if (!frame_id_p (ecs
->event_thread
->initiating_frame
))
7479 init_frame
= frame_find_by_id (ecs
->event_thread
->initiating_frame
);
7483 if (*curr_frame_id
== ecs
->event_thread
->initiating_frame
)
7485 /* Case 2. Fall through. */
7495 /* For Cases 1 and 2, remove the step-resume breakpoint, if it
7497 delete_step_resume_breakpoint (ecs
->event_thread
);
7499 end_stepping_range (ecs
);
7503 case BPSTAT_WHAT_SINGLE
:
7504 infrun_debug_printf ("BPSTAT_WHAT_SINGLE");
7505 ecs
->event_thread
->stepping_over_breakpoint
= 1;
7506 /* Still need to check other stuff, at least the case where we
7507 are stepping and step out of the right range. */
7510 case BPSTAT_WHAT_STEP_RESUME
:
7511 infrun_debug_printf ("BPSTAT_WHAT_STEP_RESUME");
7513 delete_step_resume_breakpoint (ecs
->event_thread
);
7514 if (ecs
->event_thread
->control
.proceed_to_finish
7515 && execution_direction
== EXEC_REVERSE
)
7517 struct thread_info
*tp
= ecs
->event_thread
;
7519 /* We are finishing a function in reverse, and just hit the
7520 step-resume breakpoint at the start address of the
7521 function, and we're almost there -- just need to back up
7522 by one more single-step, which should take us back to the
7524 tp
->control
.step_range_start
= tp
->control
.step_range_end
= 1;
7528 fill_in_stop_func (gdbarch
, ecs
);
7529 if (ecs
->event_thread
->stop_pc () == ecs
->stop_func_start
7530 && execution_direction
== EXEC_REVERSE
)
7532 /* We are stepping over a function call in reverse, and just
7533 hit the step-resume breakpoint at the start address of
7534 the function. Go back to single-stepping, which should
7535 take us back to the function call. */
7536 ecs
->event_thread
->stepping_over_breakpoint
= 1;
7542 case BPSTAT_WHAT_STOP_NOISY
:
7543 infrun_debug_printf ("BPSTAT_WHAT_STOP_NOISY");
7544 stop_print_frame
= true;
7546 /* Assume the thread stopped for a breakpoint. We'll still check
7547 whether a/the breakpoint is there when the thread is next
7549 ecs
->event_thread
->stepping_over_breakpoint
= 1;
7554 case BPSTAT_WHAT_STOP_SILENT
:
7555 infrun_debug_printf ("BPSTAT_WHAT_STOP_SILENT");
7556 stop_print_frame
= false;
7558 /* Assume the thread stopped for a breakpoint. We'll still check
7559 whether a/the breakpoint is there when the thread is next
7561 ecs
->event_thread
->stepping_over_breakpoint
= 1;
7565 case BPSTAT_WHAT_HP_STEP_RESUME
:
7566 infrun_debug_printf ("BPSTAT_WHAT_HP_STEP_RESUME");
7568 delete_step_resume_breakpoint (ecs
->event_thread
);
7569 if (ecs
->event_thread
->step_after_step_resume_breakpoint
)
7571 /* Back when the step-resume breakpoint was inserted, we
7572 were trying to single-step off a breakpoint. Go back to
7574 ecs
->event_thread
->step_after_step_resume_breakpoint
= 0;
7575 ecs
->event_thread
->stepping_over_breakpoint
= 1;
7581 case BPSTAT_WHAT_KEEP_CHECKING
:
7585 /* If we stepped a permanent breakpoint and we had a high priority
7586 step-resume breakpoint for the address we stepped, but we didn't
7587 hit it, then we must have stepped into the signal handler. The
7588 step-resume was only necessary to catch the case of _not_
7589 stepping into the handler, so delete it, and fall through to
7590 checking whether the step finished. */
7591 if (ecs
->event_thread
->stepped_breakpoint
)
7593 struct breakpoint
*sr_bp
7594 = ecs
->event_thread
->control
.step_resume_breakpoint
;
7596 if (sr_bp
!= nullptr
7597 && sr_bp
->first_loc ().permanent
7598 && sr_bp
->type
== bp_hp_step_resume
7599 && sr_bp
->first_loc ().address
== ecs
->event_thread
->prev_pc
)
7601 infrun_debug_printf ("stepped permanent breakpoint, stopped in handler");
7602 delete_step_resume_breakpoint (ecs
->event_thread
);
7603 ecs
->event_thread
->step_after_step_resume_breakpoint
= 0;
7607 /* We come here if we hit a breakpoint but should not stop for it.
7608 Possibly we also were stepping and should stop for that. So fall
7609 through and test for stepping. But, if not stepping, do not
7612 /* In all-stop mode, if we're currently stepping but have stopped in
7613 some other thread, we need to switch back to the stepped thread. */
7614 if (switch_back_to_stepped_thread (ecs
))
7617 if (ecs
->event_thread
->control
.step_resume_breakpoint
)
7619 infrun_debug_printf ("step-resume breakpoint is inserted");
7621 /* Having a step-resume breakpoint overrides anything
7622 else having to do with stepping commands until
7623 that breakpoint is reached. */
7628 if (ecs
->event_thread
->control
.step_range_end
== 0)
7630 infrun_debug_printf ("no stepping, continue");
7631 /* Likewise if we aren't even stepping. */
7636 /* Re-fetch current thread's frame in case the code above caused
7637 the frame cache to be re-initialized, making our FRAME variable
7638 a dangling pointer. */
7639 frame
= get_current_frame ();
7640 gdbarch
= get_frame_arch (frame
);
7641 fill_in_stop_func (gdbarch
, ecs
);
7643 /* If stepping through a line, keep going if still within it.
7645 Note that step_range_end is the address of the first instruction
7646 beyond the step range, and NOT the address of the last instruction
7649 Note also that during reverse execution, we may be stepping
7650 through a function epilogue and therefore must detect when
7651 the current-frame changes in the middle of a line. */
7653 if (pc_in_thread_step_range (ecs
->event_thread
->stop_pc (),
7655 && (execution_direction
!= EXEC_REVERSE
7656 || *curr_frame_id
== original_frame_id
))
7659 ("stepping inside range [%s-%s]",
7660 paddress (gdbarch
, ecs
->event_thread
->control
.step_range_start
),
7661 paddress (gdbarch
, ecs
->event_thread
->control
.step_range_end
));
7663 /* Tentatively re-enable range stepping; `resume' disables it if
7664 necessary (e.g., if we're stepping over a breakpoint or we
7665 have software watchpoints). */
7666 ecs
->event_thread
->control
.may_range_step
= 1;
7668 /* When stepping backward, stop at beginning of line range
7669 (unless it's the function entry point, in which case
7670 keep going back to the call point). */
7671 CORE_ADDR stop_pc
= ecs
->event_thread
->stop_pc ();
7672 if (stop_pc
== ecs
->event_thread
->control
.step_range_start
7673 && stop_pc
!= ecs
->stop_func_start
7674 && execution_direction
== EXEC_REVERSE
)
7675 end_stepping_range (ecs
);
7682 /* We stepped out of the stepping range. */
7684 /* If we are stepping at the source level and entered the runtime
7685 loader dynamic symbol resolution code...
7687 EXEC_FORWARD: we keep on single stepping until we exit the run
7688 time loader code and reach the callee's address.
7690 EXEC_REVERSE: we've already executed the callee (backward), and
7691 the runtime loader code is handled just like any other
7692 undebuggable function call. Now we need only keep stepping
7693 backward through the trampoline code, and that's handled further
7694 down, so there is nothing for us to do here. */
7696 if (execution_direction
!= EXEC_REVERSE
7697 && ecs
->event_thread
->control
.step_over_calls
== STEP_OVER_UNDEBUGGABLE
7698 && in_solib_dynsym_resolve_code (ecs
->event_thread
->stop_pc ())
7699 && (ecs
->event_thread
->control
.step_start_function
== nullptr
7700 || !in_solib_dynsym_resolve_code (
7701 ecs
->event_thread
->control
.step_start_function
->value_block ()
7704 CORE_ADDR pc_after_resolver
=
7705 gdbarch_skip_solib_resolver (gdbarch
, ecs
->event_thread
->stop_pc ());
7707 infrun_debug_printf ("stepped into dynsym resolve code");
7709 if (pc_after_resolver
)
7711 /* Set up a step-resume breakpoint at the address
7712 indicated by SKIP_SOLIB_RESOLVER. */
7713 symtab_and_line sr_sal
;
7714 sr_sal
.pc
= pc_after_resolver
;
7715 sr_sal
.pspace
= get_frame_program_space (frame
);
7717 insert_step_resume_breakpoint_at_sal (gdbarch
,
7718 sr_sal
, null_frame_id
);
7725 /* Step through an indirect branch thunk. */
7726 if (ecs
->event_thread
->control
.step_over_calls
!= STEP_OVER_NONE
7727 && gdbarch_in_indirect_branch_thunk (gdbarch
,
7728 ecs
->event_thread
->stop_pc ()))
7730 infrun_debug_printf ("stepped into indirect branch thunk");
7735 if (ecs
->event_thread
->control
.step_range_end
!= 1
7736 && (ecs
->event_thread
->control
.step_over_calls
== STEP_OVER_UNDEBUGGABLE
7737 || ecs
->event_thread
->control
.step_over_calls
== STEP_OVER_ALL
)
7738 && get_frame_type (frame
) == SIGTRAMP_FRAME
)
7740 infrun_debug_printf ("stepped into signal trampoline");
7741 /* The inferior, while doing a "step" or "next", has ended up in
7742 a signal trampoline (either by a signal being delivered or by
7743 the signal handler returning). Just single-step until the
7744 inferior leaves the trampoline (either by calling the handler
7750 /* If we're in the return path from a shared library trampoline,
7751 we want to proceed through the trampoline when stepping. */
7752 /* macro/2012-04-25: This needs to come before the subroutine
7753 call check below as on some targets return trampolines look
7754 like subroutine calls (MIPS16 return thunks). */
7755 if (gdbarch_in_solib_return_trampoline (gdbarch
,
7756 ecs
->event_thread
->stop_pc (),
7757 ecs
->stop_func_name
)
7758 && ecs
->event_thread
->control
.step_over_calls
!= STEP_OVER_NONE
)
7760 /* Determine where this trampoline returns. */
7761 CORE_ADDR stop_pc
= ecs
->event_thread
->stop_pc ();
7762 CORE_ADDR real_stop_pc
7763 = gdbarch_skip_trampoline_code (gdbarch
, frame
, stop_pc
);
7765 infrun_debug_printf ("stepped into solib return tramp");
7767 /* Only proceed through if we know where it's going. */
7770 /* And put the step-breakpoint there and go until there. */
7771 symtab_and_line sr_sal
;
7772 sr_sal
.pc
= real_stop_pc
;
7773 sr_sal
.section
= find_pc_overlay (sr_sal
.pc
);
7774 sr_sal
.pspace
= get_frame_program_space (frame
);
7776 /* Do not specify what the fp should be when we stop since
7777 on some machines the prologue is where the new fp value
7779 insert_step_resume_breakpoint_at_sal (gdbarch
,
7780 sr_sal
, null_frame_id
);
7782 /* Restart without fiddling with the step ranges or
7789 /* Check for subroutine calls. The check for the current frame
7790 equalling the step ID is not necessary - the check of the
7791 previous frame's ID is sufficient - but it is a common case and
7792 cheaper than checking the previous frame's ID.
7794 NOTE: frame_id::operator== will never report two invalid frame IDs as
7795 being equal, so to get into this block, both the current and
7796 previous frame must have valid frame IDs. */
7797 /* The outer_frame_id check is a heuristic to detect stepping
7798 through startup code. If we step over an instruction which
7799 sets the stack pointer from an invalid value to a valid value,
7800 we may detect that as a subroutine call from the mythical
7801 "outermost" function. This could be fixed by marking
7802 outermost frames as !stack_p,code_p,special_p. Then the
7803 initial outermost frame, before sp was valid, would
7804 have code_addr == &_start. See the comment in frame_id::operator==
7807 /* We want "nexti" to step into, not over, signal handlers invoked
7808 by the kernel, therefore this subroutine check should not trigger
7809 for a signal handler invocation. On most platforms, this is already
7810 not the case, as the kernel puts a signal trampoline frame onto the
7811 stack to handle proper return after the handler, and therefore at this
7812 point, the current frame is a grandchild of the step frame, not a
7813 child. However, on some platforms, the kernel actually uses a
7814 trampoline to handle *invocation* of the handler. In that case,
7815 when executing the first instruction of the trampoline, this check
7816 would erroneously detect the trampoline invocation as a subroutine
7817 call. Fix this by checking for SIGTRAMP_FRAME. */
7818 if ((get_stack_frame_id (frame
)
7819 != ecs
->event_thread
->control
.step_stack_frame_id
)
7820 && get_frame_type (frame
) != SIGTRAMP_FRAME
7821 && ((frame_unwind_caller_id (get_current_frame ())
7822 == ecs
->event_thread
->control
.step_stack_frame_id
)
7823 && ((ecs
->event_thread
->control
.step_stack_frame_id
7825 || (ecs
->event_thread
->control
.step_start_function
7826 != find_pc_function (ecs
->event_thread
->stop_pc ())))))
7828 CORE_ADDR stop_pc
= ecs
->event_thread
->stop_pc ();
7829 CORE_ADDR real_stop_pc
;
7831 infrun_debug_printf ("stepped into subroutine");
7833 if (ecs
->event_thread
->control
.step_over_calls
== STEP_OVER_NONE
)
7835 /* I presume that step_over_calls is only 0 when we're
7836 supposed to be stepping at the assembly language level
7837 ("stepi"). Just stop. */
7838 /* And this works the same backward as frontward. MVS */
7839 end_stepping_range (ecs
);
7843 /* Reverse stepping through solib trampolines. */
7845 if (execution_direction
== EXEC_REVERSE
7846 && ecs
->event_thread
->control
.step_over_calls
!= STEP_OVER_NONE
7847 && (gdbarch_skip_trampoline_code (gdbarch
, frame
, stop_pc
)
7848 || (ecs
->stop_func_start
== 0
7849 && in_solib_dynsym_resolve_code (stop_pc
))))
7851 /* Any solib trampoline code can be handled in reverse
7852 by simply continuing to single-step. We have already
7853 executed the solib function (backwards), and a few
7854 steps will take us back through the trampoline to the
7860 if (ecs
->event_thread
->control
.step_over_calls
== STEP_OVER_ALL
)
7862 /* We're doing a "next".
7864 Normal (forward) execution: set a breakpoint at the
7865 callee's return address (the address at which the caller
7868 Reverse (backward) execution. set the step-resume
7869 breakpoint at the start of the function that we just
7870 stepped into (backwards), and continue to there. When we
7871 get there, we'll need to single-step back to the caller. */
7873 if (execution_direction
== EXEC_REVERSE
)
7875 /* If we're already at the start of the function, we've either
7876 just stepped backward into a single instruction function,
7877 or stepped back out of a signal handler to the first instruction
7878 of the function. Just keep going, which will single-step back
7880 if (ecs
->stop_func_start
!= stop_pc
&& ecs
->stop_func_start
!= 0)
7882 /* Normal function call return (static or dynamic). */
7883 symtab_and_line sr_sal
;
7884 sr_sal
.pc
= ecs
->stop_func_start
;
7885 sr_sal
.pspace
= get_frame_program_space (frame
);
7886 insert_step_resume_breakpoint_at_sal (gdbarch
,
7887 sr_sal
, get_stack_frame_id (frame
));
7891 insert_step_resume_breakpoint_at_caller (frame
);
7897 /* If we are in a function call trampoline (a stub between the
7898 calling routine and the real function), locate the real
7899 function. That's what tells us (a) whether we want to step
7900 into it at all, and (b) what prologue we want to run to the
7901 end of, if we do step into it. */
7902 real_stop_pc
= skip_language_trampoline (frame
, stop_pc
);
7903 if (real_stop_pc
== 0)
7904 real_stop_pc
= gdbarch_skip_trampoline_code (gdbarch
, frame
, stop_pc
);
7905 if (real_stop_pc
!= 0)
7906 ecs
->stop_func_start
= real_stop_pc
;
7908 if (real_stop_pc
!= 0 && in_solib_dynsym_resolve_code (real_stop_pc
))
7910 symtab_and_line sr_sal
;
7911 sr_sal
.pc
= ecs
->stop_func_start
;
7912 sr_sal
.pspace
= get_frame_program_space (frame
);
7914 insert_step_resume_breakpoint_at_sal (gdbarch
,
7915 sr_sal
, null_frame_id
);
7920 /* If we have line number information for the function we are
7921 thinking of stepping into and the function isn't on the skip
7924 If there are several symtabs at that PC (e.g. with include
7925 files), just want to know whether *any* of them have line
7926 numbers. find_pc_line handles this. */
7928 struct symtab_and_line tmp_sal
;
7930 tmp_sal
= find_pc_line (ecs
->stop_func_start
, 0);
7931 if (tmp_sal
.line
!= 0
7932 && !function_name_is_marked_for_skip (ecs
->stop_func_name
,
7934 && !inline_frame_is_marked_for_skip (true, ecs
->event_thread
))
7936 if (execution_direction
== EXEC_REVERSE
)
7937 handle_step_into_function_backward (gdbarch
, ecs
);
7939 handle_step_into_function (gdbarch
, ecs
);
7944 /* If we have no line number and the step-stop-if-no-debug is
7945 set, we stop the step so that the user has a chance to switch
7946 in assembly mode. */
7947 if (ecs
->event_thread
->control
.step_over_calls
== STEP_OVER_UNDEBUGGABLE
7948 && step_stop_if_no_debug
)
7950 end_stepping_range (ecs
);
7954 if (execution_direction
== EXEC_REVERSE
)
7956 /* If we're already at the start of the function, we've either just
7957 stepped backward into a single instruction function without line
7958 number info, or stepped back out of a signal handler to the first
7959 instruction of the function without line number info. Just keep
7960 going, which will single-step back to the caller. */
7961 if (ecs
->stop_func_start
!= stop_pc
)
7963 /* Set a breakpoint at callee's start address.
7964 From there we can step once and be back in the caller. */
7965 symtab_and_line sr_sal
;
7966 sr_sal
.pc
= ecs
->stop_func_start
;
7967 sr_sal
.pspace
= get_frame_program_space (frame
);
7968 insert_step_resume_breakpoint_at_sal (gdbarch
,
7969 sr_sal
, null_frame_id
);
7973 /* Set a breakpoint at callee's return address (the address
7974 at which the caller will resume). */
7975 insert_step_resume_breakpoint_at_caller (frame
);
7981 /* Reverse stepping through solib trampolines. */
7983 if (execution_direction
== EXEC_REVERSE
7984 && ecs
->event_thread
->control
.step_over_calls
!= STEP_OVER_NONE
)
7986 CORE_ADDR stop_pc
= ecs
->event_thread
->stop_pc ();
7988 if (gdbarch_skip_trampoline_code (gdbarch
, frame
, stop_pc
)
7989 || (ecs
->stop_func_start
== 0
7990 && in_solib_dynsym_resolve_code (stop_pc
)))
7992 /* Any solib trampoline code can be handled in reverse
7993 by simply continuing to single-step. We have already
7994 executed the solib function (backwards), and a few
7995 steps will take us back through the trampoline to the
8000 else if (in_solib_dynsym_resolve_code (stop_pc
))
8002 /* Stepped backward into the solib dynsym resolver.
8003 Set a breakpoint at its start and continue, then
8004 one more step will take us out. */
8005 symtab_and_line sr_sal
;
8006 sr_sal
.pc
= ecs
->stop_func_start
;
8007 sr_sal
.pspace
= get_frame_program_space (frame
);
8008 insert_step_resume_breakpoint_at_sal (gdbarch
,
8009 sr_sal
, null_frame_id
);
8015 /* This always returns the sal for the inner-most frame when we are in a
8016 stack of inlined frames, even if GDB actually believes that it is in a
8017 more outer frame. This is checked for below by calls to
8018 inline_skipped_frames. */
8019 stop_pc_sal
= find_pc_line (ecs
->event_thread
->stop_pc (), 0);
8021 /* NOTE: tausq/2004-05-24: This if block used to be done before all
8022 the trampoline processing logic, however, there are some trampolines
8023 that have no names, so we should do trampoline handling first. */
8024 if (ecs
->event_thread
->control
.step_over_calls
== STEP_OVER_UNDEBUGGABLE
8025 && ecs
->stop_func_name
== nullptr
8026 && stop_pc_sal
.line
== 0)
8028 infrun_debug_printf ("stepped into undebuggable function");
8030 /* The inferior just stepped into, or returned to, an
8031 undebuggable function (where there is no debugging information
8032 and no line number corresponding to the address where the
8033 inferior stopped). Since we want to skip this kind of code,
8034 we keep going until the inferior returns from this
8035 function - unless the user has asked us not to (via
8036 set step-mode) or we no longer know how to get back
8037 to the call site. */
8038 if (step_stop_if_no_debug
8039 || !frame_id_p (frame_unwind_caller_id (frame
)))
8041 /* If we have no line number and the step-stop-if-no-debug
8042 is set, we stop the step so that the user has a chance to
8043 switch in assembly mode. */
8044 end_stepping_range (ecs
);
8049 /* Set a breakpoint at callee's return address (the address
8050 at which the caller will resume). */
8051 insert_step_resume_breakpoint_at_caller (frame
);
8057 if (execution_direction
== EXEC_REVERSE
8058 && ecs
->event_thread
->control
.proceed_to_finish
8059 && ecs
->event_thread
->stop_pc () >= ecs
->stop_func_alt_start
8060 && ecs
->event_thread
->stop_pc () < ecs
->stop_func_start
)
8062 /* We are executing the reverse-finish command.
8063 If the system supports multiple entry points and we are finishing a
8064 function in reverse. If we are between the entry points single-step
8065 back to the alternate entry point. If we are at the alternate entry
8066 point -- just need to back up by one more single-step, which
8067 should take us back to the function call. */
8068 ecs
->event_thread
->control
.step_range_start
8069 = ecs
->event_thread
->control
.step_range_end
= 1;
8075 if (ecs
->event_thread
->control
.step_range_end
== 1)
8077 /* It is stepi or nexti. We always want to stop stepping after
8079 infrun_debug_printf ("stepi/nexti");
8080 end_stepping_range (ecs
);
8084 if (stop_pc_sal
.line
== 0)
8086 /* We have no line number information. That means to stop
8087 stepping (does this always happen right after one instruction,
8088 when we do "s" in a function with no line numbers,
8089 or can this happen as a result of a return or longjmp?). */
8090 infrun_debug_printf ("line number info");
8091 end_stepping_range (ecs
);
8095 /* Look for "calls" to inlined functions, part one. If the inline
8096 frame machinery detected some skipped call sites, we have entered
8097 a new inline function. */
8099 if ((*curr_frame_id
== original_frame_id
)
8100 && inline_skipped_frames (ecs
->event_thread
))
8102 infrun_debug_printf ("stepped into inlined function");
8104 symtab_and_line call_sal
= find_frame_sal (get_current_frame ());
8106 if (ecs
->event_thread
->control
.step_over_calls
!= STEP_OVER_ALL
)
8108 /* For "step", we're going to stop. But if the call site
8109 for this inlined function is on the same source line as
8110 we were previously stepping, go down into the function
8111 first. Otherwise stop at the call site. */
8113 if (call_sal
.line
== ecs
->event_thread
->current_line
8114 && call_sal
.symtab
== ecs
->event_thread
->current_symtab
)
8116 step_into_inline_frame (ecs
->event_thread
);
8117 if (inline_frame_is_marked_for_skip (false, ecs
->event_thread
))
8124 end_stepping_range (ecs
);
8129 /* For "next", we should stop at the call site if it is on a
8130 different source line. Otherwise continue through the
8131 inlined function. */
8132 if (call_sal
.line
== ecs
->event_thread
->current_line
8133 && call_sal
.symtab
== ecs
->event_thread
->current_symtab
)
8136 end_stepping_range (ecs
);
8141 /* Look for "calls" to inlined functions, part two. If we are still
8142 in the same real function we were stepping through, but we have
8143 to go further up to find the exact frame ID, we are stepping
8144 through a more inlined call beyond its call site. */
8146 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
8147 && (*curr_frame_id
!= original_frame_id
)
8148 && stepped_in_from (get_current_frame (), original_frame_id
))
8150 infrun_debug_printf ("stepping through inlined function");
8152 if (ecs
->event_thread
->control
.step_over_calls
== STEP_OVER_ALL
8153 || inline_frame_is_marked_for_skip (false, ecs
->event_thread
))
8156 end_stepping_range (ecs
);
8160 bool refresh_step_info
= true;
8161 if ((ecs
->event_thread
->stop_pc () == stop_pc_sal
.pc
)
8162 && (ecs
->event_thread
->current_line
!= stop_pc_sal
.line
8163 || ecs
->event_thread
->current_symtab
!= stop_pc_sal
.symtab
))
8165 /* We are at a different line. */
8167 if (stop_pc_sal
.is_stmt
)
8169 if (execution_direction
== EXEC_REVERSE
)
8171 /* We are stepping backwards make sure we have reached the
8172 beginning of the line. */
8173 CORE_ADDR stop_pc
= ecs
->event_thread
->stop_pc ();
8174 CORE_ADDR start_line_pc
8175 = update_line_range_start (stop_pc
, ecs
);
8177 if (stop_pc
!= start_line_pc
)
8179 /* Have not reached the beginning of the source code line.
8180 Set a step range. Execution should stop in any function
8181 calls we execute back into before reaching the beginning
8183 ecs
->event_thread
->control
.step_range_start
8185 ecs
->event_thread
->control
.step_range_end
= stop_pc
;
8186 set_step_info (ecs
->event_thread
, frame
, stop_pc_sal
);
8192 /* We are at the start of a statement.
8194 So stop. Note that we don't stop if we step into the middle of a
8195 statement. That is said to make things like for (;;) statements
8197 infrun_debug_printf ("stepped to a different line");
8198 end_stepping_range (ecs
);
8201 else if (*curr_frame_id
== original_frame_id
)
8203 /* We are not at the start of a statement, and we have not changed
8206 We ignore this line table entry, and continue stepping forward,
8207 looking for a better place to stop. */
8208 refresh_step_info
= false;
8209 infrun_debug_printf ("stepped to a different line, but "
8210 "it's not the start of a statement");
8214 /* We are not the start of a statement, and we have changed frame.
8216 We ignore this line table entry, and continue stepping forward,
8217 looking for a better place to stop. Keep refresh_step_info at
8218 true to note that the frame has changed, but ignore the line
8219 number to make sure we don't ignore a subsequent entry with the
8220 same line number. */
8221 stop_pc_sal
.line
= 0;
8222 infrun_debug_printf ("stepped to a different frame, but "
8223 "it's not the start of a statement");
8226 else if (execution_direction
== EXEC_REVERSE
8227 && *curr_frame_id
!= original_frame_id
8228 && original_frame_id
.code_addr_p
&& curr_frame_id
->code_addr_p
8229 && original_frame_id
.code_addr
== curr_frame_id
->code_addr
)
8231 /* If we enter here, we're leaving a recursive function call. In this
8232 situation, we shouldn't refresh the step information, because if we
8233 do, we'll lose the frame_id of when we started stepping, and this
8234 will make GDB not know we need to print frame information. */
8235 refresh_step_info
= false;
8236 infrun_debug_printf ("reverse stepping, left a recursive call, don't "
8237 "update step info so we remember we left a frame");
8240 /* We aren't done stepping.
8242 Optimize by setting the stepping range to the line.
8243 (We might not be in the original line, but if we entered a
8244 new line in mid-statement, we continue stepping. This makes
8245 things like for(;;) statements work better.)
8247 If we entered a SAL that indicates a non-statement line table entry,
8248 then we update the stepping range, but we don't update the step info,
8249 which includes things like the line number we are stepping away from.
8250 This means we will stop when we find a line table entry that is marked
8251 as is-statement, even if it matches the non-statement one we just
8254 ecs
->event_thread
->control
.step_range_start
= stop_pc_sal
.pc
;
8255 ecs
->event_thread
->control
.step_range_end
= stop_pc_sal
.end
;
8256 ecs
->event_thread
->control
.may_range_step
= 1;
8258 ("updated step range, start = %s, end = %s, may_range_step = %d",
8259 paddress (gdbarch
, ecs
->event_thread
->control
.step_range_start
),
8260 paddress (gdbarch
, ecs
->event_thread
->control
.step_range_end
),
8261 ecs
->event_thread
->control
.may_range_step
);
8262 if (refresh_step_info
)
8263 set_step_info (ecs
->event_thread
, frame
, stop_pc_sal
);
8265 infrun_debug_printf ("keep going");
8267 if (execution_direction
== EXEC_REVERSE
)
8269 CORE_ADDR stop_pc
= ecs
->event_thread
->stop_pc ();
8271 /* Make sure the stop_pc is set to the beginning of the line. */
8272 if (stop_pc
!= ecs
->event_thread
->control
.step_range_start
)
8273 ecs
->event_thread
->control
.step_range_start
8274 = update_line_range_start (stop_pc
, ecs
);
8280 static bool restart_stepped_thread (process_stratum_target
*resume_target
,
8281 ptid_t resume_ptid
);
8283 /* In all-stop mode, if we're currently stepping but have stopped in
8284 some other thread, we may need to switch back to the stepped
8285 thread. Returns true we set the inferior running, false if we left
8286 it stopped (and the event needs further processing). */
8289 switch_back_to_stepped_thread (struct execution_control_state
*ecs
)
8291 if (!target_is_non_stop_p ())
8293 /* If any thread is blocked on some internal breakpoint, and we
8294 simply need to step over that breakpoint to get it going
8295 again, do that first. */
8297 /* However, if we see an event for the stepping thread, then we
8298 know all other threads have been moved past their breakpoints
8299 already. Let the caller check whether the step is finished,
8300 etc., before deciding to move it past a breakpoint. */
8301 if (ecs
->event_thread
->control
.step_range_end
!= 0)
8304 /* Check if the current thread is blocked on an incomplete
8305 step-over, interrupted by a random signal. */
8306 if (ecs
->event_thread
->control
.trap_expected
8307 && ecs
->event_thread
->stop_signal () != GDB_SIGNAL_TRAP
)
8310 ("need to finish step-over of [%s]",
8311 ecs
->event_thread
->ptid
.to_string ().c_str ());
8316 /* Check if the current thread is blocked by a single-step
8317 breakpoint of another thread. */
8318 if (ecs
->hit_singlestep_breakpoint
)
8320 infrun_debug_printf ("need to step [%s] over single-step breakpoint",
8321 ecs
->ptid
.to_string ().c_str ());
8326 /* If this thread needs yet another step-over (e.g., stepping
8327 through a delay slot), do it first before moving on to
8329 if (thread_still_needs_step_over (ecs
->event_thread
))
8332 ("thread [%s] still needs step-over",
8333 ecs
->event_thread
->ptid
.to_string ().c_str ());
8338 /* If scheduler locking applies even if not stepping, there's no
8339 need to walk over threads. Above we've checked whether the
8340 current thread is stepping. If some other thread not the
8341 event thread is stepping, then it must be that scheduler
8342 locking is not in effect. */
8343 if (schedlock_applies (ecs
->event_thread
))
8346 /* Otherwise, we no longer expect a trap in the current thread.
8347 Clear the trap_expected flag before switching back -- this is
8348 what keep_going does as well, if we call it. */
8349 ecs
->event_thread
->control
.trap_expected
= 0;
8351 /* Likewise, clear the signal if it should not be passed. */
8352 if (!signal_program
[ecs
->event_thread
->stop_signal ()])
8353 ecs
->event_thread
->set_stop_signal (GDB_SIGNAL_0
);
8355 if (restart_stepped_thread (ecs
->target
, ecs
->ptid
))
8357 prepare_to_wait (ecs
);
8361 switch_to_thread (ecs
->event_thread
);
8367 /* Look for the thread that was stepping, and resume it.
8368 RESUME_TARGET / RESUME_PTID indicate the set of threads the caller
8369 is resuming. Return true if a thread was started, false
8373 restart_stepped_thread (process_stratum_target
*resume_target
,
8376 /* Do all pending step-overs before actually proceeding with
8378 if (start_step_over ())
8381 for (thread_info
*tp
: all_threads_safe ())
8383 if (tp
->state
== THREAD_EXITED
)
8386 if (tp
->has_pending_waitstatus ())
8389 /* Ignore threads of processes the caller is not
8392 && (tp
->inf
->process_target () != resume_target
8393 || tp
->inf
->pid
!= resume_ptid
.pid ()))
8396 if (tp
->control
.trap_expected
)
8398 infrun_debug_printf ("switching back to stepped thread (step-over)");
8400 if (keep_going_stepped_thread (tp
))
8405 for (thread_info
*tp
: all_threads_safe ())
8407 if (tp
->state
== THREAD_EXITED
)
8410 if (tp
->has_pending_waitstatus ())
8413 /* Ignore threads of processes the caller is not
8416 && (tp
->inf
->process_target () != resume_target
8417 || tp
->inf
->pid
!= resume_ptid
.pid ()))
8420 /* Did we find the stepping thread? */
8421 if (tp
->control
.step_range_end
)
8423 infrun_debug_printf ("switching back to stepped thread (stepping)");
8425 if (keep_going_stepped_thread (tp
))
8436 restart_after_all_stop_detach (process_stratum_target
*proc_target
)
8438 /* Note we don't check target_is_non_stop_p() here, because the
8439 current inferior may no longer have a process_stratum target
8440 pushed, as we just detached. */
8442 /* See if we have a THREAD_RUNNING thread that need to be
8443 re-resumed. If we have any thread that is already executing,
8444 then we don't need to resume the target -- it is already been
8445 resumed. With the remote target (in all-stop), it's even
8446 impossible to issue another resumption if the target is already
8447 resumed, until the target reports a stop. */
8448 for (thread_info
*thr
: all_threads (proc_target
))
8450 if (thr
->state
!= THREAD_RUNNING
)
8453 /* If we have any thread that is already executing, then we
8454 don't need to resume the target -- it is already been
8456 if (thr
->executing ())
8459 /* If we have a pending event to process, skip resuming the
8460 target and go straight to processing it. */
8461 if (thr
->resumed () && thr
->has_pending_waitstatus ())
8465 /* Alright, we need to re-resume the target. If a thread was
8466 stepping, we need to restart it stepping. */
8467 if (restart_stepped_thread (proc_target
, minus_one_ptid
))
8470 /* Otherwise, find the first THREAD_RUNNING thread and resume
8472 for (thread_info
*thr
: all_threads (proc_target
))
8474 if (thr
->state
!= THREAD_RUNNING
)
8477 execution_control_state
ecs (thr
);
8478 switch_to_thread (thr
);
8484 /* Set a previously stepped thread back to stepping. Returns true on
8485 success, false if the resume is not possible (e.g., the thread
8489 keep_going_stepped_thread (struct thread_info
*tp
)
8491 frame_info_ptr frame
;
8493 /* If the stepping thread exited, then don't try to switch back and
8494 resume it, which could fail in several different ways depending
8495 on the target. Instead, just keep going.
8497 We can find a stepping dead thread in the thread list in two
8500 - The target supports thread exit events, and when the target
8501 tries to delete the thread from the thread list, inferior_ptid
8502 pointed at the exiting thread. In such case, calling
8503 delete_thread does not really remove the thread from the list;
8504 instead, the thread is left listed, with 'exited' state.
8506 - The target's debug interface does not support thread exit
8507 events, and so we have no idea whatsoever if the previously
8508 stepping thread is still alive. For that reason, we need to
8509 synchronously query the target now. */
8511 if (tp
->state
== THREAD_EXITED
|| !target_thread_alive (tp
->ptid
))
8513 infrun_debug_printf ("not resuming previously stepped thread, it has "
8520 infrun_debug_printf ("resuming previously stepped thread");
8522 execution_control_state
ecs (tp
);
8523 switch_to_thread (tp
);
8525 tp
->set_stop_pc (regcache_read_pc (get_thread_regcache (tp
)));
8526 frame
= get_current_frame ();
8528 /* If the PC of the thread we were trying to single-step has
8529 changed, then that thread has trapped or been signaled, but the
8530 event has not been reported to GDB yet. Re-poll the target
8531 looking for this particular thread's event (i.e. temporarily
8532 enable schedlock) by:
8534 - setting a break at the current PC
8535 - resuming that particular thread, only (by setting trap
8538 This prevents us continuously moving the single-step breakpoint
8539 forward, one instruction at a time, overstepping. */
8541 if (tp
->stop_pc () != tp
->prev_pc
)
8545 infrun_debug_printf ("expected thread advanced also (%s -> %s)",
8546 paddress (current_inferior ()->arch (), tp
->prev_pc
),
8547 paddress (current_inferior ()->arch (),
8550 /* Clear the info of the previous step-over, as it's no longer
8551 valid (if the thread was trying to step over a breakpoint, it
8552 has already succeeded). It's what keep_going would do too,
8553 if we called it. Do this before trying to insert the sss
8554 breakpoint, otherwise if we were previously trying to step
8555 over this exact address in another thread, the breakpoint is
8557 clear_step_over_info ();
8558 tp
->control
.trap_expected
= 0;
8560 insert_single_step_breakpoint (get_frame_arch (frame
),
8561 get_frame_address_space (frame
),
8564 tp
->set_resumed (true);
8565 resume_ptid
= internal_resume_ptid (tp
->control
.stepping_command
);
8566 do_target_resume (resume_ptid
, false, GDB_SIGNAL_0
);
8570 infrun_debug_printf ("expected thread still hasn't advanced");
8572 keep_going_pass_signal (&ecs
);
8578 /* Is thread TP in the middle of (software or hardware)
8579 single-stepping? (Note the result of this function must never be
8580 passed directly as target_resume's STEP parameter.) */
8583 currently_stepping (struct thread_info
*tp
)
8585 return ((tp
->control
.step_range_end
8586 && tp
->control
.step_resume_breakpoint
== nullptr)
8587 || tp
->control
.trap_expected
8588 || tp
->stepped_breakpoint
8589 || bpstat_should_step ());
8592 /* Inferior has stepped into a subroutine call with source code that
8593 we should not step over. Do step to the first line of code in
8597 handle_step_into_function (struct gdbarch
*gdbarch
,
8598 struct execution_control_state
*ecs
)
8600 fill_in_stop_func (gdbarch
, ecs
);
8602 compunit_symtab
*cust
8603 = find_pc_compunit_symtab (ecs
->event_thread
->stop_pc ());
8604 if (cust
!= nullptr && cust
->language () != language_asm
)
8605 ecs
->stop_func_start
8606 = gdbarch_skip_prologue_noexcept (gdbarch
, ecs
->stop_func_start
);
8608 symtab_and_line stop_func_sal
= find_pc_line (ecs
->stop_func_start
, 0);
8609 /* Use the step_resume_break to step until the end of the prologue,
8610 even if that involves jumps (as it seems to on the vax under
8612 /* If the prologue ends in the middle of a source line, continue to
8613 the end of that source line (if it is still within the function).
8614 Otherwise, just go to end of prologue. */
8615 if (stop_func_sal
.end
8616 && stop_func_sal
.pc
!= ecs
->stop_func_start
8617 && stop_func_sal
.end
< ecs
->stop_func_end
)
8618 ecs
->stop_func_start
= stop_func_sal
.end
;
8620 /* Architectures which require breakpoint adjustment might not be able
8621 to place a breakpoint at the computed address. If so, the test
8622 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
8623 ecs->stop_func_start to an address at which a breakpoint may be
8624 legitimately placed.
8626 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
8627 made, GDB will enter an infinite loop when stepping through
8628 optimized code consisting of VLIW instructions which contain
8629 subinstructions corresponding to different source lines. On
8630 FR-V, it's not permitted to place a breakpoint on any but the
8631 first subinstruction of a VLIW instruction. When a breakpoint is
8632 set, GDB will adjust the breakpoint address to the beginning of
8633 the VLIW instruction. Thus, we need to make the corresponding
8634 adjustment here when computing the stop address. */
8636 if (gdbarch_adjust_breakpoint_address_p (gdbarch
))
8638 ecs
->stop_func_start
8639 = gdbarch_adjust_breakpoint_address (gdbarch
,
8640 ecs
->stop_func_start
);
8643 if (ecs
->stop_func_start
== ecs
->event_thread
->stop_pc ())
8645 /* We are already there: stop now. */
8646 end_stepping_range (ecs
);
8651 /* Put the step-breakpoint there and go until there. */
8652 symtab_and_line sr_sal
;
8653 sr_sal
.pc
= ecs
->stop_func_start
;
8654 sr_sal
.section
= find_pc_overlay (ecs
->stop_func_start
);
8655 sr_sal
.pspace
= get_frame_program_space (get_current_frame ());
8657 /* Do not specify what the fp should be when we stop since on
8658 some machines the prologue is where the new fp value is
8660 insert_step_resume_breakpoint_at_sal (gdbarch
, sr_sal
, null_frame_id
);
8662 /* And make sure stepping stops right away then. */
8663 ecs
->event_thread
->control
.step_range_end
8664 = ecs
->event_thread
->control
.step_range_start
;
8669 /* Inferior has stepped backward into a subroutine call with source
8670 code that we should not step over. Do step to the beginning of the
8671 last line of code in it. */
8674 handle_step_into_function_backward (struct gdbarch
*gdbarch
,
8675 struct execution_control_state
*ecs
)
8677 struct compunit_symtab
*cust
;
8678 struct symtab_and_line stop_func_sal
;
8680 fill_in_stop_func (gdbarch
, ecs
);
8682 cust
= find_pc_compunit_symtab (ecs
->event_thread
->stop_pc ());
8683 if (cust
!= nullptr && cust
->language () != language_asm
)
8684 ecs
->stop_func_start
8685 = gdbarch_skip_prologue_noexcept (gdbarch
, ecs
->stop_func_start
);
8687 stop_func_sal
= find_pc_line (ecs
->event_thread
->stop_pc (), 0);
8689 /* OK, we're just going to keep stepping here. */
8690 if (stop_func_sal
.pc
== ecs
->event_thread
->stop_pc ())
8692 /* We're there already. Just stop stepping now. */
8693 end_stepping_range (ecs
);
8697 /* Else just reset the step range and keep going.
8698 No step-resume breakpoint, they don't work for
8699 epilogues, which can have multiple entry paths. */
8700 ecs
->event_thread
->control
.step_range_start
= stop_func_sal
.pc
;
8701 ecs
->event_thread
->control
.step_range_end
= stop_func_sal
.end
;
8707 /* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
8708 This is used to both functions and to skip over code. */
8711 insert_step_resume_breakpoint_at_sal_1 (struct gdbarch
*gdbarch
,
8712 struct symtab_and_line sr_sal
,
8713 struct frame_id sr_id
,
8714 enum bptype sr_type
)
8716 /* There should never be more than one step-resume or longjmp-resume
8717 breakpoint per thread, so we should never be setting a new
8718 step_resume_breakpoint when one is already active. */
8719 gdb_assert (inferior_thread ()->control
.step_resume_breakpoint
== nullptr);
8720 gdb_assert (sr_type
== bp_step_resume
|| sr_type
== bp_hp_step_resume
);
8722 infrun_debug_printf ("inserting step-resume breakpoint at %s",
8723 paddress (gdbarch
, sr_sal
.pc
));
8725 inferior_thread ()->control
.step_resume_breakpoint
8726 = set_momentary_breakpoint (gdbarch
, sr_sal
, sr_id
, sr_type
).release ();
8730 insert_step_resume_breakpoint_at_sal (struct gdbarch
*gdbarch
,
8731 struct symtab_and_line sr_sal
,
8732 struct frame_id sr_id
)
8734 insert_step_resume_breakpoint_at_sal_1 (gdbarch
,
8739 /* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc.
8740 This is used to skip a potential signal handler.
8742 This is called with the interrupted function's frame. The signal
8743 handler, when it returns, will resume the interrupted function at
8747 insert_hp_step_resume_breakpoint_at_frame (const frame_info_ptr
&return_frame
)
8749 gdb_assert (return_frame
!= nullptr);
8751 struct gdbarch
*gdbarch
= get_frame_arch (return_frame
);
8753 symtab_and_line sr_sal
;
8754 sr_sal
.pc
= gdbarch_addr_bits_remove (gdbarch
, get_frame_pc (return_frame
));
8755 sr_sal
.section
= find_pc_overlay (sr_sal
.pc
);
8756 sr_sal
.pspace
= get_frame_program_space (return_frame
);
8758 insert_step_resume_breakpoint_at_sal_1 (gdbarch
, sr_sal
,
8759 get_stack_frame_id (return_frame
),
8763 /* Insert a "step-resume breakpoint" at the previous frame's PC. This
8764 is used to skip a function after stepping into it (for "next" or if
8765 the called function has no debugging information).
8767 The current function has almost always been reached by single
8768 stepping a call or return instruction. NEXT_FRAME belongs to the
8769 current function, and the breakpoint will be set at the caller's
8772 This is a separate function rather than reusing
8773 insert_hp_step_resume_breakpoint_at_frame in order to avoid
8774 get_prev_frame, which may stop prematurely (see the implementation
8775 of frame_unwind_caller_id for an example). */
8778 insert_step_resume_breakpoint_at_caller (const frame_info_ptr
&next_frame
)
8780 /* We shouldn't have gotten here if we don't know where the call site
8782 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame
)));
8784 struct gdbarch
*gdbarch
= frame_unwind_caller_arch (next_frame
);
8786 symtab_and_line sr_sal
;
8787 sr_sal
.pc
= gdbarch_addr_bits_remove (gdbarch
,
8788 frame_unwind_caller_pc (next_frame
));
8789 sr_sal
.section
= find_pc_overlay (sr_sal
.pc
);
8790 sr_sal
.pspace
= frame_unwind_program_space (next_frame
);
8792 insert_step_resume_breakpoint_at_sal (gdbarch
, sr_sal
,
8793 frame_unwind_caller_id (next_frame
));
8796 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
8797 new breakpoint at the target of a jmp_buf. The handling of
8798 longjmp-resume uses the same mechanisms used for handling
8799 "step-resume" breakpoints. */
8802 insert_longjmp_resume_breakpoint (struct gdbarch
*gdbarch
, CORE_ADDR pc
)
8804 /* There should never be more than one longjmp-resume breakpoint per
8805 thread, so we should never be setting a new
8806 longjmp_resume_breakpoint when one is already active. */
8807 gdb_assert (inferior_thread ()->control
.exception_resume_breakpoint
== nullptr);
8809 infrun_debug_printf ("inserting longjmp-resume breakpoint at %s",
8810 paddress (gdbarch
, pc
));
8812 inferior_thread ()->control
.exception_resume_breakpoint
=
8813 set_momentary_breakpoint_at_pc (gdbarch
, pc
, bp_longjmp_resume
).release ();
8816 /* Insert an exception resume breakpoint. TP is the thread throwing
8817 the exception. The block B is the block of the unwinder debug hook
8818 function. FRAME is the frame corresponding to the call to this
8819 function. SYM is the symbol of the function argument holding the
8820 target PC of the exception. */
8823 insert_exception_resume_breakpoint (struct thread_info
*tp
,
8824 const struct block
*b
,
8825 const frame_info_ptr
&frame
,
8830 struct block_symbol vsym
;
8831 struct value
*value
;
8833 struct breakpoint
*bp
;
8835 vsym
= lookup_symbol_search_name (sym
->search_name (),
8836 b
, SEARCH_VAR_DOMAIN
);
8837 value
= read_var_value (vsym
.symbol
, vsym
.block
, frame
);
8838 /* If the value was optimized out, revert to the old behavior. */
8839 if (! value
->optimized_out ())
8841 handler
= value_as_address (value
);
8843 infrun_debug_printf ("exception resume at %lx",
8844 (unsigned long) handler
);
8846 /* set_momentary_breakpoint_at_pc creates a thread-specific
8847 breakpoint for the current inferior thread. */
8848 gdb_assert (tp
== inferior_thread ());
8849 bp
= set_momentary_breakpoint_at_pc (get_frame_arch (frame
),
8851 bp_exception_resume
).release ();
8853 tp
->control
.exception_resume_breakpoint
= bp
;
8856 catch (const gdb_exception_error
&e
)
8858 /* We want to ignore errors here. */
8862 /* A helper for check_exception_resume that sets an
8863 exception-breakpoint based on a SystemTap probe. */
8866 insert_exception_resume_from_probe (struct thread_info
*tp
,
8867 const struct bound_probe
*probe
,
8868 const frame_info_ptr
&frame
)
8870 struct value
*arg_value
;
8872 struct breakpoint
*bp
;
8874 arg_value
= probe_safe_evaluate_at_pc (frame
, 1);
8878 handler
= value_as_address (arg_value
);
8880 infrun_debug_printf ("exception resume at %s",
8881 paddress (probe
->objfile
->arch (), handler
));
8883 /* set_momentary_breakpoint_at_pc creates a thread-specific breakpoint
8884 for the current inferior thread. */
8885 gdb_assert (tp
== inferior_thread ());
8886 bp
= set_momentary_breakpoint_at_pc (get_frame_arch (frame
),
8887 handler
, bp_exception_resume
).release ();
8888 tp
->control
.exception_resume_breakpoint
= bp
;
8891 /* This is called when an exception has been intercepted. Check to
8892 see whether the exception's destination is of interest, and if so,
8893 set an exception resume breakpoint there. */
8896 check_exception_resume (struct execution_control_state
*ecs
,
8897 const frame_info_ptr
&frame
)
8899 struct bound_probe probe
;
8900 struct symbol
*func
;
8902 /* First see if this exception unwinding breakpoint was set via a
8903 SystemTap probe point. If so, the probe has two arguments: the
8904 CFA and the HANDLER. We ignore the CFA, extract the handler, and
8905 set a breakpoint there. */
8906 probe
= find_probe_by_pc (get_frame_pc (frame
));
8909 insert_exception_resume_from_probe (ecs
->event_thread
, &probe
, frame
);
8913 func
= get_frame_function (frame
);
8919 const struct block
*b
;
8922 /* The exception breakpoint is a thread-specific breakpoint on
8923 the unwinder's debug hook, declared as:
8925 void _Unwind_DebugHook (void *cfa, void *handler);
8927 The CFA argument indicates the frame to which control is
8928 about to be transferred. HANDLER is the destination PC.
8930 We ignore the CFA and set a temporary breakpoint at HANDLER.
8931 This is not extremely efficient but it avoids issues in gdb
8932 with computing the DWARF CFA, and it also works even in weird
8933 cases such as throwing an exception from inside a signal
8936 b
= func
->value_block ();
8937 for (struct symbol
*sym
: block_iterator_range (b
))
8939 if (!sym
->is_argument ())
8946 insert_exception_resume_breakpoint (ecs
->event_thread
,
8952 catch (const gdb_exception_error
&e
)
8958 stop_waiting (struct execution_control_state
*ecs
)
8960 infrun_debug_printf ("stop_waiting");
8962 /* Let callers know we don't want to wait for the inferior anymore. */
8963 ecs
->wait_some_more
= 0;
8966 /* Like keep_going, but passes the signal to the inferior, even if the
8967 signal is set to nopass. */
8970 keep_going_pass_signal (struct execution_control_state
*ecs
)
8972 gdb_assert (ecs
->event_thread
->ptid
== inferior_ptid
);
8973 gdb_assert (!ecs
->event_thread
->resumed ());
8975 /* Save the pc before execution, to compare with pc after stop. */
8976 ecs
->event_thread
->prev_pc
8977 = regcache_read_pc_protected (get_thread_regcache (ecs
->event_thread
));
8979 if (ecs
->event_thread
->control
.trap_expected
)
8981 struct thread_info
*tp
= ecs
->event_thread
;
8983 infrun_debug_printf ("%s has trap_expected set, "
8984 "resuming to collect trap",
8985 tp
->ptid
.to_string ().c_str ());
8987 /* We haven't yet gotten our trap, and either: intercepted a
8988 non-signal event (e.g., a fork); or took a signal which we
8989 are supposed to pass through to the inferior. Simply
8991 resume (ecs
->event_thread
->stop_signal ());
8993 else if (step_over_info_valid_p ())
8995 /* Another thread is stepping over a breakpoint in-line. If
8996 this thread needs a step-over too, queue the request. In
8997 either case, this resume must be deferred for later. */
8998 struct thread_info
*tp
= ecs
->event_thread
;
9000 if (ecs
->hit_singlestep_breakpoint
9001 || thread_still_needs_step_over (tp
))
9003 infrun_debug_printf ("step-over already in progress: "
9004 "step-over for %s deferred",
9005 tp
->ptid
.to_string ().c_str ());
9006 global_thread_step_over_chain_enqueue (tp
);
9009 infrun_debug_printf ("step-over in progress: resume of %s deferred",
9010 tp
->ptid
.to_string ().c_str ());
9014 regcache
*regcache
= get_thread_regcache (ecs
->event_thread
);
9017 step_over_what step_what
;
9019 /* Either the trap was not expected, but we are continuing
9020 anyway (if we got a signal, the user asked it be passed to
9023 We got our expected trap, but decided we should resume from
9026 We're going to run this baby now!
9028 Note that insert_breakpoints won't try to re-insert
9029 already inserted breakpoints. Therefore, we don't
9030 care if breakpoints were already inserted, or not. */
9032 /* If we need to step over a breakpoint, and we're not using
9033 displaced stepping to do so, insert all breakpoints
9034 (watchpoints, etc.) but the one we're stepping over, step one
9035 instruction, and then re-insert the breakpoint when that step
9038 step_what
= thread_still_needs_step_over (ecs
->event_thread
);
9040 remove_bp
= (ecs
->hit_singlestep_breakpoint
9041 || (step_what
& STEP_OVER_BREAKPOINT
));
9042 remove_wps
= (step_what
& STEP_OVER_WATCHPOINT
);
9044 /* We can't use displaced stepping if we need to step past a
9045 watchpoint. The instruction copied to the scratch pad would
9046 still trigger the watchpoint. */
9048 && (remove_wps
|| !use_displaced_stepping (ecs
->event_thread
)))
9050 set_step_over_info (ecs
->event_thread
->inf
->aspace
.get (),
9051 regcache_read_pc (regcache
), remove_wps
,
9052 ecs
->event_thread
->global_num
);
9054 else if (remove_wps
)
9055 set_step_over_info (nullptr, 0, remove_wps
, -1);
9057 /* If we now need to do an in-line step-over, we need to stop
9058 all other threads. Note this must be done before
9059 insert_breakpoints below, because that removes the breakpoint
9060 we're about to step over, otherwise other threads could miss
9062 if (step_over_info_valid_p () && target_is_non_stop_p ())
9063 stop_all_threads ("starting in-line step-over");
9065 /* Stop stepping if inserting breakpoints fails. */
9068 insert_breakpoints ();
9070 catch (const gdb_exception_error
&e
)
9072 exception_print (gdb_stderr
, e
);
9074 clear_step_over_info ();
9078 ecs
->event_thread
->control
.trap_expected
= (remove_bp
|| remove_wps
);
9080 resume (ecs
->event_thread
->stop_signal ());
9083 prepare_to_wait (ecs
);
9086 /* Called when we should continue running the inferior, because the
9087 current event doesn't cause a user visible stop. This does the
9088 resuming part; waiting for the next event is done elsewhere. */
9091 keep_going (struct execution_control_state
*ecs
)
9093 if (ecs
->event_thread
->control
.trap_expected
9094 && ecs
->event_thread
->stop_signal () == GDB_SIGNAL_TRAP
)
9095 ecs
->event_thread
->control
.trap_expected
= 0;
9097 if (!signal_program
[ecs
->event_thread
->stop_signal ()])
9098 ecs
->event_thread
->set_stop_signal (GDB_SIGNAL_0
);
9099 keep_going_pass_signal (ecs
);
9102 /* This function normally comes after a resume, before
9103 handle_inferior_event exits. It takes care of any last bits of
9104 housekeeping, and sets the all-important wait_some_more flag. */
9107 prepare_to_wait (struct execution_control_state
*ecs
)
9109 infrun_debug_printf ("prepare_to_wait");
9111 ecs
->wait_some_more
= 1;
9113 /* If the target can't async, emulate it by marking the infrun event
9114 handler such that as soon as we get back to the event-loop, we
9115 immediately end up in fetch_inferior_event again calling
9117 if (!target_can_async_p ())
9118 mark_infrun_async_event_handler ();
9121 /* We are done with the step range of a step/next/si/ni command.
9122 Called once for each n of a "step n" operation. */
9125 end_stepping_range (struct execution_control_state
*ecs
)
9127 ecs
->event_thread
->control
.stop_step
= 1;
9131 /* Several print_*_reason functions to print why the inferior has stopped.
9132 We always print something when the inferior exits, or receives a signal.
9133 The rest of the cases are dealt with later on in normal_stop and
9134 print_it_typical. Ideally there should be a call to one of these
9135 print_*_reason functions functions from handle_inferior_event each time
9136 stop_waiting is called.
9138 Note that we don't call these directly, instead we delegate that to
9139 the interpreters, through observers. Interpreters then call these
9140 with whatever uiout is right. */
9143 print_signal_exited_reason (struct ui_out
*uiout
, enum gdb_signal siggnal
)
9145 annotate_signalled ();
9146 if (uiout
->is_mi_like_p ())
9148 ("reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED
));
9149 uiout
->text ("\nProgram terminated with signal ");
9150 annotate_signal_name ();
9151 uiout
->field_string ("signal-name",
9152 gdb_signal_to_name (siggnal
));
9153 annotate_signal_name_end ();
9155 annotate_signal_string ();
9156 uiout
->field_string ("signal-meaning",
9157 gdb_signal_to_string (siggnal
));
9158 annotate_signal_string_end ();
9159 uiout
->text (".\n");
9160 uiout
->text ("The program no longer exists.\n");
9164 print_exited_reason (struct ui_out
*uiout
, int exitstatus
)
9166 struct inferior
*inf
= current_inferior ();
9167 std::string pidstr
= target_pid_to_str (ptid_t (inf
->pid
));
9169 annotate_exited (exitstatus
);
9172 if (uiout
->is_mi_like_p ())
9173 uiout
->field_string ("reason", async_reason_lookup (EXEC_ASYNC_EXITED
));
9174 std::string exit_code_str
9175 = string_printf ("0%o", (unsigned int) exitstatus
);
9176 uiout
->message ("[Inferior %s (%s) exited with code %pF]\n",
9177 plongest (inf
->num
), pidstr
.c_str (),
9178 string_field ("exit-code", exit_code_str
.c_str ()));
9182 if (uiout
->is_mi_like_p ())
9184 ("reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY
));
9185 uiout
->message ("[Inferior %s (%s) exited normally]\n",
9186 plongest (inf
->num
), pidstr
.c_str ());
9191 print_signal_received_reason (struct ui_out
*uiout
, enum gdb_signal siggnal
)
9193 struct thread_info
*thr
= inferior_thread ();
9195 infrun_debug_printf ("signal = %s", gdb_signal_to_string (siggnal
));
9199 if (uiout
->is_mi_like_p ())
9201 else if (show_thread_that_caused_stop ())
9203 uiout
->text ("\nThread ");
9204 uiout
->field_string ("thread-id", print_thread_id (thr
));
9206 const char *name
= thread_name (thr
);
9207 if (name
!= nullptr)
9209 uiout
->text (" \"");
9210 uiout
->field_string ("name", name
);
9215 uiout
->text ("\nProgram");
9217 if (siggnal
== GDB_SIGNAL_0
&& !uiout
->is_mi_like_p ())
9218 uiout
->text (" stopped");
9221 uiout
->text (" received signal ");
9222 annotate_signal_name ();
9223 if (uiout
->is_mi_like_p ())
9225 ("reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED
));
9226 uiout
->field_string ("signal-name", gdb_signal_to_name (siggnal
));
9227 annotate_signal_name_end ();
9229 annotate_signal_string ();
9230 uiout
->field_string ("signal-meaning", gdb_signal_to_string (siggnal
));
9232 regcache
*regcache
= get_thread_regcache (thr
);
9233 struct gdbarch
*gdbarch
= regcache
->arch ();
9234 if (gdbarch_report_signal_info_p (gdbarch
))
9235 gdbarch_report_signal_info (gdbarch
, uiout
, siggnal
);
9237 annotate_signal_string_end ();
9239 uiout
->text (".\n");
9243 print_no_history_reason (struct ui_out
*uiout
)
9245 if (uiout
->is_mi_like_p ())
9246 uiout
->field_string ("reason", async_reason_lookup (EXEC_ASYNC_NO_HISTORY
));
9248 uiout
->text ("\nNo more reverse-execution history.\n");
9251 /* Print current location without a level number, if we have changed
9252 functions or hit a breakpoint. Print source line if we have one.
9253 bpstat_print contains the logic deciding in detail what to print,
9254 based on the event(s) that just occurred. */
9257 print_stop_location (const target_waitstatus
&ws
)
9260 enum print_what source_flag
;
9261 int do_frame_printing
= 1;
9262 struct thread_info
*tp
= inferior_thread ();
9264 bpstat_ret
= bpstat_print (tp
->control
.stop_bpstat
, ws
.kind ());
9268 /* FIXME: cagney/2002-12-01: Given that a frame ID does (or
9269 should) carry around the function and does (or should) use
9270 that when doing a frame comparison. */
9271 if (tp
->control
.stop_step
9272 && (tp
->control
.step_frame_id
9273 == get_frame_id (get_current_frame ()))
9274 && (tp
->control
.step_start_function
9275 == find_pc_function (tp
->stop_pc ())))
9277 /* Finished step, just print source line. */
9278 source_flag
= SRC_LINE
;
9282 /* Print location and source line. */
9283 source_flag
= SRC_AND_LOC
;
9286 case PRINT_SRC_AND_LOC
:
9287 /* Print location and source line. */
9288 source_flag
= SRC_AND_LOC
;
9290 case PRINT_SRC_ONLY
:
9291 source_flag
= SRC_LINE
;
9294 /* Something bogus. */
9295 source_flag
= SRC_LINE
;
9296 do_frame_printing
= 0;
9299 internal_error (_("Unknown value."));
9302 /* The behavior of this routine with respect to the source
9304 SRC_LINE: Print only source line
9305 LOCATION: Print only location
9306 SRC_AND_LOC: Print location and source line. */
9307 if (do_frame_printing
)
9308 print_stack_frame (get_selected_frame (nullptr), 0, source_flag
, 1);
9311 /* See `print_stop_event` in infrun.h. */
9314 do_print_stop_event (struct ui_out
*uiout
, bool displays
)
9316 struct target_waitstatus last
;
9317 struct thread_info
*tp
;
9319 get_last_target_status (nullptr, nullptr, &last
);
9322 scoped_restore save_uiout
= make_scoped_restore (¤t_uiout
, uiout
);
9324 print_stop_location (last
);
9326 /* Display the auto-display expressions. */
9331 tp
= inferior_thread ();
9332 if (tp
->thread_fsm () != nullptr
9333 && tp
->thread_fsm ()->finished_p ())
9335 struct return_value_info
*rv
;
9337 rv
= tp
->thread_fsm ()->return_value ();
9339 print_return_value (uiout
, rv
);
9343 /* See infrun.h. This function itself sets up buffered output for the
9344 duration of do_print_stop_event, which performs the actual event
9348 print_stop_event (struct ui_out
*uiout
, bool displays
)
9350 do_with_buffered_output (do_print_stop_event
, uiout
, displays
);
9356 maybe_remove_breakpoints (void)
9358 if (!breakpoints_should_be_inserted_now () && target_has_execution ())
9360 if (remove_breakpoints ())
9362 target_terminal::ours_for_output ();
9363 gdb_printf (_("Cannot remove breakpoints because "
9364 "program is no longer writable.\nFurther "
9365 "execution is probably impossible.\n"));
9370 /* The execution context that just caused a normal stop. */
9376 DISABLE_COPY_AND_ASSIGN (stop_context
);
9378 bool changed () const;
9383 /* The event PTID. */
9387 /* If stopp for a thread event, this is the thread that caused the
9389 thread_info_ref thread
;
9391 /* The inferior that caused the stop. */
9395 /* Initializes a new stop context. If stopped for a thread event, this
9396 takes a strong reference to the thread. */
9398 stop_context::stop_context ()
9400 stop_id
= get_stop_id ();
9401 ptid
= inferior_ptid
;
9402 inf_num
= current_inferior ()->num
;
9404 if (inferior_ptid
!= null_ptid
)
9406 /* Take a strong reference so that the thread can't be deleted
9408 thread
= thread_info_ref::new_reference (inferior_thread ());
9412 /* Return true if the current context no longer matches the saved stop
9416 stop_context::changed () const
9418 if (ptid
!= inferior_ptid
)
9420 if (inf_num
!= current_inferior ()->num
)
9422 if (thread
!= nullptr && thread
->state
!= THREAD_STOPPED
)
9424 if (get_stop_id () != stop_id
)
9434 struct target_waitstatus last
;
9436 get_last_target_status (nullptr, nullptr, &last
);
9440 /* If an exception is thrown from this point on, make sure to
9441 propagate GDB's knowledge of the executing state to the
9442 frontend/user running state. A QUIT is an easy exception to see
9443 here, so do this before any filtered output. */
9445 ptid_t finish_ptid
= null_ptid
;
9448 finish_ptid
= minus_one_ptid
;
9449 else if (last
.kind () == TARGET_WAITKIND_SIGNALLED
9450 || last
.kind () == TARGET_WAITKIND_EXITED
)
9452 /* On some targets, we may still have live threads in the
9453 inferior when we get a process exit event. E.g., for
9454 "checkpoint", when the current checkpoint/fork exits,
9455 linux-fork.c automatically switches to another fork from
9456 within target_mourn_inferior. */
9457 if (inferior_ptid
!= null_ptid
)
9458 finish_ptid
= ptid_t (inferior_ptid
.pid ());
9460 else if (last
.kind () != TARGET_WAITKIND_NO_RESUMED
9461 && last
.kind () != TARGET_WAITKIND_THREAD_EXITED
)
9462 finish_ptid
= inferior_ptid
;
9464 std::optional
<scoped_finish_thread_state
> maybe_finish_thread_state
;
9465 if (finish_ptid
!= null_ptid
)
9467 maybe_finish_thread_state
.emplace
9468 (user_visible_resume_target (finish_ptid
), finish_ptid
);
9471 /* As we're presenting a stop, and potentially removing breakpoints,
9472 update the thread list so we can tell whether there are threads
9473 running on the target. With target remote, for example, we can
9474 only learn about new threads when we explicitly update the thread
9475 list. Do this before notifying the interpreters about signal
9476 stops, end of stepping ranges, etc., so that the "new thread"
9477 output is emitted before e.g., "Program received signal FOO",
9478 instead of after. */
9479 update_thread_list ();
9481 if (last
.kind () == TARGET_WAITKIND_STOPPED
&& stopped_by_random_signal
)
9482 notify_signal_received (inferior_thread ()->stop_signal ());
9484 /* As with the notification of thread events, we want to delay
9485 notifying the user that we've switched thread context until
9486 the inferior actually stops.
9488 There's no point in saying anything if the inferior has exited.
9489 Note that SIGNALLED here means "exited with a signal", not
9490 "received a signal".
9492 Also skip saying anything in non-stop mode. In that mode, as we
9493 don't want GDB to switch threads behind the user's back, to avoid
9494 races where the user is typing a command to apply to thread x,
9495 but GDB switches to thread y before the user finishes entering
9496 the command, fetch_inferior_event installs a cleanup to restore
9497 the current thread back to the thread the user had selected right
9498 after this event is handled, so we're not really switching, only
9499 informing of a stop. */
9502 if ((last
.kind () != TARGET_WAITKIND_SIGNALLED
9503 && last
.kind () != TARGET_WAITKIND_EXITED
9504 && last
.kind () != TARGET_WAITKIND_NO_RESUMED
9505 && last
.kind () != TARGET_WAITKIND_THREAD_EXITED
)
9506 && target_has_execution ()
9507 && previous_thread
!= inferior_thread ())
9509 SWITCH_THRU_ALL_UIS ()
9511 target_terminal::ours_for_output ();
9512 gdb_printf (_("[Switching to %s]\n"),
9513 target_pid_to_str (inferior_ptid
).c_str ());
9514 annotate_thread_changed ();
9518 update_previous_thread ();
9521 if (last
.kind () == TARGET_WAITKIND_NO_RESUMED
9522 || last
.kind () == TARGET_WAITKIND_THREAD_EXITED
)
9524 stop_print_frame
= false;
9526 SWITCH_THRU_ALL_UIS ()
9527 if (current_ui
->prompt_state
== PROMPT_BLOCKED
)
9529 target_terminal::ours_for_output ();
9530 if (last
.kind () == TARGET_WAITKIND_NO_RESUMED
)
9531 gdb_printf (_("No unwaited-for children left.\n"));
9532 else if (last
.kind () == TARGET_WAITKIND_THREAD_EXITED
)
9533 gdb_printf (_("Command aborted, thread exited.\n"));
9535 gdb_assert_not_reached ("unhandled");
9539 /* Note: this depends on the update_thread_list call above. */
9540 maybe_remove_breakpoints ();
9542 /* If an auto-display called a function and that got a signal,
9543 delete that auto-display to avoid an infinite recursion. */
9545 if (stopped_by_random_signal
)
9546 disable_current_display ();
9548 SWITCH_THRU_ALL_UIS ()
9550 async_enable_stdin ();
9553 /* Let the user/frontend see the threads as stopped. */
9554 maybe_finish_thread_state
.reset ();
9556 /* Select innermost stack frame - i.e., current frame is frame 0,
9557 and current location is based on that. Handle the case where the
9558 dummy call is returning after being stopped. E.g. the dummy call
9559 previously hit a breakpoint. (If the dummy call returns
9560 normally, we won't reach here.) Do this before the stop hook is
9561 run, so that it doesn't get to see the temporary dummy frame,
9562 which is not where we'll present the stop. */
9563 if (has_stack_frames ())
9565 if (stop_stack_dummy
== STOP_STACK_DUMMY
)
9567 /* Pop the empty frame that contains the stack dummy. This
9568 also restores inferior state prior to the call (struct
9569 infcall_suspend_state). */
9570 frame_info_ptr frame
= get_current_frame ();
9572 gdb_assert (get_frame_type (frame
) == DUMMY_FRAME
);
9574 /* frame_pop calls reinit_frame_cache as the last thing it
9575 does which means there's now no selected frame. */
9578 select_frame (get_current_frame ());
9580 /* Set the current source location. */
9581 set_current_sal_from_frame (get_current_frame ());
9584 /* Look up the hook_stop and run it (CLI internally handles problem
9585 of stop_command's pre-hook not existing). */
9586 stop_context saved_context
;
9590 execute_cmd_pre_hook (stop_command
);
9592 catch (const gdb_exception_error
&ex
)
9594 exception_fprintf (gdb_stderr
, ex
,
9595 "Error while running hook_stop:\n");
9598 /* If the stop hook resumes the target, then there's no point in
9599 trying to notify about the previous stop; its context is
9600 gone. Likewise if the command switches thread or inferior --
9601 the observers would print a stop for the wrong
9603 if (saved_context
.changed ())
9606 /* Notify observers about the stop. This is where the interpreters
9607 print the stop event. */
9608 notify_normal_stop ((inferior_ptid
!= null_ptid
9609 ? inferior_thread ()->control
.stop_bpstat
9612 annotate_stopped ();
9614 if (target_has_execution ())
9616 if (last
.kind () != TARGET_WAITKIND_SIGNALLED
9617 && last
.kind () != TARGET_WAITKIND_EXITED
9618 && last
.kind () != TARGET_WAITKIND_NO_RESUMED
9619 && last
.kind () != TARGET_WAITKIND_THREAD_EXITED
)
9620 /* Delete the breakpoint we stopped at, if it wants to be deleted.
9621 Delete any breakpoint that is to be deleted at the next stop. */
9622 breakpoint_auto_delete (inferior_thread ()->control
.stop_bpstat
);
9629 signal_stop_state (int signo
)
9631 return signal_stop
[signo
];
9635 signal_print_state (int signo
)
9637 return signal_print
[signo
];
9641 signal_pass_state (int signo
)
9643 return signal_program
[signo
];
9647 signal_cache_update (int signo
)
9651 for (signo
= 0; signo
< (int) GDB_SIGNAL_LAST
; signo
++)
9652 signal_cache_update (signo
);
9657 signal_pass
[signo
] = (signal_stop
[signo
] == 0
9658 && signal_print
[signo
] == 0
9659 && signal_program
[signo
] == 1
9660 && signal_catch
[signo
] == 0);
9664 signal_stop_update (int signo
, int state
)
9666 int ret
= signal_stop
[signo
];
9668 signal_stop
[signo
] = state
;
9669 signal_cache_update (signo
);
9674 signal_print_update (int signo
, int state
)
9676 int ret
= signal_print
[signo
];
9678 signal_print
[signo
] = state
;
9679 signal_cache_update (signo
);
9684 signal_pass_update (int signo
, int state
)
9686 int ret
= signal_program
[signo
];
9688 signal_program
[signo
] = state
;
9689 signal_cache_update (signo
);
9693 /* Update the global 'signal_catch' from INFO and notify the
9697 signal_catch_update (const unsigned int *info
)
9701 for (i
= 0; i
< GDB_SIGNAL_LAST
; ++i
)
9702 signal_catch
[i
] = info
[i
] > 0;
9703 signal_cache_update (-1);
9704 target_pass_signals (signal_pass
);
9708 sig_print_header (void)
9710 gdb_printf (_("Signal Stop\tPrint\tPass "
9711 "to program\tDescription\n"));
9715 sig_print_info (enum gdb_signal oursig
)
9717 const char *name
= gdb_signal_to_name (oursig
);
9718 int name_padding
= 13 - strlen (name
);
9720 if (name_padding
<= 0)
9723 gdb_printf ("%s", name
);
9724 gdb_printf ("%*.*s ", name_padding
, name_padding
, " ");
9725 gdb_printf ("%s\t", signal_stop
[oursig
] ? "Yes" : "No");
9726 gdb_printf ("%s\t", signal_print
[oursig
] ? "Yes" : "No");
9727 gdb_printf ("%s\t\t", signal_program
[oursig
] ? "Yes" : "No");
9728 gdb_printf ("%s\n", gdb_signal_to_string (oursig
));
9731 /* Specify how various signals in the inferior should be handled. */
9734 handle_command (const char *args
, int from_tty
)
9736 int digits
, wordlen
;
9737 int sigfirst
, siglast
;
9738 enum gdb_signal oursig
;
9741 if (args
== nullptr)
9743 error_no_arg (_("signal to handle"));
9746 /* Allocate and zero an array of flags for which signals to handle. */
9748 const size_t nsigs
= GDB_SIGNAL_LAST
;
9749 unsigned char sigs
[nsigs
] {};
9751 /* Break the command line up into args. */
9753 gdb_argv
built_argv (args
);
9755 /* Walk through the args, looking for signal oursigs, signal names, and
9756 actions. Signal numbers and signal names may be interspersed with
9757 actions, with the actions being performed for all signals cumulatively
9758 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
9760 for (char *arg
: built_argv
)
9762 wordlen
= strlen (arg
);
9763 for (digits
= 0; isdigit (arg
[digits
]); digits
++)
9767 sigfirst
= siglast
= -1;
9769 if (wordlen
>= 1 && !strncmp (arg
, "all", wordlen
))
9771 /* Apply action to all signals except those used by the
9772 debugger. Silently skip those. */
9775 siglast
= nsigs
- 1;
9777 else if (wordlen
>= 1 && !strncmp (arg
, "stop", wordlen
))
9779 SET_SIGS (nsigs
, sigs
, signal_stop
);
9780 SET_SIGS (nsigs
, sigs
, signal_print
);
9782 else if (wordlen
>= 1 && !strncmp (arg
, "ignore", wordlen
))
9784 UNSET_SIGS (nsigs
, sigs
, signal_program
);
9786 else if (wordlen
>= 2 && !strncmp (arg
, "print", wordlen
))
9788 SET_SIGS (nsigs
, sigs
, signal_print
);
9790 else if (wordlen
>= 2 && !strncmp (arg
, "pass", wordlen
))
9792 SET_SIGS (nsigs
, sigs
, signal_program
);
9794 else if (wordlen
>= 3 && !strncmp (arg
, "nostop", wordlen
))
9796 UNSET_SIGS (nsigs
, sigs
, signal_stop
);
9798 else if (wordlen
>= 3 && !strncmp (arg
, "noignore", wordlen
))
9800 SET_SIGS (nsigs
, sigs
, signal_program
);
9802 else if (wordlen
>= 4 && !strncmp (arg
, "noprint", wordlen
))
9804 UNSET_SIGS (nsigs
, sigs
, signal_print
);
9805 UNSET_SIGS (nsigs
, sigs
, signal_stop
);
9807 else if (wordlen
>= 4 && !strncmp (arg
, "nopass", wordlen
))
9809 UNSET_SIGS (nsigs
, sigs
, signal_program
);
9811 else if (digits
> 0)
9813 /* It is numeric. The numeric signal refers to our own
9814 internal signal numbering from target.h, not to host/target
9815 signal number. This is a feature; users really should be
9816 using symbolic names anyway, and the common ones like
9817 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
9819 sigfirst
= siglast
= (int)
9820 gdb_signal_from_command (atoi (arg
));
9821 if (arg
[digits
] == '-')
9824 gdb_signal_from_command (atoi (arg
+ digits
+ 1));
9826 if (sigfirst
> siglast
)
9828 /* Bet he didn't figure we'd think of this case... */
9829 std::swap (sigfirst
, siglast
);
9834 oursig
= gdb_signal_from_name (arg
);
9835 if (oursig
!= GDB_SIGNAL_UNKNOWN
)
9837 sigfirst
= siglast
= (int) oursig
;
9841 /* Not a number and not a recognized flag word => complain. */
9842 error (_("Unrecognized or ambiguous flag word: \"%s\"."), arg
);
9846 /* If any signal numbers or symbol names were found, set flags for
9847 which signals to apply actions to. */
9849 for (int signum
= sigfirst
; signum
>= 0 && signum
<= siglast
; signum
++)
9851 switch ((enum gdb_signal
) signum
)
9853 case GDB_SIGNAL_TRAP
:
9854 case GDB_SIGNAL_INT
:
9855 if (!allsigs
&& !sigs
[signum
])
9857 if (query (_("%s is used by the debugger.\n\
9858 Are you sure you want to change it? "),
9859 gdb_signal_to_name ((enum gdb_signal
) signum
)))
9864 gdb_printf (_("Not confirmed, unchanged.\n"));
9868 case GDB_SIGNAL_DEFAULT
:
9869 case GDB_SIGNAL_UNKNOWN
:
9870 /* Make sure that "all" doesn't print these. */
9879 for (int signum
= 0; signum
< nsigs
; signum
++)
9882 signal_cache_update (-1);
9883 target_pass_signals (signal_pass
);
9884 target_program_signals (signal_program
);
9888 /* Show the results. */
9889 sig_print_header ();
9890 for (; signum
< nsigs
; signum
++)
9892 sig_print_info ((enum gdb_signal
) signum
);
9899 /* Complete the "handle" command. */
9902 handle_completer (struct cmd_list_element
*ignore
,
9903 completion_tracker
&tracker
,
9904 const char *text
, const char *word
)
9906 static const char * const keywords
[] =
9920 signal_completer (ignore
, tracker
, text
, word
);
9921 complete_on_enum (tracker
, keywords
, word
, word
);
9925 gdb_signal_from_command (int num
)
9927 if (num
>= 1 && num
<= 15)
9928 return (enum gdb_signal
) num
;
9929 error (_("Only signals 1-15 are valid as numeric signals.\n\
9930 Use \"info signals\" for a list of symbolic signals."));
9933 /* Print current contents of the tables set by the handle command.
9934 It is possible we should just be printing signals actually used
9935 by the current target (but for things to work right when switching
9936 targets, all signals should be in the signal tables). */
9939 info_signals_command (const char *signum_exp
, int from_tty
)
9941 enum gdb_signal oursig
;
9943 sig_print_header ();
9947 /* First see if this is a symbol name. */
9948 oursig
= gdb_signal_from_name (signum_exp
);
9949 if (oursig
== GDB_SIGNAL_UNKNOWN
)
9951 /* No, try numeric. */
9953 gdb_signal_from_command (parse_and_eval_long (signum_exp
));
9955 sig_print_info (oursig
);
9960 /* These ugly casts brought to you by the native VAX compiler. */
9961 for (oursig
= GDB_SIGNAL_FIRST
;
9962 (int) oursig
< (int) GDB_SIGNAL_LAST
;
9963 oursig
= (enum gdb_signal
) ((int) oursig
+ 1))
9967 if (oursig
!= GDB_SIGNAL_UNKNOWN
9968 && oursig
!= GDB_SIGNAL_DEFAULT
&& oursig
!= GDB_SIGNAL_0
)
9969 sig_print_info (oursig
);
9972 gdb_printf (_("\nUse the \"handle\" command "
9973 "to change these tables.\n"));
9976 /* The $_siginfo convenience variable is a bit special. We don't know
9977 for sure the type of the value until we actually have a chance to
9978 fetch the data. The type can change depending on gdbarch, so it is
9979 also dependent on which thread you have selected.
9981 1. making $_siginfo be an internalvar that creates a new value on
9984 2. making the value of $_siginfo be an lval_computed value. */
9986 /* This function implements the lval_computed support for reading a
9990 siginfo_value_read (struct value
*v
)
9992 LONGEST transferred
;
9994 /* If we can access registers, so can we access $_siginfo. Likewise
9996 validate_registers_access ();
9999 target_read (current_inferior ()->top_target (),
10000 TARGET_OBJECT_SIGNAL_INFO
,
10002 v
->contents_all_raw ().data (),
10004 v
->type ()->length ());
10006 if (transferred
!= v
->type ()->length ())
10007 error (_("Unable to read siginfo"));
10010 /* This function implements the lval_computed support for writing a
10011 $_siginfo value. */
10014 siginfo_value_write (struct value
*v
, struct value
*fromval
)
10016 LONGEST transferred
;
10018 /* If we can access registers, so can we access $_siginfo. Likewise
10020 validate_registers_access ();
10022 transferred
= target_write (current_inferior ()->top_target (),
10023 TARGET_OBJECT_SIGNAL_INFO
,
10025 fromval
->contents_all_raw ().data (),
10027 fromval
->type ()->length ());
10029 if (transferred
!= fromval
->type ()->length ())
10030 error (_("Unable to write siginfo"));
10033 static const struct lval_funcs siginfo_value_funcs
=
10035 siginfo_value_read
,
10036 siginfo_value_write
10039 /* Return a new value with the correct type for the siginfo object of
10040 the current thread using architecture GDBARCH. Return a void value
10041 if there's no object available. */
10043 static struct value
*
10044 siginfo_make_value (struct gdbarch
*gdbarch
, struct internalvar
*var
,
10047 if (target_has_stack ()
10048 && inferior_ptid
!= null_ptid
10049 && gdbarch_get_siginfo_type_p (gdbarch
))
10051 struct type
*type
= gdbarch_get_siginfo_type (gdbarch
);
10053 return value::allocate_computed (type
, &siginfo_value_funcs
, nullptr);
10056 return value::allocate (builtin_type (gdbarch
)->builtin_void
);
10060 /* infcall_suspend_state contains state about the program itself like its
10061 registers and any signal it received when it last stopped.
10062 This state must be restored regardless of how the inferior function call
10063 ends (either successfully, or after it hits a breakpoint or signal)
10064 if the program is to properly continue where it left off. */
10066 class infcall_suspend_state
10069 /* Capture state from GDBARCH, TP, and REGCACHE that must be restored
10070 once the inferior function call has finished. */
10071 infcall_suspend_state (struct gdbarch
*gdbarch
,
10072 const struct thread_info
*tp
,
10073 struct regcache
*regcache
)
10074 : m_registers (new readonly_detached_regcache (*regcache
))
10076 tp
->save_suspend_to (m_thread_suspend
);
10078 gdb::unique_xmalloc_ptr
<gdb_byte
> siginfo_data
;
10080 if (gdbarch_get_siginfo_type_p (gdbarch
))
10082 struct type
*type
= gdbarch_get_siginfo_type (gdbarch
);
10083 size_t len
= type
->length ();
10085 siginfo_data
.reset ((gdb_byte
*) xmalloc (len
));
10087 if (target_read (current_inferior ()->top_target (),
10088 TARGET_OBJECT_SIGNAL_INFO
, nullptr,
10089 siginfo_data
.get (), 0, len
) != len
)
10091 /* Errors ignored. */
10092 siginfo_data
.reset (nullptr);
10098 m_siginfo_gdbarch
= gdbarch
;
10099 m_siginfo_data
= std::move (siginfo_data
);
10103 /* Return a pointer to the stored register state. */
10105 readonly_detached_regcache
*registers () const
10107 return m_registers
.get ();
10110 /* Restores the stored state into GDBARCH, TP, and REGCACHE. */
10112 void restore (struct gdbarch
*gdbarch
,
10113 struct thread_info
*tp
,
10114 struct regcache
*regcache
) const
10116 tp
->restore_suspend_from (m_thread_suspend
);
10118 if (m_siginfo_gdbarch
== gdbarch
)
10120 struct type
*type
= gdbarch_get_siginfo_type (gdbarch
);
10122 /* Errors ignored. */
10123 target_write (current_inferior ()->top_target (),
10124 TARGET_OBJECT_SIGNAL_INFO
, nullptr,
10125 m_siginfo_data
.get (), 0, type
->length ());
10128 /* The inferior can be gone if the user types "print exit(0)"
10129 (and perhaps other times). */
10130 if (target_has_execution ())
10131 /* NB: The register write goes through to the target. */
10132 regcache
->restore (registers ());
10136 /* How the current thread stopped before the inferior function call was
10138 struct thread_suspend_state m_thread_suspend
;
10140 /* The registers before the inferior function call was executed. */
10141 std::unique_ptr
<readonly_detached_regcache
> m_registers
;
10143 /* Format of SIGINFO_DATA or NULL if it is not present. */
10144 struct gdbarch
*m_siginfo_gdbarch
= nullptr;
10146 /* The inferior format depends on SIGINFO_GDBARCH and it has a length of
10147 gdbarch_get_siginfo_type ()->length (). For different gdbarch the
10148 content would be invalid. */
10149 gdb::unique_xmalloc_ptr
<gdb_byte
> m_siginfo_data
;
10152 infcall_suspend_state_up
10153 save_infcall_suspend_state ()
10155 struct thread_info
*tp
= inferior_thread ();
10156 regcache
*regcache
= get_thread_regcache (tp
);
10157 struct gdbarch
*gdbarch
= regcache
->arch ();
10159 infcall_suspend_state_up inf_state
10160 (new struct infcall_suspend_state (gdbarch
, tp
, regcache
));
10162 /* Having saved the current state, adjust the thread state, discarding
10163 any stop signal information. The stop signal is not useful when
10164 starting an inferior function call, and run_inferior_call will not use
10165 the signal due to its `proceed' call with GDB_SIGNAL_0. */
10166 tp
->set_stop_signal (GDB_SIGNAL_0
);
10171 /* Restore inferior session state to INF_STATE. */
10174 restore_infcall_suspend_state (struct infcall_suspend_state
*inf_state
)
10176 struct thread_info
*tp
= inferior_thread ();
10177 regcache
*regcache
= get_thread_regcache (inferior_thread ());
10178 struct gdbarch
*gdbarch
= regcache
->arch ();
10180 inf_state
->restore (gdbarch
, tp
, regcache
);
10181 discard_infcall_suspend_state (inf_state
);
/* Dispose of INF_STATE without restoring it.  */

void
discard_infcall_suspend_state (struct infcall_suspend_state *inf_state)
{
  delete inf_state;
}
10190 readonly_detached_regcache
*
10191 get_infcall_suspend_state_regcache (struct infcall_suspend_state
*inf_state
)
10193 return inf_state
->registers ();
10196 /* infcall_control_state contains state regarding gdb's control of the
10197 inferior itself like stepping control. It also contains session state like
10198 the user's currently selected frame. */
10200 struct infcall_control_state
10202 struct thread_control_state thread_control
;
10203 struct inferior_control_state inferior_control
;
10205 /* Other fields: */
10206 enum stop_stack_kind stop_stack_dummy
= STOP_NONE
;
10207 int stopped_by_random_signal
= 0;
10209 /* ID and level of the selected frame when the inferior function
10211 struct frame_id selected_frame_id
{};
10212 int selected_frame_level
= -1;
10215 /* Save all of the information associated with the inferior<==>gdb
10218 infcall_control_state_up
10219 save_infcall_control_state ()
10221 infcall_control_state_up
inf_status (new struct infcall_control_state
);
10222 struct thread_info
*tp
= inferior_thread ();
10223 struct inferior
*inf
= current_inferior ();
10225 inf_status
->thread_control
= tp
->control
;
10226 inf_status
->inferior_control
= inf
->control
;
10228 tp
->control
.step_resume_breakpoint
= nullptr;
10229 tp
->control
.exception_resume_breakpoint
= nullptr;
10231 /* Save original bpstat chain to INF_STATUS; replace it in TP with copy of
10232 chain. If caller's caller is walking the chain, they'll be happier if we
10233 hand them back the original chain when restore_infcall_control_state is
10235 tp
->control
.stop_bpstat
= bpstat_copy (tp
->control
.stop_bpstat
);
10237 /* Other fields: */
10238 inf_status
->stop_stack_dummy
= stop_stack_dummy
;
10239 inf_status
->stopped_by_random_signal
= stopped_by_random_signal
;
10241 save_selected_frame (&inf_status
->selected_frame_id
,
10242 &inf_status
->selected_frame_level
);
10247 /* Restore inferior session state to INF_STATUS. */
10250 restore_infcall_control_state (struct infcall_control_state
*inf_status
)
10252 struct thread_info
*tp
= inferior_thread ();
10253 struct inferior
*inf
= current_inferior ();
10255 if (tp
->control
.step_resume_breakpoint
)
10256 tp
->control
.step_resume_breakpoint
->disposition
= disp_del_at_next_stop
;
10258 if (tp
->control
.exception_resume_breakpoint
)
10259 tp
->control
.exception_resume_breakpoint
->disposition
10260 = disp_del_at_next_stop
;
10262 /* Handle the bpstat_copy of the chain. */
10263 bpstat_clear (&tp
->control
.stop_bpstat
);
10265 tp
->control
= inf_status
->thread_control
;
10266 inf
->control
= inf_status
->inferior_control
;
10268 /* Other fields: */
10269 stop_stack_dummy
= inf_status
->stop_stack_dummy
;
10270 stopped_by_random_signal
= inf_status
->stopped_by_random_signal
;
10272 if (target_has_stack ())
10274 restore_selected_frame (inf_status
->selected_frame_id
,
10275 inf_status
->selected_frame_level
);
10282 discard_infcall_control_state (struct infcall_control_state
*inf_status
)
10284 if (inf_status
->thread_control
.step_resume_breakpoint
)
10285 inf_status
->thread_control
.step_resume_breakpoint
->disposition
10286 = disp_del_at_next_stop
;
10288 if (inf_status
->thread_control
.exception_resume_breakpoint
)
10289 inf_status
->thread_control
.exception_resume_breakpoint
->disposition
10290 = disp_del_at_next_stop
;
10292 /* See save_infcall_control_state for info on stop_bpstat. */
10293 bpstat_clear (&inf_status
->thread_control
.stop_bpstat
);
10298 /* See infrun.h. */
10301 clear_exit_convenience_vars (void)
10303 clear_internalvar (lookup_internalvar ("_exitsignal"));
10304 clear_internalvar (lookup_internalvar ("_exitcode"));
10308 /* User interface for reverse debugging:
10309 Set exec-direction / show exec-direction commands
10310 (returns error unless target implements to_set_exec_direction method). */
10312 enum exec_direction_kind execution_direction
= EXEC_FORWARD
;
10313 static const char exec_forward
[] = "forward";
10314 static const char exec_reverse
[] = "reverse";
10315 static const char *exec_direction
= exec_forward
;
10316 static const char *const exec_direction_names
[] = {
10323 set_exec_direction_func (const char *args
, int from_tty
,
10324 struct cmd_list_element
*cmd
)
10326 if (target_can_execute_reverse ())
10328 if (!strcmp (exec_direction
, exec_forward
))
10329 execution_direction
= EXEC_FORWARD
;
10330 else if (!strcmp (exec_direction
, exec_reverse
))
10331 execution_direction
= EXEC_REVERSE
;
10335 exec_direction
= exec_forward
;
10336 error (_("Target does not support this operation."));
10341 show_exec_direction_func (struct ui_file
*out
, int from_tty
,
10342 struct cmd_list_element
*cmd
, const char *value
)
10344 switch (execution_direction
) {
10346 gdb_printf (out
, _("Forward.\n"));
10349 gdb_printf (out
, _("Reverse.\n"));
10352 internal_error (_("bogus execution_direction value: %d"),
10353 (int) execution_direction
);
10358 show_schedule_multiple (struct ui_file
*file
, int from_tty
,
10359 struct cmd_list_element
*c
, const char *value
)
10361 gdb_printf (file
, _("Resuming the execution of threads "
10362 "of all processes is %s.\n"), value
);
10365 /* Implementation of `siginfo' variable. */
10367 static const struct internalvar_funcs siginfo_funcs
=
10369 siginfo_make_value
,
10373 /* Callback for infrun's target events source. This is marked when a
10374 thread has a pending status to process. */
10377 infrun_async_inferior_event_handler (gdb_client_data data
)
10379 clear_async_event_handler (infrun_async_inferior_event_token
);
10380 inferior_event_handler (INF_REG_EVENT
);
10384 namespace selftests
10387 /* Verify that when two threads with the same ptid exist (from two different
10388 targets) and one of them changes ptid, we only update inferior_ptid if
10389 it is appropriate. */
10392 infrun_thread_ptid_changed ()
10394 gdbarch
*arch
= current_inferior ()->arch ();
10396 /* The thread which inferior_ptid represents changes ptid. */
10398 scoped_restore_current_pspace_and_thread restore
;
10400 scoped_mock_context
<test_target_ops
> target1 (arch
);
10401 scoped_mock_context
<test_target_ops
> target2 (arch
);
10403 ptid_t
old_ptid (111, 222);
10404 ptid_t
new_ptid (111, 333);
10406 target1
.mock_inferior
.pid
= old_ptid
.pid ();
10407 target1
.mock_thread
.ptid
= old_ptid
;
10408 target1
.mock_inferior
.ptid_thread_map
.clear ();
10409 target1
.mock_inferior
.ptid_thread_map
[old_ptid
] = &target1
.mock_thread
;
10411 target2
.mock_inferior
.pid
= old_ptid
.pid ();
10412 target2
.mock_thread
.ptid
= old_ptid
;
10413 target2
.mock_inferior
.ptid_thread_map
.clear ();
10414 target2
.mock_inferior
.ptid_thread_map
[old_ptid
] = &target2
.mock_thread
;
10416 auto restore_inferior_ptid
= make_scoped_restore (&inferior_ptid
, old_ptid
);
10417 set_current_inferior (&target1
.mock_inferior
);
10419 thread_change_ptid (&target1
.mock_target
, old_ptid
, new_ptid
);
10421 gdb_assert (inferior_ptid
== new_ptid
);
10424 /* A thread with the same ptid as inferior_ptid, but from another target,
10427 scoped_restore_current_pspace_and_thread restore
;
10429 scoped_mock_context
<test_target_ops
> target1 (arch
);
10430 scoped_mock_context
<test_target_ops
> target2 (arch
);
10432 ptid_t
old_ptid (111, 222);
10433 ptid_t
new_ptid (111, 333);
10435 target1
.mock_inferior
.pid
= old_ptid
.pid ();
10436 target1
.mock_thread
.ptid
= old_ptid
;
10437 target1
.mock_inferior
.ptid_thread_map
.clear ();
10438 target1
.mock_inferior
.ptid_thread_map
[old_ptid
] = &target1
.mock_thread
;
10440 target2
.mock_inferior
.pid
= old_ptid
.pid ();
10441 target2
.mock_thread
.ptid
= old_ptid
;
10442 target2
.mock_inferior
.ptid_thread_map
.clear ();
10443 target2
.mock_inferior
.ptid_thread_map
[old_ptid
] = &target2
.mock_thread
;
10445 auto restore_inferior_ptid
= make_scoped_restore (&inferior_ptid
, old_ptid
);
10446 set_current_inferior (&target2
.mock_inferior
);
10448 thread_change_ptid (&target1
.mock_target
, old_ptid
, new_ptid
);
10450 gdb_assert (inferior_ptid
== old_ptid
);
10454 } /* namespace selftests */
10456 #endif /* GDB_SELF_TEST */
10458 void _initialize_infrun ();
10460 _initialize_infrun ()
10462 struct cmd_list_element
*c
;
10464 /* Register extra event sources in the event loop. */
10465 infrun_async_inferior_event_token
10466 = create_async_event_handler (infrun_async_inferior_event_handler
, nullptr,
10469 cmd_list_element
*info_signals_cmd
10470 = add_info ("signals", info_signals_command
, _("\
10471 What debugger does when program gets various signals.\n\
10472 Specify a signal as argument to print info on that signal only."));
10473 add_info_alias ("handle", info_signals_cmd
, 0);
10475 c
= add_com ("handle", class_run
, handle_command
, _("\
10476 Specify how to handle signals.\n\
10477 Usage: handle SIGNAL [ACTIONS]\n\
10478 Args are signals and actions to apply to those signals.\n\
10479 If no actions are specified, the current settings for the specified signals\n\
10480 will be displayed instead.\n\
10482 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
10483 from 1-15 are allowed for compatibility with old versions of GDB.\n\
10484 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
10485 The special arg \"all\" is recognized to mean all signals except those\n\
10486 used by the debugger, typically SIGTRAP and SIGINT.\n\
10488 Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
10489 \"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
10490 Stop means reenter debugger if this signal happens (implies print).\n\
10491 Print means print a message if this signal happens.\n\
10492 Pass means let program see this signal; otherwise program doesn't know.\n\
10493 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
10494 Pass and Stop may be combined.\n\
10496 Multiple signals may be specified. Signal numbers and signal names\n\
10497 may be interspersed with actions, with the actions being performed for\n\
10498 all signals cumulatively specified."));
10499 set_cmd_completer (c
, handle_completer
);
10501 stop_command
= add_cmd ("stop", class_obscure
,
10502 not_just_help_class_command
, _("\
10503 There is no `stop' command, but you can set a hook on `stop'.\n\
10504 This allows you to set a list of commands to be run each time execution\n\
10505 of the program stops."), &cmdlist
);
10507 add_setshow_boolean_cmd
10508 ("infrun", class_maintenance
, &debug_infrun
,
10509 _("Set inferior debugging."),
10510 _("Show inferior debugging."),
10511 _("When non-zero, inferior specific debugging is enabled."),
10512 nullptr, show_debug_infrun
, &setdebuglist
, &showdebuglist
);
10514 add_setshow_boolean_cmd ("non-stop", no_class
,
10516 Set whether gdb controls the inferior in non-stop mode."), _("\
10517 Show whether gdb controls the inferior in non-stop mode."), _("\
10518 When debugging a multi-threaded program and this setting is\n\
10519 off (the default, also called all-stop mode), when one thread stops\n\
10520 (for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
10521 all other threads in the program while you interact with the thread of\n\
10522 interest. When you continue or step a thread, you can allow the other\n\
10523 threads to run, or have them remain stopped, but while you inspect any\n\
10524 thread's state, all threads stop.\n\
10526 In non-stop mode, when one thread stops, other threads can continue\n\
10527 to run freely. You'll be able to step each thread independently,\n\
10528 leave it stopped or free to run as needed."),
10534 for (size_t i
= 0; i
< GDB_SIGNAL_LAST
; i
++)
10536 signal_stop
[i
] = 1;
10537 signal_print
[i
] = 1;
10538 signal_program
[i
] = 1;
10539 signal_catch
[i
] = 0;
10542 /* Signals caused by debugger's own actions should not be given to
10543 the program afterwards.
10545 Do not deliver GDB_SIGNAL_TRAP by default, except when the user
10546 explicitly specifies that it should be delivered to the target
10547 program. Typically, that would occur when a user is debugging a
10548 target monitor on a simulator: the target monitor sets a
10549 breakpoint; the simulator encounters this breakpoint and halts
10550 the simulation handing control to GDB; GDB, noting that the stop
10551 address doesn't map to any known breakpoint, returns control back
10552 to the simulator; the simulator then delivers the hardware
10553 equivalent of a GDB_SIGNAL_TRAP to the program being
10555 signal_program
[GDB_SIGNAL_TRAP
] = 0;
10556 signal_program
[GDB_SIGNAL_INT
] = 0;
10558 /* Signals that are not errors should not normally enter the debugger. */
10559 signal_stop
[GDB_SIGNAL_ALRM
] = 0;
10560 signal_print
[GDB_SIGNAL_ALRM
] = 0;
10561 signal_stop
[GDB_SIGNAL_VTALRM
] = 0;
10562 signal_print
[GDB_SIGNAL_VTALRM
] = 0;
10563 signal_stop
[GDB_SIGNAL_PROF
] = 0;
10564 signal_print
[GDB_SIGNAL_PROF
] = 0;
10565 signal_stop
[GDB_SIGNAL_CHLD
] = 0;
10566 signal_print
[GDB_SIGNAL_CHLD
] = 0;
10567 signal_stop
[GDB_SIGNAL_IO
] = 0;
10568 signal_print
[GDB_SIGNAL_IO
] = 0;
10569 signal_stop
[GDB_SIGNAL_POLL
] = 0;
10570 signal_print
[GDB_SIGNAL_POLL
] = 0;
10571 signal_stop
[GDB_SIGNAL_URG
] = 0;
10572 signal_print
[GDB_SIGNAL_URG
] = 0;
10573 signal_stop
[GDB_SIGNAL_WINCH
] = 0;
10574 signal_print
[GDB_SIGNAL_WINCH
] = 0;
10575 signal_stop
[GDB_SIGNAL_PRIO
] = 0;
10576 signal_print
[GDB_SIGNAL_PRIO
] = 0;
10578 /* These signals are used internally by user-level thread
10579 implementations. (See signal(5) on Solaris.) Like the above
10580 signals, a healthy program receives and handles them as part of
10581 its normal operation. */
10582 signal_stop
[GDB_SIGNAL_LWP
] = 0;
10583 signal_print
[GDB_SIGNAL_LWP
] = 0;
10584 signal_stop
[GDB_SIGNAL_WAITING
] = 0;
10585 signal_print
[GDB_SIGNAL_WAITING
] = 0;
10586 signal_stop
[GDB_SIGNAL_CANCEL
] = 0;
10587 signal_print
[GDB_SIGNAL_CANCEL
] = 0;
10588 signal_stop
[GDB_SIGNAL_LIBRT
] = 0;
10589 signal_print
[GDB_SIGNAL_LIBRT
] = 0;
10591 /* Update cached state. */
10592 signal_cache_update (-1);
10594 add_setshow_zinteger_cmd ("stop-on-solib-events", class_support
,
10595 &stop_on_solib_events
, _("\
10596 Set stopping for shared library events."), _("\
10597 Show stopping for shared library events."), _("\
10598 If nonzero, gdb will give control to the user when the dynamic linker\n\
10599 notifies gdb of shared library events. The most common event of interest\n\
10600 to the user would be loading/unloading of a new library."),
10601 set_stop_on_solib_events
,
10602 show_stop_on_solib_events
,
10603 &setlist
, &showlist
);
10605 add_setshow_enum_cmd ("follow-fork-mode", class_run
,
10606 follow_fork_mode_kind_names
,
10607 &follow_fork_mode_string
, _("\
10608 Set debugger response to a program call of fork or vfork."), _("\
10609 Show debugger response to a program call of fork or vfork."), _("\
10610 A fork or vfork creates a new process. follow-fork-mode can be:\n\
10611 parent - the original process is debugged after a fork\n\
10612 child - the new process is debugged after a fork\n\
10613 The unfollowed process will continue to run.\n\
10614 By default, the debugger will follow the parent process."),
10616 show_follow_fork_mode_string
,
10617 &setlist
, &showlist
);
10619 add_setshow_enum_cmd ("follow-exec-mode", class_run
,
10620 follow_exec_mode_names
,
10621 &follow_exec_mode_string
, _("\
10622 Set debugger response to a program call of exec."), _("\
10623 Show debugger response to a program call of exec."), _("\
10624 An exec call replaces the program image of a process.\n\
10626 follow-exec-mode can be:\n\
10628 new - the debugger creates a new inferior and rebinds the process\n\
10629 to this new inferior. The program the process was running before\n\
10630 the exec call can be restarted afterwards by restarting the original\n\
10633 same - the debugger keeps the process bound to the same inferior.\n\
10634 The new executable image replaces the previous executable loaded in\n\
10635 the inferior. Restarting the inferior after the exec call restarts\n\
10636 the executable the process was running after the exec call.\n\
10638 By default, the debugger will use the same inferior."),
10640 show_follow_exec_mode_string
,
10641 &setlist
, &showlist
);
10643 add_setshow_enum_cmd ("scheduler-locking", class_run
,
10644 scheduler_enums
, &scheduler_mode
, _("\
10645 Set mode for locking scheduler during execution."), _("\
10646 Show mode for locking scheduler during execution."), _("\
10647 off == no locking (threads may preempt at any time)\n\
10648 on == full locking (no thread except the current thread may run)\n\
10649 This applies to both normal execution and replay mode.\n\
10650 step == scheduler locked during stepping commands (step, next, stepi, nexti).\n\
10651 In this mode, other threads may run during other commands.\n\
10652 This applies to both normal execution and replay mode.\n\
10653 replay == scheduler locked in replay mode and unlocked during normal execution."),
10654 set_schedlock_func
, /* traps on target vector */
10655 show_scheduler_mode
,
10656 &setlist
, &showlist
);
10658 add_setshow_boolean_cmd ("schedule-multiple", class_run
, &sched_multi
, _("\
10659 Set mode for resuming threads of all processes."), _("\
10660 Show mode for resuming threads of all processes."), _("\
10661 When on, execution commands (such as 'continue' or 'next') resume all\n\
10662 threads of all processes. When off (which is the default), execution\n\
10663 commands only resume the threads of the current process. The set of\n\
10664 threads that are resumed is further refined by the scheduler-locking\n\
10665 mode (see help set scheduler-locking)."),
10667 show_schedule_multiple
,
10668 &setlist
, &showlist
);
10670 add_setshow_boolean_cmd ("step-mode", class_run
, &step_stop_if_no_debug
, _("\
10671 Set mode of the step operation."), _("\
10672 Show mode of the step operation."), _("\
10673 When set, doing a step over a function without debug line information\n\
10674 will stop at the first instruction of that function. Otherwise, the\n\
10675 function is skipped and the step command stops at a different source line."),
10677 show_step_stop_if_no_debug
,
10678 &setlist
, &showlist
);
10680 add_setshow_auto_boolean_cmd ("displaced-stepping", class_run
,
10681 &can_use_displaced_stepping
, _("\
10682 Set debugger's willingness to use displaced stepping."), _("\
10683 Show debugger's willingness to use displaced stepping."), _("\
10684 If on, gdb will use displaced stepping to step over breakpoints if it is\n\
10685 supported by the target architecture. If off, gdb will not use displaced\n\
10686 stepping to step over breakpoints, even if such is supported by the target\n\
10687 architecture. If auto (which is the default), gdb will use displaced stepping\n\
10688 if the target architecture supports it and non-stop mode is active, but will not\n\
10689 use it in all-stop mode (see help set non-stop)."),
10691 show_can_use_displaced_stepping
,
10692 &setlist
, &showlist
);
10694 add_setshow_enum_cmd ("exec-direction", class_run
, exec_direction_names
,
10695 &exec_direction
, _("Set direction of execution.\n\
10696 Options are 'forward' or 'reverse'."),
10697 _("Show direction of execution (forward/reverse)."),
10698 _("Tells gdb whether to execute forward or backward."),
10699 set_exec_direction_func
, show_exec_direction_func
,
10700 &setlist
, &showlist
);
10702 /* Set/show detach-on-fork: user-settable mode. */
10704 add_setshow_boolean_cmd ("detach-on-fork", class_run
, &detach_fork
, _("\
10705 Set whether gdb will detach the child of a fork."), _("\
10706 Show whether gdb will detach the child of a fork."), _("\
10707 Tells gdb whether to detach the child of a fork."),
10708 nullptr, nullptr, &setlist
, &showlist
);
10710 /* Set/show disable address space randomization mode. */
10712 add_setshow_boolean_cmd ("disable-randomization", class_support
,
10713 &disable_randomization
, _("\
10714 Set disabling of debuggee's virtual address space randomization."), _("\
10715 Show disabling of debuggee's virtual address space randomization."), _("\
10716 When this mode is on (which is the default), randomization of the virtual\n\
10717 address space is disabled. Standalone programs run with the randomization\n\
10718 enabled by default on some platforms."),
10719 &set_disable_randomization
,
10720 &show_disable_randomization
,
10721 &setlist
, &showlist
);
10723 /* ptid initializations */
10724 inferior_ptid
= null_ptid
;
10725 target_last_wait_ptid
= minus_one_ptid
;
10727 gdb::observers::thread_ptid_changed
.attach (infrun_thread_ptid_changed
,
10729 gdb::observers::thread_stop_requested
.attach (infrun_thread_stop_requested
,
10731 gdb::observers::inferior_exit
.attach (infrun_inferior_exit
, "infrun");
10732 gdb::observers::inferior_execd
.attach (infrun_inferior_execd
, "infrun");
10734 /* Explicitly create without lookup, since that tries to create a
10735 value with a void typed value, and when we get here, gdbarch
10736 isn't initialized yet. At this point, we're quite sure there
10737 isn't another convenience variable of the same name. */
10738 create_internalvar_type_lazy ("_siginfo", &siginfo_funcs
, nullptr);
10740 add_setshow_boolean_cmd ("observer", no_class
,
10741 &observer_mode_1
, _("\
10742 Set whether gdb controls the inferior in observer mode."), _("\
10743 Show whether gdb controls the inferior in observer mode."), _("\
10744 In observer mode, GDB can get data from the inferior, but not\n\
10745 affect its execution. Registers and memory may not be changed,\n\
10746 breakpoints may not be set, and the program cannot be interrupted\n\
10749 show_observer_mode
,
10754 selftests::register_test ("infrun_thread_ptid_changed",
10755 selftests::infrun_thread_ptid_changed
);