Automatic date update in version.in
[binutils-gdb.git] / gdbserver / linux-low.cc
blobac7f9807eccb19aae1911ec81dc124c3ced7a13f
1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995-2024 Free Software Foundation, Inc.
4 This file is part of GDB.
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19 #include "linux-low.h"
20 #include "nat/linux-osdata.h"
21 #include "gdbsupport/agent.h"
22 #include "tdesc.h"
23 #include "gdbsupport/event-loop.h"
24 #include "gdbsupport/event-pipe.h"
25 #include "gdbsupport/rsp-low.h"
26 #include "gdbsupport/signals-state-save-restore.h"
27 #include "nat/linux-nat.h"
28 #include "nat/linux-waitpid.h"
29 #include "gdbsupport/gdb_wait.h"
30 #include "nat/gdb_ptrace.h"
31 #include "nat/linux-ptrace.h"
32 #include "nat/linux-procfs.h"
33 #include "nat/linux-personality.h"
34 #include <signal.h>
35 #include <sys/ioctl.h>
36 #include <fcntl.h>
37 #include <unistd.h>
38 #include <sys/syscall.h>
39 #include <sched.h>
40 #include <pwd.h>
41 #include <sys/types.h>
42 #include <dirent.h>
43 #include <sys/stat.h>
44 #include <sys/vfs.h>
45 #include <sys/uio.h>
46 #include <langinfo.h>
47 #include <iconv.h>
48 #include "gdbsupport/filestuff.h"
49 #include "gdbsupport/gdb-safe-ctype.h"
50 #include "tracepoint.h"
51 #include <inttypes.h>
52 #include "gdbsupport/common-inferior.h"
53 #include "nat/fork-inferior.h"
54 #include "gdbsupport/environ.h"
55 #include "gdbsupport/gdb-sigmask.h"
56 #include "gdbsupport/scoped_restore.h"
57 #ifndef ELFMAG0
58 /* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
59 then ELFMAG0 will have been defined. If it didn't get included by
60 gdb_proc_service.h then including it will likely introduce a duplicate
61 definition of elf_fpregset_t. */
62 #include <elf.h>
63 #endif
64 #include "nat/linux-namespaces.h"
66 #ifndef O_LARGEFILE
67 #define O_LARGEFILE 0
68 #endif
70 #ifndef AT_HWCAP2
71 #define AT_HWCAP2 26
72 #endif
74 /* Some targets did not define these ptrace constants from the start,
75 so gdbserver defines them locally here. In the future, these may
76 be removed after they are added to asm/ptrace.h. */
77 #if !(defined(PT_TEXT_ADDR) \
78 || defined(PT_DATA_ADDR) \
79 || defined(PT_TEXT_END_ADDR))
80 #if defined(__mcoldfire__)
81 /* These are still undefined in 3.10 kernels. */
82 #define PT_TEXT_ADDR 49*4
83 #define PT_DATA_ADDR 50*4
84 #define PT_TEXT_END_ADDR 51*4
85 /* These are still undefined in 3.10 kernels. */
86 #elif defined(__TMS320C6X__)
87 #define PT_TEXT_ADDR (0x10000*4)
88 #define PT_DATA_ADDR (0x10004*4)
89 #define PT_TEXT_END_ADDR (0x10008*4)
90 #endif
91 #endif
93 #if (defined(__UCLIBC__) \
94 && defined(HAS_NOMMU) \
95 && defined(PT_TEXT_ADDR) \
96 && defined(PT_DATA_ADDR) \
97 && defined(PT_TEXT_END_ADDR))
98 #define SUPPORTS_READ_OFFSETS
99 #endif
101 #ifdef HAVE_LINUX_BTRACE
102 # include "nat/linux-btrace.h"
103 # include "gdbsupport/btrace-common.h"
104 #endif
#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint32_t a_type;		/* Entry type */
  union
    {
      uint32_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif
#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint64_t a_type;		/* Entry type */
  union
    {
      uint64_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif
136 /* Does the current host support PTRACE_GETREGSET? */
137 enum tribool have_ptrace_getregset = TRIBOOL_UNKNOWN;
139 /* Return TRUE if THREAD is the leader thread of the process. */
141 static bool
142 is_leader (thread_info *thread)
144 ptid_t ptid = ptid_of (thread);
145 return ptid.pid () == ptid.lwp ();
148 /* Return true if we should report thread exit events to GDB, for
149 THR. */
151 static bool
152 report_exit_events_for (thread_info *thr)
154 client_state &cs = get_client_state ();
156 return (cs.report_thread_events
157 || (thr->thread_options & GDB_THREAD_OPTION_EXIT) != 0);
160 /* LWP accessors. */
162 /* See nat/linux-nat.h. */
164 ptid_t
165 ptid_of_lwp (struct lwp_info *lwp)
167 return ptid_of (get_lwp_thread (lwp));
170 /* See nat/linux-nat.h. */
172 void
173 lwp_set_arch_private_info (struct lwp_info *lwp,
174 struct arch_lwp_info *info)
176 lwp->arch_private = info;
179 /* See nat/linux-nat.h. */
181 struct arch_lwp_info *
182 lwp_arch_private_info (struct lwp_info *lwp)
184 return lwp->arch_private;
187 /* See nat/linux-nat.h. */
190 lwp_is_stopped (struct lwp_info *lwp)
192 return lwp->stopped;
195 /* See nat/linux-nat.h. */
197 enum target_stop_reason
198 lwp_stop_reason (struct lwp_info *lwp)
200 return lwp->stop_reason;
203 /* See nat/linux-nat.h. */
206 lwp_is_stepping (struct lwp_info *lwp)
208 return lwp->stepping;
211 /* A list of all unknown processes which receive stop signals. Some
212 other process will presumably claim each of these as forked
213 children momentarily. */
struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};

/* Head of the list; entries are claimed (and freed) by
   pull_pid_from_list when the parent's fork event arrives.  */
static struct simple_pid_list *stopped_pids;
228 /* Trivial list manipulation functions to keep track of a list of new
229 stopped processes. */
231 static void
232 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
234 struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);
236 new_pid->pid = pid;
237 new_pid->status = status;
238 new_pid->next = *listp;
239 *listp = new_pid;
242 static int
243 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
245 struct simple_pid_list **p;
247 for (p = listp; *p != NULL; p = &(*p)->next)
248 if ((*p)->pid == pid)
250 struct simple_pid_list *next = (*p)->next;
252 *statusp = (*p)->status;
253 xfree (*p);
254 *p = next;
255 return 1;
257 return 0;
enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
static stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
275 /* FIXME make into a target method? */
276 int using_threads = 1;
278 /* True if we're presently stabilizing threads (moving them out of
279 jump pads). */
280 static int stabilizing_threads;
282 static void unsuspend_all_lwps (struct lwp_info *except);
283 static void mark_lwp_dead (struct lwp_info *lwp, int wstat,
284 bool thread_event);
285 static int lwp_is_marked_dead (struct lwp_info *lwp);
286 static int kill_lwp (unsigned long lwpid, int signo);
287 static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
288 static int linux_low_ptrace_options (int attached);
289 static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);
291 /* When the event-loop is doing a step-over, this points at the thread
292 being stepped. */
293 static ptid_t step_over_bkpt;
295 bool
296 linux_process_target::low_supports_breakpoints ()
298 return false;
301 CORE_ADDR
302 linux_process_target::low_get_pc (regcache *regcache)
304 return 0;
307 void
308 linux_process_target::low_set_pc (regcache *regcache, CORE_ADDR newpc)
310 gdb_assert_not_reached ("linux target op low_set_pc is not implemented");
313 std::vector<CORE_ADDR>
314 linux_process_target::low_get_next_pcs (regcache *regcache)
316 gdb_assert_not_reached ("linux target op low_get_next_pcs is not "
317 "implemented");
321 linux_process_target::low_decr_pc_after_break ()
323 return 0;
326 /* True if LWP is stopped in its stepping range. */
328 static int
329 lwp_in_step_range (struct lwp_info *lwp)
331 CORE_ADDR pc = lwp->stop_pc;
333 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
336 /* The event pipe registered as a waitable file in the event loop. */
337 static event_pipe linux_event_pipe;
339 /* True if we're currently in async mode. */
340 #define target_is_async_p() (linux_event_pipe.is_open ())
342 static void send_sigstop (struct lwp_info *lwp);
/* Return 1 if HEADER describes a 64-bit ELF file, 0 if it describes a
   32-bit ELF file, and -1 if it is not an ELF file at all.  On
   success, *MACHINE is set from e_machine; otherwise it is set to
   EM_NONE.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }
  *machine = EM_NONE;
  return -1;
}
362 /* Return non-zero if FILE is a 64-bit ELF file,
363 zero if the file is not a 64-bit ELF file,
364 and -1 if the file is not accessible or doesn't exist. */
366 static int
367 elf_64_file_p (const char *file, unsigned int *machine)
369 Elf64_Ehdr header;
370 int fd;
372 fd = open (file, O_RDONLY);
373 if (fd < 0)
374 return -1;
376 if (read (fd, &header, sizeof (header)) != sizeof (header))
378 close (fd);
379 return 0;
381 close (fd);
383 return elf_64_header_p (&header, machine);
386 /* Accepts an integer PID; Returns true if the executable PID is
387 running is a 64-bit ELF file.. */
390 linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
392 char file[PATH_MAX];
394 sprintf (file, "/proc/%d/exe", pid);
395 return elf_64_file_p (file, machine);
398 void
399 linux_process_target::delete_lwp (lwp_info *lwp)
401 struct thread_info *thr = get_lwp_thread (lwp);
403 threads_debug_printf ("deleting %ld", lwpid_of (thr));
405 remove_thread (thr);
407 low_delete_thread (lwp->arch_private);
409 delete lwp;
412 void
413 linux_process_target::low_delete_thread (arch_lwp_info *info)
415 /* Default implementation should be overridden if architecture-specific
416 info is being used. */
417 gdb_assert (info == nullptr);
420 /* Open the /proc/PID/mem file for PROC. */
422 static void
423 open_proc_mem_file (process_info *proc)
425 gdb_assert (proc->priv->mem_fd == -1);
427 char filename[64];
428 xsnprintf (filename, sizeof filename, "/proc/%d/mem", proc->pid);
430 proc->priv->mem_fd
431 = gdb_open_cloexec (filename, O_RDWR | O_LARGEFILE, 0).release ();
434 process_info *
435 linux_process_target::add_linux_process_no_mem_file (int pid, int attached)
437 struct process_info *proc;
439 proc = add_process (pid, attached);
440 proc->priv = XCNEW (struct process_info_private);
442 proc->priv->arch_private = low_new_process ();
443 proc->priv->mem_fd = -1;
445 return proc;
449 process_info *
450 linux_process_target::add_linux_process (int pid, int attached)
452 process_info *proc = add_linux_process_no_mem_file (pid, attached);
453 open_proc_mem_file (proc);
454 return proc;
457 void
458 linux_process_target::remove_linux_process (process_info *proc)
460 if (proc->priv->mem_fd >= 0)
461 close (proc->priv->mem_fd);
463 this->low_delete_process (proc->priv->arch_private);
465 xfree (proc->priv);
466 proc->priv = nullptr;
468 remove_process (proc);
471 arch_process_info *
472 linux_process_target::low_new_process ()
474 return nullptr;
477 void
478 linux_process_target::low_delete_process (arch_process_info *info)
480 /* Default implementation must be overridden if architecture-specific
481 info exists. */
482 gdb_assert (info == nullptr);
485 void
486 linux_process_target::low_new_fork (process_info *parent, process_info *child)
488 /* Nop. */
491 void
492 linux_process_target::arch_setup_thread (thread_info *thread)
494 scoped_restore_current_thread restore_thread;
495 switch_to_thread (thread);
497 low_arch_setup ();
501 linux_process_target::handle_extended_wait (lwp_info **orig_event_lwp,
502 int wstat)
504 client_state &cs = get_client_state ();
505 struct lwp_info *event_lwp = *orig_event_lwp;
506 int event = linux_ptrace_get_extended_event (wstat);
507 struct thread_info *event_thr = get_lwp_thread (event_lwp);
509 gdb_assert (event_lwp->waitstatus.kind () == TARGET_WAITKIND_IGNORE);
511 /* All extended events we currently use are mid-syscall. Only
512 PTRACE_EVENT_STOP is delivered more like a signal-stop, but
513 you have to be using PTRACE_SEIZE to get that. */
514 event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
516 if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
517 || (event == PTRACE_EVENT_CLONE))
519 unsigned long new_pid;
520 int ret, status;
522 /* Get the pid of the new lwp. */
523 ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
524 &new_pid);
526 /* If we haven't already seen the new PID stop, wait for it now. */
527 if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
529 /* The new child has a pending SIGSTOP. We can't affect it until it
530 hits the SIGSTOP, but we're already attached. */
532 ret = my_waitpid (new_pid, &status, __WALL);
534 if (ret == -1)
535 perror_with_name ("waiting for new child");
536 else if (ret != new_pid)
537 warning ("wait returned unexpected PID %d", ret);
538 else if (!WIFSTOPPED (status))
539 warning ("wait returned unexpected status 0x%x", status);
542 if (debug_threads)
544 debug_printf ("HEW: Got %s event from LWP %ld, new child is %ld\n",
545 (event == PTRACE_EVENT_FORK ? "fork"
546 : event == PTRACE_EVENT_VFORK ? "vfork"
547 : event == PTRACE_EVENT_CLONE ? "clone"
548 : "???"),
549 ptid_of (event_thr).lwp (),
550 new_pid);
553 ptid_t child_ptid = (event != PTRACE_EVENT_CLONE
554 ? ptid_t (new_pid, new_pid)
555 : ptid_t (ptid_of (event_thr).pid (), new_pid));
557 process_info *child_proc = nullptr;
559 if (event != PTRACE_EVENT_CLONE)
561 /* Add the new process to the tables before we add the LWP.
562 We need to do this even if the new process will be
563 detached. See breakpoint cloning code further below. */
564 child_proc = add_linux_process (new_pid, 0);
567 lwp_info *child_lwp = add_lwp (child_ptid);
568 gdb_assert (child_lwp != NULL);
569 child_lwp->stopped = 1;
570 if (event != PTRACE_EVENT_CLONE)
571 child_lwp->must_set_ptrace_flags = 1;
572 child_lwp->status_pending_p = 0;
574 thread_info *child_thr = get_lwp_thread (child_lwp);
576 /* If we're suspending all threads, leave this one suspended
577 too. If the fork/clone parent is stepping over a breakpoint,
578 all other threads have been suspended already. Leave the
579 child suspended too. */
580 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
581 || event_lwp->bp_reinsert != 0)
583 threads_debug_printf ("leaving child suspended");
584 child_lwp->suspended = 1;
587 if (event_lwp->bp_reinsert != 0
588 && supports_software_single_step ()
589 && event == PTRACE_EVENT_VFORK)
591 /* If we leave single-step breakpoints there, child will
592 hit it, so uninsert single-step breakpoints from parent
593 (and child). Once vfork child is done, reinsert
594 them back to parent. */
595 uninsert_single_step_breakpoints (event_thr);
598 if (event != PTRACE_EVENT_CLONE)
600 /* Clone the breakpoint lists of the parent. We need to do
601 this even if the new process will be detached, since we
602 will need the process object and the breakpoints to
603 remove any breakpoints from memory when we detach, and
604 the client side will access registers. */
605 gdb_assert (child_proc != NULL);
607 process_info *parent_proc = get_thread_process (event_thr);
608 child_proc->attached = parent_proc->attached;
610 clone_all_breakpoints (child_thr, event_thr);
612 target_desc_up tdesc = allocate_target_description ();
613 copy_target_description (tdesc.get (), parent_proc->tdesc);
614 child_proc->tdesc = tdesc.release ();
616 /* Clone arch-specific process data. */
617 low_new_fork (parent_proc, child_proc);
620 /* Save fork/clone info in the parent thread. */
621 if (event == PTRACE_EVENT_FORK)
622 event_lwp->waitstatus.set_forked (child_ptid);
623 else if (event == PTRACE_EVENT_VFORK)
624 event_lwp->waitstatus.set_vforked (child_ptid);
625 else if (event == PTRACE_EVENT_CLONE
626 && (event_thr->thread_options & GDB_THREAD_OPTION_CLONE) != 0)
627 event_lwp->waitstatus.set_thread_cloned (child_ptid);
629 if (event != PTRACE_EVENT_CLONE
630 || (event_thr->thread_options & GDB_THREAD_OPTION_CLONE) != 0)
632 /* The status_pending field contains bits denoting the
633 extended event, so when the pending event is handled, the
634 handler will look at lwp->waitstatus. */
635 event_lwp->status_pending_p = 1;
636 event_lwp->status_pending = wstat;
638 /* Link the threads until the parent's event is passed on to
639 GDB. */
640 event_lwp->relative = child_lwp;
641 child_lwp->relative = event_lwp;
644 /* If the parent thread is doing step-over with single-step
645 breakpoints, the list of single-step breakpoints are cloned
646 from the parent's. Remove them from the child process.
647 In case of vfork, we'll reinsert them back once vforked
648 child is done. */
649 if (event_lwp->bp_reinsert != 0
650 && supports_software_single_step ())
652 /* The child process is forked and stopped, so it is safe
653 to access its memory without stopping all other threads
654 from other processes. */
655 delete_single_step_breakpoints (child_thr);
657 gdb_assert (has_single_step_breakpoints (event_thr));
658 gdb_assert (!has_single_step_breakpoints (child_thr));
661 /* Normally we will get the pending SIGSTOP. But in some cases
662 we might get another signal delivered to the group first.
663 If we do get another signal, be sure not to lose it. */
664 if (WSTOPSIG (status) != SIGSTOP)
666 child_lwp->stop_expected = 1;
667 child_lwp->status_pending_p = 1;
668 child_lwp->status_pending = status;
670 else if (event == PTRACE_EVENT_CLONE && cs.report_thread_events)
672 child_lwp->waitstatus.set_thread_created ();
673 child_lwp->status_pending_p = 1;
674 child_lwp->status_pending = status;
677 if (event == PTRACE_EVENT_CLONE)
679 #ifdef USE_THREAD_DB
680 thread_db_notice_clone (event_thr, child_ptid);
681 #endif
684 if (event == PTRACE_EVENT_CLONE
685 && (event_thr->thread_options & GDB_THREAD_OPTION_CLONE) == 0)
687 threads_debug_printf
688 ("not reporting clone event from LWP %ld, new child is %ld\n",
689 ptid_of (event_thr).lwp (),
690 new_pid);
691 return 1;
694 /* Leave the child stopped until GDB processes the parent
695 event. */
696 child_thr->last_resume_kind = resume_stop;
697 child_thr->last_status.set_stopped (GDB_SIGNAL_0);
699 /* Report the event. */
700 threads_debug_printf
701 ("reporting %s event from LWP %ld, new child is %ld\n",
702 (event == PTRACE_EVENT_FORK ? "fork"
703 : event == PTRACE_EVENT_VFORK ? "vfork"
704 : event == PTRACE_EVENT_CLONE ? "clone"
705 : "???"),
706 ptid_of (event_thr).lwp (),
707 new_pid);
708 return 0;
710 else if (event == PTRACE_EVENT_VFORK_DONE)
712 event_lwp->waitstatus.set_vfork_done ();
714 if (event_lwp->bp_reinsert != 0 && supports_software_single_step ())
716 reinsert_single_step_breakpoints (event_thr);
718 gdb_assert (has_single_step_breakpoints (event_thr));
721 /* Report the event. */
722 return 0;
724 else if (event == PTRACE_EVENT_EXEC && cs.report_exec_events)
726 struct process_info *proc;
727 std::vector<int> syscalls_to_catch;
728 ptid_t event_ptid;
729 pid_t event_pid;
731 threads_debug_printf ("Got exec event from LWP %ld",
732 lwpid_of (event_thr));
734 /* Get the event ptid. */
735 event_ptid = ptid_of (event_thr);
736 event_pid = event_ptid.pid ();
738 /* Save the syscall list from the execing process. */
739 proc = get_thread_process (event_thr);
740 syscalls_to_catch = std::move (proc->syscalls_to_catch);
742 /* Delete the execing process and all its threads. */
743 mourn (proc);
744 switch_to_thread (nullptr);
746 /* Create a new process/lwp/thread. */
747 proc = add_linux_process (event_pid, 0);
748 event_lwp = add_lwp (event_ptid);
749 event_thr = get_lwp_thread (event_lwp);
750 gdb_assert (current_thread == event_thr);
751 arch_setup_thread (event_thr);
753 /* Set the event status. */
754 event_lwp->waitstatus.set_execd
755 (make_unique_xstrdup
756 (linux_proc_pid_to_exec_file (lwpid_of (event_thr))));
758 /* Mark the exec status as pending. */
759 event_lwp->stopped = 1;
760 event_lwp->status_pending_p = 1;
761 event_lwp->status_pending = wstat;
762 event_thr->last_resume_kind = resume_continue;
763 event_thr->last_status.set_ignore ();
765 /* Update syscall state in the new lwp, effectively mid-syscall too. */
766 event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
768 /* Restore the list to catch. Don't rely on the client, which is free
769 to avoid sending a new list when the architecture doesn't change.
770 Also, for ANY_SYSCALL, the architecture doesn't really matter. */
771 proc->syscalls_to_catch = std::move (syscalls_to_catch);
773 /* Report the event. */
774 *orig_event_lwp = event_lwp;
775 return 0;
778 internal_error (_("unknown ptrace event %d"), event);
781 CORE_ADDR
782 linux_process_target::get_pc (lwp_info *lwp)
784 process_info *proc = get_thread_process (get_lwp_thread (lwp));
785 gdb_assert (!proc->starting_up);
787 if (!low_supports_breakpoints ())
788 return 0;
790 scoped_restore_current_thread restore_thread;
791 switch_to_thread (get_lwp_thread (lwp));
793 struct regcache *regcache = get_thread_regcache (current_thread, 1);
794 CORE_ADDR pc = low_get_pc (regcache);
796 threads_debug_printf ("pc is 0x%lx", (long) pc);
798 return pc;
801 void
802 linux_process_target::get_syscall_trapinfo (lwp_info *lwp, int *sysno)
804 struct regcache *regcache;
806 scoped_restore_current_thread restore_thread;
807 switch_to_thread (get_lwp_thread (lwp));
809 regcache = get_thread_regcache (current_thread, 1);
810 low_get_syscall_trapinfo (regcache, sysno);
812 threads_debug_printf ("get_syscall_trapinfo sysno %d", *sysno);
815 void
816 linux_process_target::low_get_syscall_trapinfo (regcache *regcache, int *sysno)
818 /* By default, report an unknown system call number. */
819 *sysno = UNKNOWN_SYSCALL;
822 bool
823 linux_process_target::save_stop_reason (lwp_info *lwp)
825 CORE_ADDR pc;
826 CORE_ADDR sw_breakpoint_pc;
827 siginfo_t siginfo;
829 if (!low_supports_breakpoints ())
830 return false;
832 process_info *proc = get_thread_process (get_lwp_thread (lwp));
833 if (proc->starting_up)
835 /* Claim we have the stop PC so that the caller doesn't try to
836 fetch it itself. */
837 return true;
840 pc = get_pc (lwp);
841 sw_breakpoint_pc = pc - low_decr_pc_after_break ();
843 /* breakpoint_at reads from the current thread. */
844 scoped_restore_current_thread restore_thread;
845 switch_to_thread (get_lwp_thread (lwp));
847 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
848 (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
850 if (siginfo.si_signo == SIGTRAP)
852 if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
853 && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
855 /* The si_code is ambiguous on this arch -- check debug
856 registers. */
857 if (!check_stopped_by_watchpoint (lwp))
858 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
860 else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
862 /* If we determine the LWP stopped for a SW breakpoint,
863 trust it. Particularly don't check watchpoint
864 registers, because at least on s390, we'd find
865 stopped-by-watchpoint as long as there's a watchpoint
866 set. */
867 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
869 else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
871 /* This can indicate either a hardware breakpoint or
872 hardware watchpoint. Check debug registers. */
873 if (!check_stopped_by_watchpoint (lwp))
874 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
876 else if (siginfo.si_code == TRAP_TRACE)
878 /* We may have single stepped an instruction that
879 triggered a watchpoint. In that case, on some
880 architectures (such as x86), instead of TRAP_HWBKPT,
881 si_code indicates TRAP_TRACE, and we need to check
882 the debug registers separately. */
883 if (!check_stopped_by_watchpoint (lwp))
884 lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
889 if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
891 threads_debug_printf
892 ("%s stopped by software breakpoint",
893 target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());
895 /* Back up the PC if necessary. */
896 if (pc != sw_breakpoint_pc)
898 struct regcache *regcache
899 = get_thread_regcache (current_thread, 1);
900 low_set_pc (regcache, sw_breakpoint_pc);
903 /* Update this so we record the correct stop PC below. */
904 pc = sw_breakpoint_pc;
906 else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
907 threads_debug_printf
908 ("%s stopped by hardware breakpoint",
909 target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());
910 else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
911 threads_debug_printf
912 ("%s stopped by hardware watchpoint",
913 target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());
914 else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
915 threads_debug_printf
916 ("%s stopped by trace",
917 target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());
919 lwp->stop_pc = pc;
920 return true;
923 lwp_info *
924 linux_process_target::add_lwp (ptid_t ptid)
926 lwp_info *lwp = new lwp_info;
928 lwp->thread = add_thread (ptid, lwp);
930 low_new_thread (lwp);
932 return lwp;
935 void
936 linux_process_target::low_new_thread (lwp_info *info)
938 /* Nop. */
941 /* Callback to be used when calling fork_inferior, responsible for
942 actually initiating the tracing of the inferior. */
944 static void
945 linux_ptrace_fun ()
947 if (ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0,
948 (PTRACE_TYPE_ARG4) 0) < 0)
949 trace_start_error_with_name ("ptrace");
951 if (setpgid (0, 0) < 0)
952 trace_start_error_with_name ("setpgid");
954 /* If GDBserver is connected to gdb via stdio, redirect the inferior's
955 stdout to stderr so that inferior i/o doesn't corrupt the connection.
956 Also, redirect stdin to /dev/null. */
957 if (remote_connection_is_stdio ())
959 if (close (0) < 0)
960 trace_start_error_with_name ("close");
961 if (open ("/dev/null", O_RDONLY) < 0)
962 trace_start_error_with_name ("open");
963 if (dup2 (2, 1) < 0)
964 trace_start_error_with_name ("dup2");
965 if (write (2, "stdin/stdout redirected\n",
966 sizeof ("stdin/stdout redirected\n") - 1) < 0)
968 /* Errors ignored. */;
973 /* Start an inferior process and returns its pid.
974 PROGRAM is the name of the program to be started, and PROGRAM_ARGS
975 are its arguments. */
978 linux_process_target::create_inferior (const char *program,
979 const std::vector<char *> &program_args)
981 client_state &cs = get_client_state ();
982 struct lwp_info *new_lwp;
983 int pid;
984 ptid_t ptid;
987 maybe_disable_address_space_randomization restore_personality
988 (cs.disable_randomization);
989 std::string str_program_args = construct_inferior_arguments (program_args);
991 pid = fork_inferior (program,
992 str_program_args.c_str (),
993 get_environ ()->envp (), linux_ptrace_fun,
994 NULL, NULL, NULL, NULL);
997 /* When spawning a new process, we can't open the mem file yet. We
998 still have to nurse the process through the shell, and that execs
999 a couple times. The address space a /proc/PID/mem file is
1000 accessing is destroyed on exec. */
1001 process_info *proc = add_linux_process_no_mem_file (pid, 0);
1003 ptid = ptid_t (pid, pid);
1004 new_lwp = add_lwp (ptid);
1005 new_lwp->must_set_ptrace_flags = 1;
1007 post_fork_inferior (pid, program);
1009 /* PROC is now past the shell running the program we want, so we can
1010 open the /proc/PID/mem file. */
1011 open_proc_mem_file (proc);
1013 return pid;
1016 /* Implement the post_create_inferior target_ops method. */
1018 void
1019 linux_process_target::post_create_inferior ()
1021 struct lwp_info *lwp = get_thread_lwp (current_thread);
1023 low_arch_setup ();
1025 if (lwp->must_set_ptrace_flags)
1027 struct process_info *proc = current_process ();
1028 int options = linux_low_ptrace_options (proc->attached);
1030 linux_enable_event_reporting (lwpid_of (current_thread), options);
1031 lwp->must_set_ptrace_flags = 0;
1036 linux_process_target::attach_lwp (ptid_t ptid)
1038 struct lwp_info *new_lwp;
1039 int lwpid = ptid.lwp ();
1041 if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
1042 != 0)
1043 return errno;
1045 new_lwp = add_lwp (ptid);
1047 /* We need to wait for SIGSTOP before being able to make the next
1048 ptrace call on this LWP. */
1049 new_lwp->must_set_ptrace_flags = 1;
1051 if (linux_proc_pid_is_stopped (lwpid))
1053 threads_debug_printf ("Attached to a stopped process");
1055 /* The process is definitely stopped. It is in a job control
1056 stop, unless the kernel predates the TASK_STOPPED /
1057 TASK_TRACED distinction, in which case it might be in a
1058 ptrace stop. Make sure it is in a ptrace stop; from there we
1059 can kill it, signal it, et cetera.
1061 First make sure there is a pending SIGSTOP. Since we are
1062 already attached, the process can not transition from stopped
1063 to running without a PTRACE_CONT; so we know this signal will
1064 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
1065 probably already in the queue (unless this kernel is old
1066 enough to use TASK_STOPPED for ptrace stops); but since
1067 SIGSTOP is not an RT signal, it can only be queued once. */
1068 kill_lwp (lwpid, SIGSTOP);
1070 /* Finally, resume the stopped process. This will deliver the
1071 SIGSTOP (or a higher priority signal, just like normal
1072 PTRACE_ATTACH), which we'll catch later on. */
1073 ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
1076 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
1077 brings it to a halt.
1079 There are several cases to consider here:
1081 1) gdbserver has already attached to the process and is being notified
1082 of a new thread that is being created.
1083 In this case we should ignore that SIGSTOP and resume the
1084 process. This is handled below by setting stop_expected = 1,
1085 and the fact that add_thread sets last_resume_kind ==
1086 resume_continue.
1088 2) This is the first thread (the process thread), and we're attaching
1089 to it via attach_inferior.
1090 In this case we want the process thread to stop.
1091 This is handled by having linux_attach set last_resume_kind ==
1092 resume_stop after we return.
1094 If the pid we are attaching to is also the tgid, we attach to and
1095 stop all the existing threads. Otherwise, we attach to pid and
1096 ignore any other threads in the same group as this pid.
1098 3) GDB is connecting to gdbserver and is requesting an enumeration of all
1099 existing threads.
1100 In this case we want the thread to stop.
1101 FIXME: This case is currently not properly handled.
1102 We should wait for the SIGSTOP but don't. Things work apparently
1103 because enough time passes between when we ptrace (ATTACH) and when
1104 gdb makes the next ptrace call on the thread.
1106 On the other hand, if we are currently trying to stop all threads, we
1107 should treat the new thread as if we had sent it a SIGSTOP. This works
1108 because we are guaranteed that the add_lwp call above added us to the
1109 end of the list, and so the new thread has not yet reached
1110 wait_for_sigstop (but will). */
1111 new_lwp->stop_expected = 1;
1113 return 0;
1116 /* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
1117 already attached. Returns true if a new LWP is found, false
1118 otherwise. */
1120 static int
1121 attach_proc_task_lwp_callback (ptid_t ptid)
1123 /* Is this a new thread? */
1124 if (find_thread_ptid (ptid) == NULL)
1126 int lwpid = ptid.lwp ();
1127 int err;
1129 threads_debug_printf ("Found new lwp %d", lwpid);
1131 err = the_linux_target->attach_lwp (ptid);
1133 /* Be quiet if we simply raced with the thread exiting. EPERM
1134 is returned if the thread's task still exists, and is marked
1135 as exited or zombie, as well as other conditions, so in that
1136 case, confirm the status in /proc/PID/status. */
1137 if (err == ESRCH
1138 || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
1139 threads_debug_printf
1140 ("Cannot attach to lwp %d: thread is gone (%d: %s)",
1141 lwpid, err, safe_strerror (err));
1142 else if (err != 0)
1144 std::string reason
1145 = linux_ptrace_attach_fail_reason_string (ptid, err);
/* Any other attach failure is fatal for the whole attach.  */
1147 error (_("Cannot attach to lwp %d: %s"), lwpid, reason.c_str ());
/* New LWP seen (attached, or raced with its exit): report "found" so
   the /proc rescan loop in linux_proc_attach_tgid_threads iterates.  */
1150 return 1;
/* PTID was already in our thread list; nothing to do.  */
1152 return 0;
1155 static void async_file_mark (void);
1157 /* Attach to PID. If PID is the tgid, attach to it and all
1158 of its threads. */
1161 linux_process_target::attach (unsigned long pid)
1163 struct process_info *proc;
1164 struct thread_info *initial_thread;
1165 ptid_t ptid = ptid_t (pid, pid);
1166 int err;
1168 /* Delay opening the /proc/PID/mem file until we've successfully
1169 attached. */
1170 proc = add_linux_process_no_mem_file (pid, 1);
1172 /* Attach to PID. We will check for other threads
1173 soon. */
1174 err = attach_lwp (ptid);
1175 if (err != 0)
/* Attaching to the leader failed: undo the process bookkeeping and
   report the reason to the user.  */
1177 this->remove_linux_process (proc);
1179 std::string reason = linux_ptrace_attach_fail_reason_string (ptid, err);
1180 error ("Cannot attach to process %ld: %s", pid, reason.c_str ());
1183 open_proc_mem_file (proc);
1185 /* Don't ignore the initial SIGSTOP if we just attached to this
1186 process. It will be collected by wait shortly. */
1187 initial_thread = find_thread_ptid (ptid_t (pid, pid));
1188 gdb_assert (initial_thread != nullptr);
1189 initial_thread->last_resume_kind = resume_stop;
1191 /* We must attach to every LWP. If /proc is mounted, use that to
1192 find them now. On the one hand, the inferior may be using raw
1193 clone instead of using pthreads. On the other hand, even if it
1194 is using pthreads, GDB may not be connected yet (thread_db needs
1195 to do symbol lookups, through qSymbol). Also, thread_db walks
1196 structures in the inferior's address space to find the list of
1197 threads/LWPs, and those structures may well be corrupted. Note
1198 that once thread_db is loaded, we'll still use it to list threads
1199 and associate pthread info with each LWP. */
1202 linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
1204 catch (const gdb_exception_error &)
/* Attaching to some sibling LWP failed: detach from everything we
   did manage to attach to, then propagate the error.  */
1206 /* Make sure we do not deliver the SIGSTOP to the process. */
1207 initial_thread->last_resume_kind = resume_continue;
1209 this->detach (proc);
1210 throw;
1213 /* GDB will shortly read the xml target description for this
1214 process, to figure out the process' architecture. But the target
1215 description is only filled in when the first process/thread in
1216 the thread group reports its initial PTRACE_ATTACH SIGSTOP. Do
1217 that now, otherwise, if GDB is fast enough, it could read the
1218 target description _before_ that initial stop. */
1219 if (non_stop)
1221 struct lwp_info *lwp;
1222 int wstat, lwpid;
1223 ptid_t pid_ptid = ptid_t (pid);
/* Wait for the first stop of any LWP in the group; this fills in
   the target description as a side effect.  */
1225 lwpid = wait_for_event_filtered (pid_ptid, pid_ptid, &wstat, __WALL);
1226 gdb_assert (lwpid > 0);
1228 lwp = find_lwp_pid (ptid_t (lwpid));
1229 gdb_assert (lwp != nullptr);
/* Keep any non-SIGSTOP status as pending so it is reported later
   instead of being swallowed here.  */
1231 if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
1233 lwp->status_pending_p = 1;
1234 lwp->status_pending = wstat;
1237 initial_thread->last_resume_kind = resume_continue;
1239 async_file_mark ();
1241 gdb_assert (proc->tdesc != NULL);
1244 return 0;
/* Return true if PID has exactly one (or zero) known threads left,
   i.e. there is no second thread in our list for that process.  */
1247 static int
1248 last_thread_of_process_p (int pid)
1250 bool seen_one = false;
/* find_thread stops at the first thread for which the lambda returns
   true — that only happens for the SECOND thread of PID we visit.  */
1252 thread_info *thread = find_thread (pid, [&] (thread_info *thr_arg)
1254 if (!seen_one)
1256 /* This is the first thread of this process we see. */
1257 seen_one = true;
1258 return false;
1260 else
1262 /* This is the second thread of this process we see. */
1263 return true;
/* No second thread was found => PID's thread (if any) is the last.  */
1267 return thread == NULL;
1270 /* Kill LWP.  Sends SIGKILL first, then PTRACE_KILL as a belt-and-
1271 braces fallback; does not wait for the LWP to die (see
1272 kill_wait_lwp for that). */
1272 static void
1273 linux_kill_one_lwp (struct lwp_info *lwp)
1275 struct thread_info *thr = get_lwp_thread (lwp);
1276 int pid = lwpid_of (thr);
1278 /* PTRACE_KILL is unreliable. After stepping into a signal handler,
1279 there is no signal context, and ptrace(PTRACE_KILL) (or
1280 ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
1281 ptrace(CONT, pid, 0,0) and just resumes the tracee. A better
1282 alternative is to kill with SIGKILL. We only need one SIGKILL
1283 per process, not one for each thread. But since we still support
1284 support debugging programs using raw clone without CLONE_THREAD,
1285 we send one for each thread. For years, we used PTRACE_KILL
1286 only, so we're being a bit paranoid about some old kernels where
1287 PTRACE_KILL might work better (dubious if there are any such, but
1288 that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
1289 second, and so we're fine everywhere. */
1291 errno = 0;
1292 kill_lwp (pid, SIGKILL);
1293 if (debug_threads)
/* errno is captured immediately so the debug printf below reports
   the result of kill_lwp, not of some later call.  */
1295 int save_errno = errno;
1297 threads_debug_printf ("kill_lwp (SIGKILL) %s, 0, 0 (%s)",
1298 target_pid_to_str (ptid_of (thr)).c_str (),
1299 save_errno ? safe_strerror (save_errno) : "OK");
1302 errno = 0;
1303 ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
1304 if (debug_threads)
1306 int save_errno = errno;
1308 threads_debug_printf ("PTRACE_KILL %s, 0, 0 (%s)",
1309 target_pid_to_str (ptid_of (thr)).c_str (),
1310 save_errno ? safe_strerror (save_errno) : "OK");
1314 /* Kill LWP and wait for it to die. */
1316 static void
1317 kill_wait_lwp (struct lwp_info *lwp)
1319 struct thread_info *thr = get_lwp_thread (lwp);
1320 int pid = ptid_of (thr).pid ();
1321 int lwpid = ptid_of (thr).lwp ();
1322 int wstat;
1323 int res;
1325 threads_debug_printf ("killing lwp %d, for pid: %d", lwpid, pid);
/* NOTE(review): the 'do' opening this do/while loop is missing from
   this extract of the file.  */
1329 linux_kill_one_lwp (lwp);
1331 /* Make sure it died. Notes:
1333 - The loop is most likely unnecessary.
1335 - We don't use wait_for_event as that could delete lwps
1336 while we're iterating over them. We're not interested in
1337 any pending status at this point, only in making sure all
1338 wait status on the kernel side are collected until the
1339 process is reaped.
1341 - We don't use __WALL here as the __WALL emulation relies on
1342 SIGCHLD, and killing a stopped process doesn't generate
1343 one, nor an exit status.
/* ECHILD from plain waitpid can mean the LWP is a clone; retry with
   __WCLONE to reap it.  */
1345 res = my_waitpid (lwpid, &wstat, 0);
1346 if (res == -1 && errno == ECHILD)
1347 res = my_waitpid (lwpid, &wstat, __WCLONE);
1348 } while (res > 0 && WIFSTOPPED (wstat));
1350 /* Even if it was stopped, the child may have already disappeared.
1351 E.g., if it was killed by SIGKILL. */
1352 if (res < 0 && errno != ECHILD)
1353 perror_with_name ("kill_wait_lwp");
1356 /* Callback for `for_each_thread'. Kills an lwp of a given process,
1357 except the leader. */
1359 static void
1360 kill_one_lwp_callback (thread_info *thread, int pid)
1362 struct lwp_info *lwp = get_thread_lwp (thread);
1364 /* We avoid killing the first thread here, because of a Linux kernel (at
1365 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
1366 the children get a chance to be reaped, it will remain a zombie
1367 forever. */
/* The leader is the LWP whose lwpid equals the process id; it is
   killed last, by linux_process_target::kill.  */
1369 if (lwpid_of (thread) == pid)
1371 threads_debug_printf ("is last of process %s",
1372 target_pid_to_str (thread->id).c_str ());
1373 return;
1376 kill_wait_lwp (lwp);
/* Kill PROCESS: stop everything, SIGKILL and reap every non-leader
   LWP, then the leader, and mourn the process.  Returns 0.  */
1380 linux_process_target::kill (process_info *process)
1382 int pid = process->pid;
1384 /* If we're killing a running inferior, make sure it is stopped
1385 first, as PTRACE_KILL will not work otherwise. */
1386 stop_all_lwps (0, NULL);
1388 for_each_thread (pid, [&] (thread_info *thread)
1390 kill_one_lwp_callback (thread, pid);
1393 /* See the comment in linux_kill_one_lwp. We did not kill the first
1394 thread in the list, so do so now. */
1395 lwp_info *lwp = find_lwp_pid (ptid_t (pid));
1397 if (lwp == NULL)
1398 threads_debug_printf ("cannot find lwp for pid: %d", pid);
1399 else
1400 kill_wait_lwp (lwp);
1402 mourn (process);
1404 /* Since we presently can only stop all lwps of all processes, we
1405 need to unstop lwps of other processes. */
1406 unstop_all_lwps (0, NULL);
1407 return 0;
1410 /* Get pending signal of THREAD, for detaching purposes. This is the
1411 signal the thread last stopped for, which we need to deliver to the
1412 thread when detaching, otherwise, it'd be suppressed/lost.
1413 Returns the host signal number to pass to PTRACE_DETACH, or 0 if
1413 no signal should be delivered. */
1414 static int
1415 get_detach_signal (struct thread_info *thread)
1417 client_state &cs = get_client_state ();
1418 enum gdb_signal signo = GDB_SIGNAL_0;
1419 int status;
1420 struct lwp_info *lp = get_thread_lwp (thread);
/* Prefer a status we collected but haven't reported to GDB yet.  */
1422 if (lp->status_pending_p)
1423 status = lp->status_pending;
1424 else
1426 /* If the thread had been suspended by gdbserver, and it stopped
1427 cleanly, then it'll have stopped with SIGSTOP. But we don't
1428 want to deliver that SIGSTOP. */
1429 if (thread->last_status.kind () != TARGET_WAITKIND_STOPPED
1430 || thread->last_status.sig () == GDB_SIGNAL_0)
1431 return 0;
1433 /* Otherwise, we may need to deliver the signal we
1434 intercepted. */
1435 status = lp->last_status;
1438 if (!WIFSTOPPED (status))
1440 threads_debug_printf ("lwp %s hasn't stopped: no pending signal",
1441 target_pid_to_str (ptid_of (thread)).c_str ());
1442 return 0;
1445 /* Extended wait statuses aren't real SIGTRAPs. */
1446 if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
1448 threads_debug_printf ("lwp %s had stopped with extended "
1449 "status: no pending signal",
1450 target_pid_to_str (ptid_of (thread)).c_str ());
1451 return 0;
1454 signo = gdb_signal_from_host (WSTOPSIG (status));
/* Honor GDB's QProgramSignals setting: suppress signals the client
   marked as not-to-be-passed to the program.  */
1456 if (cs.program_signals_p && !cs.program_signals[signo])
1458 threads_debug_printf ("lwp %s had signal %s, but it is in nopass state",
1459 target_pid_to_str (ptid_of (thread)).c_str (),
1460 gdb_signal_to_string (signo));
1461 return 0;
1463 else if (!cs.program_signals_p
1464 /* If we have no way to know which signals GDB does not
1465 want to have passed to the program, assume
1466 SIGTRAP/SIGINT, which is GDB's default. */
1467 && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
1469 threads_debug_printf ("lwp %s had signal %s, "
1470 "but we don't know if we should pass it. "
1471 "Default to not.",
1472 target_pid_to_str (ptid_of (thread)).c_str (),
1473 gdb_signal_to_string (signo));
1474 return 0;
1476 else
1478 threads_debug_printf ("lwp %s has pending signal %s: delivering it",
1479 target_pid_to_str (ptid_of (thread)).c_str (),
1480 gdb_signal_to_string (signo));
/* Return the raw host signal number, not the gdb_signal enum.  */
1482 return WSTOPSIG (status);
/* Detach from a single LWP: clear any pending SIGSTOP, compute the
   signal to deliver on detach, flush register state, and
   PTRACE_DETACH.  Handles the LWP having become a zombie meanwhile
   (reaps it instead of erroring).  Deletes the lwp_info on success.  */
1486 void
1487 linux_process_target::detach_one_lwp (lwp_info *lwp)
1489 struct thread_info *thread = get_lwp_thread (lwp);
1490 int sig;
1491 int lwpid;
1493 /* If there is a pending SIGSTOP, get rid of it. */
1494 if (lwp->stop_expected)
1496 threads_debug_printf ("Sending SIGCONT to %s",
1497 target_pid_to_str (ptid_of (thread)).c_str ());
1499 kill_lwp (lwpid_of (thread), SIGCONT);
1500 lwp->stop_expected = 0;
1503 /* Pass on any pending signal for this thread. */
1504 sig = get_detach_signal (thread);
1506 /* Preparing to resume may try to write registers, and fail if the
1507 lwp is zombie. If that happens, ignore the error. We'll handle
1508 it below, when detach fails with ESRCH. */
1511 /* Flush any pending changes to the process's registers. */
1512 regcache_invalidate_thread (thread);
1514 /* Finally, let it resume. */
1515 low_prepare_to_resume (lwp);
1517 catch (const gdb_exception_error &ex)
/* Only swallow the error if the LWP is known to be gone; otherwise
   re-throw.  */
1519 if (!check_ptrace_stopped_lwp_gone (lwp))
1520 throw;
1523 lwpid = lwpid_of (thread);
1524 if (ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
1525 (PTRACE_TYPE_ARG4) (long) sig) < 0)
1527 int save_errno = errno;
1529 /* We know the thread exists, so ESRCH must mean the lwp is
1530 zombie. This can happen if one of the already-detached
1531 threads exits the whole thread group. In that case we're
1532 still attached, and must reap the lwp. */
1533 if (save_errno == ESRCH)
1535 int ret, status;
1537 ret = my_waitpid (lwpid, &status, __WALL);
1538 if (ret == -1)
1540 warning (_("Couldn't reap LWP %d while detaching: %s"),
1541 lwpid, safe_strerror (errno));
1543 else if (!WIFEXITED (status) && !WIFSIGNALED (status))
1545 warning (_("Reaping LWP %d while detaching "
1546 "returned unexpected status 0x%x"),
1547 lwpid, status);
1550 else
1552 error (_("Can't detach %s: %s"),
1553 target_pid_to_str (ptid_of (thread)).c_str (),
1554 safe_strerror (save_errno));
1557 else
1558 threads_debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)",
1559 target_pid_to_str (ptid_of (thread)).c_str (),
1560 strsignal (sig));
1562 delete_lwp (lwp);
/* Detach from PROCESS and all of its LWPs, leaving them running.
   Non-leader LWPs are detached first (so a concurrent group exit can
   still be reaped), then the leader.  Returns 0.  */
1566 linux_process_target::detach (process_info *process)
1568 struct lwp_info *main_lwp;
1570 /* As there's a step over already in progress, let it finish first,
1571 otherwise nesting a stabilize_threads operation on top gets real
1572 messy. */
1573 complete_ongoing_step_over ();
1575 /* Stop all threads before detaching. First, ptrace requires that
1576 the thread is stopped to successfully detach. Second, thread_db
1577 may need to uninstall thread event breakpoints from memory, which
1578 only works with a stopped process anyway. */
1579 stop_all_lwps (0, NULL);
1581 #ifdef USE_THREAD_DB
1582 thread_db_detach (process);
1583 #endif
1585 /* Stabilize threads (move out of jump pads). */
1586 target_stabilize_threads ();
1588 /* Detach from the clone lwps first. If the thread group exits just
1589 while we're detaching, we must reap the clone lwps before we're
1590 able to reap the leader. */
1591 for_each_thread (process->pid, [this] (thread_info *thread)
1593 /* We don't actually detach from the thread group leader just yet.
1594 If the thread group exits, we must reap the zombie clone lwps
1595 before we're able to reap the leader. */
1596 if (thread->id.pid () == thread->id.lwp ())
1597 return;
1599 lwp_info *lwp = get_thread_lwp (thread);
1600 detach_one_lwp (lwp);
/* Now detach the leader itself.  */
1603 main_lwp = find_lwp_pid (ptid_t (process->pid));
1604 gdb_assert (main_lwp != nullptr);
1605 detach_one_lwp (main_lwp);
1607 mourn (process);
1609 /* Since we presently can only stop all lwps of all processes, we
1610 need to unstop lwps of other processes. */
1611 unstop_all_lwps (0, NULL);
1612 return 0;
1615 /* Remove all LWPs that belong to process PROC from the lwp list,
1616 notify thread_db, and drop the process itself. */
1617 void
1618 linux_process_target::mourn (process_info *process)
1620 #ifdef USE_THREAD_DB
1621 thread_db_mourn (process);
1622 #endif
1624 for_each_thread (process->pid, [this] (thread_info *thread)
1626 delete_lwp (get_thread_lwp (thread));
1629 this->remove_linux_process (process);
1632 void
1633 linux_process_target::join (int pid)
1635 int status, ret;
1637 do {
1638 ret = my_waitpid (pid, &status, 0);
1639 if (WIFEXITED (status) || WIFSIGNALED (status))
1640 break;
1641 } while (ret != -1 || errno != ECHILD);
1644 /* Return true if the given thread is still alive. */
1646 bool
1647 linux_process_target::thread_alive (ptid_t ptid)
1649 struct lwp_info *lwp = find_lwp_pid (ptid);
1651 /* We assume we always know if a thread exits. If a whole process
1652 exited but we still haven't been able to report it to GDB, we'll
1653 hold on to the last lwp of the dead process. */
1654 if (lwp != NULL)
/* Alive unless the LWP was already marked dead (exit seen but not
   yet reported).  */
1655 return !lwp_is_marked_dead (lwp);
1656 else
1657 return 0;
/* Re-check whether THREAD's pending stop status is still valid.  A
   pending SW/HW-breakpoint stop is discarded if the PC has moved
   since the stop (e.g. GDB wrote the PC), since re-reporting it would
   be wrong.  Returns true if the status should still be reported.  */
1660 bool
1661 linux_process_target::thread_still_has_status_pending (thread_info *thread)
1663 struct lwp_info *lp = get_thread_lwp (thread);
1665 if (!lp->status_pending_p)
1666 return 0;
1668 if (thread->last_resume_kind != resume_stop
1669 && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1670 || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
1672 CORE_ADDR pc;
1673 int discard = 0;
1675 gdb_assert (lp->last_status != 0);
1677 pc = get_pc (lp);
1679 scoped_restore_current_thread restore_thread;
1680 switch_to_thread (thread);
/* If the current PC differs from the PC recorded at stop time, the
   breakpoint stop is stale.  */
1682 if (pc != lp->stop_pc)
1684 threads_debug_printf ("PC of %ld changed",
1685 lwpid_of (thread));
1686 discard = 1;
1689 if (discard)
1691 threads_debug_printf ("discarding pending breakpoint status");
1692 lp->status_pending_p = 0;
1693 return 0;
1697 return 1;
1700 /* Returns true if LWP is resumed from the client's perspective. */
1702 static int
1703 lwp_resumed (struct lwp_info *lwp)
1705 struct thread_info *thread = get_lwp_thread (lwp);
1707 if (thread->last_resume_kind != resume_stop)
1708 return 1;
1710 /* Did gdb send us a `vCont;t', but we haven't reported the
1711 corresponding stop to gdb yet? If so, the thread is still
1712 resumed/running from gdb's perspective. */
/* NOTE(review): the first clause below is redundant — we only reach
   here when last_resume_kind == resume_stop.  Kept for clarity.  */
1713 if (thread->last_resume_kind == resume_stop
1714 && thread->last_status.kind () == TARGET_WAITKIND_IGNORE)
1715 return 1;
1717 return 0;
/* find_thread predicate: does THREAD (if it matches PTID and is
   resumed) have a stop status that should be reported to GDB?  As a
   side effect, a stale breakpoint status is discarded and the LWP is
   re-resumed.  */
1720 bool
1721 linux_process_target::status_pending_p_callback (thread_info *thread,
1722 ptid_t ptid)
1724 struct lwp_info *lp = get_thread_lwp (thread);
1726 /* Check if we're only interested in events from a specific process
1727 or a specific LWP. */
1728 if (!thread->id.matches (ptid))
1729 return 0;
1731 if (!lwp_resumed (lp))
1732 return 0;
1734 if (lp->status_pending_p
1735 && !thread_still_has_status_pending (thread))
/* Pending status went stale; resume the LWP the way it was going
   (stepping or continuing) with no signal.  */
1737 resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
1738 return 0;
1741 return lp->status_pending_p;
/* Find the lwp_info for PTID's lwp, falling back to PTID's pid when
   the lwp field is zero (i.e. a process-wide ptid).  Returns NULL if
   no such LWP is known.  */
1744 struct lwp_info *
1745 find_lwp_pid (ptid_t ptid)
1747 long lwp = ptid.lwp () != 0 ? ptid.lwp () : ptid.pid ();
1748 thread_info *thread = find_thread ([lwp] (thread_info *thr_arg)
1750 return thr_arg->id.lwp () == lwp;
1753 if (thread == NULL)
1754 return NULL;
1756 return get_thread_lwp (thread);
1759 /* Return the number of known LWPs in the tgid given by PID. */
1761 static int
1762 num_lwps (int pid)
1764 int count = 0;
/* Count every thread whose process id is PID.  */
1766 for_each_thread (pid, [&] (thread_info *thread)
1768 count++;
1771 return count;
1774 /* See nat/linux-nat.h.  Walk all LWPs matching FILTER, invoking
1775 CALLBACK on each; return the first LWP for which CALLBACK returns
1776 nonzero, or NULL. */
1776 struct lwp_info *
1777 iterate_over_lwps (ptid_t filter,
1778 gdb::function_view<iterate_over_lwps_ftype> callback)
1780 thread_info *thread = find_thread (filter, [&] (thread_info *thr_arg)
1782 lwp_info *lwp = get_thread_lwp (thr_arg);
1784 return callback (lwp);
1787 if (thread == NULL)
1788 return NULL;
1790 return get_thread_lwp (thread);
/* For each debugged process, detect a thread-group leader that turned
   zombie while other threads are still alive, and either mark it dead
   (when GDB wants exit events) or delete it from the LWP list.
   Returns true if a new pending event was created.  */
1793 bool
1794 linux_process_target::check_zombie_leaders ()
1796 bool new_pending_event = false;
1798 for_each_process ([&] (process_info *proc)
1800 pid_t leader_pid = pid_of (proc);
1801 lwp_info *leader_lp = find_lwp_pid (ptid_t (leader_pid));
1803 threads_debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
1804 "num_lwps=%d, zombie=%d",
1805 leader_pid, leader_lp!= NULL, num_lwps (leader_pid),
1806 linux_proc_pid_is_zombie (leader_pid));
/* Only consider leaders that are running (not ptrace-stopped by us):
   a stopped leader cannot have silently become a zombie.  */
1808 if (leader_lp != NULL && !leader_lp->stopped
1809 /* Check if there are other threads in the group, as we may
1810 have raced with the inferior simply exiting. Note this
1811 isn't a watertight check. If the inferior is
1812 multi-threaded and is exiting, it may be we see the
1813 leader as zombie before we reap all the non-leader
1814 threads. See comments below. */
1815 && !last_thread_of_process_p (leader_pid)
1816 && linux_proc_pid_is_zombie (leader_pid))
1818 /* A zombie leader in a multi-threaded program can mean one
1819 of three things:
1821 #1 - Only the leader exited, not the whole program, e.g.,
1822 with pthread_exit. Since we can't reap the leader's exit
1823 status until all other threads are gone and reaped too,
1824 we want to delete the zombie leader right away, as it
1825 can't be debugged, we can't read its registers, etc.
1826 This is the main reason we check for zombie leaders
1827 disappearing.
1829 #2 - The whole thread-group/process exited (a group exit,
1830 via e.g. exit(3), and there is (or will be shortly) an
1831 exit reported for each thread in the process, and then
1832 finally an exit for the leader once the non-leaders are
1833 reaped.
1835 #3 - There are 3 or more threads in the group, and a
1836 thread other than the leader exec'd. See comments on
1837 exec events at the top of the file.
1839 Ideally we would never delete the leader for case #2.
1840 Instead, we want to collect the exit status of each
1841 non-leader thread, and then finally collect the exit
1842 status of the leader as normal and use its exit code as
1843 whole-process exit code. Unfortunately, there's no
1844 race-free way to distinguish cases #1 and #2. We can't
1845 assume the exit events for the non-leaders threads are
1846 already pending in the kernel, nor can we assume the
1847 non-leader threads are in zombie state already. Between
1848 the leader becoming zombie and the non-leaders exiting
1849 and becoming zombie themselves, there's a small time
1850 window, so such a check would be racy. Temporarily
1851 pausing all threads and checking to see if all threads
1852 exit or not before re-resuming them would work in the
1853 case that all threads are running right now, but it
1854 wouldn't work if some thread is currently already
1855 ptrace-stopped, e.g., due to scheduler-locking.
1857 So what we do is we delete the leader anyhow, and then
1858 later on when we see its exit status, we re-add it back.
1859 We also make sure that we only report a whole-process
1860 exit when we see the leader exiting, as opposed to when
1861 the last LWP in the LWP list exits, which can be a
1862 non-leader if we deleted the leader here. */
1863 threads_debug_printf ("Thread group leader %d zombie "
1864 "(it exited, or another thread execd), "
1865 "deleting it.",
1866 leader_pid);
1868 thread_info *leader_thread = get_lwp_thread (leader_lp);
1869 if (report_exit_events_for (leader_thread))
/* Mark dead with a synthesized exit(0) status; the real status is
   collected later when the leader is actually reaped.  */
1871 mark_lwp_dead (leader_lp, W_EXITCODE (0, 0), true);
1872 new_pending_event = true;
1874 else
1875 delete_lwp (leader_lp);
1879 return new_pending_event;
1882 /* Callback for `find_thread'. Returns the first LWP that is not
1883 stopped. */
1885 static bool
1886 not_stopped_callback (thread_info *thread, ptid_t filter)
1888 if (!thread->id.matches (filter))
1889 return false;
1891 lwp_info *lwp = get_thread_lwp (thread);
/* True (match) when the LWP is still running.  */
1893 return !lwp->stopped;
1896 /* Increment LWP's suspend count. */
1898 static void
1899 lwp_suspended_inc (struct lwp_info *lwp)
1901 lwp->suspended++;
/* A count this high usually indicates unbalanced inc/decr calls;
   log it to help track the leak down.  */
1903 if (lwp->suspended > 4)
1904 threads_debug_printf
1905 ("LWP %ld has a suspiciously high suspend count, suspended=%d",
1906 lwpid_of (get_lwp_thread (lwp)), lwp->suspended);
1909 /* Decrement LWP's suspend count.  A negative count is a bookkeeping
1910 bug and aborts with an internal error. */
1911 static void
1912 lwp_suspended_decr (struct lwp_info *lwp)
1914 lwp->suspended--;
1916 if (lwp->suspended < 0)
1918 struct thread_info *thread = get_lwp_thread (lwp);
1920 internal_error ("unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
1921 lwp->suspended);
1925 /* This function should only be called if the LWP got a SIGTRAP.
1927 Handle any tracepoint steps or hits. Return true if a tracepoint
1928 event was handled, 0 otherwise. */
1930 static int
1931 handle_tracepoints (struct lwp_info *lwp)
1933 struct thread_info *tinfo = get_lwp_thread (lwp);
1934 int tpoint_related_event = 0;
1936 gdb_assert (lwp->suspended == 0);
1938 /* If this tracepoint hit causes a tracing stop, we'll immediately
1939 uninsert tracepoints. To do this, we temporarily pause all
1940 threads, unpatch away, and then unpause threads. We need to make
1941 sure the unpausing doesn't resume LWP too. */
1942 lwp_suspended_inc (lwp);
1944 /* And we need to be sure that any all-threads-stopping doesn't try
1945 to move threads out of the jump pads, as it could deadlock the
1946 inferior (LWP could be in the jump pad, maybe even holding the
1947 lock.) */
1949 /* Do any necessary step collect actions. */
1950 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
/* Handle internal tracepoint bookkeeping breakpoints at this PC.  */
1952 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1954 /* See if we just hit a tracepoint and do its main collect
1955 actions. */
1956 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1958 lwp_suspended_decr (lwp);
1960 gdb_assert (lwp->suspended == 0);
1961 gdb_assert (!stabilizing_threads
1962 || (lwp->collecting_fast_tracepoint
1963 != fast_tpoint_collect_result::not_collecting));
1965 if (tpoint_related_event)
1967 threads_debug_printf ("got a tracepoint event");
1968 return 1;
1971 return 0;
/* Ask the in-process agent whether LWP is currently inside a fast
   tracepoint jump pad, filling *STATUS with the collection details.
   Returns not_collecting if the thread-area address is unavailable.  */
1974 fast_tpoint_collect_result
1975 linux_process_target::linux_fast_tracepoint_collecting
1976 (lwp_info *lwp, fast_tpoint_collect_status *status)
1978 CORE_ADDR thread_area;
1979 struct thread_info *thread = get_lwp_thread (lwp);
1981 /* Get the thread area address. This is used to recognize which
1982 thread is which when tracing with the in-process agent library.
1983 We don't read anything from the address, and treat it as opaque;
1984 it's the address itself that we assume is unique per-thread. */
1985 if (low_get_thread_area (lwpid_of (thread), &thread_area) == -1)
1986 return fast_tpoint_collect_result::not_collecting;
1988 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
/* Default implementation: thread-area lookup unsupported; arch
   backends override this.  */
1992 linux_process_target::low_get_thread_area (int lwpid, CORE_ADDR *addrp)
1994 return -1;
/* If LWP stopped inside a fast-tracepoint jump pad, arrange for it to
   finish the collect and step out before its stop (WSTAT, may be
   NULL) is reported to GDB.  Returns true if the LWP needs to move
   out of the jump pad first (caller should defer reporting).  */
1997 bool
1998 linux_process_target::maybe_move_out_of_jump_pad (lwp_info *lwp, int *wstat)
2000 scoped_restore_current_thread restore_thread;
2001 switch_to_thread (get_lwp_thread (lwp));
2003 if ((wstat == NULL
2004 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
2005 && supports_fast_tracepoints ()
2006 && agent_loaded_p ())
2008 struct fast_tpoint_collect_status status;
2010 threads_debug_printf
2011 ("Checking whether LWP %ld needs to move out of the jump pad.",
2012 lwpid_of (current_thread));
2014 fast_tpoint_collect_result r
2015 = linux_fast_tracepoint_collecting (lwp, &status);
/* Non-fault signals (or no status at all): remember the collection
   state and keep the LWP in the pad until collection finishes.  */
2017 if (wstat == NULL
2018 || (WSTOPSIG (*wstat) != SIGILL
2019 && WSTOPSIG (*wstat) != SIGFPE
2020 && WSTOPSIG (*wstat) != SIGSEGV
2021 && WSTOPSIG (*wstat) != SIGBUS))
2023 lwp->collecting_fast_tracepoint = r;
2025 if (r != fast_tpoint_collect_result::not_collecting)
2027 if (r == fast_tpoint_collect_result::before_insn
2028 && lwp->exit_jump_pad_bkpt == NULL)
2030 /* Haven't executed the original instruction yet.
2031 Set breakpoint there, and wait till it's hit,
2032 then single-step until exiting the jump pad. */
2033 lwp->exit_jump_pad_bkpt
2034 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
2037 threads_debug_printf
2038 ("Checking whether LWP %ld needs to move out of the jump pad..."
2039 " it does", lwpid_of (current_thread));
2041 return true;
2044 else
2046 /* If we get a synchronous signal while collecting, *and*
2047 while executing the (relocated) original instruction,
2048 reset the PC to point at the tpoint address, before
2049 reporting to GDB. Otherwise, it's an IPA lib bug: just
2050 report the signal to GDB, and pray for the best. */
2052 lwp->collecting_fast_tracepoint
2053 = fast_tpoint_collect_result::not_collecting;
2055 if (r != fast_tpoint_collect_result::not_collecting
2056 && (status.adjusted_insn_addr <= lwp->stop_pc
2057 && lwp->stop_pc < status.adjusted_insn_addr_end))
2059 siginfo_t info;
2060 struct regcache *regcache;
2062 /* The si_addr on a few signals references the address
2063 of the faulting instruction. Adjust that as
2064 well. */
2065 if ((WSTOPSIG (*wstat) == SIGILL
2066 || WSTOPSIG (*wstat) == SIGFPE
2067 || WSTOPSIG (*wstat) == SIGBUS
2068 || WSTOPSIG (*wstat) == SIGSEGV)
2069 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
2070 (PTRACE_TYPE_ARG3) 0, &info) == 0
2071 /* Final check just to make sure we don't clobber
2072 the siginfo of non-kernel-sent signals. */
2073 && (uintptr_t) info.si_addr == lwp->stop_pc)
2075 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
2076 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
2077 (PTRACE_TYPE_ARG3) 0, &info);
/* Rewind the reported PC from the relocated instruction back to the
   original tracepoint address.  */
2080 regcache = get_thread_regcache (current_thread, 1);
2081 low_set_pc (regcache, status.tpoint_addr);
2082 lwp->stop_pc = status.tpoint_addr;
2084 /* Cancel any fast tracepoint lock this thread was
2085 holding. */
2086 force_unlock_trace_buffer ();
2089 if (lwp->exit_jump_pad_bkpt != NULL)
2091 threads_debug_printf
2092 ("Cancelling fast exit-jump-pad: removing bkpt."
2093 "stopping all threads momentarily.");
2095 stop_all_lwps (1, lwp);
2097 delete_breakpoint (lwp->exit_jump_pad_bkpt);
2098 lwp->exit_jump_pad_bkpt = NULL;
2100 unstop_all_lwps (1, lwp);
2102 gdb_assert (lwp->suspended >= 0);
2107 threads_debug_printf
2108 ("Checking whether LWP %ld needs to move out of the jump pad... no",
2109 lwpid_of (current_thread));
2111 return false;
2114 /* Enqueue one signal in the "signals to report later when out of the
2115 jump pad" list.  The signal's siginfo is captured too so it can be
2116 restored when the signal is eventually delivered. */
2117 static void
2118 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2120 struct thread_info *thread = get_lwp_thread (lwp);
2122 threads_debug_printf ("Deferring signal %d for LWP %ld.",
2123 WSTOPSIG (*wstat), lwpid_of (thread));
2125 if (debug_threads)
2127 for (const auto &sig : lwp->pending_signals_to_report)
2128 threads_debug_printf (" Already queued %d", sig.signal);
2130 threads_debug_printf (" (no more currently queued signals)");
2133 /* Don't enqueue non-RT signals if they are already in the deferred
2134 queue. (SIGSTOP being the easiest signal to see ending up here
2135 twice) */
2136 if (WSTOPSIG (*wstat) < __SIGRTMIN)
2138 for (const auto &sig : lwp->pending_signals_to_report)
2140 if (sig.signal == WSTOPSIG (*wstat))
2142 threads_debug_printf
2143 ("Not requeuing already queued non-RT signal %d for LWP %ld",
2144 sig.signal, lwpid_of (thread));
2145 return;
2150 lwp->pending_signals_to_report.emplace_back (WSTOPSIG (*wstat));
/* Snapshot the kernel's siginfo for this stop into the new entry.  */
2152 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2153 &lwp->pending_signals_to_report.back ().info);
2156 /* Dequeue one signal from the "signals to report later when out of
2157 the jump pad" list.  Stores a synthesized stop status in *WSTAT and
2158 restores the saved siginfo.  Returns 1 if a signal was dequeued,
2158 0 if the queue was empty. */
2159 static int
2160 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2162 struct thread_info *thread = get_lwp_thread (lwp);
2164 if (!lwp->pending_signals_to_report.empty ())
2166 const pending_signal &p_sig = lwp->pending_signals_to_report.front ();
2168 *wstat = W_STOPCODE (p_sig.signal);
/* si_signo == 0 means no siginfo was captured; don't overwrite the
   kernel's current siginfo with an empty one.  */
2169 if (p_sig.info.si_signo != 0)
2170 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2171 &p_sig.info);
2173 lwp->pending_signals_to_report.pop_front ();
2175 threads_debug_printf ("Reporting deferred signal %d for LWP %ld.",
2176 WSTOPSIG (*wstat), lwpid_of (thread));
2178 if (debug_threads)
2180 for (const auto &sig : lwp->pending_signals_to_report)
2181 threads_debug_printf (" Still queued %d", sig.signal);
2183 threads_debug_printf (" (no more queued signals)");
2186 return 1;
2189 return 0;
/* Check whether CHILD stopped because of a hardware watchpoint; if
   so, record the stop reason and the data address.  Returns true if
   the stop was a watchpoint hit.  */
2192 bool
2193 linux_process_target::check_stopped_by_watchpoint (lwp_info *child)
2195 scoped_restore_current_thread restore_thread;
2196 switch_to_thread (get_lwp_thread (child));
2198 if (low_stopped_by_watchpoint ())
2200 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2201 child->stopped_data_address = low_stopped_data_address ();
2204 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
/* Default implementation: no watchpoint support; arch backends
   override this.  */
2207 bool
2208 linux_process_target::low_stopped_by_watchpoint ()
2210 return false;
/* Default implementation: no watchpoint data address available; arch
   backends override this.  */
2213 CORE_ADDR
2214 linux_process_target::low_stopped_data_address ()
2216 return 0;
/* Return the ptrace options that we want to try to enable.
   ATTACHED is non-zero if we attached to an already-running process
   (as opposed to having spawned it ourselves); in that case we must
   not set PTRACE_O_EXITKILL, so the process survives gdbserver's
   exit.  Fork/vfork/exec event options are only requested if the
   client asked to hear about those events.  */

static int
linux_low_ptrace_options (int attached)
{
  client_state &cs = get_client_state ();
  int options = 0;

  if (!attached)
    options |= PTRACE_O_EXITKILL;

  if (cs.report_fork_events)
    options |= PTRACE_O_TRACEFORK;

  if (cs.report_vfork_events)
    options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);

  if (cs.report_exec_events)
    options |= PTRACE_O_TRACEEXEC;

  /* Always distinguish syscall stops (SIGTRAP | 0x80) from normal
     SIGTRAPs.  */
  options |= PTRACE_O_TRACESYSGOOD;

  return options;
}
/* Do low-level handling of one waitpid event for LWPID, with raw wait
   status WSTAT.  Records the event on the LWP (as a pending status,
   or as a death), handles events for LWPs not (yet) in our list, sets
   up ptrace options on first stop, tracks syscall entry/return state,
   dispatches extended (fork/vfork/exec/clone) events, and filters out
   SIGSTOPs that we ourselves requested.  Events left pending here are
   later picked out by wait_for_event_filtered.  */

void
linux_process_target::filter_event (int lwpid, int wstat)
{
  struct lwp_info *child;
  struct thread_info *thread;
  int have_stop_pc = 0;

  child = find_lwp_pid (ptid_t (lwpid));

  /* Check for events reported by anything not in our LWP list.  */
  if (child == nullptr)
    {
      if (WIFSTOPPED (wstat))
	{
	  if (WSTOPSIG (wstat) == SIGTRAP
	      && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
	    {
	      /* A non-leader thread exec'ed after we've seen the
		 leader zombie, and removed it from our lists (in
		 check_zombie_leaders).  The non-leader thread changes
		 its tid to the tgid.  */
	      threads_debug_printf
		("Re-adding thread group leader LWP %d after exec.",
		 lwpid);

	      child = add_lwp (ptid_t (lwpid, lwpid));
	      child->stopped = 1;
	      switch_to_thread (child->thread);
	    }
	  else
	    {
	      /* A process we are controlling has forked and the new
		 child's stop was reported to us by the kernel.  Save
		 its PID and go back to waiting for the fork event to
		 be reported - the stopped process might be returned
		 from waitpid before or after the fork event is.  */
	      threads_debug_printf
		("Saving LWP %d status %s in stopped_pids list",
		 lwpid, status_to_str (wstat).c_str ());
	      add_to_pid_list (&stopped_pids, lwpid, wstat);
	    }
	}
      else
	{
	  /* Don't report an event for the exit of an LWP not in our
	     list, i.e. not part of any inferior we're debugging.
	     This can happen if we detach from a program we originally
	     forked and then it exits.  However, note that we may have
	     earlier deleted a leader of an inferior we're debugging,
	     in check_zombie_leaders.  Re-add it back here if so.  */
	  find_process ([&] (process_info *proc)
	    {
	      if (proc->pid == lwpid)
		{
		  threads_debug_printf
		    ("Re-adding thread group leader LWP %d after exit.",
		     lwpid);

		  child = add_lwp (ptid_t (lwpid, lwpid));
		  return true;
		}
	      return false;
	    });
	}

      if (child == nullptr)
	return;
    }

  thread = get_lwp_thread (child);

  child->stopped = 1;

  child->last_status = wstat;

  /* Check if the thread has exited.  */
  if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
    {
      threads_debug_printf ("%d exited", lwpid);

      if (finish_step_over (child))
	{
	  /* Unsuspend all other LWPs, and set them back running again.  */
	  unsuspend_all_lwps (child);
	}

      /* If this is not the leader LWP, then the exit signal was not
	 the end of the debugged application and should be ignored,
	 unless GDB wants to hear about thread exits.  */
      if (report_exit_events_for (thread) || is_leader (thread))
	{
	  /* Since events are serialized to GDB core, and we can't
	     report this one right now.  Leave the status pending for
	     the next time we're able to report it.  */
	  mark_lwp_dead (child, wstat, false);
	  return;
	}
      else
	{
	  delete_lwp (child);
	  return;
	}
    }

  gdb_assert (WIFSTOPPED (wstat));

  if (WIFSTOPPED (wstat))
    {
      struct process_info *proc;

      /* Architecture-specific setup after inferior is running.  */
      proc = find_process_pid (pid_of (thread));
      if (proc->tdesc == NULL)
	{
	  if (proc->attached)
	    {
	      /* This needs to happen after we have attached to the
		 inferior and it is stopped for the first time, but
		 before we access any inferior registers.  */
	      arch_setup_thread (thread);
	    }
	  else
	    {
	      /* The process is started, but GDBserver will do
		 architecture-specific setup after the program stops at
		 the first instruction.  */
	      child->status_pending_p = 1;
	      child->status_pending = wstat;
	      return;
	    }
	}
    }

  if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
    {
      struct process_info *proc = find_process_pid (pid_of (thread));
      int options = linux_low_ptrace_options (proc->attached);

      linux_enable_event_reporting (lwpid, options);
      child->must_set_ptrace_flags = 0;
    }

  /* Always update syscall_state, even if it will be filtered later.  */
  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
    {
      /* Syscall stops alternate: entry, then return.  */
      child->syscall_state
	= (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
	   ? TARGET_WAITKIND_SYSCALL_RETURN
	   : TARGET_WAITKIND_SYSCALL_ENTRY);
    }
  else
    {
      /* Almost all other ptrace-stops are known to be outside of system
	 calls, with further exceptions in handle_extended_wait.  */
      child->syscall_state = TARGET_WAITKIND_IGNORE;
    }

  /* Be careful to not overwrite stop_pc until save_stop_reason is
     called.  */
  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
      && linux_is_extended_waitstatus (wstat))
    {
      child->stop_pc = get_pc (child);
      if (handle_extended_wait (&child, wstat))
	{
	  /* The event has been handled, so just return without
	     reporting it.  */
	  return;
	}
    }

  if (linux_wstatus_maybe_breakpoint (wstat))
    {
      if (save_stop_reason (child))
	have_stop_pc = 1;
    }

  if (!have_stop_pc)
    child->stop_pc = get_pc (child);

  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
      && child->stop_expected)
    {
      threads_debug_printf ("Expected stop.");

      child->stop_expected = 0;

      if (thread->last_resume_kind == resume_stop)
	{
	  /* We want to report the stop to the core.  Treat the
	     SIGSTOP as a normal event.  */
	  threads_debug_printf ("resume_stop SIGSTOP caught for %s.",
				target_pid_to_str (ptid_of (thread)).c_str ());
	}
      else if (stopping_threads != NOT_STOPPING_THREADS)
	{
	  /* Stopping threads.  We don't want this SIGSTOP to end up
	     pending.  */
	  threads_debug_printf ("SIGSTOP caught for %s while stopping threads.",
				target_pid_to_str (ptid_of (thread)).c_str ());
	  return;
	}
      else
	{
	  /* This is a delayed SIGSTOP.  Filter out the event.  */
	  threads_debug_printf ("%s %s, 0, 0 (discard delayed SIGSTOP)",
				child->stepping ? "step" : "continue",
				target_pid_to_str (ptid_of (thread)).c_str ());

	  resume_one_lwp (child, child->stepping, 0, NULL);
	  return;
	}
    }

  child->status_pending_p = 1;
  child->status_pending = wstat;
  return;
}
/* Return true if THREAD should be hardware single-stepped, false if
   software single-step (via single-step breakpoints) is in use.  When
   hardware single step is unsupported, asserts that the software
   single-step breakpoints have already been installed.  */

bool
linux_process_target::maybe_hw_step (thread_info *thread)
{
  if (supports_hardware_single_step ())
    return true;
  else
    {
      /* GDBserver must insert single-step breakpoint for software
	 single step.  */
      gdb_assert (has_single_step_breakpoints (thread));
      return false;
    }
}
/* Resume THREAD's LWP if it is stopped but logically resumed: it has
   no pending status to report, is not suspended, and its last
   reported status was TARGET_WAITKIND_IGNORE.  If the thread was
   being single-stepped, re-arm the step (installing software
   single-step breakpoints if needed).  Used after pulling all events
   out of the kernel, to set uninteresting LWPs running again.  */

void
linux_process_target::resume_stopped_resumed_lwps (thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->stopped
      && !lp->suspended
      && !lp->status_pending_p
      && thread->last_status.kind () == TARGET_WAITKIND_IGNORE)
    {
      int step = 0;

      if (thread->last_resume_kind == resume_step)
	{
	  if (supports_software_single_step ())
	    install_software_single_step_breakpoints (lp);

	  step = maybe_hw_step (thread);
	}

      threads_debug_printf ("resuming stopped-resumed LWP %s at %s: step=%d",
			    target_pid_to_str (ptid_of (thread)).c_str (),
			    paddress (lp->stop_pc), step);

      resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
    }
}
/* Wait for an event from any LWP matching WAIT_PTID, but only report
   events for LWPs matching FILTER_PTID.  Store the raw wait status in
   *WSTATP.  OPTIONS are waitpid flags (WNOHANG is honored).  Returns
   the LWP id of the event LWP (and makes its thread current), 0 if
   WNOHANG was set and no event was found, or -1 if there are no
   unwaited-for (resumed) LWPs left.  All events are first pulled out
   of the kernel and left pending, then one is selected at random to
   avoid starvation.  */

int
linux_process_target::wait_for_event_filtered (ptid_t wait_ptid,
					       ptid_t filter_ptid,
					       int *wstatp, int options)
{
  struct thread_info *event_thread;
  struct lwp_info *event_child, *requested_child;
  sigset_t block_mask, prev_mask;

 retry:
  /* N.B. event_thread points to the thread_info struct that contains
     event_child.  Keep them in sync.  */
  event_thread = NULL;
  event_child = NULL;
  requested_child = NULL;

  /* Check for a lwp with a pending status.  */

  if (filter_ptid == minus_one_ptid || filter_ptid.is_pid ())
    {
      /* Wildcard (or whole-process) wait: pick a pending event at
	 random to avoid starving any one LWP.  */
      event_thread = find_thread_in_random ([&] (thread_info *thread)
	{
	  return status_pending_p_callback (thread, filter_ptid);
	});

      if (event_thread != NULL)
	{
	  event_child = get_thread_lwp (event_thread);
	  threads_debug_printf ("Got a pending child %ld",
				lwpid_of (event_thread));
	}
    }
  else if (filter_ptid != null_ptid)
    {
      /* Waiting for one specific LWP.  */
      requested_child = find_lwp_pid (filter_ptid);
      gdb_assert (requested_child != nullptr);

      if (stopping_threads == NOT_STOPPING_THREADS
	  && requested_child->status_pending_p
	  && (requested_child->collecting_fast_tracepoint
	      != fast_tpoint_collect_result::not_collecting))
	{
	  /* The LWP is midway through a fast tracepoint collection;
	     defer its pending signal and let it finish getting out of
	     the jump pad first.  */
	  enqueue_one_deferred_signal (requested_child,
				       &requested_child->status_pending);
	  requested_child->status_pending_p = 0;
	  requested_child->status_pending = 0;
	  resume_one_lwp (requested_child, 0, 0, NULL);
	}

      if (requested_child->suspended
	  && requested_child->status_pending_p)
	{
	  internal_error ("requesting an event out of a"
			  " suspended child?");
	}

      if (requested_child->status_pending_p)
	{
	  event_child = requested_child;
	  event_thread = get_lwp_thread (event_child);
	}
    }

  if (event_child != NULL)
    {
      threads_debug_printf ("Got an event from pending child %ld (%04x)",
			    lwpid_of (event_thread),
			    event_child->status_pending);

      *wstatp = event_child->status_pending;
      event_child->status_pending_p = 0;
      event_child->status_pending = 0;
      switch_to_thread (event_thread);
      return lwpid_of (event_thread);
    }

  /* But if we don't find a pending event, we'll have to wait.

     We only enter this loop if no process has a pending wait status.
     Thus any action taken in response to a wait status inside this
     loop is responding as soon as we detect the status, not after any
     pending events.  */

  /* Make sure SIGCHLD is blocked until the sigsuspend below.  Block
     all signals while here.  */
  sigfillset (&block_mask);
  gdb_sigmask (SIG_BLOCK, &block_mask, &prev_mask);

  /* Always pull all events out of the kernel.  We'll randomly select
     an event LWP out of all that have events, to prevent
     starvation.  */
  while (event_child == NULL)
    {
      pid_t ret = 0;

      /* Always use -1 and WNOHANG, due to couple of a kernel/ptrace
	 quirks:

	 - If the thread group leader exits while other threads in the
	   thread group still exist, waitpid(TGID, ...) hangs.  That
	   waitpid won't return an exit status until the other threads
	   in the group are reaped.

	 - When a non-leader thread execs, that thread just vanishes
	   without reporting an exit (so we'd hang if we waited for it
	   explicitly in that case).  The exec event is reported to
	   the TGID pid.  */
      errno = 0;
      ret = my_waitpid (-1, wstatp, options | WNOHANG);

      threads_debug_printf ("waitpid(-1, ...) returned %d, %s",
			    ret, errno ? safe_strerror (errno) : "ERRNO-OK");

      if (ret > 0)
	{
	  threads_debug_printf ("waitpid %ld received %s",
				(long) ret, status_to_str (*wstatp).c_str ());

	  /* Filter all events.  IOW, leave all events pending.  We'll
	     randomly select an event LWP out of all that have events
	     below.  */
	  filter_event (ret, *wstatp);
	  /* Retry until nothing comes out of waitpid.  A single
	     SIGCHLD can indicate more than one child stopped.  */
	  continue;
	}

      /* Now that we've pulled all events out of the kernel, resume
	 LWPs that don't have an interesting event to report.  */
      if (stopping_threads == NOT_STOPPING_THREADS)
	for_each_thread ([this] (thread_info *thread)
	  {
	    resume_stopped_resumed_lwps (thread);
	  });

      /* ... and find an LWP with a status to report to the core, if
	 any.  */
      event_thread = find_thread_in_random ([&] (thread_info *thread)
	{
	  return status_pending_p_callback (thread, filter_ptid);
	});

      if (event_thread != NULL)
	{
	  event_child = get_thread_lwp (event_thread);
	  *wstatp = event_child->status_pending;
	  event_child->status_pending_p = 0;
	  event_child->status_pending = 0;
	  break;
	}

      /* Check for zombie thread group leaders.  Those can't be reaped
	 until all other threads in the thread group are.  */
      if (check_zombie_leaders ())
	goto retry;

      auto not_stopped = [&] (thread_info *thread)
	{
	  return not_stopped_callback (thread, wait_ptid);
	};

      /* If there are no resumed children left in the set of LWPs we
	 want to wait for, bail.  We can't just block in
	 waitpid/sigsuspend, because lwps might have been left stopped
	 in trace-stop state, and we'd be stuck forever waiting for
	 their status to change (which would only happen if we resumed
	 them).  Even if WNOHANG is set, this return code is preferred
	 over 0 (below), as it is more detailed.  */
      if (find_thread (not_stopped) == NULL)
	{
	  threads_debug_printf ("exit (no unwaited-for LWP)");

	  gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
	  return -1;
	}

      /* No interesting event to report to the caller.  */
      if ((options & WNOHANG))
	{
	  threads_debug_printf ("WNOHANG set, no event found");

	  gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
	  return 0;
	}

      /* Block until we get an event reported with SIGCHLD.  */
      threads_debug_printf ("sigsuspend'ing");

      sigsuspend (&prev_mask);
      gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
      goto retry;
    }

  gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);

  switch_to_thread (event_thread);

  return lwpid_of (event_thread);
}
/* Wait for an event from any LWP matching PTID, reporting events for
   that same set.  Thin wrapper over wait_for_event_filtered with
   WAIT_PTID == FILTER_PTID == PTID; see that function for the return
   value and *WSTATP semantics.  */

int
linux_process_target::wait_for_event (ptid_t ptid, int *wstatp, int options)
{
  return wait_for_event_filtered (ptid, ptid, wstatp, options);
}
/* Select one LWP out of those that have events pending, and store it
   in *ORIG_LP.  *ORIG_LP is left unchanged if no thread with a
   pending event is found.  */

static void
select_event_lwp (struct lwp_info **orig_lp)
{
  struct thread_info *event_thread = NULL;

  /* In all-stop, give preference to the LWP that is being
     single-stepped.  There will be at most one, and it's the LWP that
     the core is most interested in.  If we didn't do this, then we'd
     have to handle pending step SIGTRAPs somehow in case the core
     later continues the previously-stepped thread, otherwise we'd
     report the pending SIGTRAP, and the core, not having stepped the
     thread, wouldn't understand what the trap was for, and therefore
     would report it to the user as a random signal.  */
  if (!non_stop)
    {
      event_thread = find_thread ([] (thread_info *thread)
	{
	  lwp_info *lp = get_thread_lwp (thread);

	  return (thread->last_status.kind () == TARGET_WAITKIND_IGNORE
		  && thread->last_resume_kind == resume_step
		  && lp->status_pending_p);
	});

      if (event_thread != NULL)
	threads_debug_printf
	  ("Select single-step %s",
	   target_pid_to_str (ptid_of (event_thread)).c_str ());
    }
  if (event_thread == NULL)
    {
      /* No single-stepping LWP.  Select one at random, out of those
	 which have had events.  */
      event_thread = find_thread_in_random ([&] (thread_info *thread)
	{
	  lwp_info *lp = get_thread_lwp (thread);

	  /* Only resumed LWPs that have an event pending.  */
	  return (thread->last_status.kind () == TARGET_WAITKIND_IGNORE
		  && lp->status_pending_p);
	});
    }

  if (event_thread != NULL)
    {
      struct lwp_info *event_lp = get_thread_lwp (event_thread);

      /* Switch the event LWP.  */
      *orig_lp = event_lp;
    }
}
/* Decrement the suspend count of all LWPs, except EXCEPT, if non
   NULL.  Counterpart of the suspension done when stopping threads for
   a step-over or similar operation.  */

static void
unsuspend_all_lwps (struct lwp_info *except)
{
  for_each_thread ([&] (thread_info *thread)
    {
      lwp_info *lwp = get_thread_lwp (thread);

      if (lwp != except)
	lwp_suspended_decr (lwp);
    });
}
2780 static bool lwp_running (thread_info *thread);
/* Stabilize threads (move out of jump pads).

   If a thread is midway collecting a fast tracepoint, we need to
   finish the collection and move it out of the jump pad before
   reporting the signal.

   This avoids recursion while collecting (when a signal arrives
   midway, and the signal handler itself collects), which would trash
   the trace buffer.  In case the user set a breakpoint in a signal
   handler, this avoids the backtrace showing the jump pad, etc..
   Most importantly, there are certain things we can't do safely if
   threads are stopped in a jump pad (or in its callee's).  For
   example:

     - starting a new trace run.  A thread still collecting the
   previous run, could trash the trace buffer when resumed.  The trace
   buffer control structures would have been reset but the thread had
   no way to tell.  The thread could even midway memcpy'ing to the
   buffer, which would mean that when resumed, it would clobber the
   trace buffer that had been set for a new run.

     - we can't rewrite/reuse the jump pads for new tracepoints
   safely.  Say you do tstart while a thread is stopped midway while
   collecting.  When the thread is later resumed, it finishes the
   collection, and returns to the jump pad, to execute the original
   instruction that was under the tracepoint jump at the time the
   older run had been started.  If the jump pad had been rewritten
   since for something else in the new run, the thread would now
   execute the wrong / random instructions.  */

void
linux_process_target::stabilize_threads ()
{
  /* If any thread is stuck in its jump pad (e.g. blocked on a lock
     held by the collector), we cannot stabilize at all; bail out.  */
  thread_info *thread_stuck = find_thread ([this] (thread_info *thread)
    {
      return stuck_in_jump_pad (thread);
    });

  if (thread_stuck != NULL)
    {
      threads_debug_printf ("can't stabilize, LWP %ld is stuck in jump pad",
			    lwpid_of (thread_stuck));
      return;
    }

  scoped_restore_current_thread restore_thread;

  stabilizing_threads = 1;

  /* Kick 'em all.  */
  for_each_thread ([this] (thread_info *thread)
    {
      move_out_of_jump_pad (thread);
    });

  /* Loop until all are stopped out of the jump pads.  */
  while (find_thread (lwp_running) != NULL)
    {
      struct target_waitstatus ourstatus;
      struct lwp_info *lwp;
      int wstat;

      /* Note that we go through the full wait even loop.  While
	 moving threads out of jump pad, we need to be able to step
	 over internal breakpoints and such.  */
      wait_1 (minus_one_ptid, &ourstatus, 0);

      if (ourstatus.kind () == TARGET_WAITKIND_STOPPED)
	{
	  lwp = get_thread_lwp (current_thread);

	  /* Lock it.  */
	  lwp_suspended_inc (lwp);

	  /* Defer real signals (and explicit stop requests) so they
	     are reported once the thread is out of the pad.  */
	  if (ourstatus.sig () != GDB_SIGNAL_0
	      || current_thread->last_resume_kind == resume_stop)
	    {
	      wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.sig ()));
	      enqueue_one_deferred_signal (lwp, &wstat);
	    }
	}
    }

  unsuspend_all_lwps (NULL);

  stabilizing_threads = 0;

  if (debug_threads)
    {
      /* Sanity check: nothing should be left stuck in a jump pad.  */
      thread_stuck = find_thread ([this] (thread_info *thread)
	{
	  return stuck_in_jump_pad (thread);
	});

      if (thread_stuck != NULL)
	threads_debug_printf
	  ("couldn't stabilize, LWP %ld got stuck in jump pad",
	   lwpid_of (thread_stuck));
    }
}
/* Convenience function that is called when the kernel reports an
   event that is not passed out to GDB.  Sets *OURSTATUS to ignore,
   marks the async event pipe so that target_wait is called again
   (more events may be pending), and returns null_ptid.  */

static ptid_t
ignore_event (struct target_waitstatus *ourstatus)
{
  /* If we got an event, there may still be others, as a single
     SIGCHLD can indicate more than one child stopped.  This forces
     another target_wait call.  */
  async_file_mark ();

  ourstatus->set_ignore ();
  return null_ptid;
}
/* Filter an exit event for EVENT_CHILD before reporting it.  Exits of
   non-leader threads are either converted to THREAD_EXITED (when the
   client enabled thread-exit events) or suppressed entirely, and the
   LWP is deleted.  Returns the ptid to report the (possibly adjusted)
   *OURSTATUS for.  */

ptid_t
linux_process_target::filter_exit_event (lwp_info *event_child,
					 target_waitstatus *ourstatus)
{
  struct thread_info *thread = get_lwp_thread (event_child);
  ptid_t ptid = ptid_of (thread);

  if (ourstatus->kind () == TARGET_WAITKIND_THREAD_EXITED)
    {
      /* We're reporting a thread exit for the leader.  The exit was
	 detected by check_zombie_leaders.  */
      gdb_assert (is_leader (thread));
      gdb_assert (report_exit_events_for (thread));

      delete_lwp (event_child);
      return ptid;
    }

  /* Note we must filter TARGET_WAITKIND_SIGNALLED as well, otherwise
     if a non-leader thread exits with a signal, we'd report it to the
     core which would interpret it as the whole-process exiting.
     There is no TARGET_WAITKIND_THREAD_SIGNALLED event kind.  */
  if (ourstatus->kind () != TARGET_WAITKIND_EXITED
      && ourstatus->kind () != TARGET_WAITKIND_SIGNALLED)
    return ptid;

  if (!is_leader (thread))
    {
      if (report_exit_events_for (thread))
	ourstatus->set_thread_exited (0);
      else
	ourstatus->set_ignore ();

      delete_lwp (event_child);
    }
  return ptid;
}
/* Returns 1 if GDB is interested in any event_child syscalls,
   i.e. the owning process has a non-empty syscalls_to_catch list.  */

static int
gdb_catching_syscalls_p (struct lwp_info *event_child)
{
  struct thread_info *thread = get_lwp_thread (event_child);
  struct process_info *proc = get_thread_process (thread);

  return !proc->syscalls_to_catch.empty ();
}
/* Return true if GDB asked to be notified about the specific syscall
   EVENT_CHILD just stopped for.  ANY_SYSCALL as the first (and only,
   by convention) list entry matches every syscall; otherwise the
   stopped syscall number is looked up in the process's
   syscalls_to_catch list.  */

bool
linux_process_target::gdb_catch_this_syscall (lwp_info *event_child)
{
  int sysno;
  struct thread_info *thread = get_lwp_thread (event_child);
  struct process_info *proc = get_thread_process (thread);

  if (proc->syscalls_to_catch.empty ())
    return false;

  if (proc->syscalls_to_catch[0] == ANY_SYSCALL)
    return true;

  get_syscall_trapinfo (event_child, &sysno);

  for (int iter : proc->syscalls_to_catch)
    if (iter == sysno)
      return true;

  return false;
}
2969 ptid_t
2970 linux_process_target::wait_1 (ptid_t ptid, target_waitstatus *ourstatus,
2971 target_wait_flags target_options)
2973 THREADS_SCOPED_DEBUG_ENTER_EXIT;
2975 client_state &cs = get_client_state ();
2976 int w;
2977 struct lwp_info *event_child;
2978 int options;
2979 int pid;
2980 int step_over_finished;
2981 int bp_explains_trap;
2982 int maybe_internal_trap;
2983 int report_to_gdb;
2984 int trace_event;
2985 int in_step_range;
2987 threads_debug_printf ("[%s]", target_pid_to_str (ptid).c_str ());
2989 /* Translate generic target options into linux options. */
2990 options = __WALL;
2991 if (target_options & TARGET_WNOHANG)
2992 options |= WNOHANG;
2994 bp_explains_trap = 0;
2995 trace_event = 0;
2996 in_step_range = 0;
2997 ourstatus->set_ignore ();
2999 bool was_any_resumed = any_resumed ();
3001 if (step_over_bkpt == null_ptid)
3002 pid = wait_for_event (ptid, &w, options);
3003 else
3005 threads_debug_printf ("step_over_bkpt set [%s], doing a blocking wait",
3006 target_pid_to_str (step_over_bkpt).c_str ());
3007 pid = wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
3010 if (pid == 0 || (pid == -1 && !was_any_resumed))
3012 gdb_assert (target_options & TARGET_WNOHANG);
3014 threads_debug_printf ("ret = null_ptid, TARGET_WAITKIND_IGNORE");
3016 ourstatus->set_ignore ();
3017 return null_ptid;
3019 else if (pid == -1)
3021 threads_debug_printf ("ret = null_ptid, TARGET_WAITKIND_NO_RESUMED");
3023 ourstatus->set_no_resumed ();
3024 return null_ptid;
3027 event_child = get_thread_lwp (current_thread);
3029 /* wait_for_event only returns an exit status for the last
3030 child of a process. Report it. */
3031 if (WIFEXITED (w) || WIFSIGNALED (w))
3033 if (WIFEXITED (w))
3035 /* If we already have the exit recorded in waitstatus, use
3036 it. This will happen when we detect a zombie leader,
3037 when we had GDB_THREAD_OPTION_EXIT enabled for it. We
3038 want to report its exit as TARGET_WAITKIND_THREAD_EXITED,
3039 as the whole process hasn't exited yet. */
3040 const target_waitstatus &ws = event_child->waitstatus;
3041 if (ws.kind () != TARGET_WAITKIND_IGNORE)
3043 gdb_assert (ws.kind () == TARGET_WAITKIND_EXITED
3044 || ws.kind () == TARGET_WAITKIND_THREAD_EXITED);
3045 *ourstatus = ws;
3047 else
3048 ourstatus->set_exited (WEXITSTATUS (w));
3050 threads_debug_printf
3051 ("ret = %s, exited with retcode %d",
3052 target_pid_to_str (ptid_of (current_thread)).c_str (),
3053 WEXITSTATUS (w));
3055 else
3057 ourstatus->set_signalled (gdb_signal_from_host (WTERMSIG (w)));
3059 threads_debug_printf
3060 ("ret = %s, terminated with signal %d",
3061 target_pid_to_str (ptid_of (current_thread)).c_str (),
3062 WTERMSIG (w));
3065 return filter_exit_event (event_child, ourstatus);
3068 /* If step-over executes a breakpoint instruction, in the case of a
3069 hardware single step it means a gdb/gdbserver breakpoint had been
3070 planted on top of a permanent breakpoint, in the case of a software
3071 single step it may just mean that gdbserver hit the reinsert breakpoint.
3072 The PC has been adjusted by save_stop_reason to point at
3073 the breakpoint address.
3074 So in the case of the hardware single step advance the PC manually
3075 past the breakpoint and in the case of software single step advance only
3076 if it's not the single_step_breakpoint we are hitting.
3077 This avoids that a program would keep trapping a permanent breakpoint
3078 forever. */
3079 if (step_over_bkpt != null_ptid
3080 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3081 && (event_child->stepping
3082 || !single_step_breakpoint_inserted_here (event_child->stop_pc)))
3084 int increment_pc = 0;
3085 int breakpoint_kind = 0;
3086 CORE_ADDR stop_pc = event_child->stop_pc;
3088 breakpoint_kind = breakpoint_kind_from_current_state (&stop_pc);
3089 sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
3091 threads_debug_printf
3092 ("step-over for %s executed software breakpoint",
3093 target_pid_to_str (ptid_of (current_thread)).c_str ());
3095 if (increment_pc != 0)
3097 struct regcache *regcache
3098 = get_thread_regcache (current_thread, 1);
3100 event_child->stop_pc += increment_pc;
3101 low_set_pc (regcache, event_child->stop_pc);
3103 if (!low_breakpoint_at (event_child->stop_pc))
3104 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3108 /* If this event was not handled before, and is not a SIGTRAP, we
3109 report it. SIGILL and SIGSEGV are also treated as traps in case
3110 a breakpoint is inserted at the current PC. If this target does
3111 not support internal breakpoints at all, we also report the
3112 SIGTRAP without further processing; it's of no concern to us. */
3113 maybe_internal_trap
3114 = (low_supports_breakpoints ()
3115 && (WSTOPSIG (w) == SIGTRAP
3116 || ((WSTOPSIG (w) == SIGILL
3117 || WSTOPSIG (w) == SIGSEGV)
3118 && low_breakpoint_at (event_child->stop_pc))));
3120 if (maybe_internal_trap)
3122 /* Handle anything that requires bookkeeping before deciding to
3123 report the event or continue waiting. */
3125 /* First check if we can explain the SIGTRAP with an internal
3126 breakpoint, or if we should possibly report the event to GDB.
3127 Do this before anything that may remove or insert a
3128 breakpoint. */
3129 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
3131 /* We have a SIGTRAP, possibly a step-over dance has just
3132 finished. If so, tweak the state machine accordingly,
3133 reinsert breakpoints and delete any single-step
3134 breakpoints. */
3135 step_over_finished = finish_step_over (event_child);
3137 /* Now invoke the callbacks of any internal breakpoints there. */
3138 check_breakpoints (event_child->stop_pc);
3140 /* Handle tracepoint data collecting. This may overflow the
3141 trace buffer, and cause a tracing stop, removing
3142 breakpoints. */
3143 trace_event = handle_tracepoints (event_child);
3145 if (bp_explains_trap)
3146 threads_debug_printf ("Hit a gdbserver breakpoint.");
3148 else
3150 /* We have some other signal, possibly a step-over dance was in
3151 progress, and it should be cancelled too. */
3152 step_over_finished = finish_step_over (event_child);
3155 /* We have all the data we need. Either report the event to GDB, or
3156 resume threads and keep waiting for more. */
3158 /* If we're collecting a fast tracepoint, finish the collection and
3159 move out of the jump pad before delivering a signal. See
3160 linux_stabilize_threads. */
3162 if (WIFSTOPPED (w)
3163 && WSTOPSIG (w) != SIGTRAP
3164 && supports_fast_tracepoints ()
3165 && agent_loaded_p ())
3167 threads_debug_printf ("Got signal %d for LWP %ld. Check if we need "
3168 "to defer or adjust it.",
3169 WSTOPSIG (w), lwpid_of (current_thread));
3171 /* Allow debugging the jump pad itself. */
3172 if (current_thread->last_resume_kind != resume_step
3173 && maybe_move_out_of_jump_pad (event_child, &w))
3175 enqueue_one_deferred_signal (event_child, &w);
3177 threads_debug_printf ("Signal %d for LWP %ld deferred (in jump pad)",
3178 WSTOPSIG (w), lwpid_of (current_thread));
3180 resume_one_lwp (event_child, 0, 0, NULL);
3182 return ignore_event (ourstatus);
3186 if (event_child->collecting_fast_tracepoint
3187 != fast_tpoint_collect_result::not_collecting)
3189 threads_debug_printf
3190 ("LWP %ld was trying to move out of the jump pad (%d). "
3191 "Check if we're already there.",
3192 lwpid_of (current_thread),
3193 (int) event_child->collecting_fast_tracepoint);
3195 trace_event = 1;
3197 event_child->collecting_fast_tracepoint
3198 = linux_fast_tracepoint_collecting (event_child, NULL);
3200 if (event_child->collecting_fast_tracepoint
3201 != fast_tpoint_collect_result::before_insn)
3203 /* No longer need this breakpoint. */
3204 if (event_child->exit_jump_pad_bkpt != NULL)
3206 threads_debug_printf
3207 ("No longer need exit-jump-pad bkpt; removing it."
3208 "stopping all threads momentarily.");
3210 /* Other running threads could hit this breakpoint.
3211 We don't handle moribund locations like GDB does,
3212 instead we always pause all threads when removing
3213 breakpoints, so that any step-over or
3214 decr_pc_after_break adjustment is always taken
3215 care of while the breakpoint is still
3216 inserted. */
3217 stop_all_lwps (1, event_child);
3219 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3220 event_child->exit_jump_pad_bkpt = NULL;
3222 unstop_all_lwps (1, event_child);
3224 gdb_assert (event_child->suspended >= 0);
3228 if (event_child->collecting_fast_tracepoint
3229 == fast_tpoint_collect_result::not_collecting)
3231 threads_debug_printf
3232 ("fast tracepoint finished collecting successfully.");
3234 /* We may have a deferred signal to report. */
3235 if (dequeue_one_deferred_signal (event_child, &w))
3236 threads_debug_printf ("dequeued one signal.");
3237 else
3239 threads_debug_printf ("no deferred signals.");
3241 if (stabilizing_threads)
3243 ourstatus->set_stopped (GDB_SIGNAL_0);
3245 threads_debug_printf
3246 ("ret = %s, stopped while stabilizing threads",
3247 target_pid_to_str (ptid_of (current_thread)).c_str ());
3249 return ptid_of (current_thread);
3255 /* Check whether GDB would be interested in this event. */
3257 /* Check if GDB is interested in this syscall. */
3258 if (WIFSTOPPED (w)
3259 && WSTOPSIG (w) == SYSCALL_SIGTRAP
3260 && !gdb_catch_this_syscall (event_child))
3262 threads_debug_printf ("Ignored syscall for LWP %ld.",
3263 lwpid_of (current_thread));
3265 resume_one_lwp (event_child, event_child->stepping, 0, NULL);
3267 return ignore_event (ourstatus);
3270 /* If GDB is not interested in this signal, don't stop other
3271 threads, and don't report it to GDB. Just resume the inferior
3272 right away. We do this for threading-related signals as well as
3273 any that GDB specifically requested we ignore. But never ignore
3274 SIGSTOP if we sent it ourselves, and do not ignore signals when
3275 stepping - they may require special handling to skip the signal
3276 handler. Also never ignore signals that could be caused by a
3277 breakpoint. */
3278 if (WIFSTOPPED (w)
3279 && current_thread->last_resume_kind != resume_step
3280 && (
3281 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3282 (current_process ()->priv->thread_db != NULL
3283 && (WSTOPSIG (w) == __SIGRTMIN
3284 || WSTOPSIG (w) == __SIGRTMIN + 1))
3286 #endif
3287 (cs.pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3288 && !(WSTOPSIG (w) == SIGSTOP
3289 && current_thread->last_resume_kind == resume_stop)
3290 && !linux_wstatus_maybe_breakpoint (w))))
3292 siginfo_t info, *info_p;
3294 threads_debug_printf ("Ignored signal %d for LWP %ld.",
3295 WSTOPSIG (w), lwpid_of (current_thread));
3297 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3298 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3299 info_p = &info;
3300 else
3301 info_p = NULL;
3303 if (step_over_finished)
3305 /* We cancelled this thread's step-over above. We still
3306 need to unsuspend all other LWPs, and set them back
3307 running again while the signal handler runs. */
3308 unsuspend_all_lwps (event_child);
3310 /* Enqueue the pending signal info so that proceed_all_lwps
3311 doesn't lose it. */
3312 enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3314 proceed_all_lwps ();
3316 else
3318 resume_one_lwp (event_child, event_child->stepping,
3319 WSTOPSIG (w), info_p);
3322 return ignore_event (ourstatus);
3325 /* Note that all addresses are always "out of the step range" when
3326 there's no range to begin with. */
3327 in_step_range = lwp_in_step_range (event_child);
3329 /* If GDB wanted this thread to single step, and the thread is out
3330 of the step range, we always want to report the SIGTRAP, and let
3331 GDB handle it. Watchpoints should always be reported. So should
3332 signals we can't explain. A SIGTRAP we can't explain could be a
3333 GDB breakpoint --- we may or not support Z0 breakpoints. If we
3334 do, we're be able to handle GDB breakpoints on top of internal
3335 breakpoints, by handling the internal breakpoint and still
3336 reporting the event to GDB. If we don't, we're out of luck, GDB
3337 won't see the breakpoint hit. If we see a single-step event but
3338 the thread should be continuing, don't pass the trap to gdb.
3339 That indicates that we had previously finished a single-step but
3340 left the single-step pending -- see
3341 complete_ongoing_step_over. */
3342 report_to_gdb = (!maybe_internal_trap
3343 || (current_thread->last_resume_kind == resume_step
3344 && !in_step_range)
3345 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3346 || (!in_step_range
3347 && !bp_explains_trap
3348 && !trace_event
3349 && !step_over_finished
3350 && !(current_thread->last_resume_kind == resume_continue
3351 && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
3352 || (gdb_breakpoint_here (event_child->stop_pc)
3353 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3354 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3355 || event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE);
3357 run_breakpoint_commands (event_child->stop_pc);
3359 /* We found no reason GDB would want us to stop. We either hit one
3360 of our own breakpoints, or finished an internal step GDB
3361 shouldn't know about. */
3362 if (!report_to_gdb)
3364 if (bp_explains_trap)
3365 threads_debug_printf ("Hit a gdbserver breakpoint.");
3367 if (step_over_finished)
3368 threads_debug_printf ("Step-over finished.");
3370 if (trace_event)
3371 threads_debug_printf ("Tracepoint event.");
3373 if (lwp_in_step_range (event_child))
3374 threads_debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).",
3375 paddress (event_child->stop_pc),
3376 paddress (event_child->step_range_start),
3377 paddress (event_child->step_range_end));
3379 /* We're not reporting this breakpoint to GDB, so apply the
3380 decr_pc_after_break adjustment to the inferior's regcache
3381 ourselves. */
3383 if (low_supports_breakpoints ())
3385 struct regcache *regcache
3386 = get_thread_regcache (current_thread, 1);
3387 low_set_pc (regcache, event_child->stop_pc);
3390 if (step_over_finished)
3392 /* If we have finished stepping over a breakpoint, we've
3393 stopped and suspended all LWPs momentarily except the
3394 stepping one. This is where we resume them all again.
3395 We're going to keep waiting, so use proceed, which
3396 handles stepping over the next breakpoint. */
3397 unsuspend_all_lwps (event_child);
3399 else
3401 /* Remove the single-step breakpoints if any. Note that
3402 there isn't single-step breakpoint if we finished stepping
3403 over. */
3404 if (supports_software_single_step ()
3405 && has_single_step_breakpoints (current_thread))
3407 stop_all_lwps (0, event_child);
3408 delete_single_step_breakpoints (current_thread);
3409 unstop_all_lwps (0, event_child);
3413 threads_debug_printf ("proceeding all threads.");
3415 proceed_all_lwps ();
3417 return ignore_event (ourstatus);
3420 if (debug_threads)
3422 if (event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
3423 threads_debug_printf ("LWP %ld: extended event with waitstatus %s",
3424 lwpid_of (get_lwp_thread (event_child)),
3425 event_child->waitstatus.to_string ().c_str ());
3427 if (current_thread->last_resume_kind == resume_step)
3429 if (event_child->step_range_start == event_child->step_range_end)
3430 threads_debug_printf
3431 ("GDB wanted to single-step, reporting event.");
3432 else if (!lwp_in_step_range (event_child))
3433 threads_debug_printf ("Out of step range, reporting event.");
3436 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3437 threads_debug_printf ("Stopped by watchpoint.");
3438 else if (gdb_breakpoint_here (event_child->stop_pc))
3439 threads_debug_printf ("Stopped by GDB breakpoint.");
3442 threads_debug_printf ("Hit a non-gdbserver trap event.");
3444 /* Alright, we're going to report a stop. */
3446 /* Remove single-step breakpoints. */
3447 if (supports_software_single_step ())
3449 /* Remove single-step breakpoints or not. It it is true, stop all
3450 lwps, so that other threads won't hit the breakpoint in the
3451 staled memory. */
3452 int remove_single_step_breakpoints_p = 0;
3454 if (non_stop)
3456 remove_single_step_breakpoints_p
3457 = has_single_step_breakpoints (current_thread);
3459 else
3461 /* In all-stop, a stop reply cancels all previous resume
3462 requests. Delete all single-step breakpoints. */
3464 find_thread ([&] (thread_info *thread) {
3465 if (has_single_step_breakpoints (thread))
3467 remove_single_step_breakpoints_p = 1;
3468 return true;
3471 return false;
3475 if (remove_single_step_breakpoints_p)
3477 /* If we remove single-step breakpoints from memory, stop all lwps,
3478 so that other threads won't hit the breakpoint in the staled
3479 memory. */
3480 stop_all_lwps (0, event_child);
3482 if (non_stop)
3484 gdb_assert (has_single_step_breakpoints (current_thread));
3485 delete_single_step_breakpoints (current_thread);
3487 else
3489 for_each_thread ([] (thread_info *thread){
3490 if (has_single_step_breakpoints (thread))
3491 delete_single_step_breakpoints (thread);
3495 unstop_all_lwps (0, event_child);
3499 if (!stabilizing_threads)
3501 /* In all-stop, stop all threads. */
3502 if (!non_stop)
3503 stop_all_lwps (0, NULL);
3505 if (step_over_finished)
3507 if (!non_stop)
3509 /* If we were doing a step-over, all other threads but
3510 the stepping one had been paused in start_step_over,
3511 with their suspend counts incremented. We don't want
3512 to do a full unstop/unpause, because we're in
3513 all-stop mode (so we want threads stopped), but we
3514 still need to unsuspend the other threads, to
3515 decrement their `suspended' count back. */
3516 unsuspend_all_lwps (event_child);
3518 else
3520 /* If we just finished a step-over, then all threads had
3521 been momentarily paused. In all-stop, that's fine,
3522 we want threads stopped by now anyway. In non-stop,
3523 we need to re-resume threads that GDB wanted to be
3524 running. */
3525 unstop_all_lwps (1, event_child);
3529 /* If we're not waiting for a specific LWP, choose an event LWP
3530 from among those that have had events. Giving equal priority
3531 to all LWPs that have had events helps prevent
3532 starvation. */
3533 if (ptid == minus_one_ptid)
3535 event_child->status_pending_p = 1;
3536 event_child->status_pending = w;
3538 select_event_lwp (&event_child);
3540 /* current_thread and event_child must stay in sync. */
3541 switch_to_thread (get_lwp_thread (event_child));
3543 event_child->status_pending_p = 0;
3544 w = event_child->status_pending;
3548 /* Stabilize threads (move out of jump pads). */
3549 if (!non_stop)
3550 target_stabilize_threads ();
3552 else
3554 /* If we just finished a step-over, then all threads had been
3555 momentarily paused. In all-stop, that's fine, we want
3556 threads stopped by now anyway. In non-stop, we need to
3557 re-resume threads that GDB wanted to be running. */
3558 if (step_over_finished)
3559 unstop_all_lwps (1, event_child);
3562 /* At this point, we haven't set OURSTATUS. This is where we do it. */
3563 gdb_assert (ourstatus->kind () == TARGET_WAITKIND_IGNORE);
3565 if (event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
3567 /* If the reported event is an exit, fork, vfork, clone or exec,
3568 let GDB know. */
3570 /* Break the unreported fork/vfork/clone relationship chain. */
3571 if (is_new_child_status (event_child->waitstatus.kind ()))
3573 event_child->relative->relative = NULL;
3574 event_child->relative = NULL;
3577 *ourstatus = event_child->waitstatus;
3578 /* Clear the event lwp's waitstatus since we handled it already. */
3579 event_child->waitstatus.set_ignore ();
3581 else
3583 /* The LWP stopped due to a plain signal or a syscall signal. Either way,
3584 event_child->waitstatus wasn't filled in with the details, so look at
3585 the wait status W. */
3586 if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
3588 int syscall_number;
3590 get_syscall_trapinfo (event_child, &syscall_number);
3591 if (event_child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY)
3592 ourstatus->set_syscall_entry (syscall_number);
3593 else if (event_child->syscall_state == TARGET_WAITKIND_SYSCALL_RETURN)
3594 ourstatus->set_syscall_return (syscall_number);
3595 else
3596 gdb_assert_not_reached ("unexpected syscall state");
3598 else if (current_thread->last_resume_kind == resume_stop
3599 && WSTOPSIG (w) == SIGSTOP)
3601 /* A thread that has been requested to stop by GDB with vCont;t,
3602 and it stopped cleanly, so report as SIG0. The use of
3603 SIGSTOP is an implementation detail. */
3604 ourstatus->set_stopped (GDB_SIGNAL_0);
3606 else
3607 ourstatus->set_stopped (gdb_signal_from_host (WSTOPSIG (w)));
3610 /* Now that we've selected our final event LWP, un-adjust its PC if
3611 it was a software breakpoint, and the client doesn't know we can
3612 adjust the breakpoint ourselves. */
3613 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3614 && !cs.swbreak_feature)
3616 int decr_pc = low_decr_pc_after_break ();
3618 if (decr_pc != 0)
3620 struct regcache *regcache
3621 = get_thread_regcache (current_thread, 1);
3622 low_set_pc (regcache, event_child->stop_pc + decr_pc);
3626 gdb_assert (step_over_bkpt == null_ptid);
3628 threads_debug_printf ("ret = %s, %s",
3629 target_pid_to_str (ptid_of (current_thread)).c_str (),
3630 ourstatus->to_string ().c_str ());
3632 return filter_exit_event (event_child, ourstatus);
/* Get rid of any pending event in the pipe.  */

static void
async_file_flush (void)
{
  /* Drain all queued wake-up bytes so a stale notification doesn't
     wake the event loop spuriously.  */
  linux_event_pipe.flush ();
}
/* Put something in the pipe, so the event loop wakes up.  */

static void
async_file_mark (void)
{
  /* A single byte is enough; the reader only cares that the pipe is
     readable.  */
  linux_event_pipe.mark ();
}
/* Target hook: wait for an event from the inferior.  In synchronous
   mode (no TARGET_WNOHANG) this loops until wait_1 produces a real
   event; in async mode it polls once and re-arms the event loop if
   an event was returned.  */

ptid_t
linux_process_target::wait (ptid_t ptid,
			    target_waitstatus *ourstatus,
			    target_wait_flags target_options)
{
  ptid_t event_ptid;

  /* Flush the async file first.  */
  if (target_is_async_p ())
    async_file_flush ();

  do
    {
      event_ptid = wait_1 (ptid, ourstatus, target_options);
    }
  while ((target_options & TARGET_WNOHANG) == 0
	 && ourstatus->kind () == TARGET_WAITKIND_IGNORE);

  /* If at least one stop was reported, there may be more.  A single
     SIGCHLD can signal more than one child stop.  */
  if (target_is_async_p ()
      && (target_options & TARGET_WNOHANG) != 0
      && event_ptid != null_ptid)
    async_file_mark ();

  return event_ptid;
}
3677 /* Send a signal to an LWP. */
3679 static int
3680 kill_lwp (unsigned long lwpid, int signo)
3682 int ret;
3684 errno = 0;
3685 ret = syscall (__NR_tkill, lwpid, signo);
3686 if (errno == ENOSYS)
3688 /* If tkill fails, then we are not using nptl threads, a
3689 configuration we no longer support. */
3690 perror_with_name (("tkill"));
3692 return ret;
/* Request that LWP stop, by queueing a SIGSTOP for it.  Exported
   (non-static) entry point; simply forwards to send_sigstop.  */

void
linux_stop_lwp (struct lwp_info *lwp)
{
  send_sigstop (lwp);
}
3701 static void
3702 send_sigstop (struct lwp_info *lwp)
3704 int pid;
3706 pid = lwpid_of (get_lwp_thread (lwp));
3708 /* If we already have a pending stop signal for this process, don't
3709 send another. */
3710 if (lwp->stop_expected)
3712 threads_debug_printf ("Have pending sigstop for lwp %d", pid);
3714 return;
3717 threads_debug_printf ("Sending sigstop to lwp %d", pid);
3719 lwp->stop_expected = 1;
3720 kill_lwp (pid, SIGSTOP);
3723 static void
3724 send_sigstop (thread_info *thread, lwp_info *except)
3726 struct lwp_info *lwp = get_thread_lwp (thread);
3728 /* Ignore EXCEPT. */
3729 if (lwp == except)
3730 return;
3732 if (lwp->stopped)
3733 return;
3735 send_sigstop (lwp);
3738 /* Increment the suspend count of an LWP, and stop it, if not stopped
3739 yet. */
3740 static void
3741 suspend_and_send_sigstop (thread_info *thread, lwp_info *except)
3743 struct lwp_info *lwp = get_thread_lwp (thread);
3745 /* Ignore EXCEPT. */
3746 if (lwp == except)
3747 return;
3749 lwp_suspended_inc (lwp);
3751 send_sigstop (thread, except);
3754 /* Mark LWP dead, with WSTAT as exit status pending to report later.
3755 If THREAD_EVENT is true, interpret WSTAT as a thread exit event
3756 instead of a process exit event. This is meaningful for the leader
3757 thread, as we normally report a process-wide exit event when we see
3758 the leader exit, and a thread exit event when we see any other
3759 thread exit. */
3761 static void
3762 mark_lwp_dead (struct lwp_info *lwp, int wstat, bool thread_event)
3764 /* Store the exit status for later. */
3765 lwp->status_pending_p = 1;
3766 lwp->status_pending = wstat;
3768 /* Store in waitstatus as well, as there's nothing else to process
3769 for this event. */
3770 if (WIFEXITED (wstat))
3772 if (thread_event)
3773 lwp->waitstatus.set_thread_exited (WEXITSTATUS (wstat));
3774 else
3775 lwp->waitstatus.set_exited (WEXITSTATUS (wstat));
3777 else if (WIFSIGNALED (wstat))
3779 gdb_assert (!thread_event);
3780 lwp->waitstatus.set_signalled (gdb_signal_from_host (WTERMSIG (wstat)));
3782 else
3783 gdb_assert_not_reached ("unknown status kind");
3785 /* Prevent trying to stop it. */
3786 lwp->stopped = 1;
3788 /* No further stops are expected from a dead lwp. */
3789 lwp->stop_expected = 0;
3792 /* Return true if LWP has exited already, and has a pending exit event
3793 to report to GDB. */
3795 static int
3796 lwp_is_marked_dead (struct lwp_info *lwp)
3798 return (lwp->status_pending_p
3799 && (WIFEXITED (lwp->status_pending)
3800 || WIFSIGNALED (lwp->status_pending)));
/* Wait until every LWP has reported its stop, leaving all events
   pending.  If the previously-current thread died in the meantime,
   leave the current thread unset so a stale selection isn't used
   behind GDB's back.  */

void
linux_process_target::wait_for_sigstop ()
{
  struct thread_info *saved_thread;
  ptid_t saved_tid;
  int wstat;
  int ret;

  saved_thread = current_thread;
  if (saved_thread != NULL)
    saved_tid = saved_thread->id;
  else
    saved_tid = null_ptid; /* avoid bogus unused warning */

  scoped_restore_current_thread restore_thread;

  threads_debug_printf ("pulling events");

  /* Passing NULL_PTID as filter indicates we want all events to be
     left pending.  Eventually this returns when there are no
     unwaited-for children left.  */
  ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat, __WALL);
  gdb_assert (ret == -1);

  if (saved_thread == NULL || mythread_alive (saved_tid))
    return;
  else
    {
      threads_debug_printf ("Previously current thread died.");

      /* We can't change the current inferior behind GDB's back,
	 otherwise, a subsequent command may apply to the wrong
	 process.  */
      restore_thread.dont_restore ();
      switch_to_thread (nullptr);
    }
}
/* Return true if THREAD is stopped somewhere it should not stay
   (inside a fast tracepoint jump pad) while GDB wants to examine it:
   i.e. it is mid-collection AND there is a reason GDB would want to
   see it stopped (breakpoint, watchpoint, or single-step request).  */

bool
linux_process_target::stuck_in_jump_pad (thread_info *thread)
{
  struct lwp_info *lwp = get_thread_lwp (thread);

  /* Callers must not pass suspended LWPs here.  */
  if (lwp->suspended != 0)
    internal_error ("LWP %ld is suspended, suspended=%d\n",
		    lwpid_of (thread), lwp->suspended);
  gdb_assert (lwp->stopped);

  /* Allow debugging the jump pad, gdb_collect, etc..  */
  return (supports_fast_tracepoints ()
	  && agent_loaded_p ()
	  && (gdb_breakpoint_here (lwp->stop_pc)
	      || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
	      || thread->last_resume_kind == resume_step)
	  && (linux_fast_tracepoint_collecting (lwp, NULL)
	      != fast_tpoint_collect_result::not_collecting));
}
3863 void
3864 linux_process_target::move_out_of_jump_pad (thread_info *thread)
3866 struct lwp_info *lwp = get_thread_lwp (thread);
3867 int *wstat;
3869 if (lwp->suspended != 0)
3871 internal_error ("LWP %ld is suspended, suspended=%d\n",
3872 lwpid_of (thread), lwp->suspended);
3874 gdb_assert (lwp->stopped);
3876 /* For gdb_breakpoint_here. */
3877 scoped_restore_current_thread restore_thread;
3878 switch_to_thread (thread);
3880 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3882 /* Allow debugging the jump pad, gdb_collect, etc. */
3883 if (!gdb_breakpoint_here (lwp->stop_pc)
3884 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
3885 && thread->last_resume_kind != resume_step
3886 && maybe_move_out_of_jump_pad (lwp, wstat))
3888 threads_debug_printf ("LWP %ld needs stabilizing (in jump pad)",
3889 lwpid_of (thread));
3891 if (wstat)
3893 lwp->status_pending_p = 0;
3894 enqueue_one_deferred_signal (lwp, wstat);
3896 threads_debug_printf ("Signal %d for LWP %ld deferred (in jump pad",
3897 WSTOPSIG (*wstat), lwpid_of (thread));
3900 resume_one_lwp (lwp, 0, 0, NULL);
3902 else
3903 lwp_suspended_inc (lwp);
3906 static bool
3907 lwp_running (thread_info *thread)
3909 struct lwp_info *lwp = get_thread_lwp (thread);
3911 if (lwp_is_marked_dead (lwp))
3912 return false;
3914 return !lwp->stopped;
/* Stop every LWP except EXCEPT, additionally bumping each one's
   suspend count when SUSPEND is non-zero.  Sets the global
   stopping_threads state for the duration so the wait machinery knows
   these SIGSTOPs are internal, then waits until all stops have been
   collected.  */

void
linux_process_target::stop_all_lwps (int suspend, lwp_info *except)
{
  /* Should not be called recursively.  */
  gdb_assert (stopping_threads == NOT_STOPPING_THREADS);

  THREADS_SCOPED_DEBUG_ENTER_EXIT;

  threads_debug_printf
    ("%s, except=%s", suspend ? "stop-and-suspend" : "stop",
     (except != NULL
      ? target_pid_to_str (ptid_of (get_lwp_thread (except))).c_str ()
      : "none"));

  stopping_threads = (suspend
		      ? STOPPING_AND_SUSPENDING_THREADS
		      : STOPPING_THREADS);

  if (suspend)
    for_each_thread ([&] (thread_info *thread)
      {
	suspend_and_send_sigstop (thread, except);
      });
  else
    for_each_thread ([&] (thread_info *thread)
      {
	send_sigstop (thread, except);
      });

  /* Block until every LWP has reported its stop.  */
  wait_for_sigstop ();
  stopping_threads = NOT_STOPPING_THREADS;

  threads_debug_printf ("setting stopping_threads back to !stopping");
}
3952 /* Enqueue one signal in the chain of signals which need to be
3953 delivered to this process on next resume. */
3955 static void
3956 enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
3958 lwp->pending_signals.emplace_back (signal);
3959 if (info == nullptr)
3960 memset (&lwp->pending_signals.back ().info, 0, sizeof (siginfo_t));
3961 else
3962 lwp->pending_signals.back ().info = *info;
/* Set single-step breakpoints at every possible next PC of LWP, to
   emulate hardware single-step on targets without it.  */

void
linux_process_target::install_software_single_step_breakpoints (lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  struct regcache *regcache = get_thread_regcache (thread, 1);

  /* Breakpoint insertion below operates on the current thread, so
     temporarily switch to LWP's thread.  */
  scoped_restore_current_thread restore_thread;

  switch_to_thread (thread);
  std::vector<CORE_ADDR> next_pcs = low_get_next_pcs (regcache);

  for (CORE_ADDR pc : next_pcs)
    set_single_step_breakpoint (pc, current_ptid);
}
3981 linux_process_target::single_step (lwp_info* lwp)
3983 int step = 0;
3985 if (supports_hardware_single_step ())
3987 step = 1;
3989 else if (supports_software_single_step ())
3991 install_software_single_step_breakpoints (lwp);
3992 step = 0;
3994 else
3995 threads_debug_printf ("stepping is not implemented on this target");
3997 return step;
4000 /* The signal can be delivered to the inferior if we are not trying to
4001 finish a fast tracepoint collect. Since signal can be delivered in
4002 the step-over, the program may go to signal handler and trap again
4003 after return from the signal handler. We can live with the spurious
4004 double traps. */
4006 static int
4007 lwp_signal_can_be_delivered (struct lwp_info *lwp)
4009 return (lwp->collecting_fast_tracepoint
4010 == fast_tpoint_collect_result::not_collecting);
/* Resume LWP with SIGNAL (and its siginfo INFO), single-stepping if
   STEP is non-zero.  May defer the resume entirely (pending status),
   queue SIGNAL for later, or upgrade/downgrade STEP depending on
   pending breakpoint reinserts and fast-tracepoint collection state.
   Throws on ptrace failure; see resume_one_lwp for the non-throwing
   wrapper.  */

void
linux_process_target::resume_one_lwp_throw (lwp_info *lwp, int step,
					    int signal, siginfo_t *info)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  int ptrace_request;
  struct process_info *proc = get_thread_process (thread);

  /* Note that target description may not be initialised
     (proc->tdesc == NULL) at this point because the program hasn't
     stopped at the first instruction yet.  It means GDBserver skips
     the extra traps from the wrapper program (see option --wrapper).
     Code in this function that requires register access should be
     guarded by proc->tdesc == NULL or something else.  */

  if (lwp->stopped == 0)
    return;

  gdb_assert (lwp->waitstatus.kind () == TARGET_WAITKIND_IGNORE);

  fast_tpoint_collect_result fast_tp_collecting
    = lwp->collecting_fast_tracepoint;

  gdb_assert (!stabilizing_threads
	      || (fast_tp_collecting
		  != fast_tpoint_collect_result::not_collecting));

  /* Cancel actions that rely on GDB not changing the PC (e.g., the
     user used the "jump" command, or "set $pc = foo").  */
  if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
    {
      /* Collecting 'while-stepping' actions doesn't make sense
	 anymore.  */
      release_while_stepping_state_list (thread);
    }

  /* If we have pending signals or status, and a new signal, enqueue the
     signal.  Also enqueue the signal if it can't be delivered to the
     inferior right now.  */
  if (signal != 0
      && (lwp->status_pending_p
	  || !lwp->pending_signals.empty ()
	  || !lwp_signal_can_be_delivered (lwp)))
    {
      enqueue_pending_signal (lwp, signal, info);

      /* Postpone any pending signal.  It was enqueued above.  */
      signal = 0;
    }

  if (lwp->status_pending_p)
    {
      threads_debug_printf
	("Not resuming lwp %ld (%s, stop %s); has pending status",
	 lwpid_of (thread), step ? "step" : "continue",
	 lwp->stop_expected ? "expected" : "not expected");
      return;
    }

  scoped_restore_current_thread restore_thread;
  switch_to_thread (thread);

  /* This bit needs some thinking about.  If we get a signal that
     we must report while a single-step reinsert is still pending,
     we often end up resuming the thread.  It might be better to
     (ew) allow a stack of pending events; then we could be sure that
     the reinsert happened right away and not lose any signals.

     Making this stack would also shrink the window in which breakpoints are
     uninserted (see comment in linux_wait_for_lwp) but not enough for
     complete correctness, so it won't solve that problem.  It may be
     worthwhile just to solve this one, however.  */
  if (lwp->bp_reinsert != 0)
    {
      threads_debug_printf ("  pending reinsert at 0x%s",
			    paddress (lwp->bp_reinsert));

      if (supports_hardware_single_step ())
	{
	  if (fast_tp_collecting == fast_tpoint_collect_result::not_collecting)
	    {
	      if (step == 0)
		warning ("BAD - reinserting but not stepping.");
	      if (lwp->suspended)
		warning ("BAD - reinserting and suspended(%d).",
			 lwp->suspended);
	    }
	}

      step = maybe_hw_step (thread);
    }

  if (fast_tp_collecting == fast_tpoint_collect_result::before_insn)
    threads_debug_printf
      ("lwp %ld wants to get out of fast tracepoint jump pad "
       "(exit-jump-pad-bkpt)", lwpid_of (thread));

  else if (fast_tp_collecting == fast_tpoint_collect_result::at_insn)
    {
      threads_debug_printf
	("lwp %ld wants to get out of fast tracepoint jump pad single-stepping",
	 lwpid_of (thread));

      if (supports_hardware_single_step ())
	step = 1;
      else
	{
	  internal_error ("moving out of jump pad single-stepping"
			  " not implemented on this target");
	}
    }

  /* If we have while-stepping actions in this thread set it stepping.
     If we have a signal to deliver, it may or may not be set to
     SIG_IGN, we don't know.  Assume so, and allow collecting
     while-stepping into a signal handler.  A possible smart thing to
     do would be to set an internal breakpoint at the signal return
     address, continue, and carry on catching this while-stepping
     action only when that breakpoint is hit.  A future
     enhancement.  */
  if (thread->while_stepping != NULL)
    {
      threads_debug_printf
	("lwp %ld has a while-stepping action -> forcing step.",
	 lwpid_of (thread));

      step = single_step (lwp);
    }

  /* Record the PC we are resuming from, for later comparison.  Guarded
     because the tdesc (and thus registers) may not be available yet.  */
  if (proc->tdesc != NULL && low_supports_breakpoints ())
    {
      struct regcache *regcache = get_thread_regcache (current_thread, 1);

      lwp->stop_pc = low_get_pc (regcache);

      threads_debug_printf ("  %s from pc 0x%lx", step ? "step" : "continue",
			    (long) lwp->stop_pc);
    }

  /* If we have pending signals, consume one if it can be delivered to
     the inferior.  */
  if (!lwp->pending_signals.empty () && lwp_signal_can_be_delivered (lwp))
    {
      const pending_signal &p_sig = lwp->pending_signals.front ();

      signal = p_sig.signal;
      if (p_sig.info.si_signo != 0)
	ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
		&p_sig.info);

      lwp->pending_signals.pop_front ();
    }

  threads_debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)",
			lwpid_of (thread), step ? "step" : "continue", signal,
			lwp->stop_expected ? "expected" : "not expected");

  low_prepare_to_resume (lwp);

  regcache_invalidate_thread (thread);
  errno = 0;
  lwp->stepping = step;
  if (step)
    ptrace_request = PTRACE_SINGLESTEP;
  else if (gdb_catching_syscalls_p (lwp))
    ptrace_request = PTRACE_SYSCALL;
  else
    ptrace_request = PTRACE_CONT;
  ptrace (ptrace_request,
	  lwpid_of (thread),
	  (PTRACE_TYPE_ARG3) 0,
	  /* Coerce to a uintptr_t first to avoid potential gcc warning
	     of coercing an 8 byte integer to a 4 byte pointer.  */
	  (PTRACE_TYPE_ARG4) (uintptr_t) signal);

  if (errno)
    {
      int saved_errno = errno;

      threads_debug_printf ("ptrace errno = %d (%s)",
			    saved_errno, strerror (saved_errno));

      errno = saved_errno;
      perror_with_name ("resuming thread");
    }

  /* Successfully resumed.  Clear state that no longer makes sense,
     and mark the LWP as running.  Must not do this before resuming
     otherwise if that fails other code will be confused.  E.g., we'd
     later try to stop the LWP and hang forever waiting for a stop
     status.  Note that we must not throw after this is cleared,
     otherwise handle_zombie_lwp_error would get confused.  */
  lwp->stopped = 0;
  lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
}
/* Hook called just before resuming LWP.  The base implementation does
   nothing; presumably subclasses override it to flush per-arch state
   (e.g. debug registers) before the resume -- confirm in linux-low.h.  */

void
linux_process_target::low_prepare_to_resume (lwp_info *lwp)
{
  /* Nop.  */
}
4215 /* Called when we try to resume a stopped LWP and that errors out. If
4216 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
4217 or about to become), discard the error, clear any pending status
4218 the LWP may have, and return true (we'll collect the exit status
4219 soon enough). Otherwise, return false. */
4221 static int
4222 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4224 struct thread_info *thread = get_lwp_thread (lp);
4226 /* If we get an error after resuming the LWP successfully, we'd
4227 confuse !T state for the LWP being gone. */
4228 gdb_assert (lp->stopped);
4230 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4231 because even if ptrace failed with ESRCH, the tracee may be "not
4232 yet fully dead", but already refusing ptrace requests. In that
4233 case the tracee has 'R (Running)' state for a little bit
4234 (observed in Linux 3.18). See also the note on ESRCH in the
4235 ptrace(2) man page. Instead, check whether the LWP has any state
4236 other than ptrace-stopped. */
4238 /* Don't assume anything if /proc/PID/status can't be read. */
4239 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
4241 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4242 lp->status_pending_p = 0;
4243 return 1;
4245 return 0;
/* Like resume_one_lwp_throw, but swallows the error when the LWP
   turns out to be gone already (zombie or nearly so); in that case
   the LWP is marked running so its exit event can still be
   collected.  */

void
linux_process_target::resume_one_lwp (lwp_info *lwp, int step, int signal,
				      siginfo_t *info)
{
  try
    {
      resume_one_lwp_throw (lwp, step, signal, info);
    }
  catch (const gdb_exception_error &ex)
    {
      if (check_ptrace_stopped_lwp_gone (lwp))
	{
	  /* This could happen because we tried to resume an LWP after
	     its leader exited.  Mark it as resumed, so we can collect
	     an exit event from it.  */
	  lwp->stopped = 0;
	  lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
	}
      else
	throw;
    }
}
/* This function is called once per thread via for_each_thread.
   We look up which resume request applies to THREAD and mark it with a
   pointer to the appropriate resume request.

   This algorithm is O(threads * resume elements), but resume elements
   is small (and will remain small at least until GDB supports thread
   suspension).  */

static void
linux_set_resume_request (thread_info *thread, thread_resume *resume, size_t n)
{
  struct lwp_info *lwp = get_thread_lwp (thread);

  for (int ndx = 0; ndx < n; ndx++)
    {
      ptid_t ptid = resume[ndx].thread;
      if (ptid == minus_one_ptid
	  || ptid == thread->id
	  /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
	     of PID'.  */
	  || (ptid.pid () == pid_of (thread)
	      && (ptid.is_pid ()
		  || ptid.lwp () == -1)))
	{
	  /* A stop request for a thread already stopping (or stopped)
	     at GDB's request is a no-op.  */
	  if (resume[ndx].kind == resume_stop
	      && thread->last_resume_kind == resume_stop)
	    {
	      threads_debug_printf
		("already %s LWP %ld at GDB's request",
		 (thread->last_status.kind () == TARGET_WAITKIND_STOPPED
		  ? "stopped" : "stopping"),
		 lwpid_of (thread));

	      continue;
	    }

	  /* Ignore (wildcard) resume requests for already-resumed
	     threads.  */
	  if (resume[ndx].kind != resume_stop
	      && thread->last_resume_kind != resume_stop)
	    {
	      threads_debug_printf
		("already %s LWP %ld at GDB's request",
		 (thread->last_resume_kind == resume_step
		  ? "stepping" : "continuing"),
		 lwpid_of (thread));
	      continue;
	    }

	  /* Don't let wildcard resumes resume fork/vfork/clone
	     children that GDB does not yet know are new children.  */
	  if (lwp->relative != NULL)
	    {
	      struct lwp_info *rel = lwp->relative;

	      if (rel->status_pending_p
		  && is_new_child_status (rel->waitstatus.kind ()))
		{
		  threads_debug_printf
		    ("not resuming LWP %ld: has queued stop reply",
		     lwpid_of (thread));
		  continue;
		}
	    }

	  /* If the thread has a pending event that has already been
	     reported to GDBserver core, but GDB has not pulled the
	     event out of the vStopped queue yet, likewise, ignore the
	     (wildcard) resume request.  */
	  if (in_queued_stop_replies (thread->id))
	    {
	      threads_debug_printf
		("not resuming LWP %ld: has queued stop reply",
		 lwpid_of (thread));
	      continue;
	    }

	  /* This request matches; record it and stop searching.  */
	  lwp->resume = &resume[ndx];
	  thread->last_resume_kind = lwp->resume->kind;

	  lwp->step_range_start = lwp->resume->step_range_start;
	  lwp->step_range_end = lwp->resume->step_range_end;

	  /* If we had a deferred signal to report, dequeue one now.
	     This can happen if LWP gets more than one signal while
	     trying to get out of a jump pad.  */
	  if (lwp->stopped
	      && !lwp->status_pending_p
	      && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
	    {
	      lwp->status_pending_p = 1;

	      threads_debug_printf
		("Dequeueing deferred signal %d for LWP %ld, "
		 "leaving status pending.",
		 WSTOPSIG (lwp->status_pending),
		 lwpid_of (thread));
	    }

	  return;
	}
    }

  /* No resume action for this thread.  */
  lwp->resume = NULL;
}
4378 bool
4379 linux_process_target::resume_status_pending (thread_info *thread)
4381 struct lwp_info *lwp = get_thread_lwp (thread);
4383 /* LWPs which will not be resumed are not interesting, because
4384 we might not wait for them next time through linux_wait. */
4385 if (lwp->resume == NULL)
4386 return false;
4388 return thread_still_has_status_pending (thread);
4391 bool
4392 linux_process_target::thread_needs_step_over (thread_info *thread)
4394 struct lwp_info *lwp = get_thread_lwp (thread);
4395 CORE_ADDR pc;
4396 struct process_info *proc = get_thread_process (thread);
4398 /* GDBserver is skipping the extra traps from the wrapper program,
4399 don't have to do step over. */
4400 if (proc->tdesc == NULL)
4401 return false;
4403 /* LWPs which will not be resumed are not interesting, because we
4404 might not wait for them next time through linux_wait. */
4406 if (!lwp->stopped)
4408 threads_debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped",
4409 lwpid_of (thread));
4410 return false;
4413 if (thread->last_resume_kind == resume_stop)
4415 threads_debug_printf
4416 ("Need step over [LWP %ld]? Ignoring, should remain stopped",
4417 lwpid_of (thread));
4418 return false;
4421 gdb_assert (lwp->suspended >= 0);
4423 if (lwp->suspended)
4425 threads_debug_printf ("Need step over [LWP %ld]? Ignoring, suspended",
4426 lwpid_of (thread));
4427 return false;
4430 if (lwp->status_pending_p)
4432 threads_debug_printf
4433 ("Need step over [LWP %ld]? Ignoring, has pending status.",
4434 lwpid_of (thread));
4435 return false;
4438 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4439 or we have. */
4440 pc = get_pc (lwp);
4442 /* If the PC has changed since we stopped, then don't do anything,
4443 and let the breakpoint/tracepoint be hit. This happens if, for
4444 instance, GDB handled the decr_pc_after_break subtraction itself,
4445 GDB is OOL stepping this thread, or the user has issued a "jump"
4446 command, or poked thread's registers herself. */
4447 if (pc != lwp->stop_pc)
4449 threads_debug_printf
4450 ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4451 "Old stop_pc was 0x%s, PC is now 0x%s", lwpid_of (thread),
4452 paddress (lwp->stop_pc), paddress (pc));
4453 return false;
4456 /* On software single step target, resume the inferior with signal
4457 rather than stepping over. */
4458 if (supports_software_single_step ()
4459 && !lwp->pending_signals.empty ()
4460 && lwp_signal_can_be_delivered (lwp))
4462 threads_debug_printf
4463 ("Need step over [LWP %ld]? Ignoring, has pending signals.",
4464 lwpid_of (thread));
4466 return false;
4469 scoped_restore_current_thread restore_thread;
4470 switch_to_thread (thread);
4472 /* We can only step over breakpoints we know about. */
4473 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4475 /* Don't step over a breakpoint that GDB expects to hit
4476 though. If the condition is being evaluated on the target's side
4477 and it evaluate to false, step over this breakpoint as well. */
4478 if (gdb_breakpoint_here (pc)
4479 && gdb_condition_true_at_breakpoint (pc)
4480 && gdb_no_commands_at_breakpoint (pc))
4482 threads_debug_printf ("Need step over [LWP %ld]? yes, but found"
4483 " GDB breakpoint at 0x%s; skipping step over",
4484 lwpid_of (thread), paddress (pc));
4486 return false;
4488 else
4490 threads_debug_printf ("Need step over [LWP %ld]? yes, "
4491 "found breakpoint at 0x%s",
4492 lwpid_of (thread), paddress (pc));
4494 /* We've found an lwp that needs stepping over --- return 1 so
4495 that find_thread stops looking. */
4496 return true;
4500 threads_debug_printf
4501 ("Need step over [LWP %ld]? No, no breakpoint found at 0x%s",
4502 lwpid_of (thread), paddress (pc));
4504 return false;
4507 void
4508 linux_process_target::start_step_over (lwp_info *lwp)
4510 struct thread_info *thread = get_lwp_thread (lwp);
4511 CORE_ADDR pc;
4513 threads_debug_printf ("Starting step-over on LWP %ld. Stopping all threads",
4514 lwpid_of (thread));
4516 stop_all_lwps (1, lwp);
4518 if (lwp->suspended != 0)
4520 internal_error ("LWP %ld suspended=%d\n", lwpid_of (thread),
4521 lwp->suspended);
4524 threads_debug_printf ("Done stopping all threads for step-over.");
4526 /* Note, we should always reach here with an already adjusted PC,
4527 either by GDB (if we're resuming due to GDB's request), or by our
4528 caller, if we just finished handling an internal breakpoint GDB
4529 shouldn't care about. */
4530 pc = get_pc (lwp);
4532 bool step = false;
4534 scoped_restore_current_thread restore_thread;
4535 switch_to_thread (thread);
4537 lwp->bp_reinsert = pc;
4538 uninsert_breakpoints_at (pc);
4539 uninsert_fast_tracepoint_jumps_at (pc);
4541 step = single_step (lwp);
4544 resume_one_lwp (lwp, step, 0, NULL);
4546 /* Require next event from this LWP. */
4547 step_over_bkpt = thread->id;
4550 bool
4551 linux_process_target::finish_step_over (lwp_info *lwp)
4553 if (lwp->bp_reinsert != 0)
4555 scoped_restore_current_thread restore_thread;
4557 threads_debug_printf ("Finished step over.");
4559 switch_to_thread (get_lwp_thread (lwp));
4561 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4562 may be no breakpoint to reinsert there by now. */
4563 reinsert_breakpoints_at (lwp->bp_reinsert);
4564 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4566 lwp->bp_reinsert = 0;
4568 /* Delete any single-step breakpoints. No longer needed. We
4569 don't have to worry about other threads hitting this trap,
4570 and later not being able to explain it, because we were
4571 stepping over a breakpoint, and we hold all threads but
4572 LWP stopped while doing that. */
4573 if (!supports_hardware_single_step ())
4575 gdb_assert (has_single_step_breakpoints (current_thread));
4576 delete_single_step_breakpoints (current_thread);
4579 step_over_bkpt = null_ptid;
4580 return true;
4582 else
4583 return false;
4586 void
4587 linux_process_target::complete_ongoing_step_over ()
4589 if (step_over_bkpt != null_ptid)
4591 struct lwp_info *lwp;
4592 int wstat;
4593 int ret;
4595 threads_debug_printf ("detach: step over in progress, finish it first");
4597 /* Passing NULL_PTID as filter indicates we want all events to
4598 be left pending. Eventually this returns when there are no
4599 unwaited-for children left. */
4600 ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat,
4601 __WALL);
4602 gdb_assert (ret == -1);
4604 lwp = find_lwp_pid (step_over_bkpt);
4605 if (lwp != NULL)
4607 finish_step_over (lwp);
4609 /* If we got our step SIGTRAP, don't leave it pending,
4610 otherwise we would report it to GDB as a spurious
4611 SIGTRAP. */
4612 gdb_assert (lwp->status_pending_p);
4613 if (WIFSTOPPED (lwp->status_pending)
4614 && WSTOPSIG (lwp->status_pending) == SIGTRAP)
4616 thread_info *thread = get_lwp_thread (lwp);
4617 if (thread->last_resume_kind != resume_step)
4619 threads_debug_printf ("detach: discard step-over SIGTRAP");
4621 lwp->status_pending_p = 0;
4622 lwp->status_pending = 0;
4623 resume_one_lwp (lwp, lwp->stepping, 0, NULL);
4625 else
4626 threads_debug_printf
4627 ("detach: resume_step, not discarding step-over SIGTRAP");
4630 step_over_bkpt = null_ptid;
4631 unsuspend_all_lwps (lwp);
4635 void
4636 linux_process_target::resume_one_thread (thread_info *thread,
4637 bool leave_all_stopped)
4639 struct lwp_info *lwp = get_thread_lwp (thread);
4640 int leave_pending;
4642 if (lwp->resume == NULL)
4643 return;
4645 if (lwp->resume->kind == resume_stop)
4647 threads_debug_printf ("resume_stop request for LWP %ld",
4648 lwpid_of (thread));
4650 if (!lwp->stopped)
4652 threads_debug_printf ("stopping LWP %ld", lwpid_of (thread));
4654 /* Stop the thread, and wait for the event asynchronously,
4655 through the event loop. */
4656 send_sigstop (lwp);
4658 else
4660 threads_debug_printf ("already stopped LWP %ld", lwpid_of (thread));
4662 /* The LWP may have been stopped in an internal event that
4663 was not meant to be notified back to GDB (e.g., gdbserver
4664 breakpoint), so we should be reporting a stop event in
4665 this case too. */
4667 /* If the thread already has a pending SIGSTOP, this is a
4668 no-op. Otherwise, something later will presumably resume
4669 the thread and this will cause it to cancel any pending
4670 operation, due to last_resume_kind == resume_stop. If
4671 the thread already has a pending status to report, we
4672 will still report it the next time we wait - see
4673 status_pending_p_callback. */
4675 /* If we already have a pending signal to report, then
4676 there's no need to queue a SIGSTOP, as this means we're
4677 midway through moving the LWP out of the jumppad, and we
4678 will report the pending signal as soon as that is
4679 finished. */
4680 if (lwp->pending_signals_to_report.empty ())
4681 send_sigstop (lwp);
4684 /* For stop requests, we're done. */
4685 lwp->resume = NULL;
4686 thread->last_status.set_ignore ();
4687 return;
4690 /* If this thread which is about to be resumed has a pending status,
4691 then don't resume it - we can just report the pending status.
4692 Likewise if it is suspended, because e.g., another thread is
4693 stepping past a breakpoint. Make sure to queue any signals that
4694 would otherwise be sent. In all-stop mode, we do this decision
4695 based on if *any* thread has a pending status. If there's a
4696 thread that needs the step-over-breakpoint dance, then don't
4697 resume any other thread but that particular one. */
4698 leave_pending = (lwp->suspended
4699 || lwp->status_pending_p
4700 || leave_all_stopped);
4702 /* If we have a new signal, enqueue the signal. */
4703 if (lwp->resume->sig != 0)
4705 siginfo_t info, *info_p;
4707 /* If this is the same signal we were previously stopped by,
4708 make sure to queue its siginfo. */
4709 if (WIFSTOPPED (lwp->last_status)
4710 && WSTOPSIG (lwp->last_status) == lwp->resume->sig
4711 && ptrace (PTRACE_GETSIGINFO, lwpid_of (thread),
4712 (PTRACE_TYPE_ARG3) 0, &info) == 0)
4713 info_p = &info;
4714 else
4715 info_p = NULL;
4717 enqueue_pending_signal (lwp, lwp->resume->sig, info_p);
4720 if (!leave_pending)
4722 threads_debug_printf ("resuming LWP %ld", lwpid_of (thread));
4724 proceed_one_lwp (thread, NULL);
4726 else
4727 threads_debug_printf ("leaving LWP %ld stopped", lwpid_of (thread));
4729 thread->last_status.set_ignore ();
4730 lwp->resume = NULL;
4733 void
4734 linux_process_target::resume (thread_resume *resume_info, size_t n)
4736 struct thread_info *need_step_over = NULL;
4738 THREADS_SCOPED_DEBUG_ENTER_EXIT;
4740 for_each_thread ([&] (thread_info *thread)
4742 linux_set_resume_request (thread, resume_info, n);
4745 /* If there is a thread which would otherwise be resumed, which has
4746 a pending status, then don't resume any threads - we can just
4747 report the pending status. Make sure to queue any signals that
4748 would otherwise be sent. In non-stop mode, we'll apply this
4749 logic to each thread individually. We consume all pending events
4750 before considering to start a step-over (in all-stop). */
4751 bool any_pending = false;
4752 if (!non_stop)
4753 any_pending = find_thread ([this] (thread_info *thread)
4755 return resume_status_pending (thread);
4756 }) != nullptr;
4758 /* If there is a thread which would otherwise be resumed, which is
4759 stopped at a breakpoint that needs stepping over, then don't
4760 resume any threads - have it step over the breakpoint with all
4761 other threads stopped, then resume all threads again. Make sure
4762 to queue any signals that would otherwise be delivered or
4763 queued. */
4764 if (!any_pending && low_supports_breakpoints ())
4765 need_step_over = find_thread ([this] (thread_info *thread)
4767 return thread_needs_step_over (thread);
4770 bool leave_all_stopped = (need_step_over != NULL || any_pending);
4772 if (need_step_over != NULL)
4773 threads_debug_printf ("Not resuming all, need step over");
4774 else if (any_pending)
4775 threads_debug_printf ("Not resuming, all-stop and found "
4776 "an LWP with pending status");
4777 else
4778 threads_debug_printf ("Resuming, no pending status or step over needed");
4780 /* Even if we're leaving threads stopped, queue all signals we'd
4781 otherwise deliver. */
4782 for_each_thread ([&] (thread_info *thread)
4784 resume_one_thread (thread, leave_all_stopped);
4787 if (need_step_over)
4788 start_step_over (get_thread_lwp (need_step_over));
4790 /* We may have events that were pending that can/should be sent to
4791 the client now. Trigger a linux_wait call. */
4792 if (target_is_async_p ())
4793 async_file_mark ();
4796 void
4797 linux_process_target::proceed_one_lwp (thread_info *thread, lwp_info *except)
4799 struct lwp_info *lwp = get_thread_lwp (thread);
4800 int step;
4802 if (lwp == except)
4803 return;
4805 threads_debug_printf ("lwp %ld", lwpid_of (thread));
4807 if (!lwp->stopped)
4809 threads_debug_printf (" LWP %ld already running", lwpid_of (thread));
4810 return;
4813 if (thread->last_resume_kind == resume_stop
4814 && thread->last_status.kind () != TARGET_WAITKIND_IGNORE)
4816 threads_debug_printf (" client wants LWP to remain %ld stopped",
4817 lwpid_of (thread));
4818 return;
4821 if (lwp->status_pending_p)
4823 threads_debug_printf (" LWP %ld has pending status, leaving stopped",
4824 lwpid_of (thread));
4825 return;
4828 gdb_assert (lwp->suspended >= 0);
4830 if (lwp->suspended)
4832 threads_debug_printf (" LWP %ld is suspended", lwpid_of (thread));
4833 return;
4836 if (thread->last_resume_kind == resume_stop
4837 && lwp->pending_signals_to_report.empty ()
4838 && (lwp->collecting_fast_tracepoint
4839 == fast_tpoint_collect_result::not_collecting))
4841 /* We haven't reported this LWP as stopped yet (otherwise, the
4842 last_status.kind check above would catch it, and we wouldn't
4843 reach here. This LWP may have been momentarily paused by a
4844 stop_all_lwps call while handling for example, another LWP's
4845 step-over. In that case, the pending expected SIGSTOP signal
4846 that was queued at vCont;t handling time will have already
4847 been consumed by wait_for_sigstop, and so we need to requeue
4848 another one here. Note that if the LWP already has a SIGSTOP
4849 pending, this is a no-op. */
4851 threads_debug_printf
4852 ("Client wants LWP %ld to stop. Making sure it has a SIGSTOP pending",
4853 lwpid_of (thread));
4855 send_sigstop (lwp);
4858 if (thread->last_resume_kind == resume_step)
4860 threads_debug_printf (" stepping LWP %ld, client wants it stepping",
4861 lwpid_of (thread));
4863 /* If resume_step is requested by GDB, install single-step
4864 breakpoints when the thread is about to be actually resumed if
4865 the single-step breakpoints weren't removed. */
4866 if (supports_software_single_step ()
4867 && !has_single_step_breakpoints (thread))
4868 install_software_single_step_breakpoints (lwp);
4870 step = maybe_hw_step (thread);
4872 else if (lwp->bp_reinsert != 0)
4874 threads_debug_printf (" stepping LWP %ld, reinsert set",
4875 lwpid_of (thread));
4877 step = maybe_hw_step (thread);
4879 else
4880 step = 0;
4882 resume_one_lwp (lwp, step, 0, NULL);
4885 void
4886 linux_process_target::unsuspend_and_proceed_one_lwp (thread_info *thread,
4887 lwp_info *except)
4889 struct lwp_info *lwp = get_thread_lwp (thread);
4891 if (lwp == except)
4892 return;
4894 lwp_suspended_decr (lwp);
4896 proceed_one_lwp (thread, except);
4899 void
4900 linux_process_target::proceed_all_lwps ()
4902 struct thread_info *need_step_over;
4904 /* If there is a thread which would otherwise be resumed, which is
4905 stopped at a breakpoint that needs stepping over, then don't
4906 resume any threads - have it step over the breakpoint with all
4907 other threads stopped, then resume all threads again. */
4909 if (low_supports_breakpoints ())
4911 need_step_over = find_thread ([this] (thread_info *thread)
4913 return thread_needs_step_over (thread);
4916 if (need_step_over != NULL)
4918 threads_debug_printf ("found thread %ld needing a step-over",
4919 lwpid_of (need_step_over));
4921 start_step_over (get_thread_lwp (need_step_over));
4922 return;
4926 threads_debug_printf ("Proceeding, no step-over needed");
4928 for_each_thread ([this] (thread_info *thread)
4930 proceed_one_lwp (thread, NULL);
4934 void
4935 linux_process_target::unstop_all_lwps (int unsuspend, lwp_info *except)
4937 THREADS_SCOPED_DEBUG_ENTER_EXIT;
4939 if (except)
4940 threads_debug_printf ("except=(LWP %ld)",
4941 lwpid_of (get_lwp_thread (except)));
4942 else
4943 threads_debug_printf ("except=nullptr");
4945 if (unsuspend)
4946 for_each_thread ([&] (thread_info *thread)
4948 unsuspend_and_proceed_one_lwp (thread, except);
4950 else
4951 for_each_thread ([&] (thread_info *thread)
4953 proceed_one_lwp (thread, except);
4958 #ifdef HAVE_LINUX_REGSETS
4960 #define use_linux_regsets 1
4962 /* Returns true if REGSET has been disabled. */
4964 static int
4965 regset_disabled (struct regsets_info *info, struct regset_info *regset)
4967 return (info->disabled_regsets != NULL
4968 && info->disabled_regsets[regset - info->regsets]);
4971 /* Disable REGSET. */
4973 static void
4974 disable_regset (struct regsets_info *info, struct regset_info *regset)
4976 int dr_offset;
4978 dr_offset = regset - info->regsets;
4979 if (info->disabled_regsets == NULL)
4980 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
4981 info->disabled_regsets[dr_offset] = 1;
4984 static int
4985 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
4986 struct regcache *regcache)
4988 struct regset_info *regset;
4989 int saw_general_regs = 0;
4990 int pid;
4991 struct iovec iov;
4993 pid = lwpid_of (current_thread);
4994 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
4996 void *buf, *data;
4997 int nt_type, res;
4999 if (regset->size == 0 || regset_disabled (regsets_info, regset))
5000 continue;
5002 buf = xmalloc (regset->size);
5004 nt_type = regset->nt_type;
5005 if (nt_type)
5007 iov.iov_base = buf;
5008 iov.iov_len = regset->size;
5009 data = (void *) &iov;
5011 else
5012 data = buf;
5014 #ifndef __sparc__
5015 res = ptrace (regset->get_request, pid,
5016 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5017 #else
5018 res = ptrace (regset->get_request, pid, data, nt_type);
5019 #endif
5020 if (res < 0)
5022 if (errno == EIO
5023 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
5025 /* If we get EIO on a regset, or an EINVAL and the regset is
5026 optional, do not try it again for this process mode. */
5027 disable_regset (regsets_info, regset);
5029 else if (errno == ENODATA)
5031 /* ENODATA may be returned if the regset is currently
5032 not "active". This can happen in normal operation,
5033 so suppress the warning in this case. */
5035 else if (errno == ESRCH)
5037 /* At this point, ESRCH should mean the process is
5038 already gone, in which case we simply ignore attempts
5039 to read its registers. */
5041 else
5043 char s[256];
5044 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
5045 pid);
5046 perror (s);
5049 else
5051 if (regset->type == GENERAL_REGS)
5052 saw_general_regs = 1;
5053 regset->store_function (regcache, buf);
5055 free (buf);
5057 if (saw_general_regs)
5058 return 0;
5059 else
5060 return 1;
5063 static int
5064 regsets_store_inferior_registers (struct regsets_info *regsets_info,
5065 struct regcache *regcache)
5067 struct regset_info *regset;
5068 int saw_general_regs = 0;
5069 int pid;
5070 struct iovec iov;
5072 pid = lwpid_of (current_thread);
5073 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5075 void *buf, *data;
5076 int nt_type, res;
5078 if (regset->size == 0 || regset_disabled (regsets_info, regset)
5079 || regset->fill_function == NULL)
5080 continue;
5082 buf = xmalloc (regset->size);
5084 /* First fill the buffer with the current register set contents,
5085 in case there are any items in the kernel's regset that are
5086 not in gdbserver's regcache. */
5088 nt_type = regset->nt_type;
5089 if (nt_type)
5091 iov.iov_base = buf;
5092 iov.iov_len = regset->size;
5093 data = (void *) &iov;
5095 else
5096 data = buf;
5098 #ifndef __sparc__
5099 res = ptrace (regset->get_request, pid,
5100 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5101 #else
5102 res = ptrace (regset->get_request, pid, data, nt_type);
5103 #endif
5105 if (res == 0)
5107 /* Then overlay our cached registers on that. */
5108 regset->fill_function (regcache, buf);
5110 /* Only now do we write the register set. */
5111 #ifndef __sparc__
5112 res = ptrace (regset->set_request, pid,
5113 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5114 #else
5115 res = ptrace (regset->set_request, pid, data, nt_type);
5116 #endif
5119 if (res < 0)
5121 if (errno == EIO
5122 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
5124 /* If we get EIO on a regset, or an EINVAL and the regset is
5125 optional, do not try it again for this process mode. */
5126 disable_regset (regsets_info, regset);
5128 else if (errno == ESRCH)
5130 /* At this point, ESRCH should mean the process is
5131 already gone, in which case we simply ignore attempts
5132 to change its registers. See also the related
5133 comment in resume_one_lwp. */
5134 free (buf);
5135 return 0;
5137 else
5139 perror ("Warning: ptrace(regsets_store_inferior_registers)");
5142 else if (regset->type == GENERAL_REGS)
5143 saw_general_regs = 1;
5144 free (buf);
5146 if (saw_general_regs)
5147 return 0;
5148 else
5149 return 1;
5152 #else /* !HAVE_LINUX_REGSETS */
5154 #define use_linux_regsets 0
5155 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5156 #define regsets_store_inferior_registers(regsets_info, regcache) 1
5158 #endif
5160 /* Return 1 if register REGNO is supported by one of the regset ptrace
5161 calls or 0 if it has to be transferred individually. */
5163 static int
5164 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
5166 unsigned char mask = 1 << (regno % 8);
5167 size_t index = regno / 8;
5169 return (use_linux_regsets
5170 && (regs_info->regset_bitmap == NULL
5171 || (regs_info->regset_bitmap[index] & mask) != 0));
5174 #ifdef HAVE_LINUX_USRREGS
5176 static int
5177 register_addr (const struct usrregs_info *usrregs, int regnum)
5179 int addr;
5181 if (regnum < 0 || regnum >= usrregs->num_regs)
5182 error ("Invalid register number %d.", regnum);
5184 addr = usrregs->regmap[regnum];
5186 return addr;
5190 void
5191 linux_process_target::fetch_register (const usrregs_info *usrregs,
5192 regcache *regcache, int regno)
5194 CORE_ADDR regaddr;
5195 int i, size;
5196 char *buf;
5197 int pid;
5199 if (regno >= usrregs->num_regs)
5200 return;
5201 if (low_cannot_fetch_register (regno))
5202 return;
5204 regaddr = register_addr (usrregs, regno);
5205 if (regaddr == -1)
5206 return;
5208 size = ((register_size (regcache->tdesc, regno)
5209 + sizeof (PTRACE_XFER_TYPE) - 1)
5210 & -sizeof (PTRACE_XFER_TYPE));
5211 buf = (char *) alloca (size);
5213 pid = lwpid_of (current_thread);
5214 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5216 errno = 0;
5217 *(PTRACE_XFER_TYPE *) (buf + i) =
5218 ptrace (PTRACE_PEEKUSER, pid,
5219 /* Coerce to a uintptr_t first to avoid potential gcc warning
5220 of coercing an 8 byte integer to a 4 byte pointer. */
5221 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
5222 regaddr += sizeof (PTRACE_XFER_TYPE);
5223 if (errno != 0)
5225 /* Mark register REGNO unavailable. */
5226 supply_register (regcache, regno, NULL);
5227 return;
5231 low_supply_ptrace_register (regcache, regno, buf);
5234 void
5235 linux_process_target::store_register (const usrregs_info *usrregs,
5236 regcache *regcache, int regno)
5238 CORE_ADDR regaddr;
5239 int i, size;
5240 char *buf;
5241 int pid;
5243 if (regno >= usrregs->num_regs)
5244 return;
5245 if (low_cannot_store_register (regno))
5246 return;
5248 regaddr = register_addr (usrregs, regno);
5249 if (regaddr == -1)
5250 return;
5252 size = ((register_size (regcache->tdesc, regno)
5253 + sizeof (PTRACE_XFER_TYPE) - 1)
5254 & -sizeof (PTRACE_XFER_TYPE));
5255 buf = (char *) alloca (size);
5256 memset (buf, 0, size);
5258 low_collect_ptrace_register (regcache, regno, buf);
5260 pid = lwpid_of (current_thread);
5261 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5263 errno = 0;
5264 ptrace (PTRACE_POKEUSER, pid,
5265 /* Coerce to a uintptr_t first to avoid potential gcc warning
5266 about coercing an 8 byte integer to a 4 byte pointer. */
5267 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
5268 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
5269 if (errno != 0)
5271 /* At this point, ESRCH should mean the process is
5272 already gone, in which case we simply ignore attempts
5273 to change its registers. See also the related
5274 comment in resume_one_lwp. */
5275 if (errno == ESRCH)
5276 return;
5279 if (!low_cannot_store_register (regno))
5280 error ("writing register %d: %s", regno, safe_strerror (errno));
5282 regaddr += sizeof (PTRACE_XFER_TYPE);
5285 #endif /* HAVE_LINUX_USRREGS */
5287 void
5288 linux_process_target::low_collect_ptrace_register (regcache *regcache,
5289 int regno, char *buf)
5291 collect_register (regcache, regno, buf);
5294 void
5295 linux_process_target::low_supply_ptrace_register (regcache *regcache,
5296 int regno, const char *buf)
5298 supply_register (regcache, regno, buf);
5301 void
5302 linux_process_target::usr_fetch_inferior_registers (const regs_info *regs_info,
5303 regcache *regcache,
5304 int regno, int all)
5306 #ifdef HAVE_LINUX_USRREGS
5307 struct usrregs_info *usr = regs_info->usrregs;
5309 if (regno == -1)
5311 for (regno = 0; regno < usr->num_regs; regno++)
5312 if (all || !linux_register_in_regsets (regs_info, regno))
5313 fetch_register (usr, regcache, regno);
5315 else
5316 fetch_register (usr, regcache, regno);
5317 #endif
5320 void
5321 linux_process_target::usr_store_inferior_registers (const regs_info *regs_info,
5322 regcache *regcache,
5323 int regno, int all)
5325 #ifdef HAVE_LINUX_USRREGS
5326 struct usrregs_info *usr = regs_info->usrregs;
5328 if (regno == -1)
5330 for (regno = 0; regno < usr->num_regs; regno++)
5331 if (all || !linux_register_in_regsets (regs_info, regno))
5332 store_register (usr, regcache, regno);
5334 else
5335 store_register (usr, regcache, regno);
5336 #endif
5339 void
5340 linux_process_target::fetch_registers (regcache *regcache, int regno)
5342 int use_regsets;
5343 int all = 0;
5344 const regs_info *regs_info = get_regs_info ();
5346 if (regno == -1)
5348 if (regs_info->usrregs != NULL)
5349 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
5350 low_fetch_register (regcache, regno);
5352 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5353 if (regs_info->usrregs != NULL)
5354 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
5356 else
5358 if (low_fetch_register (regcache, regno))
5359 return;
5361 use_regsets = linux_register_in_regsets (regs_info, regno);
5362 if (use_regsets)
5363 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5364 regcache);
5365 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5366 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
5370 void
5371 linux_process_target::store_registers (regcache *regcache, int regno)
5373 int use_regsets;
5374 int all = 0;
5375 const regs_info *regs_info = get_regs_info ();
5377 if (regno == -1)
5379 all = regsets_store_inferior_registers (regs_info->regsets_info,
5380 regcache);
5381 if (regs_info->usrregs != NULL)
5382 usr_store_inferior_registers (regs_info, regcache, regno, all);
5384 else
5386 use_regsets = linux_register_in_regsets (regs_info, regno);
5387 if (use_regsets)
5388 all = regsets_store_inferior_registers (regs_info->regsets_info,
5389 regcache);
5390 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5391 usr_store_inferior_registers (regs_info, regcache, regno, 1);
5395 bool
5396 linux_process_target::low_fetch_register (regcache *regcache, int regno)
5398 return false;
5401 /* A wrapper for the read_memory target op. */
5403 static int
5404 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5406 return the_target->read_memory (memaddr, myaddr, len);
5410 /* Helper for read_memory/write_memory using /proc/PID/mem. Because
5411 we can use a single read/write call, this can be much more
5412 efficient than banging away at PTRACE_PEEKTEXT. Also, unlike
5413 PTRACE_PEEKTEXT/PTRACE_POKETEXT, this works with running threads.
5414 One an only one of READBUF and WRITEBUF is non-null. If READBUF is
5415 not null, then we're reading, otherwise we're writing. */
5417 static int
5418 proc_xfer_memory (CORE_ADDR memaddr, unsigned char *readbuf,
5419 const gdb_byte *writebuf, int len)
5421 gdb_assert ((readbuf == nullptr) != (writebuf == nullptr));
5423 process_info *proc = current_process ();
5425 int fd = proc->priv->mem_fd;
5426 if (fd == -1)
5427 return EIO;
5429 while (len > 0)
5431 int bytes;
5433 /* Use pread64/pwrite64 if available, since they save a syscall
5434 and can handle 64-bit offsets even on 32-bit platforms (for
5435 instance, SPARC debugging a SPARC64 application). But only
5436 use them if the offset isn't so high that when cast to off_t
5437 it'd be negative, as seen on SPARC64. pread64/pwrite64
5438 outright reject such offsets. lseek does not. */
5439 #ifdef HAVE_PREAD64
5440 if ((off_t) memaddr >= 0)
5441 bytes = (readbuf != nullptr
5442 ? pread64 (fd, readbuf, len, memaddr)
5443 : pwrite64 (fd, writebuf, len, memaddr));
5444 else
5445 #endif
5447 bytes = -1;
5448 if (lseek (fd, memaddr, SEEK_SET) != -1)
5449 bytes = (readbuf != nullptr
5450 ? read (fd, readbuf, len)
5451 : write (fd, writebuf, len));
5454 if (bytes < 0)
5455 return errno;
5456 else if (bytes == 0)
5458 /* EOF means the address space is gone, the whole process
5459 exited or execed. */
5460 return EIO;
5463 memaddr += bytes;
5464 if (readbuf != nullptr)
5465 readbuf += bytes;
5466 else
5467 writebuf += bytes;
5468 len -= bytes;
5471 return 0;
5475 linux_process_target::read_memory (CORE_ADDR memaddr,
5476 unsigned char *myaddr, int len)
5478 return proc_xfer_memory (memaddr, myaddr, nullptr, len);
5481 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5482 memory at MEMADDR. On failure (cannot write to the inferior)
5483 returns the value of errno. Always succeeds if LEN is zero. */
5486 linux_process_target::write_memory (CORE_ADDR memaddr,
5487 const unsigned char *myaddr, int len)
5489 if (debug_threads)
5491 /* Dump up to four bytes. */
5492 char str[4 * 2 + 1];
5493 char *p = str;
5494 int dump = len < 4 ? len : 4;
5496 for (int i = 0; i < dump; i++)
5498 sprintf (p, "%02x", myaddr[i]);
5499 p += 2;
5501 *p = '\0';
5503 threads_debug_printf ("Writing %s to 0x%08lx in process %d",
5504 str, (long) memaddr, current_process ()->pid);
5507 return proc_xfer_memory (memaddr, nullptr, myaddr, len);
5510 void
5511 linux_process_target::look_up_symbols ()
5513 #ifdef USE_THREAD_DB
5514 struct process_info *proc = current_process ();
5516 if (proc->priv->thread_db != NULL)
5517 return;
5519 thread_db_init ();
5520 #endif
5523 void
5524 linux_process_target::request_interrupt ()
5526 /* Send a SIGINT to the process group. This acts just like the user
5527 typed a ^C on the controlling terminal. */
5528 int res = ::kill (-signal_pid, SIGINT);
5529 if (res == -1)
5530 warning (_("Sending SIGINT to process group of pid %ld failed: %s"),
5531 signal_pid, safe_strerror (errno));
5534 bool
5535 linux_process_target::supports_read_auxv ()
5537 return true;
5540 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5541 to debugger memory starting at MYADDR. */
5544 linux_process_target::read_auxv (int pid, CORE_ADDR offset,
5545 unsigned char *myaddr, unsigned int len)
5547 char filename[PATH_MAX];
5548 int fd, n;
5550 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5552 fd = open (filename, O_RDONLY);
5553 if (fd < 0)
5554 return -1;
5556 if (offset != (CORE_ADDR) 0
5557 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5558 n = -1;
5559 else
5560 n = read (fd, myaddr, len);
5562 close (fd);
5564 return n;
/* Insert a breakpoint/watchpoint of TYPE at ADDR with length SIZE.
   Software breakpoints are planted as memory writes; everything else
   is delegated to the arch-specific low target.  */

int
linux_process_target::insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
				    int size, raw_breakpoint *bp)
{
  if (type == raw_bkpt_type_sw)
    return insert_memory_breakpoint (bp);
  else
    return low_insert_point (type, addr, size, bp);
}

/* Default implementation: hardware points are unsupported unless an
   arch-specific low target overrides this.  */

int
linux_process_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
					int size, raw_breakpoint *bp)
{
  /* Unsupported (see target.h).  */
  return 1;
}

/* Remove a breakpoint/watchpoint previously planted by insert_point;
   the dispatch mirrors insert_point above.  */

int
linux_process_target::remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
				    int size, raw_breakpoint *bp)
{
  if (type == raw_bkpt_type_sw)
    return remove_memory_breakpoint (bp);
  else
    return low_remove_point (type, addr, size, bp);
}

/* Default implementation; see low_insert_point above.  */

int
linux_process_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
					int size, raw_breakpoint *bp)
{
  /* Unsupported (see target.h).  */
  return 1;
}
/* Implement the stopped_by_sw_breakpoint target_ops
   method.  */

bool
linux_process_target::stopped_by_sw_breakpoint ()
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
}

/* Implement the supports_stopped_by_sw_breakpoint target_ops
   method.  */

bool
linux_process_target::supports_stopped_by_sw_breakpoint ()
{
  return true;
}

/* Implement the stopped_by_hw_breakpoint target_ops
   method.  */

bool
linux_process_target::stopped_by_hw_breakpoint ()
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
}

/* Implement the supports_stopped_by_hw_breakpoint target_ops
   method.  */

bool
linux_process_target::supports_stopped_by_hw_breakpoint ()
{
  return true;
}

/* Implement the supports_hardware_single_step target_ops method.  */

bool
linux_process_target::supports_hardware_single_step ()
{
  return true;
}

/* Report whether the current LWP last stopped because a watchpoint
   triggered.  */

bool
linux_process_target::stopped_by_watchpoint ()
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
}

/* Return the data address recorded for the current LWP's last
   watchpoint stop.  */

CORE_ADDR
linux_process_target::stopped_data_address ()
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  return lwp->stopped_data_address;
}
/* This is only used for targets that define PT_TEXT_ADDR,
   PT_DATA_ADDR and PT_TEXT_END_ADDR.  If those are not defined, supposedly
   the target has different ways of acquiring this information, like
   loadmaps.  */

bool
linux_process_target::supports_read_offsets ()
{
#ifdef SUPPORTS_READ_OFFSETS
  return true;
#else
  return false;
#endif
}
/* Under uClinux, programs are loaded at non-zero offsets, which we need
   to tell gdb about.  Returns 1 on success and fills *TEXT_P/*DATA_P,
   0 if the PTRACE_PEEKUSER calls failed.  */

int
linux_process_target::read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
{
#ifdef SUPPORTS_READ_OFFSETS
  unsigned long text, text_end, data;
  int pid = lwpid_of (current_thread);

  /* PTRACE_PEEKUSER returns the value in-band, so errno is the only
     way to detect failure; clear it first.  */
  errno = 0;

  text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
		 (PTRACE_TYPE_ARG4) 0);
  text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
		     (PTRACE_TYPE_ARG4) 0);
  data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
		 (PTRACE_TYPE_ARG4) 0);

  if (errno == 0)
    {
      /* Both text and data offsets produced at compile-time (and so
	 used by gdb) are relative to the beginning of the program,
	 with the data segment immediately following the text segment.
	 However, the actual runtime layout in memory may put the data
	 somewhere else, so when we send gdb a data base-address, we
	 use the real data base address and subtract the compile-time
	 data base-address from it (which is just the length of the
	 text segment).  BSS immediately follows data in both
	 cases.  */
      *text_p = text;
      *data_p = data - (text_end - text);

      return 1;
    }
  return 0;
#else
  gdb_assert_not_reached ("target op read_offsets not supported");
#endif
}
/* TLS address lookup requires libthread_db.  */

bool
linux_process_target::supports_get_tls_address ()
{
#ifdef USE_THREAD_DB
  return true;
#else
  return false;
#endif
}

/* Look up the address of the TLS variable at OFFSET within module
   LOAD_MODULE for THREAD, storing it in *ADDRESS.  Delegates to
   thread_db when built with it; otherwise returns -1.  */

int
linux_process_target::get_tls_address (thread_info *thread,
				       CORE_ADDR offset,
				       CORE_ADDR load_module,
				       CORE_ADDR *address)
{
#ifdef USE_THREAD_DB
  return thread_db_get_tls_address (thread, offset, load_module, address);
#else
  return -1;
#endif
}
bool
linux_process_target::supports_qxfer_osdata ()
{
  return true;
}

/* Handle qXfer:osdata:read by delegating to the common Linux
   implementation.  WRITEBUF is unused; osdata is read-only.  */

int
linux_process_target::qxfer_osdata (const char *annex,
				    unsigned char *readbuf,
				    unsigned const char *writebuf,
				    CORE_ADDR offset, int len)
{
  return linux_common_xfer_osdata (annex, readbuf, offset, len);
}
/* Convert between the siginfo layout ptrace uses and the layout of the
   inferior's architecture.  DIRECTION == 1 copies INF_SIGINFO into
   SIGINFO; any other value copies the opposite way.  The low target
   gets first crack (needed e.g. for 32-bit inferiors under a 64-bit
   gdbserver); if it declines, the layouts match and a plain memcpy
   suffices.  */

void
linux_process_target::siginfo_fixup (siginfo_t *siginfo,
				     gdb_byte *inf_siginfo, int direction)
{
  bool done = low_siginfo_fixup (siginfo, inf_siginfo, direction);

  /* If there was no callback, or the callback didn't do anything,
     then just do a straight memcpy.  */
  if (!done)
    {
      if (direction == 1)
	memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
      else
	memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
    }
}

/* Default implementation: no conversion performed; arch-specific low
   targets override this when layouts differ.  */

bool
linux_process_target::low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
					 int direction)
{
  return false;
}
bool
linux_process_target::supports_qxfer_siginfo ()
{
  return true;
}

/* Transfer the current LWP's siginfo: copy up to LEN bytes at byte
   OFFSET into READBUF, or from WRITEBUF into the inferior's siginfo.
   Returns the number of bytes transferred, or -1 on error.  */

int
linux_process_target::qxfer_siginfo (const char *annex,
				     unsigned char *readbuf,
				     unsigned const char *writebuf,
				     CORE_ADDR offset, int len)
{
  int pid;
  siginfo_t siginfo;
  gdb_byte inf_siginfo[sizeof (siginfo_t)];

  if (current_thread == NULL)
    return -1;

  pid = lwpid_of (current_thread);

  threads_debug_printf ("%s siginfo for lwp %d.",
			readbuf != NULL ? "Reading" : "Writing",
			pid);

  if (offset >= sizeof (siginfo))
    return -1;

  /* Even for a write we fetch the current siginfo first, so a partial
     write only modifies the requested byte range.  */
  if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
    return -1;

  /* When GDBSERVER is built as a 64-bit application, ptrace writes into
     SIGINFO an object with 64-bit layout.  Since debugging a 32-bit
     inferior with a 64-bit GDBSERVER should look the same as debugging it
     with a 32-bit GDBSERVER, we need to convert it.  */
  siginfo_fixup (&siginfo, inf_siginfo, 0);

  if (offset + len > sizeof (siginfo))
    len = sizeof (siginfo) - offset;

  if (readbuf != NULL)
    memcpy (readbuf, inf_siginfo + offset, len);
  else
    {
      memcpy (inf_siginfo + offset, writebuf, len);

      /* Convert back to ptrace layout before flushing it out.  */
      siginfo_fixup (&siginfo, inf_siginfo, 1);

      if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
	return -1;
    }

  return len;
}
/* SIGCHLD handler that serves two purposes: In non-stop/async mode,
   so we notice when children change state; as the handler for the
   sigsuspend in my_waitpid.  Runs in signal context, so only
   async-signal-safe calls are made, and errno is preserved.  */

static void
sigchld_handler (int signo)
{
  int old_errno = errno;

  if (debug_threads)
    {
      do
	{
	  /* Use the async signal safe debug function.  */
	  if (debug_write ("sigchld_handler\n",
			   sizeof ("sigchld_handler\n") - 1) < 0)
	    break; /* just ignore */
	} while (0);
    }

  if (target_is_async_p ())
    async_file_mark (); /* trigger a linux_wait */

  errno = old_errno;
}
bool
linux_process_target::supports_non_stop ()
{
  return true;
}

/* Enable or disable async (non-stop) mode per ENABLE.  Returns the
   previous state.  SIGCHLD is blocked around the transition so the
   handler cannot observe a half-set-up event pipe.  */

bool
linux_process_target::async (bool enable)
{
  bool previous = target_is_async_p ();

  threads_debug_printf ("async (%d), previous=%d",
			enable, previous);

  if (previous != enable)
    {
      sigset_t mask;
      sigemptyset (&mask);
      sigaddset (&mask, SIGCHLD);

      gdb_sigmask (SIG_BLOCK, &mask, NULL);

      if (enable)
	{
	  if (!linux_event_pipe.open_pipe ())
	    {
	      /* Unblock before bailing out on failure.  */
	      gdb_sigmask (SIG_UNBLOCK, &mask, NULL);

	      warning ("creating event pipe failed.");
	      return previous;
	    }

	  /* Register the event loop handler.  */
	  add_file_handler (linux_event_pipe.event_fd (),
			    handle_target_event, NULL,
			    "linux-low");

	  /* Always trigger a linux_wait.  */
	  async_file_mark ();
	}
      else
	{
	  delete_file_handler (linux_event_pipe.event_fd ());

	  linux_event_pipe.close_pipe ();
	}

      gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
    }

  return previous;
}
5921 linux_process_target::start_non_stop (bool nonstop)
5923 /* Register or unregister from event-loop accordingly. */
5924 target_async (nonstop);
5926 if (target_is_async_p () != (nonstop != false))
5927 return -1;
5929 return 0;
bool
linux_process_target::supports_multi_process ()
{
  return true;
}

/* Check if fork events are supported.  */

bool
linux_process_target::supports_fork_events ()
{
  return true;
}

/* Check if vfork events are supported.  */

bool
linux_process_target::supports_vfork_events ()
{
  return true;
}

/* Return the set of supported thread options.  */

gdb_thread_options
linux_process_target::supported_thread_options ()
{
  return GDB_THREAD_OPTION_CLONE | GDB_THREAD_OPTION_EXIT;
}

/* Check if exec events are supported.  */

bool
linux_process_target::supports_exec_events ()
{
  return true;
}
/* Target hook for 'handle_new_gdb_connection'.  Causes a reset of the
   ptrace flags for all inferiors.  This is in case the new GDB connection
   doesn't support the same set of events that the previous one did.  */

void
linux_process_target::handle_new_gdb_connection ()
{
  /* Request that all the lwps reset their ptrace options.  */
  for_each_thread ([] (thread_info *thread)
    {
      struct lwp_info *lwp = get_thread_lwp (thread);

      if (!lwp->stopped)
	{
	  /* Stop the lwp so we can modify its ptrace options.  The
	     flags are applied later, once the stop is processed.  */
	  lwp->must_set_ptrace_flags = 1;
	  linux_stop_lwp (lwp);
	}
      else
	{
	  /* Already stopped; go ahead and set the ptrace options.  */
	  struct process_info *proc = find_process_pid (pid_of (thread));
	  int options = linux_low_ptrace_options (proc->attached);

	  linux_enable_event_reporting (lwpid_of (thread), options);
	  lwp->must_set_ptrace_flags = 0;
	}
    });
}
/* Handle a "monitor" command from GDB; passed on to thread_db when
   available, otherwise reported as unhandled (0).  */

int
linux_process_target::handle_monitor_command (char *mon)
{
#ifdef USE_THREAD_DB
  return thread_db_handle_monitor_command (mon);
#else
  return 0;
#endif
}

/* Return the core PTID last ran on; delegates to the common Linux
   implementation.  */

int
linux_process_target::core_of_thread (ptid_t ptid)
{
  return linux_common_core_of_thread (ptid);
}
bool
linux_process_target::supports_disable_randomization ()
{
  return true;
}

bool
linux_process_target::supports_agent ()
{
  return true;
}

/* Range stepping works either via software single-step or via the
   arch-specific low target's hardware support.  */

bool
linux_process_target::supports_range_stepping ()
{
  if (supports_software_single_step ())
    return true;

  return low_supports_range_stepping ();
}

/* Default implementation: no hardware range stepping; arch-specific
   low targets override this.  */

bool
linux_process_target::low_supports_range_stepping ()
{
  return false;
}
bool
linux_process_target::supports_pid_to_exec_file ()
{
  return true;
}

/* Return the filename of process PID's executable, via /proc.  */

const char *
linux_process_target::pid_to_exec_file (int pid)
{
  return linux_proc_pid_to_exec_file (pid);
}
bool
linux_process_target::supports_multifs ()
{
  return true;
}

/* open(2) relative to process PID's filesystem (mount) namespace,
   with close-on-exec set.  */

int
linux_process_target::multifs_open (int pid, const char *filename,
				    int flags, mode_t mode)
{
  return linux_mntns_open_cloexec (pid, filename, flags, mode);
}

/* unlink(2) relative to process PID's filesystem namespace.  */

int
linux_process_target::multifs_unlink (int pid, const char *filename)
{
  return linux_mntns_unlink (pid, filename);
}

/* readlink(2) relative to process PID's filesystem namespace.  */

ssize_t
linux_process_target::multifs_readlink (int pid, const char *filename,
					char *buf, size_t bufsiz)
{
  return linux_mntns_readlink (pid, filename, buf, bufsiz);
}
#if defined PT_GETDSBT || defined PTRACE_GETFDPIC
/* One loadable segment, as reported by the kernel's loadmap ptrace
   request on no-MMU (DSBT/FDPIC) targets.  */
struct target_loadseg
{
  /* Core address to which the segment is mapped.  */
  Elf32_Addr addr;
  /* VMA recorded in the program header.  */
  Elf32_Addr p_vaddr;
  /* Size of this segment in memory.  */
  Elf32_Word p_memsz;
};

# if defined PT_GETDSBT
struct target_loadmap
{
  /* Protocol version number, must be zero.  */
  Elf32_Word version;
  /* Pointer to the DSBT table, its size, and the DSBT index.  */
  unsigned *dsbt_table;
  unsigned dsbt_size, dsbt_index;
  /* Number of segments in this map.  */
  Elf32_Word nsegs;
  /* The actual memory map.  */
  struct target_loadseg segs[/*nsegs*/];
};
#  define LINUX_LOADMAP		PT_GETDSBT
#  define LINUX_LOADMAP_EXEC	PTRACE_GETDSBT_EXEC
#  define LINUX_LOADMAP_INTERP	PTRACE_GETDSBT_INTERP
# else
struct target_loadmap
{
  /* Protocol version number, must be zero.  */
  Elf32_Half version;
  /* Number of segments in this map.  */
  Elf32_Half nsegs;
  /* The actual memory map.  */
  struct target_loadseg segs[/*nsegs*/];
};
#  define LINUX_LOADMAP		PTRACE_GETFDPIC
#  define LINUX_LOADMAP_EXEC	PTRACE_GETFDPIC_EXEC
#  define LINUX_LOADMAP_INTERP	PTRACE_GETFDPIC_INTERP
# endif
/* Only compiled in on DSBT/FDPIC targets (see #if above).  */

bool
linux_process_target::supports_read_loadmap ()
{
  return true;
}
6130 linux_process_target::read_loadmap (const char *annex, CORE_ADDR offset,
6131 unsigned char *myaddr, unsigned int len)
6133 int pid = lwpid_of (current_thread);
6134 int addr = -1;
6135 struct target_loadmap *data = NULL;
6136 unsigned int actual_length, copy_length;
6138 if (strcmp (annex, "exec") == 0)
6139 addr = (int) LINUX_LOADMAP_EXEC;
6140 else if (strcmp (annex, "interp") == 0)
6141 addr = (int) LINUX_LOADMAP_INTERP;
6142 else
6143 return -1;
6145 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
6146 return -1;
6148 if (data == NULL)
6149 return -1;
6151 actual_length = sizeof (struct target_loadmap)
6152 + sizeof (struct target_loadseg) * data->nsegs;
6154 if (offset < 0 || offset > actual_length)
6155 return -1;
6157 copy_length = actual_length - offset < len ? actual_length - offset : len;
6158 memcpy (myaddr, (char *) data + offset, copy_length);
6159 return copy_length;
6161 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
/* Syscall catchpoints are available iff the arch-specific low target
   supports them.  */

bool
linux_process_target::supports_catch_syscall ()
{
  return low_supports_catch_syscall ();
}

/* Default implementation of 'low_supports_catch_syscall'.  */

bool
linux_process_target::low_supports_catch_syscall ()
{
  return false;
}
/* Fetch the PC from REGCACHE, or 0 when the low target does not
   support breakpoints (and thus provides no low_get_pc).  */

CORE_ADDR
linux_process_target::read_pc (regcache *regcache)
{
  if (!low_supports_breakpoints ())
    return 0;

  return low_get_pc (regcache);
}

/* Store PC into REGCACHE; only valid when the low target supports
   breakpoints.  */

void
linux_process_target::write_pc (regcache *regcache, CORE_ADDR pc)
{
  gdb_assert (low_supports_breakpoints ());

  low_set_pc (regcache, pc);
}
bool
linux_process_target::supports_thread_stopped ()
{
  return true;
}

/* Return true if THREAD's LWP is currently stopped.  */

bool
linux_process_target::thread_stopped (thread_info *thread)
{
  return get_thread_lwp (thread)->stopped;
}
6204 bool
6205 linux_process_target::any_resumed ()
6207 bool any_resumed;
6209 auto status_pending_p_any = [&] (thread_info *thread)
6211 return status_pending_p_callback (thread, minus_one_ptid);
6214 auto not_stopped = [&] (thread_info *thread)
6216 return not_stopped_callback (thread, minus_one_ptid);
6219 /* Find a resumed LWP, if any. */
6220 if (find_thread (status_pending_p_any) != NULL)
6221 any_resumed = 1;
6222 else if (find_thread (not_stopped) != NULL)
6223 any_resumed = 1;
6224 else
6225 any_resumed = 0;
6227 return any_resumed;
/* This exposes stop-all-threads functionality to other modules.  */

void
linux_process_target::pause_all (bool freeze)
{
  stop_all_lwps (freeze, NULL);
}

/* This exposes unstop-all-threads functionality to other gdbserver
   modules.  */

void
linux_process_target::unpause_all (bool unfreeze)
{
  unstop_all_lwps (unfreeze, NULL);
}
/* Extract &phdr and num_phdr in the inferior.  Return 0 on success,
   1 if /proc/PID/auxv could not be opened, 2 if AT_PHDR/AT_PHNUM were
   missing from the vector.  */

static int
get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
			       CORE_ADDR *phdr_memaddr, int *num_phdr)
{
  char filename[PATH_MAX];
  int fd;
  const int auxv_size = is_elf64
    ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
  char buf[sizeof (Elf64_auxv_t)];  /* The larger of the two.  */

  xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);

  fd = open (filename, O_RDONLY);
  if (fd < 0)
    return 1;

  /* Scan auxv entries until both values are found (or EOF).  */
  *phdr_memaddr = 0;
  *num_phdr = 0;
  while (read (fd, buf, auxv_size) == auxv_size
	 && (*phdr_memaddr == 0 || *num_phdr == 0))
    {
      if (is_elf64)
	{
	  Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;

	  switch (aux->a_type)
	    {
	    case AT_PHDR:
	      *phdr_memaddr = aux->a_un.a_val;
	      break;
	    case AT_PHNUM:
	      *num_phdr = aux->a_un.a_val;
	      break;
	    }
	}
      else
	{
	  Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;

	  switch (aux->a_type)
	    {
	    case AT_PHDR:
	      *phdr_memaddr = aux->a_un.a_val;
	      break;
	    case AT_PHNUM:
	      *num_phdr = aux->a_un.a_val;
	      break;
	    }
	}
    }

  close (fd);

  if (*phdr_memaddr == 0 || *num_phdr == 0)
    {
      warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
	       "phdr_memaddr = %ld, phdr_num = %d",
	       (long) *phdr_memaddr, *num_phdr);
      return 2;
    }

  return 0;
}
/* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present.  */

static CORE_ADDR
get_dynamic (const int pid, const int is_elf64)
{
  CORE_ADDR phdr_memaddr, relocation;
  int num_phdr, i;
  unsigned char *phdr_buf;
  const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);

  if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
    return 0;

  gdb_assert (num_phdr < 100);  /* Basic sanity check.  */
  phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);

  if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
    return 0;

  /* Compute relocation: it is expected to be 0 for "regular" executables,
     non-zero for PIE ones.  Derived from PT_PHDR: runtime address of the
     phdrs minus their link-time VMA.  */
  relocation = -1;
  for (i = 0; relocation == -1 && i < num_phdr; i++)
    if (is_elf64)
      {
	Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);

	if (p->p_type == PT_PHDR)
	  relocation = phdr_memaddr - p->p_vaddr;
      }
    else
      {
	Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);

	if (p->p_type == PT_PHDR)
	  relocation = phdr_memaddr - p->p_vaddr;
      }

  if (relocation == -1)
    {
      /* PT_PHDR is optional, but necessary for PIE in general.  Fortunately
	 any real world executables, including PIE executables, have always
	 PT_PHDR present.  PT_PHDR is not present in some shared libraries or
	 in fpc (Free Pascal 2.4) binaries but neither of those have a need for
	 or present DT_DEBUG anyway (fpc binaries are statically linked).

	 Therefore if there exists DT_DEBUG there is always also PT_PHDR.

	 GDB could find RELOCATION also from AT_ENTRY - e_entry.  */
      return 0;
    }

  /* Second pass: locate PT_DYNAMIC and relocate its VMA.  */
  for (i = 0; i < num_phdr; i++)
    {
      if (is_elf64)
	{
	  Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);

	  if (p->p_type == PT_DYNAMIC)
	    return p->p_vaddr + relocation;
	}
      else
	{
	  Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);

	  if (p->p_type == PT_DYNAMIC)
	    return p->p_vaddr + relocation;
	}
    }

  return 0;
}
/* Return &_r_debug in the inferior, or -1 if not present.  Return value
   can be 0 if the inferior does not yet have the library list initialized.
   We look for DT_MIPS_RLD_MAP first.  MIPS executables use this instead of
   DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too.  */

static CORE_ADDR
get_r_debug (const int pid, const int is_elf64)
{
  CORE_ADDR dynamic_memaddr;
  const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
  unsigned char buf[sizeof (Elf64_Dyn)];  /* The larger of the two.  */
  CORE_ADDR map = -1;

  dynamic_memaddr = get_dynamic (pid, is_elf64);
  if (dynamic_memaddr == 0)
    return map;

  /* Walk the .dynamic section one entry at a time until DT_NULL.  */
  while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
    {
      if (is_elf64)
	{
	  Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
#if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
	  /* Union so the pointer read from the inferior can be
	     reinterpreted without aliasing issues.  */
	  union
	    {
	      Elf64_Xword map;
	      unsigned char buf[sizeof (Elf64_Xword)];
	    }
	  rld_map;
#endif
#ifdef DT_MIPS_RLD_MAP
	  if (dyn->d_tag == DT_MIPS_RLD_MAP)
	    {
	      if (linux_read_memory (dyn->d_un.d_val,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP */
#ifdef DT_MIPS_RLD_MAP_REL
	  if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
	    {
	      /* DT_MIPS_RLD_MAP_REL is relative to the dynamic entry's
		 own address.  */
	      if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP_REL */

	  if (dyn->d_tag == DT_DEBUG && map == -1)
	    map = dyn->d_un.d_val;

	  if (dyn->d_tag == DT_NULL)
	    break;
	}
      else
	{
	  Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
#if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
	  union
	    {
	      Elf32_Word map;
	      unsigned char buf[sizeof (Elf32_Word)];
	    }
	  rld_map;
#endif
#ifdef DT_MIPS_RLD_MAP
	  if (dyn->d_tag == DT_MIPS_RLD_MAP)
	    {
	      if (linux_read_memory (dyn->d_un.d_val,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP */
#ifdef DT_MIPS_RLD_MAP_REL
	  if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
	    {
	      if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP_REL */

	  if (dyn->d_tag == DT_DEBUG && map == -1)
	    map = dyn->d_un.d_val;

	  if (dyn->d_tag == DT_NULL)
	    break;
	}

      dynamic_memaddr += dyn_size;
    }

  return map;
}
/* Read one pointer of PTR_SIZE bytes from MEMADDR in the inferior into
   *PTR.  Returns 0 on success (the linux_read_memory convention).  */

static int
read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
{
  int ret;

  /* Go through a union so this works on either big or little endian
     hosts, when the inferior's pointer size is smaller than the size
     of CORE_ADDR.  It is assumed the inferior's endianness is the
     same of the superior's.  */
  union
  {
    CORE_ADDR core_addr;
    unsigned int ui;
    unsigned char uc;
  } addr;

  ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
  if (ret == 0)
    {
      if (ptr_size == sizeof (CORE_ADDR))
	*ptr = addr.core_addr;
      else if (ptr_size == sizeof (unsigned int))
	*ptr = addr.ui;
      else
	gdb_assert_not_reached ("unhandled pointer size");
    }
  return ret;
}
bool
linux_process_target::supports_qxfer_libraries_svr4 ()
{
  return true;
}
/* Field offsets within the inferior's r_debug and link_map structures,
   which differ between the 32-bit and 64-bit SVR4 layouts.  */

struct link_map_offsets
{
  /* Offset and size of r_debug.r_version.  */
  int r_version_offset;

  /* Offset and size of r_debug.r_map.  */
  int r_map_offset;

  /* Offset of r_debug_extended.r_next.  */
  int r_next_offset;

  /* Offset to l_addr field in struct link_map.  */
  int l_addr_offset;

  /* Offset to l_name field in struct link_map.  */
  int l_name_offset;

  /* Offset to l_ld field in struct link_map.  */
  int l_ld_offset;

  /* Offset to l_next field in struct link_map.  */
  int l_next_offset;

  /* Offset to l_prev field in struct link_map.  */
  int l_prev_offset;
};

static const link_map_offsets lmo_32bit_offsets =
  {
    0,     /* r_version offset.  */
    4,     /* r_debug.r_map offset.  */
    20,    /* r_debug_extended.r_next.  */
    0,     /* l_addr offset in link_map.  */
    4,     /* l_name offset in link_map.  */
    8,     /* l_ld offset in link_map.  */
    12,    /* l_next offset in link_map.  */
    16     /* l_prev offset in link_map.  */
  };

static const link_map_offsets lmo_64bit_offsets =
  {
    0,     /* r_version offset.  */
    8,     /* r_debug.r_map offset.  */
    40,    /* r_debug_extended.r_next.  */
    0,     /* l_addr offset in link_map.  */
    8,     /* l_name offset in link_map.  */
    16,    /* l_ld offset in link_map.  */
    24,    /* l_next offset in link_map.  */
    32     /* l_prev offset in link_map.  */
  };
/* Get the loaded shared libraries from one namespace, appending one
   <library> XML element per named entry to DOCUMENT.  LM_ADDR is the
   first link_map entry; LM_PREV is its expected l_prev (used to
   detect list corruption).  */

static void
read_link_map (std::string &document, CORE_ADDR lmid, CORE_ADDR lm_addr,
	       CORE_ADDR lm_prev, int ptr_size, const link_map_offsets *lmo)
{
  CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;

  /* Stop at the end of the list or on the first unreadable entry.  */
  while (lm_addr
	 && read_one_ptr (lm_addr + lmo->l_name_offset,
			  &l_name, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_addr_offset,
			  &l_addr, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_ld_offset,
			  &l_ld, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_prev_offset,
			  &l_prev, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_next_offset,
			  &l_next, ptr_size) == 0)
    {
      unsigned char libname[PATH_MAX];

      if (lm_prev != l_prev)
	{
	  warning ("Corrupted shared library list: 0x%s != 0x%s",
		   paddress (lm_prev), paddress (l_prev));
	  break;
	}

      /* Not checking for error because reading may stop before we've got
	 PATH_MAX worth of characters.  */
      libname[0] = '\0';
      linux_read_memory (l_name, libname, sizeof (libname) - 1);
      libname[sizeof (libname) - 1] = '\0';
      if (libname[0] != '\0')
	{
	  string_appendf (document, "<library name=\"");
	  xml_escape_text_append (document, (char *) libname);
	  string_appendf (document, "\" lm=\"0x%s\" l_addr=\"0x%s\" "
			  "l_ld=\"0x%s\" lmid=\"0x%s\"/>",
			  paddress (lm_addr), paddress (l_addr),
			  paddress (l_ld), paddress (lmid));
	}

      lm_prev = lm_addr;
      lm_addr = l_next;
    }
}
/* Construct qXfer:libraries-svr4:read reply.  ANNEX may carry
   "lmid=", "start=" and "prev=" hex arguments separated by ';'.
   Copies up to LEN bytes of the XML document, starting at OFFSET,
   into READBUF.  Returns the byte count, -1 on error, -2 for an
   unsupported write request.  */

int
linux_process_target::qxfer_libraries_svr4 (const char *annex,
					    unsigned char *readbuf,
					    unsigned const char *writebuf,
					    CORE_ADDR offset, int len)
{
  struct process_info_private *const priv = current_process ()->priv;
  char filename[PATH_MAX];
  int pid, is_elf64;
  unsigned int machine;
  CORE_ADDR lmid = 0, lm_addr = 0, lm_prev = 0;

  if (writebuf != NULL)
    return -2;
  if (readbuf == NULL)
    return -1;

  /* Pick 32/64-bit link_map offsets based on the executable's ELF
     class.  */
  pid = lwpid_of (current_thread);
  xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
  is_elf64 = elf_64_file_p (filename, &machine);
  const link_map_offsets *lmo;
  int ptr_size;
  if (is_elf64)
    {
      lmo = &lmo_64bit_offsets;
      ptr_size = 8;
    }
  else
    {
      lmo = &lmo_32bit_offsets;
      ptr_size = 4;
    }

  /* Parse "name=value;" pairs out of the annex.  Unknown names are
     skipped.  */
  while (annex[0] != '\0')
    {
      const char *sep;
      CORE_ADDR *addrp;
      int name_len;

      sep = strchr (annex, '=');
      if (sep == NULL)
	break;

      name_len = sep - annex;
      if (name_len == 4 && startswith (annex, "lmid"))
	addrp = &lmid;
      else if (name_len == 5 && startswith (annex, "start"))
	addrp = &lm_addr;
      else if (name_len == 4 && startswith (annex, "prev"))
	addrp = &lm_prev;
      else
	{
	  annex = strchr (sep, ';');
	  if (annex == NULL)
	    break;
	  annex++;
	  continue;
	}

      annex = decode_address_to_semicolon (addrp, sep + 1);
    }

  std::string document = "<library-list-svr4 version=\"1.0\"";

  /* When the starting LM_ADDR is passed in the annex, only traverse that
     namespace, which is assumed to be identified by LMID.

     Otherwise, start with R_DEBUG and traverse all namespaces we find.  */
  if (lm_addr != 0)
    {
      document += ">";
      read_link_map (document, lmid, lm_addr, lm_prev, ptr_size, lmo);
    }
  else
    {
      if (lm_prev != 0)
	warning ("ignoring prev=0x%s without start", paddress (lm_prev));

      /* We could interpret LMID as 'provide only the libraries for this
	 namespace' but GDB is currently only providing lmid, start, and
	 prev, or nothing.  */
      if (lmid != 0)
	warning ("ignoring lmid=0x%s without start", paddress (lmid));

      /* The r_debug address is cached per-process once found.  */
      CORE_ADDR r_debug = priv->r_debug;
      if (r_debug == 0)
	r_debug = priv->r_debug = get_r_debug (pid, is_elf64);

      /* We failed to find DT_DEBUG.  Such situation will not change
	 for this inferior - do not retry it.  Report it to GDB as
	 E01, see for the reasons at the GDB solib-svr4.c side.  */
      if (r_debug == (CORE_ADDR) -1)
	return -1;

      /* Terminate the header if we end up with an empty list.  */
      if (r_debug == 0)
	document += ">";

      while (r_debug != 0)
	{
	  int r_version = 0;
	  if (linux_read_memory (r_debug + lmo->r_version_offset,
				 (unsigned char *) &r_version,
				 sizeof (r_version)) != 0)
	    {
	      warning ("unable to read r_version from 0x%s",
		       paddress (r_debug + lmo->r_version_offset));
	      break;
	    }

	  if (r_version < 1)
	    {
	      warning ("unexpected r_debug version %d", r_version);
	      break;
	    }

	  if (read_one_ptr (r_debug + lmo->r_map_offset, &lm_addr,
			    ptr_size) != 0)
	    {
	      warning ("unable to read r_map from 0x%s",
		       paddress (r_debug + lmo->r_map_offset));
	      break;
	    }

	  /* We read the entire namespace.  */
	  lm_prev = 0;

	  /* The first entry corresponds to the main executable unless the
	     dynamic loader was loaded late by a static executable.  But
	     in such case the main executable does not have PT_DYNAMIC
	     present and we would not have gotten here.  */
	  if (r_debug == priv->r_debug)
	    {
	      if (lm_addr != 0)
		string_appendf (document, " main-lm=\"0x%s\">",
				paddress (lm_addr));
	      else
		document += ">";

	      lm_prev = lm_addr;
	      if (read_one_ptr (lm_addr + lmo->l_next_offset,
				&lm_addr, ptr_size) != 0)
		{
		  warning ("unable to read l_next from 0x%s",
			   paddress (lm_addr + lmo->l_next_offset));
		  break;
		}
	    }

	  read_link_map (document, r_debug, lm_addr, lm_prev, ptr_size, lmo);

	  /* r_debug_extended.r_next only exists from version 2 on.  */
	  if (r_version < 2)
	    break;

	  if (read_one_ptr (r_debug + lmo->r_next_offset, &r_debug,
			    ptr_size) != 0)
	    {
	      warning ("unable to read r_next from 0x%s",
		       paddress (r_debug + lmo->r_next_offset));
	      break;
	    }
	}
    }

  document += "</library-list-svr4>";

  /* Serve the requested [OFFSET, OFFSET+LEN) window of the document.  */
  int document_len = document.length ();
  if (offset < document_len)
    document_len -= offset;
  else
    document_len = 0;
  if (len > document_len)
    len = document_len;

  memcpy (readbuf, document.data () + offset, len);

  return len;
}
#ifdef HAVE_LINUX_BTRACE

/* Branch tracing is compiled in; report it as supported.  */

bool
linux_process_target::supports_btrace ()
{
  return true;
}

/* Enable branch tracing for thread TP according to CONF.  */

btrace_target_info *
linux_process_target::enable_btrace (thread_info *tp,
				     const btrace_config *conf)
{
  return linux_enable_btrace (tp->id, conf);
}

/* See to_disable_btrace target method.  */

int
linux_process_target::disable_btrace (btrace_target_info *tinfo)
{
  enum btrace_error err;

  err = linux_disable_btrace (tinfo);
  return (err == BTRACE_ERR_NONE ? 0 : -1);
}
/* Encode an Intel Processor Trace configuration as a <pt-config> XML
   element appended to *BUFFER.  Only the Intel vendor is encoded;
   other vendors produce an empty element.  */

static void
linux_low_encode_pt_config (std::string *buffer,
			    const struct btrace_data_pt_config *config)
{
  *buffer += "<pt-config>\n";

  switch (config->cpu.vendor)
    {
    case CV_INTEL:
      string_xml_appendf (*buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
			  "model=\"%u\" stepping=\"%u\"/>\n",
			  config->cpu.family, config->cpu.model,
			  config->cpu.stepping);
      break;

    default:
      break;
    }

  *buffer += "</pt-config>\n";
}
6857 /* Encode a raw buffer. */
6859 static void
6860 linux_low_encode_raw (std::string *buffer, const gdb_byte *data,
6861 unsigned int size)
6863 if (size == 0)
6864 return;
6866 /* We use hex encoding - see gdbsupport/rsp-low.h. */
6867 *buffer += "<raw>\n";
6869 while (size-- > 0)
6871 char elem[2];
6873 elem[0] = tohex ((*data >> 4) & 0xf);
6874 elem[1] = tohex (*data++ & 0xf);
6876 buffer->append (elem, 2);
6879 *buffer += "</raw>\n";
/* See to_read_btrace target method.  Reads branch trace data of TYPE
   into *BUFFER as XML (or an "E." error string).  Returns 0 on
   success, -1 on error.  */

int
linux_process_target::read_btrace (btrace_target_info *tinfo,
				   std::string *buffer,
				   enum btrace_read_type type)
{
  struct btrace_data btrace;
  enum btrace_error err;

  err = linux_read_btrace (&btrace, tinfo, type);
  if (err != BTRACE_ERR_NONE)
    {
      if (err == BTRACE_ERR_OVERFLOW)
	*buffer += "E.Overflow.";
      else
	*buffer += "E.Generic Error.";

      return -1;
    }

  switch (btrace.format)
    {
    case BTRACE_FORMAT_NONE:
      *buffer += "E.No Trace.";
      return -1;

    case BTRACE_FORMAT_BTS:
      *buffer += "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n";
      *buffer += "<btrace version=\"1.0\">\n";

      /* One <block> element per recorded branch block.  */
      for (const btrace_block &block : *btrace.variant.bts.blocks)
	string_xml_appendf (*buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
			    paddress (block.begin), paddress (block.end));

      *buffer += "</btrace>\n";
      break;

    case BTRACE_FORMAT_PT:
      *buffer += "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n";
      *buffer += "<btrace version=\"1.0\">\n";
      *buffer += "<pt>\n";

      linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);

      linux_low_encode_raw (buffer, btrace.variant.pt.data,
			    btrace.variant.pt.size);

      *buffer += "</pt>\n";
      *buffer += "</btrace>\n";
      break;

    default:
      *buffer += "E.Unsupported Trace Format.";
      return -1;
    }

  return 0;
}
6942 /* See to_btrace_conf target method. */
6945 linux_process_target::read_btrace_conf (const btrace_target_info *tinfo,
6946 std::string *buffer)
6948 const struct btrace_config *conf;
6950 *buffer += "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n";
6951 *buffer += "<btrace-conf version=\"1.0\">\n";
6953 conf = linux_btrace_conf (tinfo);
6954 if (conf != NULL)
6956 switch (conf->format)
6958 case BTRACE_FORMAT_NONE:
6959 break;
6961 case BTRACE_FORMAT_BTS:
6962 string_xml_appendf (*buffer, "<bts");
6963 string_xml_appendf (*buffer, " size=\"0x%x\"", conf->bts.size);
6964 string_xml_appendf (*buffer, " />\n");
6965 break;
6967 case BTRACE_FORMAT_PT:
6968 string_xml_appendf (*buffer, "<pt");
6969 string_xml_appendf (*buffer, " size=\"0x%x\"", conf->pt.size);
6970 string_xml_appendf (*buffer, "/>\n");
6971 break;
6975 *buffer += "</btrace-conf>\n";
6976 return 0;
6978 #endif /* HAVE_LINUX_BTRACE */
/* See nat/linux-nat.h.  */

ptid_t
current_lwp_ptid (void)
{
  /* In gdbserver, the current thread always corresponds to the
     current LWP, so its ptid is the answer.  */
  return ptid_of (current_thread);
}
6988 /* A helper function that copies NAME to DEST, replacing non-printable
6989 characters with '?'. Returns the original DEST as a
6990 convenience. */
6992 static const char *
6993 replace_non_ascii (char *dest, const char *name)
6995 const char *result = dest;
6996 while (*name != '\0')
6998 if (!ISPRINT (*name))
6999 *dest++ = '?';
7000 else
7001 *dest++ = *name;
7002 ++name;
7004 *dest = '\0';
7005 return result;
/* Return a human-readable name for THREAD, converted to valid UTF-8,
   or nullptr if no name is available.  The result points to static
   storage and is overwritten by the next call.  */

const char *
linux_process_target::thread_name (ptid_t thread)
{
  static char dest[100];

  const char *name = linux_proc_tid_get_name (thread);
  if (name == nullptr)
    return nullptr;

  /* Linux limits the comm file to 16 bytes (including the trailing
     \0.  If the program or thread name is set when using a multi-byte
     encoding, this might cause it to be truncated mid-character.  In
     this situation, sending the truncated form in an XML <thread>
     response will cause a parse error in gdb.  So, instead convert
     from the locale's encoding (we can't be sure this is the correct
     encoding, but it's as good a guess as we have) to UTF-8, but in a
     way that ignores any encoding errors.  See PR remote/30618.  */
  const char *cset = nl_langinfo (CODESET);
  iconv_t handle = iconv_open ("UTF-8//IGNORE", cset);
  if (handle == (iconv_t) -1)
    /* No converter available; fall back to replacing any
       non-printable bytes with '?'.  */
    return replace_non_ascii (dest, name);

  size_t inbytes = strlen (name);
  /* iconv's prototype takes a non-const input pointer even though it
     does not modify the input.  */
  char *inbuf = const_cast<char *> (name);
  size_t outbytes = sizeof (dest);
  char *outbuf = dest;
  size_t result = iconv (handle, &inbuf, &inbytes, &outbuf, &outbytes);

  if (result == (size_t) -1)
    {
      if (errno == E2BIG)
	/* Output buffer exhausted; truncate, leaving room for the
	   terminating NUL below.  */
	outbuf = &dest[sizeof (dest) - 1];
      else if ((errno == EILSEQ || errno == EINVAL)
	       && outbuf < &dest[sizeof (dest) - 2])
	/* Invalid or incomplete multi-byte sequence in the input;
	   substitute a single '?' for it rather than emitting a
	   broken sequence.  */
	*outbuf++ = '?';
    }
  *outbuf = '\0';

  iconv_close (handle);
  /* If conversion produced nothing usable, report "no name".  */
  return *dest == '\0' ? nullptr : dest;
}
#if USE_THREAD_DB
/* Fetch the libthread_db thread handle for PTID.  On success, store a
   pointer to the handle in *HANDLE and its size in *HANDLE_LEN and
   return true; delegates entirely to the thread_db layer.  */
bool
linux_process_target::thread_handle (ptid_t ptid, gdb_byte **handle,
				     int *handle_len)
{
  return thread_db_thread_handle (ptid, handle, handle_len);
}
#endif
7059 thread_info *
7060 linux_process_target::thread_pending_parent (thread_info *thread)
7062 lwp_info *parent = get_thread_lwp (thread)->pending_parent ();
7064 if (parent == nullptr)
7065 return nullptr;
7067 return get_lwp_thread (parent);
7070 thread_info *
7071 linux_process_target::thread_pending_child (thread_info *thread,
7072 target_waitkind *kind)
7074 lwp_info *child = get_thread_lwp (thread)->pending_child (kind);
7076 if (child == nullptr)
7077 return nullptr;
7079 return get_lwp_thread (child);
7082 /* Default implementation of linux_target_ops method "set_pc" for
7083 32-bit pc register which is literally named "pc". */
7085 void
7086 linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
7088 uint32_t newpc = pc;
7090 supply_register_by_name (regcache, "pc", &newpc);
7093 /* Default implementation of linux_target_ops method "get_pc" for
7094 32-bit pc register which is literally named "pc". */
7096 CORE_ADDR
7097 linux_get_pc_32bit (struct regcache *regcache)
7099 uint32_t pc;
7101 collect_register_by_name (regcache, "pc", &pc);
7102 threads_debug_printf ("stop pc is 0x%" PRIx32, pc);
7103 return pc;
7106 /* Default implementation of linux_target_ops method "set_pc" for
7107 64-bit pc register which is literally named "pc". */
7109 void
7110 linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
7112 uint64_t newpc = pc;
7114 supply_register_by_name (regcache, "pc", &newpc);
7117 /* Default implementation of linux_target_ops method "get_pc" for
7118 64-bit pc register which is literally named "pc". */
7120 CORE_ADDR
7121 linux_get_pc_64bit (struct regcache *regcache)
7123 uint64_t pc;
7125 collect_register_by_name (regcache, "pc", &pc);
7126 threads_debug_printf ("stop pc is 0x%" PRIx64, pc);
7127 return pc;
/* See linux-low.h.  */

int
linux_get_auxv (int pid, int wordsize, CORE_ADDR match, CORE_ADDR *valp)
{
  /* Each auxv entry is a (type, value) pair of two target words;
     read one entry at a time into a stack buffer.  alloca returns
     suitably aligned storage for the uint32_t/uint64_t accesses
     below.  */
  gdb_byte *data = (gdb_byte *) alloca (2 * wordsize);
  int offset = 0;

  gdb_assert (wordsize == 4 || wordsize == 8);

  /* Scan entries until a short read indicates the end of the
     vector.  */
  while (the_target->read_auxv (pid, offset, data, 2 * wordsize)
	 == 2 * wordsize)
    {
      if (wordsize == 4)
	{
	  uint32_t *data_p = (uint32_t *) data;
	  if (data_p[0] == match)
	    {
	      *valp = data_p[1];
	      return 1;
	    }
	}
      else
	{
	  uint64_t *data_p = (uint64_t *) data;
	  if (data_p[0] == match)
	    {
	      *valp = data_p[1];
	      return 1;
	    }
	}

      offset += 2 * wordsize;
    }

  /* MATCH was not found; *VALP is left untouched.  */
  return 0;
}
7168 /* See linux-low.h. */
7170 CORE_ADDR
7171 linux_get_hwcap (int pid, int wordsize)
7173 CORE_ADDR hwcap = 0;
7174 linux_get_auxv (pid, wordsize, AT_HWCAP, &hwcap);
7175 return hwcap;
7178 /* See linux-low.h. */
7180 CORE_ADDR
7181 linux_get_hwcap2 (int pid, int wordsize)
7183 CORE_ADDR hwcap2 = 0;
7184 linux_get_auxv (pid, wordsize, AT_HWCAP2, &hwcap2);
7185 return hwcap2;
#ifdef HAVE_LINUX_REGSETS
/* Count the entries of INFO's regsets table and cache the count in
   INFO->num_regsets.  */

void
initialize_regsets_info (struct regsets_info *info)
{
  int count = 0;

  /* The regsets table is terminated by an entry with a negative
     size.  */
  while (info->regsets[count].size >= 0)
    ++count;

  info->num_regsets = count;
}
#endif
/* One-time initialization of the Linux low-level target: install the
   target vector, set up ptrace/proc warning state, install the
   SIGCHLD handler and probe available ptrace features.  Called once
   at gdbserver startup.  */

void
initialize_low (void)
{
  struct sigaction sigchld_action;

  memset (&sigchld_action, 0, sizeof (sigchld_action));
  set_target_ops (the_linux_target);

  linux_ptrace_init_warnings ();
  linux_proc_init_warnings ();

  /* SIGCHLD notifies us of child status changes; SA_RESTART keeps
     interrupted syscalls restarting rather than failing with
     EINTR.  */
  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  sigchld_action.sa_flags = SA_RESTART;
  sigaction (SIGCHLD, &sigchld_action, NULL);

  initialize_low_arch ();

  /* Must come after the arch initialization, since feature probing
     may depend on it.  NOTE(review): ordering inferred from call
     placement only - confirm before reordering.  */
  linux_check_ptrace_features ();
}