Don't use free_contents in _bfd_elf_slurp_version_tables
[binutils-gdb.git] / gdbserver / linux-low.cc
blob4aa011c14ecf9573564cb2383ad6e9fea776d7aa
1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995-2023 Free Software Foundation, Inc.
4 This file is part of GDB.
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19 #include "server.h"
20 #include "linux-low.h"
21 #include "nat/linux-osdata.h"
22 #include "gdbsupport/agent.h"
23 #include "tdesc.h"
24 #include "gdbsupport/event-loop.h"
25 #include "gdbsupport/event-pipe.h"
26 #include "gdbsupport/rsp-low.h"
27 #include "gdbsupport/signals-state-save-restore.h"
28 #include "nat/linux-nat.h"
29 #include "nat/linux-waitpid.h"
30 #include "gdbsupport/gdb_wait.h"
31 #include "nat/gdb_ptrace.h"
32 #include "nat/linux-ptrace.h"
33 #include "nat/linux-procfs.h"
34 #include "nat/linux-personality.h"
35 #include <signal.h>
36 #include <sys/ioctl.h>
37 #include <fcntl.h>
38 #include <unistd.h>
39 #include <sys/syscall.h>
40 #include <sched.h>
41 #include <pwd.h>
42 #include <sys/types.h>
43 #include <dirent.h>
44 #include <sys/stat.h>
45 #include <sys/vfs.h>
46 #include <sys/uio.h>
47 #include <langinfo.h>
48 #include <iconv.h>
49 #include "gdbsupport/filestuff.h"
50 #include "gdbsupport/gdb-safe-ctype.h"
51 #include "tracepoint.h"
52 #include <inttypes.h>
53 #include "gdbsupport/common-inferior.h"
54 #include "nat/fork-inferior.h"
55 #include "gdbsupport/environ.h"
56 #include "gdbsupport/gdb-sigmask.h"
57 #include "gdbsupport/scoped_restore.h"
58 #ifndef ELFMAG0
59 /* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
60 then ELFMAG0 will have been defined. If it didn't get included by
61 gdb_proc_service.h then including it will likely introduce a duplicate
62 definition of elf_fpregset_t. */
63 #include <elf.h>
64 #endif
65 #include "nat/linux-namespaces.h"
67 #ifndef O_LARGEFILE
68 #define O_LARGEFILE 0
69 #endif
71 #ifndef AT_HWCAP2
72 #define AT_HWCAP2 26
73 #endif
75 /* Some targets did not define these ptrace constants from the start,
76 so gdbserver defines them locally here. In the future, these may
77 be removed after they are added to asm/ptrace.h. */
78 #if !(defined(PT_TEXT_ADDR) \
79 || defined(PT_DATA_ADDR) \
80 || defined(PT_TEXT_END_ADDR))
81 #if defined(__mcoldfire__)
82 /* These are still undefined in 3.10 kernels. */
83 #define PT_TEXT_ADDR 49*4
84 #define PT_DATA_ADDR 50*4
85 #define PT_TEXT_END_ADDR 51*4
86 /* These are still undefined in 3.10 kernels. */
87 #elif defined(__TMS320C6X__)
88 #define PT_TEXT_ADDR (0x10000*4)
89 #define PT_DATA_ADDR (0x10004*4)
90 #define PT_TEXT_END_ADDR (0x10008*4)
91 #endif
92 #endif
94 #if (defined(__UCLIBC__) \
95 && defined(HAS_NOMMU) \
96 && defined(PT_TEXT_ADDR) \
97 && defined(PT_DATA_ADDR) \
98 && defined(PT_TEXT_END_ADDR))
99 #define SUPPORTS_READ_OFFSETS
100 #endif
102 #ifdef HAVE_LINUX_BTRACE
103 # include "nat/linux-btrace.h"
104 # include "gdbsupport/btrace-common.h"
105 #endif
#ifndef HAVE_ELF32_AUXV_T
/* Local copy of glibc's Elf32_auxv_t.  Only integer members are kept:
   pointer members would change size between 32-bit and 64-bit hosts and
   break cross-inspection of 32-bit inferiors from 64-bit gdbserver.  */
typedef struct
{
  uint32_t a_type;		/* Entry type.  */
  union
    {
      uint32_t a_val;		/* Integer value.  */
    } a_un;
} Elf32_auxv_t;
#endif
#ifndef HAVE_ELF64_AUXV_T
/* Local copy of glibc's Elf64_auxv_t.  Only integer members are kept:
   pointer members would change size between 32-bit and 64-bit hosts and
   break cross-inspection of inferiors with a different word size.  */
typedef struct
{
  uint64_t a_type;		/* Entry type.  */
  union
    {
      uint64_t a_val;		/* Integer value.  */
    } a_un;
} Elf64_auxv_t;
#endif
137 /* Does the current host support PTRACE_GETREGSET? */
138 int have_ptrace_getregset = -1;
140 /* Return TRUE if THREAD is the leader thread of the process. */
142 static bool
143 is_leader (thread_info *thread)
145 ptid_t ptid = ptid_of (thread);
146 return ptid.pid () == ptid.lwp ();
149 /* Return true if we should report thread exit events to GDB, for
150 THR. */
152 static bool
153 report_exit_events_for (thread_info *thr)
155 client_state &cs = get_client_state ();
157 return (cs.report_thread_events
158 || (thr->thread_options & GDB_THREAD_OPTION_EXIT) != 0);
161 /* LWP accessors. */
163 /* See nat/linux-nat.h. */
165 ptid_t
166 ptid_of_lwp (struct lwp_info *lwp)
168 return ptid_of (get_lwp_thread (lwp));
171 /* See nat/linux-nat.h. */
173 void
174 lwp_set_arch_private_info (struct lwp_info *lwp,
175 struct arch_lwp_info *info)
177 lwp->arch_private = info;
180 /* See nat/linux-nat.h. */
182 struct arch_lwp_info *
183 lwp_arch_private_info (struct lwp_info *lwp)
185 return lwp->arch_private;
188 /* See nat/linux-nat.h. */
191 lwp_is_stopped (struct lwp_info *lwp)
193 return lwp->stopped;
196 /* See nat/linux-nat.h. */
198 enum target_stop_reason
199 lwp_stop_reason (struct lwp_info *lwp)
201 return lwp->stop_reason;
204 /* See nat/linux-nat.h. */
207 lwp_is_stepping (struct lwp_info *lwp)
209 return lwp->stepping;
212 /* A list of all unknown processes which receive stop signals. Some
213 other process will presumably claim each of these as forked
214 children momentarily. */
struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The waitpid status recorded for PID.  */
  int status;

  /* Next entry in the chain.  */
  struct simple_pid_list *next;
};
227 static struct simple_pid_list *stopped_pids;
229 /* Trivial list manipulation functions to keep track of a list of new
230 stopped processes. */
232 static void
233 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
235 struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);
237 new_pid->pid = pid;
238 new_pid->status = status;
239 new_pid->next = *listp;
240 *listp = new_pid;
243 static int
244 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
246 struct simple_pid_list **p;
248 for (p = listp; *p != NULL; p = &(*p)->next)
249 if ((*p)->pid == pid)
251 struct simple_pid_list *next = (*p)->next;
253 *statusp = (*p)->status;
254 xfree (*p);
255 *p = next;
256 return 1;
258 return 0;
/* What kind of "stop all threads" operation, if any, is in progress.  */
enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };
273 /* This is set while stop_all_lwps is in effect. */
274 static stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
276 /* FIXME make into a target method? */
277 int using_threads = 1;
279 /* True if we're presently stabilizing threads (moving them out of
280 jump pads). */
281 static int stabilizing_threads;
283 static void unsuspend_all_lwps (struct lwp_info *except);
284 static void mark_lwp_dead (struct lwp_info *lwp, int wstat,
285 bool thread_event);
286 static int lwp_is_marked_dead (struct lwp_info *lwp);
287 static int kill_lwp (unsigned long lwpid, int signo);
288 static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
289 static int linux_low_ptrace_options (int attached);
290 static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);
292 /* When the event-loop is doing a step-over, this points at the thread
293 being stepped. */
294 static ptid_t step_over_bkpt;
296 bool
297 linux_process_target::low_supports_breakpoints ()
299 return false;
302 CORE_ADDR
303 linux_process_target::low_get_pc (regcache *regcache)
305 return 0;
308 void
309 linux_process_target::low_set_pc (regcache *regcache, CORE_ADDR newpc)
311 gdb_assert_not_reached ("linux target op low_set_pc is not implemented");
314 std::vector<CORE_ADDR>
315 linux_process_target::low_get_next_pcs (regcache *regcache)
317 gdb_assert_not_reached ("linux target op low_get_next_pcs is not "
318 "implemented");
322 linux_process_target::low_decr_pc_after_break ()
324 return 0;
327 /* True if LWP is stopped in its stepping range. */
329 static int
330 lwp_in_step_range (struct lwp_info *lwp)
332 CORE_ADDR pc = lwp->stop_pc;
334 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
337 /* The event pipe registered as a waitable file in the event loop. */
338 static event_pipe linux_event_pipe;
340 /* True if we're currently in async mode. */
341 #define target_is_async_p() (linux_event_pipe.is_open ())
343 static void send_sigstop (struct lwp_info *lwp);
345 /* Return non-zero if HEADER is a 64-bit ELF file. */
/* Classify HEADER.  Returns 1 if it is a 64-bit ELF header, 0 if it is
   a valid ELF header of another class, and -1 if the ELF magic is
   absent.  *MACHINE receives e_machine on success, EM_NONE on
   failure.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  /* Without the magic bytes no other field can be trusted.  */
  if (header->e_ident[EI_MAG0] != ELFMAG0
      || header->e_ident[EI_MAG1] != ELFMAG1
      || header->e_ident[EI_MAG2] != ELFMAG2
      || header->e_ident[EI_MAG3] != ELFMAG3)
    {
      *machine = EM_NONE;
      return -1;
    }

  *machine = header->e_machine;
  return header->e_ident[EI_CLASS] == ELFCLASS64;
}
363 /* Return non-zero if FILE is a 64-bit ELF file,
364 zero if the file is not a 64-bit ELF file,
365 and -1 if the file is not accessible or doesn't exist. */
367 static int
368 elf_64_file_p (const char *file, unsigned int *machine)
370 Elf64_Ehdr header;
371 int fd;
373 fd = open (file, O_RDONLY);
374 if (fd < 0)
375 return -1;
377 if (read (fd, &header, sizeof (header)) != sizeof (header))
379 close (fd);
380 return 0;
382 close (fd);
384 return elf_64_header_p (&header, machine);
387 /* Accepts an integer PID; Returns true if the executable PID is
388 running is a 64-bit ELF file.. */
391 linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
393 char file[PATH_MAX];
395 sprintf (file, "/proc/%d/exe", pid);
396 return elf_64_file_p (file, machine);
399 void
400 linux_process_target::delete_lwp (lwp_info *lwp)
402 struct thread_info *thr = get_lwp_thread (lwp);
404 threads_debug_printf ("deleting %ld", lwpid_of (thr));
406 remove_thread (thr);
408 low_delete_thread (lwp->arch_private);
410 delete lwp;
413 void
414 linux_process_target::low_delete_thread (arch_lwp_info *info)
416 /* Default implementation should be overridden if architecture-specific
417 info is being used. */
418 gdb_assert (info == nullptr);
421 /* Open the /proc/PID/mem file for PROC. */
423 static void
424 open_proc_mem_file (process_info *proc)
426 gdb_assert (proc->priv->mem_fd == -1);
428 char filename[64];
429 xsnprintf (filename, sizeof filename, "/proc/%d/mem", proc->pid);
431 proc->priv->mem_fd
432 = gdb_open_cloexec (filename, O_RDWR | O_LARGEFILE, 0).release ();
435 process_info *
436 linux_process_target::add_linux_process_no_mem_file (int pid, int attached)
438 struct process_info *proc;
440 proc = add_process (pid, attached);
441 proc->priv = XCNEW (struct process_info_private);
443 proc->priv->arch_private = low_new_process ();
444 proc->priv->mem_fd = -1;
446 return proc;
450 process_info *
451 linux_process_target::add_linux_process (int pid, int attached)
453 process_info *proc = add_linux_process_no_mem_file (pid, attached);
454 open_proc_mem_file (proc);
455 return proc;
458 void
459 linux_process_target::remove_linux_process (process_info *proc)
461 if (proc->priv->mem_fd >= 0)
462 close (proc->priv->mem_fd);
464 this->low_delete_process (proc->priv->arch_private);
466 xfree (proc->priv);
467 proc->priv = nullptr;
469 remove_process (proc);
472 arch_process_info *
473 linux_process_target::low_new_process ()
475 return nullptr;
478 void
479 linux_process_target::low_delete_process (arch_process_info *info)
481 /* Default implementation must be overridden if architecture-specific
482 info exists. */
483 gdb_assert (info == nullptr);
486 void
487 linux_process_target::low_new_fork (process_info *parent, process_info *child)
489 /* Nop. */
492 void
493 linux_process_target::arch_setup_thread (thread_info *thread)
495 scoped_restore_current_thread restore_thread;
496 switch_to_thread (thread);
498 low_arch_setup ();
502 linux_process_target::handle_extended_wait (lwp_info **orig_event_lwp,
503 int wstat)
505 client_state &cs = get_client_state ();
506 struct lwp_info *event_lwp = *orig_event_lwp;
507 int event = linux_ptrace_get_extended_event (wstat);
508 struct thread_info *event_thr = get_lwp_thread (event_lwp);
510 gdb_assert (event_lwp->waitstatus.kind () == TARGET_WAITKIND_IGNORE);
512 /* All extended events we currently use are mid-syscall. Only
513 PTRACE_EVENT_STOP is delivered more like a signal-stop, but
514 you have to be using PTRACE_SEIZE to get that. */
515 event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
517 if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
518 || (event == PTRACE_EVENT_CLONE))
520 unsigned long new_pid;
521 int ret, status;
523 /* Get the pid of the new lwp. */
524 ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
525 &new_pid);
527 /* If we haven't already seen the new PID stop, wait for it now. */
528 if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
530 /* The new child has a pending SIGSTOP. We can't affect it until it
531 hits the SIGSTOP, but we're already attached. */
533 ret = my_waitpid (new_pid, &status, __WALL);
535 if (ret == -1)
536 perror_with_name ("waiting for new child");
537 else if (ret != new_pid)
538 warning ("wait returned unexpected PID %d", ret);
539 else if (!WIFSTOPPED (status))
540 warning ("wait returned unexpected status 0x%x", status);
543 if (debug_threads)
545 debug_printf ("HEW: Got %s event from LWP %ld, new child is %ld\n",
546 (event == PTRACE_EVENT_FORK ? "fork"
547 : event == PTRACE_EVENT_VFORK ? "vfork"
548 : event == PTRACE_EVENT_CLONE ? "clone"
549 : "???"),
550 ptid_of (event_thr).lwp (),
551 new_pid);
554 ptid_t child_ptid = (event != PTRACE_EVENT_CLONE
555 ? ptid_t (new_pid, new_pid)
556 : ptid_t (ptid_of (event_thr).pid (), new_pid));
558 lwp_info *child_lwp = add_lwp (child_ptid);
559 gdb_assert (child_lwp != NULL);
560 child_lwp->stopped = 1;
561 if (event != PTRACE_EVENT_CLONE)
562 child_lwp->must_set_ptrace_flags = 1;
563 child_lwp->status_pending_p = 0;
565 thread_info *child_thr = get_lwp_thread (child_lwp);
567 /* If we're suspending all threads, leave this one suspended
568 too. If the fork/clone parent is stepping over a breakpoint,
569 all other threads have been suspended already. Leave the
570 child suspended too. */
571 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
572 || event_lwp->bp_reinsert != 0)
574 threads_debug_printf ("leaving child suspended");
575 child_lwp->suspended = 1;
578 if (event_lwp->bp_reinsert != 0
579 && supports_software_single_step ()
580 && event == PTRACE_EVENT_VFORK)
582 /* If we leave single-step breakpoints there, child will
583 hit it, so uninsert single-step breakpoints from parent
584 (and child). Once vfork child is done, reinsert
585 them back to parent. */
586 uninsert_single_step_breakpoints (event_thr);
589 if (event != PTRACE_EVENT_CLONE)
591 /* Add the new process to the tables and clone the breakpoint
592 lists of the parent. We need to do this even if the new process
593 will be detached, since we will need the process object and the
594 breakpoints to remove any breakpoints from memory when we
595 detach, and the client side will access registers. */
596 process_info *child_proc = add_linux_process (new_pid, 0);
597 gdb_assert (child_proc != NULL);
599 process_info *parent_proc = get_thread_process (event_thr);
600 child_proc->attached = parent_proc->attached;
602 clone_all_breakpoints (child_thr, event_thr);
604 target_desc_up tdesc = allocate_target_description ();
605 copy_target_description (tdesc.get (), parent_proc->tdesc);
606 child_proc->tdesc = tdesc.release ();
608 /* Clone arch-specific process data. */
609 low_new_fork (parent_proc, child_proc);
612 /* Save fork/clone info in the parent thread. */
613 if (event == PTRACE_EVENT_FORK)
614 event_lwp->waitstatus.set_forked (child_ptid);
615 else if (event == PTRACE_EVENT_VFORK)
616 event_lwp->waitstatus.set_vforked (child_ptid);
617 else if (event == PTRACE_EVENT_CLONE
618 && (event_thr->thread_options & GDB_THREAD_OPTION_CLONE) != 0)
619 event_lwp->waitstatus.set_thread_cloned (child_ptid);
621 if (event != PTRACE_EVENT_CLONE
622 || (event_thr->thread_options & GDB_THREAD_OPTION_CLONE) != 0)
624 /* The status_pending field contains bits denoting the
625 extended event, so when the pending event is handled, the
626 handler will look at lwp->waitstatus. */
627 event_lwp->status_pending_p = 1;
628 event_lwp->status_pending = wstat;
630 /* Link the threads until the parent's event is passed on to
631 GDB. */
632 event_lwp->relative = child_lwp;
633 child_lwp->relative = event_lwp;
636 /* If the parent thread is doing step-over with single-step
637 breakpoints, the list of single-step breakpoints are cloned
638 from the parent's. Remove them from the child process.
639 In case of vfork, we'll reinsert them back once vforked
640 child is done. */
641 if (event_lwp->bp_reinsert != 0
642 && supports_software_single_step ())
644 /* The child process is forked and stopped, so it is safe
645 to access its memory without stopping all other threads
646 from other processes. */
647 delete_single_step_breakpoints (child_thr);
649 gdb_assert (has_single_step_breakpoints (event_thr));
650 gdb_assert (!has_single_step_breakpoints (child_thr));
653 /* Normally we will get the pending SIGSTOP. But in some cases
654 we might get another signal delivered to the group first.
655 If we do get another signal, be sure not to lose it. */
656 if (WSTOPSIG (status) != SIGSTOP)
658 child_lwp->stop_expected = 1;
659 child_lwp->status_pending_p = 1;
660 child_lwp->status_pending = status;
662 else if (event == PTRACE_EVENT_CLONE && cs.report_thread_events)
664 child_lwp->waitstatus.set_thread_created ();
665 child_lwp->status_pending_p = 1;
666 child_lwp->status_pending = status;
669 if (event == PTRACE_EVENT_CLONE)
671 #ifdef USE_THREAD_DB
672 thread_db_notice_clone (event_thr, child_ptid);
673 #endif
676 if (event == PTRACE_EVENT_CLONE
677 && (event_thr->thread_options & GDB_THREAD_OPTION_CLONE) == 0)
679 threads_debug_printf
680 ("not reporting clone event from LWP %ld, new child is %ld\n",
681 ptid_of (event_thr).lwp (),
682 new_pid);
683 return 1;
686 /* Leave the child stopped until GDB processes the parent
687 event. */
688 child_thr->last_resume_kind = resume_stop;
689 child_thr->last_status.set_stopped (GDB_SIGNAL_0);
691 /* Report the event. */
692 threads_debug_printf
693 ("reporting %s event from LWP %ld, new child is %ld\n",
694 (event == PTRACE_EVENT_FORK ? "fork"
695 : event == PTRACE_EVENT_VFORK ? "vfork"
696 : event == PTRACE_EVENT_CLONE ? "clone"
697 : "???"),
698 ptid_of (event_thr).lwp (),
699 new_pid);
700 return 0;
702 else if (event == PTRACE_EVENT_VFORK_DONE)
704 event_lwp->waitstatus.set_vfork_done ();
706 if (event_lwp->bp_reinsert != 0 && supports_software_single_step ())
708 reinsert_single_step_breakpoints (event_thr);
710 gdb_assert (has_single_step_breakpoints (event_thr));
713 /* Report the event. */
714 return 0;
716 else if (event == PTRACE_EVENT_EXEC && cs.report_exec_events)
718 struct process_info *proc;
719 std::vector<int> syscalls_to_catch;
720 ptid_t event_ptid;
721 pid_t event_pid;
723 threads_debug_printf ("Got exec event from LWP %ld",
724 lwpid_of (event_thr));
726 /* Get the event ptid. */
727 event_ptid = ptid_of (event_thr);
728 event_pid = event_ptid.pid ();
730 /* Save the syscall list from the execing process. */
731 proc = get_thread_process (event_thr);
732 syscalls_to_catch = std::move (proc->syscalls_to_catch);
734 /* Delete the execing process and all its threads. */
735 mourn (proc);
736 switch_to_thread (nullptr);
738 /* Create a new process/lwp/thread. */
739 proc = add_linux_process (event_pid, 0);
740 event_lwp = add_lwp (event_ptid);
741 event_thr = get_lwp_thread (event_lwp);
742 gdb_assert (current_thread == event_thr);
743 arch_setup_thread (event_thr);
745 /* Set the event status. */
746 event_lwp->waitstatus.set_execd
747 (make_unique_xstrdup
748 (linux_proc_pid_to_exec_file (lwpid_of (event_thr))));
750 /* Mark the exec status as pending. */
751 event_lwp->stopped = 1;
752 event_lwp->status_pending_p = 1;
753 event_lwp->status_pending = wstat;
754 event_thr->last_resume_kind = resume_continue;
755 event_thr->last_status.set_ignore ();
757 /* Update syscall state in the new lwp, effectively mid-syscall too. */
758 event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
760 /* Restore the list to catch. Don't rely on the client, which is free
761 to avoid sending a new list when the architecture doesn't change.
762 Also, for ANY_SYSCALL, the architecture doesn't really matter. */
763 proc->syscalls_to_catch = std::move (syscalls_to_catch);
765 /* Report the event. */
766 *orig_event_lwp = event_lwp;
767 return 0;
770 internal_error (_("unknown ptrace event %d"), event);
773 CORE_ADDR
774 linux_process_target::get_pc (lwp_info *lwp)
776 process_info *proc = get_thread_process (get_lwp_thread (lwp));
777 gdb_assert (!proc->starting_up);
779 if (!low_supports_breakpoints ())
780 return 0;
782 scoped_restore_current_thread restore_thread;
783 switch_to_thread (get_lwp_thread (lwp));
785 struct regcache *regcache = get_thread_regcache (current_thread, 1);
786 CORE_ADDR pc = low_get_pc (regcache);
788 threads_debug_printf ("pc is 0x%lx", (long) pc);
790 return pc;
793 void
794 linux_process_target::get_syscall_trapinfo (lwp_info *lwp, int *sysno)
796 struct regcache *regcache;
798 scoped_restore_current_thread restore_thread;
799 switch_to_thread (get_lwp_thread (lwp));
801 regcache = get_thread_regcache (current_thread, 1);
802 low_get_syscall_trapinfo (regcache, sysno);
804 threads_debug_printf ("get_syscall_trapinfo sysno %d", *sysno);
807 void
808 linux_process_target::low_get_syscall_trapinfo (regcache *regcache, int *sysno)
810 /* By default, report an unknown system call number. */
811 *sysno = UNKNOWN_SYSCALL;
814 bool
815 linux_process_target::save_stop_reason (lwp_info *lwp)
817 CORE_ADDR pc;
818 CORE_ADDR sw_breakpoint_pc;
819 #if USE_SIGTRAP_SIGINFO
820 siginfo_t siginfo;
821 #endif
823 if (!low_supports_breakpoints ())
824 return false;
826 process_info *proc = get_thread_process (get_lwp_thread (lwp));
827 if (proc->starting_up)
829 /* Claim we have the stop PC so that the caller doesn't try to
830 fetch it itself. */
831 return true;
834 pc = get_pc (lwp);
835 sw_breakpoint_pc = pc - low_decr_pc_after_break ();
837 /* breakpoint_at reads from the current thread. */
838 scoped_restore_current_thread restore_thread;
839 switch_to_thread (get_lwp_thread (lwp));
841 #if USE_SIGTRAP_SIGINFO
842 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
843 (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
845 if (siginfo.si_signo == SIGTRAP)
847 if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
848 && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
850 /* The si_code is ambiguous on this arch -- check debug
851 registers. */
852 if (!check_stopped_by_watchpoint (lwp))
853 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
855 else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
857 /* If we determine the LWP stopped for a SW breakpoint,
858 trust it. Particularly don't check watchpoint
859 registers, because at least on s390, we'd find
860 stopped-by-watchpoint as long as there's a watchpoint
861 set. */
862 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
864 else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
866 /* This can indicate either a hardware breakpoint or
867 hardware watchpoint. Check debug registers. */
868 if (!check_stopped_by_watchpoint (lwp))
869 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
871 else if (siginfo.si_code == TRAP_TRACE)
873 /* We may have single stepped an instruction that
874 triggered a watchpoint. In that case, on some
875 architectures (such as x86), instead of TRAP_HWBKPT,
876 si_code indicates TRAP_TRACE, and we need to check
877 the debug registers separately. */
878 if (!check_stopped_by_watchpoint (lwp))
879 lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
883 #else
884 /* We may have just stepped a breakpoint instruction. E.g., in
885 non-stop mode, GDB first tells the thread A to step a range, and
886 then the user inserts a breakpoint inside the range. In that
887 case we need to report the breakpoint PC. */
888 if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
889 && low_breakpoint_at (sw_breakpoint_pc))
890 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
892 if (hardware_breakpoint_inserted_here (pc))
893 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
895 if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
896 check_stopped_by_watchpoint (lwp);
897 #endif
899 if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
901 threads_debug_printf
902 ("%s stopped by software breakpoint",
903 target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());
905 /* Back up the PC if necessary. */
906 if (pc != sw_breakpoint_pc)
908 struct regcache *regcache
909 = get_thread_regcache (current_thread, 1);
910 low_set_pc (regcache, sw_breakpoint_pc);
913 /* Update this so we record the correct stop PC below. */
914 pc = sw_breakpoint_pc;
916 else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
917 threads_debug_printf
918 ("%s stopped by hardware breakpoint",
919 target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());
920 else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
921 threads_debug_printf
922 ("%s stopped by hardware watchpoint",
923 target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());
924 else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
925 threads_debug_printf
926 ("%s stopped by trace",
927 target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());
929 lwp->stop_pc = pc;
930 return true;
933 lwp_info *
934 linux_process_target::add_lwp (ptid_t ptid)
936 lwp_info *lwp = new lwp_info;
938 lwp->thread = add_thread (ptid, lwp);
940 low_new_thread (lwp);
942 return lwp;
945 void
946 linux_process_target::low_new_thread (lwp_info *info)
948 /* Nop. */
951 /* Callback to be used when calling fork_inferior, responsible for
952 actually initiating the tracing of the inferior. */
954 static void
955 linux_ptrace_fun ()
957 if (ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0,
958 (PTRACE_TYPE_ARG4) 0) < 0)
959 trace_start_error_with_name ("ptrace");
961 if (setpgid (0, 0) < 0)
962 trace_start_error_with_name ("setpgid");
964 /* If GDBserver is connected to gdb via stdio, redirect the inferior's
965 stdout to stderr so that inferior i/o doesn't corrupt the connection.
966 Also, redirect stdin to /dev/null. */
967 if (remote_connection_is_stdio ())
969 if (close (0) < 0)
970 trace_start_error_with_name ("close");
971 if (open ("/dev/null", O_RDONLY) < 0)
972 trace_start_error_with_name ("open");
973 if (dup2 (2, 1) < 0)
974 trace_start_error_with_name ("dup2");
975 if (write (2, "stdin/stdout redirected\n",
976 sizeof ("stdin/stdout redirected\n") - 1) < 0)
978 /* Errors ignored. */;
983 /* Start an inferior process and returns its pid.
984 PROGRAM is the name of the program to be started, and PROGRAM_ARGS
985 are its arguments. */
988 linux_process_target::create_inferior (const char *program,
989 const std::vector<char *> &program_args)
991 client_state &cs = get_client_state ();
992 struct lwp_info *new_lwp;
993 int pid;
994 ptid_t ptid;
997 maybe_disable_address_space_randomization restore_personality
998 (cs.disable_randomization);
999 std::string str_program_args = construct_inferior_arguments (program_args);
1001 pid = fork_inferior (program,
1002 str_program_args.c_str (),
1003 get_environ ()->envp (), linux_ptrace_fun,
1004 NULL, NULL, NULL, NULL);
1007 /* When spawning a new process, we can't open the mem file yet. We
1008 still have to nurse the process through the shell, and that execs
1009 a couple times. The address space a /proc/PID/mem file is
1010 accessing is destroyed on exec. */
1011 process_info *proc = add_linux_process_no_mem_file (pid, 0);
1013 ptid = ptid_t (pid, pid);
1014 new_lwp = add_lwp (ptid);
1015 new_lwp->must_set_ptrace_flags = 1;
1017 post_fork_inferior (pid, program);
1019 /* PROC is now past the shell running the program we want, so we can
1020 open the /proc/PID/mem file. */
1021 open_proc_mem_file (proc);
1023 return pid;
1026 /* Implement the post_create_inferior target_ops method. */
1028 void
1029 linux_process_target::post_create_inferior ()
1031 struct lwp_info *lwp = get_thread_lwp (current_thread);
1033 low_arch_setup ();
1035 if (lwp->must_set_ptrace_flags)
1037 struct process_info *proc = current_process ();
1038 int options = linux_low_ptrace_options (proc->attached);
1040 linux_enable_event_reporting (lwpid_of (current_thread), options);
1041 lwp->must_set_ptrace_flags = 0;
1046 linux_process_target::attach_lwp (ptid_t ptid)
1048 struct lwp_info *new_lwp;
1049 int lwpid = ptid.lwp ();
1051 if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
1052 != 0)
1053 return errno;
1055 new_lwp = add_lwp (ptid);
1057 /* We need to wait for SIGSTOP before being able to make the next
1058 ptrace call on this LWP. */
1059 new_lwp->must_set_ptrace_flags = 1;
1061 if (linux_proc_pid_is_stopped (lwpid))
1063 threads_debug_printf ("Attached to a stopped process");
1065 /* The process is definitely stopped. It is in a job control
1066 stop, unless the kernel predates the TASK_STOPPED /
1067 TASK_TRACED distinction, in which case it might be in a
1068 ptrace stop. Make sure it is in a ptrace stop; from there we
1069 can kill it, signal it, et cetera.
1071 First make sure there is a pending SIGSTOP. Since we are
1072 already attached, the process can not transition from stopped
1073 to running without a PTRACE_CONT; so we know this signal will
1074 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
1075 probably already in the queue (unless this kernel is old
1076 enough to use TASK_STOPPED for ptrace stops); but since
1077 SIGSTOP is not an RT signal, it can only be queued once. */
1078 kill_lwp (lwpid, SIGSTOP);
1080 /* Finally, resume the stopped process. This will deliver the
1081 SIGSTOP (or a higher priority signal, just like normal
1082 PTRACE_ATTACH), which we'll catch later on. */
1083 ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
1086 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
1087 brings it to a halt.
1089 There are several cases to consider here:
1091 1) gdbserver has already attached to the process and is being notified
1092 of a new thread that is being created.
1093 In this case we should ignore that SIGSTOP and resume the
1094 process. This is handled below by setting stop_expected = 1,
1095 and the fact that add_thread sets last_resume_kind ==
1096 resume_continue.
1098 2) This is the first thread (the process thread), and we're attaching
1099 to it via attach_inferior.
1100 In this case we want the process thread to stop.
1101 This is handled by having linux_attach set last_resume_kind ==
1102 resume_stop after we return.
1104 If the pid we are attaching to is also the tgid, we attach to and
1105 stop all the existing threads. Otherwise, we attach to pid and
1106 ignore any other threads in the same group as this pid.
1108 3) GDB is connecting to gdbserver and is requesting an enumeration of all
1109 existing threads.
1110 In this case we want the thread to stop.
1111 FIXME: This case is currently not properly handled.
1112 We should wait for the SIGSTOP but don't. Things work apparently
1113 because enough time passes between when we ptrace (ATTACH) and when
1114 gdb makes the next ptrace call on the thread.
1116 On the other hand, if we are currently trying to stop all threads, we
1117 should treat the new thread as if we had sent it a SIGSTOP. This works
1118 because we are guaranteed that the add_lwp call above added us to the
1119 end of the list, and so the new thread has not yet reached
1120 wait_for_sigstop (but will). */
1121 new_lwp->stop_expected = 1;
1123 return 0;
1126 /* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
1127 already attached. Returns true if a new LWP is found, false
1128 otherwise. */
1130 static int
1131 attach_proc_task_lwp_callback (ptid_t ptid)
1133 /* Is this a new thread? */
1134 if (find_thread_ptid (ptid) == NULL)
1136 int lwpid = ptid.lwp ();
1137 int err;
1139 threads_debug_printf ("Found new lwp %d", lwpid);
1141 err = the_linux_target->attach_lwp (ptid);
1143 /* Be quiet if we simply raced with the thread exiting. EPERM
1144 is returned if the thread's task still exists, and is marked
1145 as exited or zombie, as well as other conditions, so in that
1146 case, confirm the status in /proc/PID/status. */
1147 if (err == ESRCH
1148 || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
1149 threads_debug_printf
1150 ("Cannot attach to lwp %d: thread is gone (%d: %s)",
1151 lwpid, err, safe_strerror (err));
1152 else if (err != 0)
1154 std::string reason
1155 = linux_ptrace_attach_fail_reason_string (ptid, err);
1157 error (_("Cannot attach to lwp %d: %s"), lwpid, reason.c_str ());
1160 return 1;
1162 return 0;
1165 static void async_file_mark (void);
/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

int
linux_process_target::attach (unsigned long pid)
{
  struct process_info *proc;
  struct thread_info *initial_thread;
  ptid_t ptid = ptid_t (pid, pid);
  int err;

  /* Delay opening the /proc/PID/mem file until we've successfully
     attached.  */
  proc = add_linux_process_no_mem_file (pid, 1);

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = attach_lwp (ptid);
  if (err != 0)
    {
      /* Attach failed; undo the process bookkeeping before erroring
	 out so we don't leave a half-created process behind.  */
      this->remove_linux_process (proc);

      std::string reason = linux_ptrace_attach_fail_reason_string (ptid, err);
      error ("Cannot attach to process %ld: %s", pid, reason.c_str ());
    }

  open_proc_mem_file (proc);

  /* Don't ignore the initial SIGSTOP if we just attached to this
     process.  It will be collected by wait shortly.  */
  initial_thread = find_thread_ptid (ptid_t (pid, pid));
  gdb_assert (initial_thread != nullptr);
  initial_thread->last_resume_kind = resume_stop;

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  On the one hand, the inferior may be using raw
     clone instead of using pthreads.  On the other hand, even if it
     is using pthreads, GDB may not be connected yet (thread_db needs
     to do symbol lookups, through qSymbol).  Also, thread_db walks
     structures in the inferior's address space to find the list of
     threads/LWPs, and those structures may well be corrupted.  Note
     that once thread_db is loaded, we'll still use it to list threads
     and associate pthread info with each LWP.  */
  try
    {
      linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
    }
  catch (const gdb_exception_error &)
    {
      /* Make sure we do not deliver the SIGSTOP to the process.  */
      initial_thread->last_resume_kind = resume_continue;

      this->detach (proc);
      throw;
    }

  /* GDB will shortly read the xml target description for this
     process, to figure out the process' architecture.  But the target
     description is only filled in when the first process/thread in
     the thread group reports its initial PTRACE_ATTACH SIGSTOP.  Do
     that now, otherwise, if GDB is fast enough, it could read the
     target description _before_ that initial stop.  */
  if (non_stop)
    {
      struct lwp_info *lwp;
      int wstat, lwpid;
      ptid_t pid_ptid = ptid_t (pid);

      lwpid = wait_for_event_filtered (pid_ptid, pid_ptid, &wstat, __WALL);
      gdb_assert (lwpid > 0);

      lwp = find_lwp_pid (ptid_t (lwpid));
      gdb_assert (lwp != nullptr);

      /* A stop other than the expected SIGSTOP must be preserved so
	 it can be reported to GDB later.  */
      if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
	{
	  lwp->status_pending_p = 1;
	  lwp->status_pending = wstat;
	}

      initial_thread->last_resume_kind = resume_continue;

      async_file_mark ();

      gdb_assert (proc->tdesc != NULL);
    }

  return 0;
}
1257 static int
1258 last_thread_of_process_p (int pid)
1260 bool seen_one = false;
1262 thread_info *thread = find_thread (pid, [&] (thread_info *thr_arg)
1264 if (!seen_one)
1266 /* This is the first thread of this process we see. */
1267 seen_one = true;
1268 return false;
1270 else
1272 /* This is the second thread of this process we see. */
1273 return true;
1277 return thread == NULL;
1280 /* Kill LWP. */
1282 static void
1283 linux_kill_one_lwp (struct lwp_info *lwp)
1285 struct thread_info *thr = get_lwp_thread (lwp);
1286 int pid = lwpid_of (thr);
1288 /* PTRACE_KILL is unreliable. After stepping into a signal handler,
1289 there is no signal context, and ptrace(PTRACE_KILL) (or
1290 ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
1291 ptrace(CONT, pid, 0,0) and just resumes the tracee. A better
1292 alternative is to kill with SIGKILL. We only need one SIGKILL
1293 per process, not one for each thread. But since we still support
1294 support debugging programs using raw clone without CLONE_THREAD,
1295 we send one for each thread. For years, we used PTRACE_KILL
1296 only, so we're being a bit paranoid about some old kernels where
1297 PTRACE_KILL might work better (dubious if there are any such, but
1298 that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
1299 second, and so we're fine everywhere. */
1301 errno = 0;
1302 kill_lwp (pid, SIGKILL);
1303 if (debug_threads)
1305 int save_errno = errno;
1307 threads_debug_printf ("kill_lwp (SIGKILL) %s, 0, 0 (%s)",
1308 target_pid_to_str (ptid_of (thr)).c_str (),
1309 save_errno ? safe_strerror (save_errno) : "OK");
1312 errno = 0;
1313 ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
1314 if (debug_threads)
1316 int save_errno = errno;
1318 threads_debug_printf ("PTRACE_KILL %s, 0, 0 (%s)",
1319 target_pid_to_str (ptid_of (thr)).c_str (),
1320 save_errno ? safe_strerror (save_errno) : "OK");
1324 /* Kill LWP and wait for it to die. */
1326 static void
1327 kill_wait_lwp (struct lwp_info *lwp)
1329 struct thread_info *thr = get_lwp_thread (lwp);
1330 int pid = ptid_of (thr).pid ();
1331 int lwpid = ptid_of (thr).lwp ();
1332 int wstat;
1333 int res;
1335 threads_debug_printf ("killing lwp %d, for pid: %d", lwpid, pid);
1339 linux_kill_one_lwp (lwp);
1341 /* Make sure it died. Notes:
1343 - The loop is most likely unnecessary.
1345 - We don't use wait_for_event as that could delete lwps
1346 while we're iterating over them. We're not interested in
1347 any pending status at this point, only in making sure all
1348 wait status on the kernel side are collected until the
1349 process is reaped.
1351 - We don't use __WALL here as the __WALL emulation relies on
1352 SIGCHLD, and killing a stopped process doesn't generate
1353 one, nor an exit status.
1355 res = my_waitpid (lwpid, &wstat, 0);
1356 if (res == -1 && errno == ECHILD)
1357 res = my_waitpid (lwpid, &wstat, __WCLONE);
1358 } while (res > 0 && WIFSTOPPED (wstat));
1360 /* Even if it was stopped, the child may have already disappeared.
1361 E.g., if it was killed by SIGKILL. */
1362 if (res < 0 && errno != ECHILD)
1363 perror_with_name ("kill_wait_lwp");
1366 /* Callback for `for_each_thread'. Kills an lwp of a given process,
1367 except the leader. */
1369 static void
1370 kill_one_lwp_callback (thread_info *thread, int pid)
1372 struct lwp_info *lwp = get_thread_lwp (thread);
1374 /* We avoid killing the first thread here, because of a Linux kernel (at
1375 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
1376 the children get a chance to be reaped, it will remain a zombie
1377 forever. */
1379 if (lwpid_of (thread) == pid)
1381 threads_debug_printf ("is last of process %s",
1382 target_pid_to_str (thread->id).c_str ());
1383 return;
1386 kill_wait_lwp (lwp);
1390 linux_process_target::kill (process_info *process)
1392 int pid = process->pid;
1394 /* If we're killing a running inferior, make sure it is stopped
1395 first, as PTRACE_KILL will not work otherwise. */
1396 stop_all_lwps (0, NULL);
1398 for_each_thread (pid, [&] (thread_info *thread)
1400 kill_one_lwp_callback (thread, pid);
1403 /* See the comment in linux_kill_one_lwp. We did not kill the first
1404 thread in the list, so do so now. */
1405 lwp_info *lwp = find_lwp_pid (ptid_t (pid));
1407 if (lwp == NULL)
1408 threads_debug_printf ("cannot find lwp for pid: %d", pid);
1409 else
1410 kill_wait_lwp (lwp);
1412 mourn (process);
1414 /* Since we presently can only stop all lwps of all processes, we
1415 need to unstop lwps of other processes. */
1416 unstop_all_lwps (0, NULL);
1417 return 0;
/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching, otherwise, it'd be suppressed/lost.

   Returns the host signal number to pass to PTRACE_DETACH, or 0 if no
   signal should be delivered.  */

static int
get_detach_signal (struct thread_info *thread)
{
  client_state &cs = get_client_state ();
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
	 cleanly, then it'll have stopped with SIGSTOP.  But we don't
	 want to deliver that SIGSTOP.  */
      if (thread->last_status.kind () != TARGET_WAITKIND_STOPPED
	  || thread->last_status.sig () == GDB_SIGNAL_0)
	return 0;

      /* Otherwise, we may need to deliver the signal we
	 intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      threads_debug_printf ("lwp %s hasn't stopped: no pending signal",
			    target_pid_to_str (ptid_of (thread)).c_str ());
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      threads_debug_printf ("lwp %s had stopped with extended "
			    "status: no pending signal",
			    target_pid_to_str (ptid_of (thread)).c_str ());
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  /* Respect GDB's QPassSignals setting, if the client sent one.  */
  if (cs.program_signals_p && !cs.program_signals[signo])
    {
      threads_debug_printf ("lwp %s had signal %s, but it is in nopass state",
			    target_pid_to_str (ptid_of (thread)).c_str (),
			    gdb_signal_to_string (signo));
      return 0;
    }
  else if (!cs.program_signals_p
	   /* If we have no way to know which signals GDB does not
	      want to have passed to the program, assume
	      SIGTRAP/SIGINT, which is GDB's default.  */
	   && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      threads_debug_printf ("lwp %s had signal %s, "
			    "but we don't know if we should pass it. "
			    "Default to not.",
			    target_pid_to_str (ptid_of (thread)).c_str (),
			    gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      threads_debug_printf ("lwp %s has pending signal %s: delivering it",
			    target_pid_to_str (ptid_of (thread)).c_str (),
			    gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}
/* Detach from one LWP: clear any pending SIGSTOP, compute the signal
   to deliver on detach, flush its registers, and PTRACE_DETACH.  */

void
linux_process_target::detach_one_lwp (lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  int sig;
  int lwpid;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      threads_debug_printf ("Sending SIGCONT to %s",
			    target_pid_to_str (ptid_of (thread)).c_str ());

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Preparing to resume may try to write registers, and fail if the
     lwp is zombie.  If that happens, ignore the error.  We'll handle
     it below, when detach fails with ESRCH.  */
  try
    {
      /* Flush any pending changes to the process's registers.  */
      regcache_invalidate_thread (thread);

      /* Finally, let it resume.  */
      low_prepare_to_resume (lwp);
    }
  catch (const gdb_exception_error &ex)
    {
      if (!check_ptrace_stopped_lwp_gone (lwp))
	throw;
    }

  lwpid = lwpid_of (thread);
  if (ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) (long) sig) < 0)
    {
      int save_errno = errno;

      /* We know the thread exists, so ESRCH must mean the lwp is
	 zombie.  This can happen if one of the already-detached
	 threads exits the whole thread group.  In that case we're
	 still attached, and must reap the lwp.  */
      if (save_errno == ESRCH)
	{
	  int ret, status;

	  ret = my_waitpid (lwpid, &status, __WALL);
	  if (ret == -1)
	    {
	      warning (_("Couldn't reap LWP %d while detaching: %s"),
		       lwpid, safe_strerror (errno));
	    }
	  else if (!WIFEXITED (status) && !WIFSIGNALED (status))
	    {
	      warning (_("Reaping LWP %d while detaching "
			 "returned unexpected status 0x%x"),
		       lwpid, status);
	    }
	}
      else
	{
	  error (_("Can't detach %s: %s"),
		 target_pid_to_str (ptid_of (thread)).c_str (),
		 safe_strerror (save_errno));
	}
    }
  else
    threads_debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)",
			  target_pid_to_str (ptid_of (thread)).c_str (),
			  strsignal (sig));

  delete_lwp (lwp);
}
int
linux_process_target::detach (process_info *process)
{
  struct lwp_info *main_lwp;

  /* As there's a step over already in progress, let it finish first,
     otherwise nesting a stabilize_threads operation on top gets real
     messy.  */
  complete_ongoing_step_over ();

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  target_stabilize_threads ();

  /* Detach from the clone lwps first.  If the thread group exits just
     while we're detaching, we must reap the clone lwps before we're
     able to reap the leader.  */
  for_each_thread (process->pid, [this] (thread_info *thread)
    {
      /* We don't actually detach from the thread group leader just yet.
	 If the thread group exits, we must reap the zombie clone lwps
	 before we're able to reap the leader.  */
      if (thread->id.pid () == thread->id.lwp ())
	return;

      lwp_info *lwp = get_thread_lwp (thread);
      detach_one_lwp (lwp);
    });

  /* Now detach from the leader last.  */
  main_lwp = find_lwp_pid (ptid_t (process->pid));
  gdb_assert (main_lwp != nullptr);
  detach_one_lwp (main_lwp);

  mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}
1625 /* Remove all LWPs that belong to process PROC from the lwp list. */
1627 void
1628 linux_process_target::mourn (process_info *process)
1630 #ifdef USE_THREAD_DB
1631 thread_db_mourn (process);
1632 #endif
1634 for_each_thread (process->pid, [this] (thread_info *thread)
1636 delete_lwp (get_thread_lwp (thread));
1639 this->remove_linux_process (process);
1642 void
1643 linux_process_target::join (int pid)
1645 int status, ret;
1647 do {
1648 ret = my_waitpid (pid, &status, 0);
1649 if (WIFEXITED (status) || WIFSIGNALED (status))
1650 break;
1651 } while (ret != -1 || errno != ECHILD);
1654 /* Return true if the given thread is still alive. */
1656 bool
1657 linux_process_target::thread_alive (ptid_t ptid)
1659 struct lwp_info *lwp = find_lwp_pid (ptid);
1661 /* We assume we always know if a thread exits. If a whole process
1662 exited but we still haven't been able to report it to GDB, we'll
1663 hold on to the last lwp of the dead process. */
1664 if (lwp != NULL)
1665 return !lwp_is_marked_dead (lwp);
1666 else
1667 return 0;
1670 bool
1671 linux_process_target::thread_still_has_status_pending (thread_info *thread)
1673 struct lwp_info *lp = get_thread_lwp (thread);
1675 if (!lp->status_pending_p)
1676 return 0;
1678 if (thread->last_resume_kind != resume_stop
1679 && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1680 || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
1682 CORE_ADDR pc;
1683 int discard = 0;
1685 gdb_assert (lp->last_status != 0);
1687 pc = get_pc (lp);
1689 scoped_restore_current_thread restore_thread;
1690 switch_to_thread (thread);
1692 if (pc != lp->stop_pc)
1694 threads_debug_printf ("PC of %ld changed",
1695 lwpid_of (thread));
1696 discard = 1;
1699 #if !USE_SIGTRAP_SIGINFO
1700 else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1701 && !low_breakpoint_at (pc))
1703 threads_debug_printf ("previous SW breakpoint of %ld gone",
1704 lwpid_of (thread));
1705 discard = 1;
1707 else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
1708 && !hardware_breakpoint_inserted_here (pc))
1710 threads_debug_printf ("previous HW breakpoint of %ld gone",
1711 lwpid_of (thread));
1712 discard = 1;
1714 #endif
1716 if (discard)
1718 threads_debug_printf ("discarding pending breakpoint status");
1719 lp->status_pending_p = 0;
1720 return 0;
1724 return 1;
1727 /* Returns true if LWP is resumed from the client's perspective. */
1729 static int
1730 lwp_resumed (struct lwp_info *lwp)
1732 struct thread_info *thread = get_lwp_thread (lwp);
1734 if (thread->last_resume_kind != resume_stop)
1735 return 1;
1737 /* Did gdb send us a `vCont;t', but we haven't reported the
1738 corresponding stop to gdb yet? If so, the thread is still
1739 resumed/running from gdb's perspective. */
1740 if (thread->last_resume_kind == resume_stop
1741 && thread->last_status.kind () == TARGET_WAITKIND_IGNORE)
1742 return 1;
1744 return 0;
1747 bool
1748 linux_process_target::status_pending_p_callback (thread_info *thread,
1749 ptid_t ptid)
1751 struct lwp_info *lp = get_thread_lwp (thread);
1753 /* Check if we're only interested in events from a specific process
1754 or a specific LWP. */
1755 if (!thread->id.matches (ptid))
1756 return 0;
1758 if (!lwp_resumed (lp))
1759 return 0;
1761 if (lp->status_pending_p
1762 && !thread_still_has_status_pending (thread))
1764 resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
1765 return 0;
1768 return lp->status_pending_p;
1771 struct lwp_info *
1772 find_lwp_pid (ptid_t ptid)
1774 long lwp = ptid.lwp () != 0 ? ptid.lwp () : ptid.pid ();
1775 thread_info *thread = find_thread ([lwp] (thread_info *thr_arg)
1777 return thr_arg->id.lwp () == lwp;
1780 if (thread == NULL)
1781 return NULL;
1783 return get_thread_lwp (thread);
1786 /* Return the number of known LWPs in the tgid given by PID. */
1788 static int
1789 num_lwps (int pid)
1791 int count = 0;
1793 for_each_thread (pid, [&] (thread_info *thread)
1795 count++;
1798 return count;
1801 /* See nat/linux-nat.h. */
1803 struct lwp_info *
1804 iterate_over_lwps (ptid_t filter,
1805 gdb::function_view<iterate_over_lwps_ftype> callback)
1807 thread_info *thread = find_thread (filter, [&] (thread_info *thr_arg)
1809 lwp_info *lwp = get_thread_lwp (thr_arg);
1811 return callback (lwp);
1814 if (thread == NULL)
1815 return NULL;
1817 return get_thread_lwp (thread);
/* Detect zombie thread group leaders and delete them so the rest of
   the group can be reaped.  Returns true if a new pending (exit)
   event was created.  */

bool
linux_process_target::check_zombie_leaders ()
{
  bool new_pending_event = false;

  for_each_process ([&] (process_info *proc)
    {
      pid_t leader_pid = pid_of (proc);
      lwp_info *leader_lp = find_lwp_pid (ptid_t (leader_pid));

      threads_debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
			    "num_lwps=%d, zombie=%d",
			    leader_pid, leader_lp != NULL, num_lwps (leader_pid),
			    linux_proc_pid_is_zombie (leader_pid));

      if (leader_lp != NULL && !leader_lp->stopped
	  /* Check if there are other threads in the group, as we may
	     have raced with the inferior simply exiting.  Note this
	     isn't a watertight check.  If the inferior is
	     multi-threaded and is exiting, it may be we see the
	     leader as zombie before we reap all the non-leader
	     threads.  See comments below.  */
	  && !last_thread_of_process_p (leader_pid)
	  && linux_proc_pid_is_zombie (leader_pid))
	{
	  /* A zombie leader in a multi-threaded program can mean one
	     of three things:

	     #1 - Only the leader exited, not the whole program, e.g.,
	     with pthread_exit.  Since we can't reap the leader's exit
	     status until all other threads are gone and reaped too,
	     we want to delete the zombie leader right away, as it
	     can't be debugged, we can't read its registers, etc.
	     This is the main reason we check for zombie leaders
	     disappearing.

	     #2 - The whole thread-group/process exited (a group exit,
	     via e.g. exit(3), and there is (or will be shortly) an
	     exit reported for each thread in the process, and then
	     finally an exit for the leader once the non-leaders are
	     reaped.

	     #3 - There are 3 or more threads in the group, and a
	     thread other than the leader exec'd.  See comments on
	     exec events at the top of the file.

	     Ideally we would never delete the leader for case #2.
	     Instead, we want to collect the exit status of each
	     non-leader thread, and then finally collect the exit
	     status of the leader as normal and use its exit code as
	     whole-process exit code.  Unfortunately, there's no
	     race-free way to distinguish cases #1 and #2.  We can't
	     assume the exit events for the non-leaders threads are
	     already pending in the kernel, nor can we assume the
	     non-leader threads are in zombie state already.  Between
	     the leader becoming zombie and the non-leaders exiting
	     and becoming zombie themselves, there's a small time
	     window, so such a check would be racy.  Temporarily
	     pausing all threads and checking to see if all threads
	     exit or not before re-resuming them would work in the
	     case that all threads are running right now, but it
	     wouldn't work if some thread is currently already
	     ptrace-stopped, e.g., due to scheduler-locking.

	     So what we do is we delete the leader anyhow, and then
	     later on when we see its exit status, we re-add it back.
	     We also make sure that we only report a whole-process
	     exit when we see the leader exiting, as opposed to when
	     the last LWP in the LWP list exits, which can be a
	     non-leader if we deleted the leader here.  */
	  threads_debug_printf ("Thread group leader %d zombie "
				"(it exited, or another thread execd), "
				"deleting it.",
				leader_pid);

	  thread_info *leader_thread = get_lwp_thread (leader_lp);
	  if (report_exit_events_for (leader_thread))
	    {
	      mark_lwp_dead (leader_lp, W_EXITCODE (0, 0), true);
	      new_pending_event = true;
	    }
	  else
	    delete_lwp (leader_lp);
	}
    });

  return new_pending_event;
}
1909 /* Callback for `find_thread'. Returns the first LWP that is not
1910 stopped. */
1912 static bool
1913 not_stopped_callback (thread_info *thread, ptid_t filter)
1915 if (!thread->id.matches (filter))
1916 return false;
1918 lwp_info *lwp = get_thread_lwp (thread);
1920 return !lwp->stopped;
1923 /* Increment LWP's suspend count. */
1925 static void
1926 lwp_suspended_inc (struct lwp_info *lwp)
1928 lwp->suspended++;
1930 if (lwp->suspended > 4)
1931 threads_debug_printf
1932 ("LWP %ld has a suspiciously high suspend count, suspended=%d",
1933 lwpid_of (get_lwp_thread (lwp)), lwp->suspended);
1936 /* Decrement LWP's suspend count. */
1938 static void
1939 lwp_suspended_decr (struct lwp_info *lwp)
1941 lwp->suspended--;
1943 if (lwp->suspended < 0)
1945 struct thread_info *thread = get_lwp_thread (lwp);
1947 internal_error ("unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
1948 lwp->suspended);
1952 /* This function should only be called if the LWP got a SIGTRAP.
1954 Handle any tracepoint steps or hits. Return true if a tracepoint
1955 event was handled, 0 otherwise. */
1957 static int
1958 handle_tracepoints (struct lwp_info *lwp)
1960 struct thread_info *tinfo = get_lwp_thread (lwp);
1961 int tpoint_related_event = 0;
1963 gdb_assert (lwp->suspended == 0);
1965 /* If this tracepoint hit causes a tracing stop, we'll immediately
1966 uninsert tracepoints. To do this, we temporarily pause all
1967 threads, unpatch away, and then unpause threads. We need to make
1968 sure the unpausing doesn't resume LWP too. */
1969 lwp_suspended_inc (lwp);
1971 /* And we need to be sure that any all-threads-stopping doesn't try
1972 to move threads out of the jump pads, as it could deadlock the
1973 inferior (LWP could be in the jump pad, maybe even holding the
1974 lock.) */
1976 /* Do any necessary step collect actions. */
1977 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1979 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1981 /* See if we just hit a tracepoint and do its main collect
1982 actions. */
1983 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1985 lwp_suspended_decr (lwp);
1987 gdb_assert (lwp->suspended == 0);
1988 gdb_assert (!stabilizing_threads
1989 || (lwp->collecting_fast_tracepoint
1990 != fast_tpoint_collect_result::not_collecting));
1992 if (tpoint_related_event)
1994 threads_debug_printf ("got a tracepoint event");
1995 return 1;
1998 return 0;
2001 fast_tpoint_collect_result
2002 linux_process_target::linux_fast_tracepoint_collecting
2003 (lwp_info *lwp, fast_tpoint_collect_status *status)
2005 CORE_ADDR thread_area;
2006 struct thread_info *thread = get_lwp_thread (lwp);
2008 /* Get the thread area address. This is used to recognize which
2009 thread is which when tracing with the in-process agent library.
2010 We don't read anything from the address, and treat it as opaque;
2011 it's the address itself that we assume is unique per-thread. */
2012 if (low_get_thread_area (lwpid_of (thread), &thread_area) == -1)
2013 return fast_tpoint_collect_result::not_collecting;
2015 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
2019 linux_process_target::low_get_thread_area (int lwpid, CORE_ADDR *addrp)
2021 return -1;
/* Decide whether LWP, possibly stopped with *WSTAT, is inside a fast
   tracepoint jump pad and needs to be moved out of it before its stop
   can be reported.  Returns true if the LWP must keep running until
   it exits the jump pad.  */

bool
linux_process_target::maybe_move_out_of_jump_pad (lwp_info *lwp, int *wstat)
{
  scoped_restore_current_thread restore_thread;
  switch_to_thread (get_lwp_thread (lwp));

  if ((wstat == NULL
       || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
      && supports_fast_tracepoints ()
      && agent_loaded_p ())
    {
      struct fast_tpoint_collect_status status;

      threads_debug_printf
	("Checking whether LWP %ld needs to move out of the jump pad.",
	 lwpid_of (current_thread));

      fast_tpoint_collect_result r
	= linux_fast_tracepoint_collecting (lwp, &status);

      if (wstat == NULL
	  || (WSTOPSIG (*wstat) != SIGILL
	      && WSTOPSIG (*wstat) != SIGFPE
	      && WSTOPSIG (*wstat) != SIGSEGV
	      && WSTOPSIG (*wstat) != SIGBUS))
	{
	  lwp->collecting_fast_tracepoint = r;

	  if (r != fast_tpoint_collect_result::not_collecting)
	    {
	      if (r == fast_tpoint_collect_result::before_insn
		  && lwp->exit_jump_pad_bkpt == NULL)
		{
		  /* Haven't executed the original instruction yet.
		     Set breakpoint there, and wait till it's hit,
		     then single-step until exiting the jump pad.  */
		  lwp->exit_jump_pad_bkpt
		    = set_breakpoint_at (status.adjusted_insn_addr, NULL);
		}

	      threads_debug_printf
		("Checking whether LWP %ld needs to move out of the jump pad..."
		 " it does", lwpid_of (current_thread));

	      return true;
	    }
	}
      else
	{
	  /* If we get a synchronous signal while collecting, *and*
	     while executing the (relocated) original instruction,
	     reset the PC to point at the tpoint address, before
	     reporting to GDB.  Otherwise, it's an IPA lib bug: just
	     report the signal to GDB, and pray for the best.  */

	  lwp->collecting_fast_tracepoint
	    = fast_tpoint_collect_result::not_collecting;

	  if (r != fast_tpoint_collect_result::not_collecting
	      && (status.adjusted_insn_addr <= lwp->stop_pc
		  && lwp->stop_pc < status.adjusted_insn_addr_end))
	    {
	      siginfo_t info;
	      struct regcache *regcache;

	      /* The si_addr on a few signals references the address
		 of the faulting instruction.  Adjust that as
		 well.  */
	      if ((WSTOPSIG (*wstat) == SIGILL
		   || WSTOPSIG (*wstat) == SIGFPE
		   || WSTOPSIG (*wstat) == SIGBUS
		   || WSTOPSIG (*wstat) == SIGSEGV)
		  && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
			     (PTRACE_TYPE_ARG3) 0, &info) == 0
		  /* Final check just to make sure we don't clobber
		     the siginfo of non-kernel-sent signals.  */
		  && (uintptr_t) info.si_addr == lwp->stop_pc)
		{
		  info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
		  ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
			  (PTRACE_TYPE_ARG3) 0, &info);
		}

	      regcache = get_thread_regcache (current_thread, 1);
	      low_set_pc (regcache, status.tpoint_addr);
	      lwp->stop_pc = status.tpoint_addr;

	      /* Cancel any fast tracepoint lock this thread was
		 holding.  */
	      force_unlock_trace_buffer ();
	    }

	  if (lwp->exit_jump_pad_bkpt != NULL)
	    {
	      threads_debug_printf
		("Cancelling fast exit-jump-pad: removing bkpt."
		 "stopping all threads momentarily.");

	      stop_all_lwps (1, lwp);

	      delete_breakpoint (lwp->exit_jump_pad_bkpt);
	      lwp->exit_jump_pad_bkpt = NULL;

	      unstop_all_lwps (1, lwp);

	      gdb_assert (lwp->suspended >= 0);
	    }
	}
    }

  threads_debug_printf
    ("Checking whether LWP %ld needs to move out of the jump pad... no",
     lwpid_of (current_thread));

  return false;
}
2141 /* Enqueue one signal in the "signals to report later when out of the
2142 jump pad" list. */
2144 static void
2145 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2147 struct thread_info *thread = get_lwp_thread (lwp);
2149 threads_debug_printf ("Deferring signal %d for LWP %ld.",
2150 WSTOPSIG (*wstat), lwpid_of (thread));
2152 if (debug_threads)
2154 for (const auto &sig : lwp->pending_signals_to_report)
2155 threads_debug_printf (" Already queued %d", sig.signal);
2157 threads_debug_printf (" (no more currently queued signals)");
2160 /* Don't enqueue non-RT signals if they are already in the deferred
2161 queue. (SIGSTOP being the easiest signal to see ending up here
2162 twice) */
2163 if (WSTOPSIG (*wstat) < __SIGRTMIN)
2165 for (const auto &sig : lwp->pending_signals_to_report)
2167 if (sig.signal == WSTOPSIG (*wstat))
2169 threads_debug_printf
2170 ("Not requeuing already queued non-RT signal %d for LWP %ld",
2171 sig.signal, lwpid_of (thread));
2172 return;
2177 lwp->pending_signals_to_report.emplace_back (WSTOPSIG (*wstat));
2179 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2180 &lwp->pending_signals_to_report.back ().info);
2183 /* Dequeue one signal from the "signals to report later when out of
2184 the jump pad" list. */
2186 static int
2187 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2189 struct thread_info *thread = get_lwp_thread (lwp);
2191 if (!lwp->pending_signals_to_report.empty ())
2193 const pending_signal &p_sig = lwp->pending_signals_to_report.front ();
2195 *wstat = W_STOPCODE (p_sig.signal);
2196 if (p_sig.info.si_signo != 0)
2197 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2198 &p_sig.info);
2200 lwp->pending_signals_to_report.pop_front ();
2202 threads_debug_printf ("Reporting deferred signal %d for LWP %ld.",
2203 WSTOPSIG (*wstat), lwpid_of (thread));
2205 if (debug_threads)
2207 for (const auto &sig : lwp->pending_signals_to_report)
2208 threads_debug_printf (" Still queued %d", sig.signal);
2210 threads_debug_printf (" (no more queued signals)");
2213 return 1;
2216 return 0;
/* Check whether CHILD stopped because of a hardware watchpoint hit.
   If so, record TARGET_STOPPED_BY_WATCHPOINT and the data address in
   CHILD.  Returns true iff CHILD's stop reason ends up being a
   watchpoint stop (including one recorded previously).  */

bool
linux_process_target::check_stopped_by_watchpoint (lwp_info *child)
{
  /* The low_* hooks below operate on the current thread, so
     temporarily switch to CHILD's thread; the previous current thread
     is restored on scope exit.  */
  scoped_restore_current_thread restore_thread;
  switch_to_thread (get_lwp_thread (child));

  if (low_stopped_by_watchpoint ())
    {
      child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
      child->stopped_data_address = low_stopped_data_address ();
    }

  return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
}
/* Base implementation: report that the thread did not stop at a
   watchpoint.  NOTE(review): presumably overridden by low targets
   with hardware watchpoint support — confirm in arch files.  */

bool
linux_process_target::low_stopped_by_watchpoint ()
{
  return false;
}
/* Base implementation: no stopped data address available; return 0.  */

CORE_ADDR
linux_process_target::low_stopped_data_address ()
{
  return 0;
}
2246 /* Return the ptrace options that we want to try to enable. */
2248 static int
2249 linux_low_ptrace_options (int attached)
2251 client_state &cs = get_client_state ();
2252 int options = 0;
2254 if (!attached)
2255 options |= PTRACE_O_EXITKILL;
2257 if (cs.report_fork_events)
2258 options |= PTRACE_O_TRACEFORK;
2260 if (cs.report_vfork_events)
2261 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2263 if (cs.report_exec_events)
2264 options |= PTRACE_O_TRACEEXEC;
2266 options |= PTRACE_O_TRACESYSGOOD;
2268 return options;
/* Do low-level handling of the event reported by waitpid for LWPID
   with raw status WSTAT: update the corresponding lwp_info (creating
   or deleting it as needed), and either consume the event internally
   or leave it as pending on the LWP for later reporting.  */

void
linux_process_target::filter_event (int lwpid, int wstat)
{
  struct lwp_info *child;
  struct thread_info *thread;
  int have_stop_pc = 0;

  child = find_lwp_pid (ptid_t (lwpid));

  /* Check for events reported by anything not in our LWP list.  */
  if (child == nullptr)
    {
      if (WIFSTOPPED (wstat))
	{
	  if (WSTOPSIG (wstat) == SIGTRAP
	      && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
	    {
	      /* A non-leader thread exec'ed after we've seen the
		 leader zombie, and removed it from our lists (in
		 check_zombie_leaders).  The non-leader thread changes
		 its tid to the tgid.  */
	      threads_debug_printf
		("Re-adding thread group leader LWP %d after exec.",
		 lwpid);

	      child = add_lwp (ptid_t (lwpid, lwpid));
	      child->stopped = 1;
	      switch_to_thread (child->thread);
	    }
	  else
	    {
	      /* A process we are controlling has forked and the new
		 child's stop was reported to us by the kernel.  Save
		 its PID and go back to waiting for the fork event to
		 be reported - the stopped process might be returned
		 from waitpid before or after the fork event is.  */
	      threads_debug_printf
		("Saving LWP %d status %s in stopped_pids list",
		 lwpid, status_to_str (wstat).c_str ());
	      add_to_pid_list (&stopped_pids, lwpid, wstat);
	    }
	}
      else
	{
	  /* Don't report an event for the exit of an LWP not in our
	     list, i.e. not part of any inferior we're debugging.
	     This can happen if we detach from a program we originally
	     forked and then it exits.  However, note that we may have
	     earlier deleted a leader of an inferior we're debugging,
	     in check_zombie_leaders.  Re-add it back here if so.  */
	  find_process ([&] (process_info *proc)
	    {
	      if (proc->pid == lwpid)
		{
		  threads_debug_printf
		    ("Re-adding thread group leader LWP %d after exit.",
		     lwpid);

		  child = add_lwp (ptid_t (lwpid, lwpid));
		  return true;
		}
	      return false;
	    });
	}

      /* Event still belongs to no LWP we manage; drop it.  */
      if (child == nullptr)
	return;
    }

  thread = get_lwp_thread (child);

  child->stopped = 1;

  child->last_status = wstat;

  /* Check if the thread has exited.  */
  if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
    {
      threads_debug_printf ("%d exited", lwpid);

      if (finish_step_over (child))
	{
	  /* Unsuspend all other LWPs, and set them back running again.  */
	  unsuspend_all_lwps (child);
	}

      /* If this is not the leader LWP, then the exit signal was not
	 the end of the debugged application and should be ignored,
	 unless GDB wants to hear about thread exits.  */
      if (report_exit_events_for (thread) || is_leader (thread))
	{
	  /* Since events are serialized to GDB core, and we can't
	     report this one right now.  Leave the status pending for
	     the next time we're able to report it.  */
	  mark_lwp_dead (child, wstat, false);
	  return;
	}
      else
	{
	  delete_lwp (child);
	  return;
	}
    }

  gdb_assert (WIFSTOPPED (wstat));

  if (WIFSTOPPED (wstat))
    {
      struct process_info *proc;

      /* Architecture-specific setup after inferior is running.  */
      proc = find_process_pid (pid_of (thread));
      if (proc->tdesc == NULL)
	{
	  if (proc->attached)
	    {
	      /* This needs to happen after we have attached to the
		 inferior and it is stopped for the first time, but
		 before we access any inferior registers.  */
	      arch_setup_thread (thread);
	    }
	  else
	    {
	      /* The process is started, but GDBserver will do
		 architecture-specific setup after the program stops at
		 the first instruction.  */
	      child->status_pending_p = 1;
	      child->status_pending = wstat;
	      return;
	    }
	}
    }

  /* Lazily enable the extended ptrace event options on the first stop
     we see for this LWP.  */
  if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
    {
      struct process_info *proc = find_process_pid (pid_of (thread));
      int options = linux_low_ptrace_options (proc->attached);

      linux_enable_event_reporting (lwpid, options);
      child->must_set_ptrace_flags = 0;
    }

  /* Always update syscall_state, even if it will be filtered later.  */
  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
    {
      /* Syscall stops alternate: an entry stop is followed by a
	 return stop and vice versa.  */
      child->syscall_state
	= (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
	   ? TARGET_WAITKIND_SYSCALL_RETURN
	   : TARGET_WAITKIND_SYSCALL_ENTRY);
    }
  else
    {
      /* Almost all other ptrace-stops are known to be outside of system
	 calls, with further exceptions in handle_extended_wait.  */
      child->syscall_state = TARGET_WAITKIND_IGNORE;
    }

  /* Be careful to not overwrite stop_pc until save_stop_reason is
     called.  */
  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
      && linux_is_extended_waitstatus (wstat))
    {
      child->stop_pc = get_pc (child);
      if (handle_extended_wait (&child, wstat))
	{
	  /* The event has been handled, so just return without
	     reporting it.  */
	  return;
	}
    }

  if (linux_wstatus_maybe_breakpoint (wstat))
    {
      if (save_stop_reason (child))
	have_stop_pc = 1;
    }

  if (!have_stop_pc)
    child->stop_pc = get_pc (child);

  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
      && child->stop_expected)
    {
      threads_debug_printf ("Expected stop.");

      child->stop_expected = 0;

      if (thread->last_resume_kind == resume_stop)
	{
	  /* We want to report the stop to the core.  Treat the
	     SIGSTOP as a normal event.  */
	  threads_debug_printf ("resume_stop SIGSTOP caught for %s.",
				target_pid_to_str (ptid_of (thread)).c_str ());
	}
      else if (stopping_threads != NOT_STOPPING_THREADS)
	{
	  /* Stopping threads.  We don't want this SIGSTOP to end up
	     pending.  */
	  threads_debug_printf ("SIGSTOP caught for %s while stopping threads.",
				target_pid_to_str (ptid_of (thread)).c_str ());
	  return;
	}
      else
	{
	  /* This is a delayed SIGSTOP.  Filter out the event.  */
	  threads_debug_printf ("%s %s, 0, 0 (discard delayed SIGSTOP)",
				child->stepping ? "step" : "continue",
				target_pid_to_str (ptid_of (thread)).c_str ());

	  resume_one_lwp (child, child->stepping, 0, NULL);
	  return;
	}
    }

  /* The event is interesting; leave it pending on the LWP for
     wait_for_event_filtered to pick up.  */
  child->status_pending_p = 1;
  child->status_pending = wstat;
  return;
}
2490 bool
2491 linux_process_target::maybe_hw_step (thread_info *thread)
2493 if (supports_hardware_single_step ())
2494 return true;
2495 else
2497 /* GDBserver must insert single-step breakpoint for software
2498 single step. */
2499 gdb_assert (has_single_step_breakpoints (thread));
2500 return false;
/* Resume THREAD's LWP if it is stopped but has been resumed as far as
   the client is concerned, has no pending status to report, and no
   interesting last status.  Used to set such LWPs running again after
   all events have been pulled out of the kernel.  */

void
linux_process_target::resume_stopped_resumed_lwps (thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->stopped
      && !lp->suspended
      && !lp->status_pending_p
      && thread->last_status.kind () == TARGET_WAITKIND_IGNORE)
    {
      int step = 0;

      if (thread->last_resume_kind == resume_step)
	{
	  /* For software single-step the breakpoints must be in place
	     before resuming; maybe_hw_step then tells us whether to
	     pass PTRACE's step flag.  */
	  if (supports_software_single_step ())
	    install_software_single_step_breakpoints (lp);

	  step = maybe_hw_step (thread);
	}

      threads_debug_printf ("resuming stopped-resumed LWP %s at %s: step=%d",
			    target_pid_to_str (ptid_of (thread)).c_str (),
			    paddress (lp->stop_pc), step);

      resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
    }
}
/* Wait for an event from child(ren) WAIT_PTID, and return any that
   match FILTER_PTID (leaving others pending).  The PTIDs can be:
   minus_one_ptid, to specify any child; a pid PTID, specifying all
   lwps of a thread group; or a PTID representing a single lwp.  Store
   the stop status through the status pointer WSTATP.  OPTIONS is
   passed to the waitpid call.  Return 0 if no event was found and
   OPTIONS contains WNOHANG.  Return -1 if no unwaited-for children
   was found.  Return the PID of the stopped child otherwise.  */

int
linux_process_target::wait_for_event_filtered (ptid_t wait_ptid,
					       ptid_t filter_ptid,
					       int *wstatp, int options)
{
  struct thread_info *event_thread;
  struct lwp_info *event_child, *requested_child;
  sigset_t block_mask, prev_mask;

 retry:
  /* N.B. event_thread points to the thread_info struct that contains
     event_child.  Keep them in sync.  */
  event_thread = NULL;
  event_child = NULL;
  requested_child = NULL;

  /* Check for a lwp with a pending status.  */

  if (filter_ptid == minus_one_ptid || filter_ptid.is_pid ())
    {
      /* Wildcard wait: pick a random LWP with a pending status to
	 avoid starving any particular one.  */
      event_thread = find_thread_in_random ([&] (thread_info *thread)
	{
	  return status_pending_p_callback (thread, filter_ptid);
	});

      if (event_thread != NULL)
	{
	  event_child = get_thread_lwp (event_thread);
	  threads_debug_printf ("Got a pending child %ld", lwpid_of (event_thread));
	}
    }
  else if (filter_ptid != null_ptid)
    {
      /* Waiting for a specific LWP.  */
      requested_child = find_lwp_pid (filter_ptid);
      gdb_assert (requested_child != nullptr);

      /* A pending status on an LWP that is mid fast-tracepoint
	 collection can't be reported yet; defer the signal and set
	 the LWP running so it can finish the collection.  */
      if (stopping_threads == NOT_STOPPING_THREADS
	  && requested_child->status_pending_p
	  && (requested_child->collecting_fast_tracepoint
	      != fast_tpoint_collect_result::not_collecting))
	{
	  enqueue_one_deferred_signal (requested_child,
				       &requested_child->status_pending);
	  requested_child->status_pending_p = 0;
	  requested_child->status_pending = 0;
	  resume_one_lwp (requested_child, 0, 0, NULL);
	}

      if (requested_child->suspended
	  && requested_child->status_pending_p)
	{
	  internal_error ("requesting an event out of a"
			  " suspended child?");
	}

      if (requested_child->status_pending_p)
	{
	  event_child = requested_child;
	  event_thread = get_lwp_thread (event_child);
	}
    }

  if (event_child != NULL)
    {
      threads_debug_printf ("Got an event from pending child %ld (%04x)",
			    lwpid_of (event_thread),
			    event_child->status_pending);

      /* Consume the pending status and hand it to the caller.  */
      *wstatp = event_child->status_pending;
      event_child->status_pending_p = 0;
      event_child->status_pending = 0;
      switch_to_thread (event_thread);
      return lwpid_of (event_thread);
    }

  /* But if we don't find a pending event, we'll have to wait.

     We only enter this loop if no process has a pending wait status.
     Thus any action taken in response to a wait status inside this
     loop is responding as soon as we detect the status, not after any
     pending events.  */

  /* Make sure SIGCHLD is blocked until the sigsuspend below.  Block
     all signals while here.  */
  sigfillset (&block_mask);
  gdb_sigmask (SIG_BLOCK, &block_mask, &prev_mask);

  /* Always pull all events out of the kernel.  We'll randomly select
     an event LWP out of all that have events, to prevent
     starvation.  */
  while (event_child == NULL)
    {
      pid_t ret = 0;

      /* Always use -1 and WNOHANG, due to couple of a kernel/ptrace
	 quirks:

	 - If the thread group leader exits while other threads in the
	   thread group still exist, waitpid(TGID, ...) hangs.  That
	   waitpid won't return an exit status until the other threads
	   in the group are reaped.

	 - When a non-leader thread execs, that thread just vanishes
	   without reporting an exit (so we'd hang if we waited for it
	   explicitly in that case).  The exec event is reported to
	   the TGID pid.  */
      errno = 0;
      ret = my_waitpid (-1, wstatp, options | WNOHANG);

      threads_debug_printf ("waitpid(-1, ...) returned %d, %s",
			    ret, errno ? safe_strerror (errno) : "ERRNO-OK");

      if (ret > 0)
	{
	  threads_debug_printf ("waitpid %ld received %s",
				(long) ret, status_to_str (*wstatp).c_str ());

	  /* Filter all events.  IOW, leave all events pending.  We'll
	     randomly select an event LWP out of all that have events
	     below.  */
	  filter_event (ret, *wstatp);
	  /* Retry until nothing comes out of waitpid.  A single
	     SIGCHLD can indicate more than one child stopped.  */
	  continue;
	}

      /* Now that we've pulled all events out of the kernel, resume
	 LWPs that don't have an interesting event to report.  */
      if (stopping_threads == NOT_STOPPING_THREADS)
	for_each_thread ([this] (thread_info *thread)
	  {
	    resume_stopped_resumed_lwps (thread);
	  });

      /* ... and find an LWP with a status to report to the core, if
	 any.  */
      event_thread = find_thread_in_random ([&] (thread_info *thread)
	{
	  return status_pending_p_callback (thread, filter_ptid);
	});

      if (event_thread != NULL)
	{
	  event_child = get_thread_lwp (event_thread);
	  *wstatp = event_child->status_pending;
	  event_child->status_pending_p = 0;
	  event_child->status_pending = 0;
	  break;
	}

      /* Check for zombie thread group leaders.  Those can't be reaped
	 until all other threads in the thread group are.  */
      if (check_zombie_leaders ())
	goto retry;

      auto not_stopped = [&] (thread_info *thread)
	{
	  return not_stopped_callback (thread, wait_ptid);
	};

      /* If there are no resumed children left in the set of LWPs we
	 want to wait for, bail.  We can't just block in
	 waitpid/sigsuspend, because lwps might have been left stopped
	 in trace-stop state, and we'd be stuck forever waiting for
	 their status to change (which would only happen if we resumed
	 them).  Even if WNOHANG is set, this return code is preferred
	 over 0 (below), as it is more detailed.  */
      if (find_thread (not_stopped) == NULL)
	{
	  threads_debug_printf ("exit (no unwaited-for LWP)");

	  gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
	  return -1;
	}

      /* No interesting event to report to the caller.  */
      if ((options & WNOHANG))
	{
	  threads_debug_printf ("WNOHANG set, no event found");

	  gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
	  return 0;
	}

      /* Block until we get an event reported with SIGCHLD.  */
      threads_debug_printf ("sigsuspend'ing");

      /* sigsuspend atomically restores the pre-block mask and waits,
	 so a SIGCHLD delivered since the block above is not lost.  */
      sigsuspend (&prev_mask);
      gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
      goto retry;
    }

  gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);

  switch_to_thread (event_thread);

  return lwpid_of (event_thread);
}
/* Wait for an event from child(ren) PTID, matching the same PTID as
   filter.  See wait_for_event_filtered for the meaning of the
   arguments and the return value.  */

int
linux_process_target::wait_for_event (ptid_t ptid, int *wstatp, int options)
{
  return wait_for_event_filtered (ptid, ptid, wstatp, options);
}
/* Select one LWP out of those that have events pending.  On return,
   *ORIG_LP points at the selected LWP (unchanged if none found).  */

static void
select_event_lwp (struct lwp_info **orig_lp)
{
  struct thread_info *event_thread = NULL;

  /* In all-stop, give preference to the LWP that is being
     single-stepped.  There will be at most one, and it's the LWP that
     the core is most interested in.  If we didn't do this, then we'd
     have to handle pending step SIGTRAPs somehow in case the core
     later continues the previously-stepped thread, otherwise we'd
     report the pending SIGTRAP, and the core, not having stepped the
     thread, wouldn't understand what the trap was for, and therefore
     would report it to the user as a random signal.  */
  if (!non_stop)
    {
      event_thread = find_thread ([] (thread_info *thread)
	{
	  lwp_info *lp = get_thread_lwp (thread);

	  return (thread->last_status.kind () == TARGET_WAITKIND_IGNORE
		  && thread->last_resume_kind == resume_step
		  && lp->status_pending_p);
	});

      if (event_thread != NULL)
	threads_debug_printf
	  ("Select single-step %s",
	   target_pid_to_str (ptid_of (event_thread)).c_str ());
    }

  if (event_thread == NULL)
    {
      /* No single-stepping LWP.  Select one at random, out of those
	 which have had events.  */

      event_thread = find_thread_in_random ([&] (thread_info *thread)
	{
	  lwp_info *lp = get_thread_lwp (thread);

	  /* Only resumed LWPs that have an event pending.  */
	  return (thread->last_status.kind () == TARGET_WAITKIND_IGNORE
		  && lp->status_pending_p);
	});
    }

  if (event_thread != NULL)
    {
      struct lwp_info *event_lp = get_thread_lwp (event_thread);

      /* Switch the event LWP.  */
      *orig_lp = event_lp;
    }
}
2792 /* Decrement the suspend count of all LWPs, except EXCEPT, if non
2793 NULL. */
2795 static void
2796 unsuspend_all_lwps (struct lwp_info *except)
2798 for_each_thread ([&] (thread_info *thread)
2800 lwp_info *lwp = get_thread_lwp (thread);
2802 if (lwp != except)
2803 lwp_suspended_decr (lwp);
2807 static bool lwp_running (thread_info *thread);
/* Stabilize threads (move out of jump pads).

   If a thread is midway collecting a fast tracepoint, we need to
   finish the collection and move it out of the jump pad before
   reporting the signal.

   This avoids recursion while collecting (when a signal arrives
   midway, and the signal handler itself collects), which would trash
   the trace buffer.  In case the user set a breakpoint in a signal
   handler, this avoids the backtrace showing the jump pad, etc..
   Most importantly, there are certain things we can't do safely if
   threads are stopped in a jump pad (or in its callee's).  For
   example:

     - starting a new trace run.  A thread still collecting the
   previous run, could trash the trace buffer when resumed.  The trace
   buffer control structures would have been reset but the thread had
   no way to tell.  The thread could even midway memcpy'ing to the
   buffer, which would mean that when resumed, it would clobber the
   trace buffer that had been set for a new run.

     - we can't rewrite/reuse the jump pads for new tracepoints
   safely.  Say you do tstart while a thread is stopped midway while
   collecting.  When the thread is later resumed, it finishes the
   collection, and returns to the jump pad, to execute the original
   instruction that was under the tracepoint jump at the time the
   older run had been started.  If the jump pad had been rewritten
   since for something else in the new run, the thread would now
   execute the wrong / random instructions.  */

void
linux_process_target::stabilize_threads ()
{
  thread_info *thread_stuck = find_thread ([this] (thread_info *thread)
    {
      return stuck_in_jump_pad (thread);
    });

  /* If any thread is stuck in its jump pad, we can't stabilize at
     all; bail out early.  */
  if (thread_stuck != NULL)
    {
      threads_debug_printf ("can't stabilize, LWP %ld is stuck in jump pad",
			    lwpid_of (thread_stuck));
      return;
    }

  scoped_restore_current_thread restore_thread;

  stabilizing_threads = 1;

  /* Kick 'em all.  */
  for_each_thread ([this] (thread_info *thread)
    {
      move_out_of_jump_pad (thread);
    });

  /* Loop until all are stopped out of the jump pads.  */
  while (find_thread (lwp_running) != NULL)
    {
      struct target_waitstatus ourstatus;
      struct lwp_info *lwp;
      int wstat;

      /* Note that we go through the full wait even loop.  While
	 moving threads out of jump pad, we need to be able to step
	 over internal breakpoints and such.  */
      wait_1 (minus_one_ptid, &ourstatus, 0);

      if (ourstatus.kind () == TARGET_WAITKIND_STOPPED)
	{
	  lwp = get_thread_lwp (current_thread);

	  /* Lock it.  */
	  lwp_suspended_inc (lwp);

	  /* Any non-trivial signal reported while stabilizing is
	     deferred, to be reported once the thread is out of the
	     jump pad.  */
	  if (ourstatus.sig () != GDB_SIGNAL_0
	      || current_thread->last_resume_kind == resume_stop)
	    {
	      wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.sig ()));
	      enqueue_one_deferred_signal (lwp, &wstat);
	    }
	}
    }

  /* Undo the lwp_suspended_inc calls done in the loop above.  */
  unsuspend_all_lwps (NULL);

  stabilizing_threads = 0;

  if (debug_threads)
    {
      thread_stuck = find_thread ([this] (thread_info *thread)
	{
	  return stuck_in_jump_pad (thread);
	});

      if (thread_stuck != NULL)
	threads_debug_printf
	  ("couldn't stabilize, LWP %ld got stuck in jump pad",
	   lwpid_of (thread_stuck));
    }
}
2910 /* Convenience function that is called when the kernel reports an
2911 event that is not passed out to GDB. */
2913 static ptid_t
2914 ignore_event (struct target_waitstatus *ourstatus)
2916 /* If we got an event, there may still be others, as a single
2917 SIGCHLD can indicate more than one child stopped. This forces
2918 another target_wait call. */
2919 async_file_mark ();
2921 ourstatus->set_ignore ();
2922 return null_ptid;
/* Filter an exit-family event for EVENT_CHILD before it is returned
   to the core: decide whether it stands as a process exit, becomes a
   thread-exit event, or is suppressed entirely.  Deletes
   EVENT_CHILD's lwp_info when the LWP is gone for good.  Returns the
   ptid to report the (possibly rewritten) OURSTATUS for.  */

ptid_t
linux_process_target::filter_exit_event (lwp_info *event_child,
					 target_waitstatus *ourstatus)
{
  struct thread_info *thread = get_lwp_thread (event_child);
  ptid_t ptid = ptid_of (thread);

  if (ourstatus->kind () == TARGET_WAITKIND_THREAD_EXITED)
    {
      /* We're reporting a thread exit for the leader.  The exit was
	 detected by check_zombie_leaders.  */
      gdb_assert (is_leader (thread));
      gdb_assert (report_exit_events_for (thread));

      delete_lwp (event_child);
      return ptid;
    }

  /* Note we must filter TARGET_WAITKIND_SIGNALLED as well, otherwise
     if a non-leader thread exits with a signal, we'd report it to the
     core which would interpret it as the whole-process exiting.
     There is no TARGET_WAITKIND_THREAD_SIGNALLED event kind.  */
  if (ourstatus->kind () != TARGET_WAITKIND_EXITED
      && ourstatus->kind () != TARGET_WAITKIND_SIGNALLED)
    return ptid;

  if (!is_leader (thread))
    {
      /* A non-leader exit is either reported as a thread exit (if GDB
	 asked for those) or swallowed.  */
      if (report_exit_events_for (thread))
	ourstatus->set_thread_exited (0);
      else
	ourstatus->set_ignore ();

      delete_lwp (event_child);
    }

  return ptid;
}
2963 /* Returns 1 if GDB is interested in any event_child syscalls. */
2965 static int
2966 gdb_catching_syscalls_p (struct lwp_info *event_child)
2968 struct thread_info *thread = get_lwp_thread (event_child);
2969 struct process_info *proc = get_thread_process (thread);
2971 return !proc->syscalls_to_catch.empty ();
2974 bool
2975 linux_process_target::gdb_catch_this_syscall (lwp_info *event_child)
2977 int sysno;
2978 struct thread_info *thread = get_lwp_thread (event_child);
2979 struct process_info *proc = get_thread_process (thread);
2981 if (proc->syscalls_to_catch.empty ())
2982 return false;
2984 if (proc->syscalls_to_catch[0] == ANY_SYSCALL)
2985 return true;
2987 get_syscall_trapinfo (event_child, &sysno);
2989 for (int iter : proc->syscalls_to_catch)
2990 if (iter == sysno)
2991 return true;
2993 return false;
2996 ptid_t
2997 linux_process_target::wait_1 (ptid_t ptid, target_waitstatus *ourstatus,
2998 target_wait_flags target_options)
3000 THREADS_SCOPED_DEBUG_ENTER_EXIT;
3002 client_state &cs = get_client_state ();
3003 int w;
3004 struct lwp_info *event_child;
3005 int options;
3006 int pid;
3007 int step_over_finished;
3008 int bp_explains_trap;
3009 int maybe_internal_trap;
3010 int report_to_gdb;
3011 int trace_event;
3012 int in_step_range;
3014 threads_debug_printf ("[%s]", target_pid_to_str (ptid).c_str ());
3016 /* Translate generic target options into linux options. */
3017 options = __WALL;
3018 if (target_options & TARGET_WNOHANG)
3019 options |= WNOHANG;
3021 bp_explains_trap = 0;
3022 trace_event = 0;
3023 in_step_range = 0;
3024 ourstatus->set_ignore ();
3026 bool was_any_resumed = any_resumed ();
3028 if (step_over_bkpt == null_ptid)
3029 pid = wait_for_event (ptid, &w, options);
3030 else
3032 threads_debug_printf ("step_over_bkpt set [%s], doing a blocking wait",
3033 target_pid_to_str (step_over_bkpt).c_str ());
3034 pid = wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
3037 if (pid == 0 || (pid == -1 && !was_any_resumed))
3039 gdb_assert (target_options & TARGET_WNOHANG);
3041 threads_debug_printf ("ret = null_ptid, TARGET_WAITKIND_IGNORE");
3043 ourstatus->set_ignore ();
3044 return null_ptid;
3046 else if (pid == -1)
3048 threads_debug_printf ("ret = null_ptid, TARGET_WAITKIND_NO_RESUMED");
3050 ourstatus->set_no_resumed ();
3051 return null_ptid;
3054 event_child = get_thread_lwp (current_thread);
3056 /* wait_for_event only returns an exit status for the last
3057 child of a process. Report it. */
3058 if (WIFEXITED (w) || WIFSIGNALED (w))
3060 if (WIFEXITED (w))
3062 /* If we already have the exit recorded in waitstatus, use
3063 it. This will happen when we detect a zombie leader,
3064 when we had GDB_THREAD_OPTION_EXIT enabled for it. We
3065 want to report its exit as TARGET_WAITKIND_THREAD_EXITED,
3066 as the whole process hasn't exited yet. */
3067 const target_waitstatus &ws = event_child->waitstatus;
3068 if (ws.kind () != TARGET_WAITKIND_IGNORE)
3070 gdb_assert (ws.kind () == TARGET_WAITKIND_EXITED
3071 || ws.kind () == TARGET_WAITKIND_THREAD_EXITED);
3072 *ourstatus = ws;
3074 else
3075 ourstatus->set_exited (WEXITSTATUS (w));
3077 threads_debug_printf
3078 ("ret = %s, exited with retcode %d",
3079 target_pid_to_str (ptid_of (current_thread)).c_str (),
3080 WEXITSTATUS (w));
3082 else
3084 ourstatus->set_signalled (gdb_signal_from_host (WTERMSIG (w)));
3086 threads_debug_printf
3087 ("ret = %s, terminated with signal %d",
3088 target_pid_to_str (ptid_of (current_thread)).c_str (),
3089 WTERMSIG (w));
3092 return filter_exit_event (event_child, ourstatus);
3095 /* If step-over executes a breakpoint instruction, in the case of a
3096 hardware single step it means a gdb/gdbserver breakpoint had been
3097 planted on top of a permanent breakpoint, in the case of a software
3098 single step it may just mean that gdbserver hit the reinsert breakpoint.
3099 The PC has been adjusted by save_stop_reason to point at
3100 the breakpoint address.
3101 So in the case of the hardware single step advance the PC manually
3102 past the breakpoint and in the case of software single step advance only
3103 if it's not the single_step_breakpoint we are hitting.
3104 This avoids that a program would keep trapping a permanent breakpoint
3105 forever. */
3106 if (step_over_bkpt != null_ptid
3107 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3108 && (event_child->stepping
3109 || !single_step_breakpoint_inserted_here (event_child->stop_pc)))
3111 int increment_pc = 0;
3112 int breakpoint_kind = 0;
3113 CORE_ADDR stop_pc = event_child->stop_pc;
3115 breakpoint_kind = breakpoint_kind_from_current_state (&stop_pc);
3116 sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
3118 threads_debug_printf
3119 ("step-over for %s executed software breakpoint",
3120 target_pid_to_str (ptid_of (current_thread)).c_str ());
3122 if (increment_pc != 0)
3124 struct regcache *regcache
3125 = get_thread_regcache (current_thread, 1);
3127 event_child->stop_pc += increment_pc;
3128 low_set_pc (regcache, event_child->stop_pc);
3130 if (!low_breakpoint_at (event_child->stop_pc))
3131 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3135 /* If this event was not handled before, and is not a SIGTRAP, we
3136 report it. SIGILL and SIGSEGV are also treated as traps in case
3137 a breakpoint is inserted at the current PC. If this target does
3138 not support internal breakpoints at all, we also report the
3139 SIGTRAP without further processing; it's of no concern to us. */
3140 maybe_internal_trap
3141 = (low_supports_breakpoints ()
3142 && (WSTOPSIG (w) == SIGTRAP
3143 || ((WSTOPSIG (w) == SIGILL
3144 || WSTOPSIG (w) == SIGSEGV)
3145 && low_breakpoint_at (event_child->stop_pc))));
3147 if (maybe_internal_trap)
3149 /* Handle anything that requires bookkeeping before deciding to
3150 report the event or continue waiting. */
3152 /* First check if we can explain the SIGTRAP with an internal
3153 breakpoint, or if we should possibly report the event to GDB.
3154 Do this before anything that may remove or insert a
3155 breakpoint. */
3156 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
3158 /* We have a SIGTRAP, possibly a step-over dance has just
3159 finished. If so, tweak the state machine accordingly,
3160 reinsert breakpoints and delete any single-step
3161 breakpoints. */
3162 step_over_finished = finish_step_over (event_child);
3164 /* Now invoke the callbacks of any internal breakpoints there. */
3165 check_breakpoints (event_child->stop_pc);
3167 /* Handle tracepoint data collecting. This may overflow the
3168 trace buffer, and cause a tracing stop, removing
3169 breakpoints. */
3170 trace_event = handle_tracepoints (event_child);
3172 if (bp_explains_trap)
3173 threads_debug_printf ("Hit a gdbserver breakpoint.");
3175 else
3177 /* We have some other signal, possibly a step-over dance was in
3178 progress, and it should be cancelled too. */
3179 step_over_finished = finish_step_over (event_child);
3182 /* We have all the data we need. Either report the event to GDB, or
3183 resume threads and keep waiting for more. */
3185 /* If we're collecting a fast tracepoint, finish the collection and
3186 move out of the jump pad before delivering a signal. See
3187 linux_stabilize_threads. */
3189 if (WIFSTOPPED (w)
3190 && WSTOPSIG (w) != SIGTRAP
3191 && supports_fast_tracepoints ()
3192 && agent_loaded_p ())
3194 threads_debug_printf ("Got signal %d for LWP %ld. Check if we need "
3195 "to defer or adjust it.",
3196 WSTOPSIG (w), lwpid_of (current_thread));
3198 /* Allow debugging the jump pad itself. */
3199 if (current_thread->last_resume_kind != resume_step
3200 && maybe_move_out_of_jump_pad (event_child, &w))
3202 enqueue_one_deferred_signal (event_child, &w);
3204 threads_debug_printf ("Signal %d for LWP %ld deferred (in jump pad)",
3205 WSTOPSIG (w), lwpid_of (current_thread));
3207 resume_one_lwp (event_child, 0, 0, NULL);
3209 return ignore_event (ourstatus);
3213 if (event_child->collecting_fast_tracepoint
3214 != fast_tpoint_collect_result::not_collecting)
3216 threads_debug_printf
3217 ("LWP %ld was trying to move out of the jump pad (%d). "
3218 "Check if we're already there.",
3219 lwpid_of (current_thread),
3220 (int) event_child->collecting_fast_tracepoint);
3222 trace_event = 1;
3224 event_child->collecting_fast_tracepoint
3225 = linux_fast_tracepoint_collecting (event_child, NULL);
3227 if (event_child->collecting_fast_tracepoint
3228 != fast_tpoint_collect_result::before_insn)
3230 /* No longer need this breakpoint. */
3231 if (event_child->exit_jump_pad_bkpt != NULL)
3233 threads_debug_printf
3234 ("No longer need exit-jump-pad bkpt; removing it."
3235 "stopping all threads momentarily.");
3237 /* Other running threads could hit this breakpoint.
3238 We don't handle moribund locations like GDB does,
3239 instead we always pause all threads when removing
3240 breakpoints, so that any step-over or
3241 decr_pc_after_break adjustment is always taken
3242 care of while the breakpoint is still
3243 inserted. */
3244 stop_all_lwps (1, event_child);
3246 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3247 event_child->exit_jump_pad_bkpt = NULL;
3249 unstop_all_lwps (1, event_child);
3251 gdb_assert (event_child->suspended >= 0);
3255 if (event_child->collecting_fast_tracepoint
3256 == fast_tpoint_collect_result::not_collecting)
3258 threads_debug_printf
3259 ("fast tracepoint finished collecting successfully.");
3261 /* We may have a deferred signal to report. */
3262 if (dequeue_one_deferred_signal (event_child, &w))
3263 threads_debug_printf ("dequeued one signal.");
3264 else
3266 threads_debug_printf ("no deferred signals.");
3268 if (stabilizing_threads)
3270 ourstatus->set_stopped (GDB_SIGNAL_0);
3272 threads_debug_printf
3273 ("ret = %s, stopped while stabilizing threads",
3274 target_pid_to_str (ptid_of (current_thread)).c_str ());
3276 return ptid_of (current_thread);
3282 /* Check whether GDB would be interested in this event. */
3284 /* Check if GDB is interested in this syscall. */
3285 if (WIFSTOPPED (w)
3286 && WSTOPSIG (w) == SYSCALL_SIGTRAP
3287 && !gdb_catch_this_syscall (event_child))
3289 threads_debug_printf ("Ignored syscall for LWP %ld.",
3290 lwpid_of (current_thread));
3292 resume_one_lwp (event_child, event_child->stepping, 0, NULL);
3294 return ignore_event (ourstatus);
3297 /* If GDB is not interested in this signal, don't stop other
3298 threads, and don't report it to GDB. Just resume the inferior
3299 right away. We do this for threading-related signals as well as
3300 any that GDB specifically requested we ignore. But never ignore
3301 SIGSTOP if we sent it ourselves, and do not ignore signals when
3302 stepping - they may require special handling to skip the signal
3303 handler. Also never ignore signals that could be caused by a
3304 breakpoint. */
3305 if (WIFSTOPPED (w)
3306 && current_thread->last_resume_kind != resume_step
3307 && (
3308 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3309 (current_process ()->priv->thread_db != NULL
3310 && (WSTOPSIG (w) == __SIGRTMIN
3311 || WSTOPSIG (w) == __SIGRTMIN + 1))
3313 #endif
3314 (cs.pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3315 && !(WSTOPSIG (w) == SIGSTOP
3316 && current_thread->last_resume_kind == resume_stop)
3317 && !linux_wstatus_maybe_breakpoint (w))))
3319 siginfo_t info, *info_p;
3321 threads_debug_printf ("Ignored signal %d for LWP %ld.",
3322 WSTOPSIG (w), lwpid_of (current_thread));
3324 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3325 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3326 info_p = &info;
3327 else
3328 info_p = NULL;
3330 if (step_over_finished)
3332 /* We cancelled this thread's step-over above. We still
3333 need to unsuspend all other LWPs, and set them back
3334 running again while the signal handler runs. */
3335 unsuspend_all_lwps (event_child);
3337 /* Enqueue the pending signal info so that proceed_all_lwps
3338 doesn't lose it. */
3339 enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3341 proceed_all_lwps ();
3343 else
3345 resume_one_lwp (event_child, event_child->stepping,
3346 WSTOPSIG (w), info_p);
3349 return ignore_event (ourstatus);
3352 /* Note that all addresses are always "out of the step range" when
3353 there's no range to begin with. */
3354 in_step_range = lwp_in_step_range (event_child);
3356 /* If GDB wanted this thread to single step, and the thread is out
3357 of the step range, we always want to report the SIGTRAP, and let
3358 GDB handle it. Watchpoints should always be reported. So should
3359 signals we can't explain. A SIGTRAP we can't explain could be a
3360 GDB breakpoint --- we may or not support Z0 breakpoints. If we
3361 do, we're be able to handle GDB breakpoints on top of internal
3362 breakpoints, by handling the internal breakpoint and still
3363 reporting the event to GDB. If we don't, we're out of luck, GDB
3364 won't see the breakpoint hit. If we see a single-step event but
3365 the thread should be continuing, don't pass the trap to gdb.
3366 That indicates that we had previously finished a single-step but
3367 left the single-step pending -- see
3368 complete_ongoing_step_over. */
3369 report_to_gdb = (!maybe_internal_trap
3370 || (current_thread->last_resume_kind == resume_step
3371 && !in_step_range)
3372 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3373 || (!in_step_range
3374 && !bp_explains_trap
3375 && !trace_event
3376 && !step_over_finished
3377 && !(current_thread->last_resume_kind == resume_continue
3378 && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
3379 || (gdb_breakpoint_here (event_child->stop_pc)
3380 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3381 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3382 || event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE);
3384 run_breakpoint_commands (event_child->stop_pc);
3386 /* We found no reason GDB would want us to stop. We either hit one
3387 of our own breakpoints, or finished an internal step GDB
3388 shouldn't know about. */
3389 if (!report_to_gdb)
3391 if (bp_explains_trap)
3392 threads_debug_printf ("Hit a gdbserver breakpoint.");
3394 if (step_over_finished)
3395 threads_debug_printf ("Step-over finished.");
3397 if (trace_event)
3398 threads_debug_printf ("Tracepoint event.");
3400 if (lwp_in_step_range (event_child))
3401 threads_debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).",
3402 paddress (event_child->stop_pc),
3403 paddress (event_child->step_range_start),
3404 paddress (event_child->step_range_end));
3406 /* We're not reporting this breakpoint to GDB, so apply the
3407 decr_pc_after_break adjustment to the inferior's regcache
3408 ourselves. */
3410 if (low_supports_breakpoints ())
3412 struct regcache *regcache
3413 = get_thread_regcache (current_thread, 1);
3414 low_set_pc (regcache, event_child->stop_pc);
3417 if (step_over_finished)
3419 /* If we have finished stepping over a breakpoint, we've
3420 stopped and suspended all LWPs momentarily except the
3421 stepping one. This is where we resume them all again.
3422 We're going to keep waiting, so use proceed, which
3423 handles stepping over the next breakpoint. */
3424 unsuspend_all_lwps (event_child);
3426 else
3428 /* Remove the single-step breakpoints if any. Note that
3429 there isn't single-step breakpoint if we finished stepping
3430 over. */
3431 if (supports_software_single_step ()
3432 && has_single_step_breakpoints (current_thread))
3434 stop_all_lwps (0, event_child);
3435 delete_single_step_breakpoints (current_thread);
3436 unstop_all_lwps (0, event_child);
3440 threads_debug_printf ("proceeding all threads.");
3442 proceed_all_lwps ();
3444 return ignore_event (ourstatus);
3447 if (debug_threads)
3449 if (event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
3450 threads_debug_printf ("LWP %ld: extended event with waitstatus %s",
3451 lwpid_of (get_lwp_thread (event_child)),
3452 event_child->waitstatus.to_string ().c_str ());
3454 if (current_thread->last_resume_kind == resume_step)
3456 if (event_child->step_range_start == event_child->step_range_end)
3457 threads_debug_printf
3458 ("GDB wanted to single-step, reporting event.");
3459 else if (!lwp_in_step_range (event_child))
3460 threads_debug_printf ("Out of step range, reporting event.");
3463 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3464 threads_debug_printf ("Stopped by watchpoint.");
3465 else if (gdb_breakpoint_here (event_child->stop_pc))
3466 threads_debug_printf ("Stopped by GDB breakpoint.");
3469 threads_debug_printf ("Hit a non-gdbserver trap event.");
3471 /* Alright, we're going to report a stop. */
3473 /* Remove single-step breakpoints. */
3474 if (supports_software_single_step ())
3476 /* Remove single-step breakpoints or not. It it is true, stop all
3477 lwps, so that other threads won't hit the breakpoint in the
3478 staled memory. */
3479 int remove_single_step_breakpoints_p = 0;
3481 if (non_stop)
3483 remove_single_step_breakpoints_p
3484 = has_single_step_breakpoints (current_thread);
3486 else
3488 /* In all-stop, a stop reply cancels all previous resume
3489 requests. Delete all single-step breakpoints. */
3491 find_thread ([&] (thread_info *thread) {
3492 if (has_single_step_breakpoints (thread))
3494 remove_single_step_breakpoints_p = 1;
3495 return true;
3498 return false;
3502 if (remove_single_step_breakpoints_p)
3504 /* If we remove single-step breakpoints from memory, stop all lwps,
3505 so that other threads won't hit the breakpoint in the staled
3506 memory. */
3507 stop_all_lwps (0, event_child);
3509 if (non_stop)
3511 gdb_assert (has_single_step_breakpoints (current_thread));
3512 delete_single_step_breakpoints (current_thread);
3514 else
3516 for_each_thread ([] (thread_info *thread){
3517 if (has_single_step_breakpoints (thread))
3518 delete_single_step_breakpoints (thread);
3522 unstop_all_lwps (0, event_child);
3526 if (!stabilizing_threads)
3528 /* In all-stop, stop all threads. */
3529 if (!non_stop)
3530 stop_all_lwps (0, NULL);
3532 if (step_over_finished)
3534 if (!non_stop)
3536 /* If we were doing a step-over, all other threads but
3537 the stepping one had been paused in start_step_over,
3538 with their suspend counts incremented. We don't want
3539 to do a full unstop/unpause, because we're in
3540 all-stop mode (so we want threads stopped), but we
3541 still need to unsuspend the other threads, to
3542 decrement their `suspended' count back. */
3543 unsuspend_all_lwps (event_child);
3545 else
3547 /* If we just finished a step-over, then all threads had
3548 been momentarily paused. In all-stop, that's fine,
3549 we want threads stopped by now anyway. In non-stop,
3550 we need to re-resume threads that GDB wanted to be
3551 running. */
3552 unstop_all_lwps (1, event_child);
3556 /* If we're not waiting for a specific LWP, choose an event LWP
3557 from among those that have had events. Giving equal priority
3558 to all LWPs that have had events helps prevent
3559 starvation. */
3560 if (ptid == minus_one_ptid)
3562 event_child->status_pending_p = 1;
3563 event_child->status_pending = w;
3565 select_event_lwp (&event_child);
3567 /* current_thread and event_child must stay in sync. */
3568 switch_to_thread (get_lwp_thread (event_child));
3570 event_child->status_pending_p = 0;
3571 w = event_child->status_pending;
3575 /* Stabilize threads (move out of jump pads). */
3576 if (!non_stop)
3577 target_stabilize_threads ();
3579 else
3581 /* If we just finished a step-over, then all threads had been
3582 momentarily paused. In all-stop, that's fine, we want
3583 threads stopped by now anyway. In non-stop, we need to
3584 re-resume threads that GDB wanted to be running. */
3585 if (step_over_finished)
3586 unstop_all_lwps (1, event_child);
3589 /* At this point, we haven't set OURSTATUS. This is where we do it. */
3590 gdb_assert (ourstatus->kind () == TARGET_WAITKIND_IGNORE);
3592 if (event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
3594 /* If the reported event is an exit, fork, vfork, clone or exec,
3595 let GDB know. */
3597 /* Break the unreported fork/vfork/clone relationship chain. */
3598 if (is_new_child_status (event_child->waitstatus.kind ()))
3600 event_child->relative->relative = NULL;
3601 event_child->relative = NULL;
3604 *ourstatus = event_child->waitstatus;
3605 /* Clear the event lwp's waitstatus since we handled it already. */
3606 event_child->waitstatus.set_ignore ();
3608 else
3610 /* The LWP stopped due to a plain signal or a syscall signal. Either way,
3611 event_child->waitstatus wasn't filled in with the details, so look at
3612 the wait status W. */
3613 if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
3615 int syscall_number;
3617 get_syscall_trapinfo (event_child, &syscall_number);
3618 if (event_child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY)
3619 ourstatus->set_syscall_entry (syscall_number);
3620 else if (event_child->syscall_state == TARGET_WAITKIND_SYSCALL_RETURN)
3621 ourstatus->set_syscall_return (syscall_number);
3622 else
3623 gdb_assert_not_reached ("unexpected syscall state");
3625 else if (current_thread->last_resume_kind == resume_stop
3626 && WSTOPSIG (w) == SIGSTOP)
3628 /* A thread that has been requested to stop by GDB with vCont;t,
3629 and it stopped cleanly, so report as SIG0. The use of
3630 SIGSTOP is an implementation detail. */
3631 ourstatus->set_stopped (GDB_SIGNAL_0);
3633 else
3634 ourstatus->set_stopped (gdb_signal_from_host (WSTOPSIG (w)));
3637 /* Now that we've selected our final event LWP, un-adjust its PC if
3638 it was a software breakpoint, and the client doesn't know we can
3639 adjust the breakpoint ourselves. */
3640 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3641 && !cs.swbreak_feature)
3643 int decr_pc = low_decr_pc_after_break ();
3645 if (decr_pc != 0)
3647 struct regcache *regcache
3648 = get_thread_regcache (current_thread, 1);
3649 low_set_pc (regcache, event_child->stop_pc + decr_pc);
3653 gdb_assert (step_over_bkpt == null_ptid);
3655 threads_debug_printf ("ret = %s, %s",
3656 target_pid_to_str (ptid_of (current_thread)).c_str (),
3657 ourstatus->to_string ().c_str ());
3659 return filter_exit_event (event_child, ourstatus);
/* Get rid of any pending event in the pipe.  Called before blocking
   so that stale wakeup tokens don't cause a spurious event-loop
   iteration.  */
static void
async_file_flush (void)
{
  linux_event_pipe.flush ();
}
/* Put something in the pipe, so the event loop wakes up.  The
   event-loop's read handler will then call back into the target to
   fetch the actual event.  */
static void
async_file_mark (void)
{
  linux_event_pipe.mark ();
}
3676 ptid_t
3677 linux_process_target::wait (ptid_t ptid,
3678 target_waitstatus *ourstatus,
3679 target_wait_flags target_options)
3681 ptid_t event_ptid;
3683 /* Flush the async file first. */
3684 if (target_is_async_p ())
3685 async_file_flush ();
3689 event_ptid = wait_1 (ptid, ourstatus, target_options);
3691 while ((target_options & TARGET_WNOHANG) == 0
3692 && ourstatus->kind () == TARGET_WAITKIND_IGNORE);
3694 /* If at least one stop was reported, there may be more. A single
3695 SIGCHLD can signal more than one child stop. */
3696 if (target_is_async_p ()
3697 && (target_options & TARGET_WNOHANG) != 0
3698 && event_ptid != null_ptid)
3699 async_file_mark ();
3701 return event_ptid;
3704 /* Send a signal to an LWP. */
3706 static int
3707 kill_lwp (unsigned long lwpid, int signo)
3709 int ret;
3711 errno = 0;
3712 ret = syscall (__NR_tkill, lwpid, signo);
3713 if (errno == ENOSYS)
3715 /* If tkill fails, then we are not using nptl threads, a
3716 configuration we no longer support. */
3717 perror_with_name (("tkill"));
3719 return ret;
/* Stop LWP by sending it a SIGSTOP.  Exported entry point used by
   code outside this file; the real work is in send_sigstop.  */
void
linux_stop_lwp (struct lwp_info *lwp)
{
  send_sigstop (lwp);
}
3728 static void
3729 send_sigstop (struct lwp_info *lwp)
3731 int pid;
3733 pid = lwpid_of (get_lwp_thread (lwp));
3735 /* If we already have a pending stop signal for this process, don't
3736 send another. */
3737 if (lwp->stop_expected)
3739 threads_debug_printf ("Have pending sigstop for lwp %d", pid);
3741 return;
3744 threads_debug_printf ("Sending sigstop to lwp %d", pid);
3746 lwp->stop_expected = 1;
3747 kill_lwp (pid, SIGSTOP);
3750 static void
3751 send_sigstop (thread_info *thread, lwp_info *except)
3753 struct lwp_info *lwp = get_thread_lwp (thread);
3755 /* Ignore EXCEPT. */
3756 if (lwp == except)
3757 return;
3759 if (lwp->stopped)
3760 return;
3762 send_sigstop (lwp);
3765 /* Increment the suspend count of an LWP, and stop it, if not stopped
3766 yet. */
3767 static void
3768 suspend_and_send_sigstop (thread_info *thread, lwp_info *except)
3770 struct lwp_info *lwp = get_thread_lwp (thread);
3772 /* Ignore EXCEPT. */
3773 if (lwp == except)
3774 return;
3776 lwp_suspended_inc (lwp);
3778 send_sigstop (thread, except);
/* Mark LWP dead, with WSTAT as exit status pending to report later.
   If THREAD_EVENT is true, interpret WSTAT as a thread exit event
   instead of a process exit event.  This is meaningful for the leader
   thread, as we normally report a process-wide exit event when we see
   the leader exit, and a thread exit event when we see any other
   thread exit.  */

static void
mark_lwp_dead (struct lwp_info *lwp, int wstat, bool thread_event)
{
  /* Store the exit status for later.  */
  lwp->status_pending_p = 1;
  lwp->status_pending = wstat;

  /* Store in waitstatus as well, as there's nothing else to process
     for this event.  */
  if (WIFEXITED (wstat))
    {
      if (thread_event)
	lwp->waitstatus.set_thread_exited (WEXITSTATUS (wstat));
      else
	lwp->waitstatus.set_exited (WEXITSTATUS (wstat));
    }
  else if (WIFSIGNALED (wstat))
    {
      /* A termination by signal is always reported process-wide,
	 never as a thread event.  */
      gdb_assert (!thread_event);
      lwp->waitstatus.set_signalled (gdb_signal_from_host (WTERMSIG (wstat)));
    }
  else
    gdb_assert_not_reached ("unknown status kind");

  /* Prevent trying to stop it.  */
  lwp->stopped = 1;

  /* No further stops are expected from a dead lwp.  */
  lwp->stop_expected = 0;
}
3819 /* Return true if LWP has exited already, and has a pending exit event
3820 to report to GDB. */
3822 static int
3823 lwp_is_marked_dead (struct lwp_info *lwp)
3825 return (lwp->status_pending_p
3826 && (WIFEXITED (lwp->status_pending)
3827 || WIFSIGNALED (lwp->status_pending)));
/* Wait until every LWP we sent a SIGSTOP to has actually stopped,
   leaving all other events pending.  If the previously-current thread
   died in the process, leave no thread selected rather than silently
   switching GDB to a different one.  */
void
linux_process_target::wait_for_sigstop ()
{
  struct thread_info *saved_thread;
  ptid_t saved_tid;
  int wstat;
  int ret;

  saved_thread = current_thread;
  if (saved_thread != NULL)
    saved_tid = saved_thread->id;
  else
    saved_tid = null_ptid; /* avoid bogus unused warning */

  scoped_restore_current_thread restore_thread;

  threads_debug_printf ("pulling events");

  /* Passing NULL_PTID as filter indicates we want all events to be
     left pending.  Eventually this returns when there are no
     unwaited-for children left.  */
  ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat, __WALL);
  gdb_assert (ret == -1);

  if (saved_thread == NULL || mythread_alive (saved_tid))
    return;
  else
    {
      threads_debug_printf ("Previously current thread died.");

      /* We can't change the current inferior behind GDB's back,
	 otherwise, a subsequent command may apply to the wrong
	 process.  */
      restore_thread.dont_restore ();
      switch_to_thread (nullptr);
    }
}
/* Return true if THREAD is stopped someplace interesting (GDB
   breakpoint, watchpoint hit, or single-step request) while in the
   middle of a fast tracepoint collect, meaning it must be moved out
   of the jump pad before the stop can be reported.  */
bool
linux_process_target::stuck_in_jump_pad (thread_info *thread)
{
  struct lwp_info *lwp = get_thread_lwp (thread);

  /* Callers must not have suspended this LWP; a suspended LWP cannot
     be stepped out of the jump pad.  */
  if (lwp->suspended != 0)
    internal_error ("LWP %ld is suspended, suspended=%d\n",
		    lwpid_of (thread), lwp->suspended);

  gdb_assert (lwp->stopped);

  /* Allow debugging the jump pad, gdb_collect, etc..  */
  return (supports_fast_tracepoints ()
	  && agent_loaded_p ()
	  && (gdb_breakpoint_here (lwp->stop_pc)
	      || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
	      || thread->last_resume_kind == resume_step)
	  && (linux_fast_tracepoint_collecting (lwp, NULL)
	      != fast_tpoint_collect_result::not_collecting));
}
3890 void
3891 linux_process_target::move_out_of_jump_pad (thread_info *thread)
3893 struct lwp_info *lwp = get_thread_lwp (thread);
3894 int *wstat;
3896 if (lwp->suspended != 0)
3898 internal_error ("LWP %ld is suspended, suspended=%d\n",
3899 lwpid_of (thread), lwp->suspended);
3901 gdb_assert (lwp->stopped);
3903 /* For gdb_breakpoint_here. */
3904 scoped_restore_current_thread restore_thread;
3905 switch_to_thread (thread);
3907 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3909 /* Allow debugging the jump pad, gdb_collect, etc. */
3910 if (!gdb_breakpoint_here (lwp->stop_pc)
3911 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
3912 && thread->last_resume_kind != resume_step
3913 && maybe_move_out_of_jump_pad (lwp, wstat))
3915 threads_debug_printf ("LWP %ld needs stabilizing (in jump pad)",
3916 lwpid_of (thread));
3918 if (wstat)
3920 lwp->status_pending_p = 0;
3921 enqueue_one_deferred_signal (lwp, wstat);
3923 threads_debug_printf ("Signal %d for LWP %ld deferred (in jump pad",
3924 WSTOPSIG (*wstat), lwpid_of (thread));
3927 resume_one_lwp (lwp, 0, 0, NULL);
3929 else
3930 lwp_suspended_inc (lwp);
3933 static bool
3934 lwp_running (thread_info *thread)
3936 struct lwp_info *lwp = get_thread_lwp (thread);
3938 if (lwp_is_marked_dead (lwp))
3939 return false;
3941 return !lwp->stopped;
/* Stop every LWP with SIGSTOP (optionally also bumping each one's
   suspend count when SUSPEND is nonzero), except EXCEPT, and wait for
   all of them to report the stop.  stopping_threads is set for the
   duration so event-handling code knows stops are self-inflicted.  */
void
linux_process_target::stop_all_lwps (int suspend, lwp_info *except)
{
  /* Should not be called recursively.  */
  gdb_assert (stopping_threads == NOT_STOPPING_THREADS);

  THREADS_SCOPED_DEBUG_ENTER_EXIT;

  threads_debug_printf
    ("%s, except=%s", suspend ? "stop-and-suspend" : "stop",
     (except != NULL
      ? target_pid_to_str (ptid_of (get_lwp_thread (except))).c_str ()
      : "none"));

  stopping_threads = (suspend
		      ? STOPPING_AND_SUSPENDING_THREADS
		      : STOPPING_THREADS);

  if (suspend)
    for_each_thread ([&] (thread_info *thread)
      {
	suspend_and_send_sigstop (thread, except);
      });
  else
    for_each_thread ([&] (thread_info *thread)
      {
	send_sigstop (thread, except);
      });

  /* Block until every SIGSTOP we just sent has been reported.  */
  wait_for_sigstop ();
  stopping_threads = NOT_STOPPING_THREADS;

  threads_debug_printf ("setting stopping_threads back to !stopping");
}
3979 /* Enqueue one signal in the chain of signals which need to be
3980 delivered to this process on next resume. */
3982 static void
3983 enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
3985 lwp->pending_signals.emplace_back (signal);
3986 if (info == nullptr)
3987 memset (&lwp->pending_signals.back ().info, 0, sizeof (siginfo_t));
3988 else
3989 lwp->pending_signals.back ().info = *info;
/* Plant single-step breakpoints at every address the next instruction
   of LWP's thread could transfer control to, emulating hardware
   single-step on targets without it.  */
void
linux_process_target::install_software_single_step_breakpoints (lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  struct regcache *regcache = get_thread_regcache (thread, 1);

  scoped_restore_current_thread restore_thread;

  /* low_get_next_pcs and set_single_step_breakpoint operate on the
     current thread.  */
  switch_to_thread (thread);
  std::vector<CORE_ADDR> next_pcs = low_get_next_pcs (regcache);

  for (CORE_ADDR pc : next_pcs)
    set_single_step_breakpoint (pc, current_ptid);
}
4008 linux_process_target::single_step (lwp_info* lwp)
4010 int step = 0;
4012 if (supports_hardware_single_step ())
4014 step = 1;
4016 else if (supports_software_single_step ())
4018 install_software_single_step_breakpoints (lwp);
4019 step = 0;
4021 else
4022 threads_debug_printf ("stepping is not implemented on this target");
4024 return step;
4027 /* The signal can be delivered to the inferior if we are not trying to
4028 finish a fast tracepoint collect. Since signal can be delivered in
4029 the step-over, the program may go to signal handler and trap again
4030 after return from the signal handler. We can live with the spurious
4031 double traps. */
4033 static int
4034 lwp_signal_can_be_delivered (struct lwp_info *lwp)
4036 return (lwp->collecting_fast_tracepoint
4037 == fast_tpoint_collect_result::not_collecting);
/* Resume LWP, optionally single-stepping (STEP) and/or delivering
   SIGNAL with siginfo INFO.  May throw if ptrace fails; callers that
   must not throw use resume_one_lwp, which handles the
   LWP-became-zombie race.  Signals that cannot be delivered right now
   are queued on the LWP for a later resume.  */
void
linux_process_target::resume_one_lwp_throw (lwp_info *lwp, int step,
					    int signal, siginfo_t *info)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  int ptrace_request;
  struct process_info *proc = get_thread_process (thread);

  /* Note that target description may not be initialised
     (proc->tdesc == NULL) at this point because the program hasn't
     stopped at the first instruction yet.  It means GDBserver skips
     the extra traps from the wrapper program (see option --wrapper).
     Code in this function that requires register access should be
     guarded by proc->tdesc == NULL or something else.  */

  if (lwp->stopped == 0)
    return;

  gdb_assert (lwp->waitstatus.kind () == TARGET_WAITKIND_IGNORE);

  fast_tpoint_collect_result fast_tp_collecting
    = lwp->collecting_fast_tracepoint;

  gdb_assert (!stabilizing_threads
	      || (fast_tp_collecting
		  != fast_tpoint_collect_result::not_collecting));

  /* Cancel actions that rely on GDB not changing the PC (e.g., the
     user used the "jump" command, or "set $pc = foo").  */
  if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
    {
      /* Collecting 'while-stepping' actions doesn't make sense
	 anymore.  */
      release_while_stepping_state_list (thread);
    }

  /* If we have pending signals or status, and a new signal, enqueue the
     signal.  Also enqueue the signal if it can't be delivered to the
     inferior right now.  */
  if (signal != 0
      && (lwp->status_pending_p
	  || !lwp->pending_signals.empty ()
	  || !lwp_signal_can_be_delivered (lwp)))
    {
      enqueue_pending_signal (lwp, signal, info);

      /* Postpone any pending signal.  It was enqueued above.  */
      signal = 0;
    }

  if (lwp->status_pending_p)
    {
      threads_debug_printf
	("Not resuming lwp %ld (%s, stop %s); has pending status",
	 lwpid_of (thread), step ? "step" : "continue",
	 lwp->stop_expected ? "expected" : "not expected");
      return;
    }

  scoped_restore_current_thread restore_thread;
  switch_to_thread (thread);

  /* This bit needs some thinking about.  If we get a signal that
     we must report while a single-step reinsert is still pending,
     we often end up resuming the thread.  It might be better to
     (ew) allow a stack of pending events; then we could be sure that
     the reinsert happened right away and not lose any signals.

     Making this stack would also shrink the window in which breakpoints are
     uninserted (see comment in linux_wait_for_lwp) but not enough for
     complete correctness, so it won't solve that problem.  It may be
     worthwhile just to solve this one, however.  */
  if (lwp->bp_reinsert != 0)
    {
      threads_debug_printf ("  pending reinsert at 0x%s",
			    paddress (lwp->bp_reinsert));

      if (supports_hardware_single_step ())
	{
	  if (fast_tp_collecting == fast_tpoint_collect_result::not_collecting)
	    {
	      if (step == 0)
		warning ("BAD - reinserting but not stepping.");
	      if (lwp->suspended)
		warning ("BAD - reinserting and suspended(%d).",
			 lwp->suspended);
	    }
	}

      step = maybe_hw_step (thread);
    }

  if (fast_tp_collecting == fast_tpoint_collect_result::before_insn)
    threads_debug_printf
      ("lwp %ld wants to get out of fast tracepoint jump pad "
       "(exit-jump-pad-bkpt)", lwpid_of (thread));

  else if (fast_tp_collecting == fast_tpoint_collect_result::at_insn)
    {
      threads_debug_printf
	("lwp %ld wants to get out of fast tracepoint jump pad single-stepping",
	 lwpid_of (thread));

      if (supports_hardware_single_step ())
	step = 1;
      else
	{
	  internal_error ("moving out of jump pad single-stepping"
			  " not implemented on this target");
	}
    }

  /* If we have while-stepping actions in this thread set it stepping.
     If we have a signal to deliver, it may or may not be set to
     SIG_IGN, we don't know.  Assume so, and allow collecting
     while-stepping into a signal handler.  A possible smart thing to
     do would be to set an internal breakpoint at the signal return
     address, continue, and carry on catching this while-stepping
     action only when that breakpoint is hit.  A future
     enhancement.  */
  if (thread->while_stepping != NULL)
    {
      threads_debug_printf
	("lwp %ld has a while-stepping action -> forcing step.",
	 lwpid_of (thread));

      step = single_step (lwp);
    }

  if (proc->tdesc != NULL && low_supports_breakpoints ())
    {
      struct regcache *regcache = get_thread_regcache (current_thread, 1);

      /* Record the PC we are resuming from, for breakpoint
	 bookkeeping on the next stop.  */
      lwp->stop_pc = low_get_pc (regcache);

      threads_debug_printf ("  %s from pc 0x%lx", step ? "step" : "continue",
			    (long) lwp->stop_pc);
    }

  /* If we have pending signals, consume one if it can be delivered to
     the inferior.  */
  if (!lwp->pending_signals.empty () && lwp_signal_can_be_delivered (lwp))
    {
      const pending_signal &p_sig = lwp->pending_signals.front ();

      signal = p_sig.signal;
      if (p_sig.info.si_signo != 0)
	ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
		&p_sig.info);

      lwp->pending_signals.pop_front ();
    }

  threads_debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)",
			lwpid_of (thread), step ? "step" : "continue", signal,
			lwp->stop_expected ? "expected" : "not expected");

  low_prepare_to_resume (lwp);

  regcache_invalidate_thread (thread);
  errno = 0;
  lwp->stepping = step;
  if (step)
    ptrace_request = PTRACE_SINGLESTEP;
  else if (gdb_catching_syscalls_p (lwp))
    ptrace_request = PTRACE_SYSCALL;
  else
    ptrace_request = PTRACE_CONT;
  ptrace (ptrace_request,
	  lwpid_of (thread),
	  (PTRACE_TYPE_ARG3) 0,
	  /* Coerce to a uintptr_t first to avoid potential gcc warning
	     of coercing an 8 byte integer to a 4 byte pointer.  */
	  (PTRACE_TYPE_ARG4) (uintptr_t) signal);

  if (errno)
    {
      int saved_errno = errno;

      threads_debug_printf ("ptrace errno = %d (%s)",
			    saved_errno, strerror (saved_errno));

      /* Restore errno before raising, in case threads_debug_printf
	 clobbered it.  */
      errno = saved_errno;
      perror_with_name ("resuming thread");
    }

  /* Successfully resumed.  Clear state that no longer makes sense,
     and mark the LWP as running.  Must not do this before resuming
     otherwise if that fails other code will be confused.  E.g., we'd
     later try to stop the LWP and hang forever waiting for a stop
     status.  Note that we must not throw after this is cleared,
     otherwise handle_zombie_lwp_error would get confused.  */
  lwp->stopped = 0;
  lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
}
/* Hook called right before resuming an LWP; architecture backends
   override this to flush debug-register or similar per-thread state.
   The base implementation does nothing.  */
void
linux_process_target::low_prepare_to_resume (lwp_info *lwp)
{
  /* Nop.  */
}
/* Called when we try to resume a stopped LWP and that errors out.  If
   the LWP is no longer in ptrace-stopped state (meaning it's zombie,
   or about to become), discard the error, clear any pending status
   the LWP may have, and return true (we'll collect the exit status
   soon enough).  Otherwise, return false.  */

static int
check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
{
  struct thread_info *thread = get_lwp_thread (lp);

  /* If we get an error after resuming the LWP successfully, we'd
     confuse !T state for the LWP being gone.  */
  gdb_assert (lp->stopped);

  /* We can't just check whether the LWP is in 'Z (Zombie)' state,
     because even if ptrace failed with ESRCH, the tracee may be "not
     yet fully dead", but already refusing ptrace requests.  In that
     case the tracee has 'R (Running)' state for a little bit
     (observed in Linux 3.18).  See also the note on ESRCH in the
     ptrace(2) man page.  Instead, check whether the LWP has any state
     other than ptrace-stopped.  */

  /* Don't assume anything if /proc/PID/status can't be read.  */
  if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
    {
      /* The LWP is gone (or going); drop its stale pending status so
	 nothing tries to report it.  */
      lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
      lp->status_pending_p = 0;
      return 1;
    }
  return 0;
}
/* Like resume_one_lwp_throw, but no error is raised if the LWP
   disappeared while we tried to resume it (e.g. its leader exited);
   in that case it is simply marked running so its exit event can be
   collected.  */
void
linux_process_target::resume_one_lwp (lwp_info *lwp, int step, int signal,
				      siginfo_t *info)
{
  try
    {
      resume_one_lwp_throw (lwp, step, signal, info);
    }
  catch (const gdb_exception_error &ex)
    {
      if (check_ptrace_stopped_lwp_gone (lwp))
	{
	  /* This could because we tried to resume an LWP after its leader
	     exited.  Mark it as resumed, so we can collect an exit event
	     from it.  */
	  lwp->stopped = 0;
	  lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
	}
      else
	throw;
    }
}
/* This function is called once per thread via for_each_thread.
   We look up which resume request applies to THREAD and mark it with a
   pointer to the appropriate resume request.

   This algorithm is O(threads * resume elements), but resume elements
   is small (and will remain small at least until GDB supports thread
   suspension).  */

static void
linux_set_resume_request (thread_info *thread, thread_resume *resume, size_t n)
{
  struct lwp_info *lwp = get_thread_lwp (thread);

  for (int ndx = 0; ndx < n; ndx++)
    {
      ptid_t ptid = resume[ndx].thread;
      if (ptid == minus_one_ptid
	  || ptid == thread->id
	  /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
	     of PID'.  */
	  || (ptid.pid () == pid_of (thread)
	      && (ptid.is_pid ()
		  || ptid.lwp () == -1)))
	{
	  /* A stop request for a thread already stopping (or stopped)
	     at GDB's request is a no-op.  */
	  if (resume[ndx].kind == resume_stop
	      && thread->last_resume_kind == resume_stop)
	    {
	      threads_debug_printf
		("already %s LWP %ld at GDB's request",
		 (thread->last_status.kind () == TARGET_WAITKIND_STOPPED
		  ? "stopped" : "stopping"),
		 lwpid_of (thread));

	      continue;
	    }

	  /* Ignore (wildcard) resume requests for already-resumed
	     threads.  */
	  if (resume[ndx].kind != resume_stop
	      && thread->last_resume_kind != resume_stop)
	    {
	      threads_debug_printf
		("already %s LWP %ld at GDB's request",
		 (thread->last_resume_kind == resume_step
		  ? "stepping" : "continuing"),
		 lwpid_of (thread));
	      continue;
	    }

	  /* Don't let wildcard resumes resume fork/vfork/clone
	     children that GDB does not yet know are new children.  */
	  if (lwp->relative != NULL)
	    {
	      struct lwp_info *rel = lwp->relative;

	      if (rel->status_pending_p
		  && is_new_child_status (rel->waitstatus.kind ()))
		{
		  threads_debug_printf
		    ("not resuming LWP %ld: has queued stop reply",
		     lwpid_of (thread));
		  continue;
		}
	    }

	  /* If the thread has a pending event that has already been
	     reported to GDBserver core, but GDB has not pulled the
	     event out of the vStopped queue yet, likewise, ignore the
	     (wildcard) resume request.  */
	  if (in_queued_stop_replies (thread->id))
	    {
	      threads_debug_printf
		("not resuming LWP %ld: has queued stop reply",
		 lwpid_of (thread));
	      continue;
	    }

	  /* Found the matching request; record it and stop looking.  */
	  lwp->resume = &resume[ndx];
	  thread->last_resume_kind = lwp->resume->kind;

	  lwp->step_range_start = lwp->resume->step_range_start;
	  lwp->step_range_end = lwp->resume->step_range_end;

	  /* If we had a deferred signal to report, dequeue one now.
	     This can happen if LWP gets more than one signal while
	     trying to get out of a jump pad.  */
	  if (lwp->stopped
	      && !lwp->status_pending_p
	      && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
	    {
	      lwp->status_pending_p = 1;

	      threads_debug_printf
		("Dequeueing deferred signal %d for LWP %ld, "
		 "leaving status pending.",
		 WSTOPSIG (lwp->status_pending),
		 lwpid_of (thread));
	    }

	  return;
	}
    }

  /* No resume action for this thread.  */
  lwp->resume = NULL;
}
4405 bool
4406 linux_process_target::resume_status_pending (thread_info *thread)
4408 struct lwp_info *lwp = get_thread_lwp (thread);
4410 /* LWPs which will not be resumed are not interesting, because
4411 we might not wait for them next time through linux_wait. */
4412 if (lwp->resume == NULL)
4413 return false;
4415 return thread_still_has_status_pending (thread);
/* Return true if THREAD is stopped at a breakpoint (or fast
   tracepoint jump) that it needs to step over before it can be
   resumed; return false if the thread can be resumed as-is or must
   stay stopped.  Used as a find_thread predicate.  */

bool
linux_process_target::thread_needs_step_over (thread_info *thread)
{
  struct lwp_info *lwp = get_thread_lwp (thread);
  CORE_ADDR pc;
  struct process_info *proc = get_thread_process (thread);

  /* GDBserver is skipping the extra traps from the wrapper program,
     don't have to do step over.  */
  if (proc->tdesc == NULL)
    return false;

  /* LWPs which will not be resumed are not interesting, because we
     might not wait for them next time through linux_wait.  */

  if (!lwp->stopped)
    {
      threads_debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped",
			    lwpid_of (thread));
      return false;
    }

  if (thread->last_resume_kind == resume_stop)
    {
      threads_debug_printf
	("Need step over [LWP %ld]? Ignoring, should remain stopped",
	 lwpid_of (thread));
      return false;
    }

  gdb_assert (lwp->suspended >= 0);

  if (lwp->suspended)
    {
      threads_debug_printf ("Need step over [LWP %ld]? Ignoring, suspended",
			    lwpid_of (thread));
      return false;
    }

  if (lwp->status_pending_p)
    {
      threads_debug_printf
	("Need step over [LWP %ld]? Ignoring, has pending status.",
	 lwpid_of (thread));
      return false;
    }

  /* Note: PC, not STOP_PC.  Either GDB has adjusted the PC already,
     or we have.  */
  pc = get_pc (lwp);

  /* If the PC has changed since we stopped, then don't do anything,
     and let the breakpoint/tracepoint be hit.  This happens if, for
     instance, GDB handled the decr_pc_after_break subtraction itself,
     GDB is OOL stepping this thread, or the user has issued a "jump"
     command, or poked thread's registers herself.  */
  if (pc != lwp->stop_pc)
    {
      threads_debug_printf
	("Need step over [LWP %ld]? Cancelling, PC was changed. "
	 "Old stop_pc was 0x%s, PC is now 0x%s", lwpid_of (thread),
	 paddress (lwp->stop_pc), paddress (pc));
      return false;
    }

  /* On software single step target, resume the inferior with signal
     rather than stepping over.  */
  if (supports_software_single_step ()
      && !lwp->pending_signals.empty ()
      && lwp_signal_can_be_delivered (lwp))
    {
      threads_debug_printf
	("Need step over [LWP %ld]? Ignoring, has pending signals.",
	 lwpid_of (thread));

      return false;
    }

  scoped_restore_current_thread restore_thread;
  switch_to_thread (thread);

  /* We can only step over breakpoints we know about.  */
  if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
    {
      /* Don't step over a breakpoint that GDB expects to hit
	 though.  If the condition is being evaluated on the target's side
	 and it evaluate to false, step over this breakpoint as well.  */
      if (gdb_breakpoint_here (pc)
	  && gdb_condition_true_at_breakpoint (pc)
	  && gdb_no_commands_at_breakpoint (pc))
	{
	  threads_debug_printf ("Need step over [LWP %ld]? yes, but found"
				" GDB breakpoint at 0x%s; skipping step over",
				lwpid_of (thread), paddress (pc));

	  return false;
	}
      else
	{
	  threads_debug_printf ("Need step over [LWP %ld]? yes, "
				"found breakpoint at 0x%s",
				lwpid_of (thread), paddress (pc));

	  /* We've found an lwp that needs stepping over --- return 1 so
	     that find_thread stops looking.  */
	  return true;
	}
    }

  threads_debug_printf
    ("Need step over [LWP %ld]? No, no breakpoint found at 0x%s",
     lwpid_of (thread), paddress (pc));

  return false;
}
/* Begin a step-over operation for LWP: stop every other LWP, lift
   the breakpoint(s) at LWP's PC out of the way, and resume just this
   LWP (single-stepping it when the target supports that).  The
   matching re-insert happens later in finish_step_over.  */

void
linux_process_target::start_step_over (lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  CORE_ADDR pc;

  threads_debug_printf ("Starting step-over on LWP %ld. Stopping all threads",
			lwpid_of (thread));

  stop_all_lwps (1, lwp);

  if (lwp->suspended != 0)
    {
      internal_error ("LWP %ld suspended=%d\n", lwpid_of (thread),
		      lwp->suspended);
    }

  threads_debug_printf ("Done stopping all threads for step-over.");

  /* Note, we should always reach here with an already adjusted PC,
     either by GDB (if we're resuming due to GDB's request), or by our
     caller, if we just finished handling an internal breakpoint GDB
     shouldn't care about.  */
  pc = get_pc (lwp);

  bool step = false;
  {
    scoped_restore_current_thread restore_thread;
    switch_to_thread (thread);

    lwp->bp_reinsert = pc;
    uninsert_breakpoints_at (pc);
    uninsert_fast_tracepoint_jumps_at (pc);

    step = single_step (lwp);
  }

  resume_one_lwp (lwp, step, 0, NULL);

  /* Require next event from this LWP.  */
  step_over_bkpt = thread->id;
}
/* Finish a step-over started by start_step_over: re-insert the
   breakpoint(s) that were lifted at LWP->bp_reinsert and clear the
   step-over bookkeeping.  Return true if a step-over was in fact in
   progress for LWP, false otherwise.  */

bool
linux_process_target::finish_step_over (lwp_info *lwp)
{
  if (lwp->bp_reinsert != 0)
    {
      scoped_restore_current_thread restore_thread;

      threads_debug_printf ("Finished step over.");

      switch_to_thread (get_lwp_thread (lwp));

      /* Reinsert any breakpoint at LWP->BP_REINSERT.  Note that there
	 may be no breakpoint to reinsert there by now.  */
      reinsert_breakpoints_at (lwp->bp_reinsert);
      reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);

      lwp->bp_reinsert = 0;

      /* Delete any single-step breakpoints.  No longer needed.  We
	 don't have to worry about other threads hitting this trap,
	 and later not being able to explain it, because we were
	 stepping over a breakpoint, and we hold all threads but
	 LWP stopped while doing that.  */
      if (!supports_hardware_single_step ())
	{
	  gdb_assert (has_single_step_breakpoints (current_thread));
	  delete_single_step_breakpoints (current_thread);
	}

      step_over_bkpt = null_ptid;
      return true;
    }
  else
    return false;
}
/* If a step-over is in progress (step_over_bkpt set), wait for the
   stepping LWP's event, finish the step-over, discard the resulting
   SIGTRAP when GDB did not ask for a step, and unsuspend all LWPs.
   Called before operations (e.g. detach) that must not leave a
   half-done step-over behind.  */

void
linux_process_target::complete_ongoing_step_over ()
{
  if (step_over_bkpt != null_ptid)
    {
      struct lwp_info *lwp;
      int wstat;
      int ret;

      threads_debug_printf ("detach: step over in progress, finish it first");

      /* Passing NULL_PTID as filter indicates we want all events to
	 be left pending.  Eventually this returns when there are no
	 unwaited-for children left.  */
      ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat,
				     __WALL);
      gdb_assert (ret == -1);

      lwp = find_lwp_pid (step_over_bkpt);
      if (lwp != NULL)
	{
	  finish_step_over (lwp);

	  /* If we got our step SIGTRAP, don't leave it pending,
	     otherwise we would report it to GDB as a spurious
	     SIGTRAP.  */
	  gdb_assert (lwp->status_pending_p);
	  if (WIFSTOPPED (lwp->status_pending)
	      && WSTOPSIG (lwp->status_pending) == SIGTRAP)
	    {
	      thread_info *thread = get_lwp_thread (lwp);
	      if (thread->last_resume_kind != resume_step)
		{
		  threads_debug_printf ("detach: discard step-over SIGTRAP");

		  lwp->status_pending_p = 0;
		  lwp->status_pending = 0;
		  resume_one_lwp (lwp, lwp->stepping, 0, NULL);
		}
	      else
		threads_debug_printf
		  ("detach: resume_step, not discarding step-over SIGTRAP");
	    }
	}

      step_over_bkpt = null_ptid;
      unsuspend_all_lwps (lwp);
    }
}
/* Act on THREAD's resume request (set up earlier by
   linux_set_resume_request): for resume_stop requests, make sure a
   SIGSTOP is queued; otherwise enqueue any requested signal and,
   unless LEAVE_ALL_STOPPED or the thread has a pending status /
   is suspended, actually resume it via proceed_one_lwp.  */

void
linux_process_target::resume_one_thread (thread_info *thread,
					 bool leave_all_stopped)
{
  struct lwp_info *lwp = get_thread_lwp (thread);
  int leave_pending;

  if (lwp->resume == NULL)
    return;

  if (lwp->resume->kind == resume_stop)
    {
      threads_debug_printf ("resume_stop request for LWP %ld",
			    lwpid_of (thread));

      if (!lwp->stopped)
	{
	  threads_debug_printf ("stopping LWP %ld", lwpid_of (thread));

	  /* Stop the thread, and wait for the event asynchronously,
	     through the event loop.  */
	  send_sigstop (lwp);
	}
      else
	{
	  threads_debug_printf ("already stopped LWP %ld", lwpid_of (thread));

	  /* The LWP may have been stopped in an internal event that
	     was not meant to be notified back to GDB (e.g., gdbserver
	     breakpoint), so we should be reporting a stop event in
	     this case too.  */

	  /* If the thread already has a pending SIGSTOP, this is a
	     no-op.  Otherwise, something later will presumably resume
	     the thread and this will cause it to cancel any pending
	     operation, due to last_resume_kind == resume_stop.  If
	     the thread already has a pending status to report, we
	     will still report it the next time we wait - see
	     status_pending_p_callback.  */

	  /* If we already have a pending signal to report, then
	     there's no need to queue a SIGSTOP, as this means we're
	     midway through moving the LWP out of the jumppad, and we
	     will report the pending signal as soon as that is
	     finished.  */
	  if (lwp->pending_signals_to_report.empty ())
	    send_sigstop (lwp);
	}

      /* For stop requests, we're done.  */
      lwp->resume = NULL;
      thread->last_status.set_ignore ();
      return;
    }

  /* If this thread which is about to be resumed has a pending status,
     then don't resume it - we can just report the pending status.
     Likewise if it is suspended, because e.g., another thread is
     stepping past a breakpoint.  Make sure to queue any signals that
     would otherwise be sent.  In all-stop mode, we do this decision
     based on if *any* thread has a pending status.  If there's a
     thread that needs the step-over-breakpoint dance, then don't
     resume any other thread but that particular one.  */
  leave_pending = (lwp->suspended
		   || lwp->status_pending_p
		   || leave_all_stopped);

  /* If we have a new signal, enqueue the signal.  */
  if (lwp->resume->sig != 0)
    {
      siginfo_t info, *info_p;

      /* If this is the same signal we were previously stopped by,
	 make sure to queue its siginfo.  */
      if (WIFSTOPPED (lwp->last_status)
	  && WSTOPSIG (lwp->last_status) == lwp->resume->sig
	  && ptrace (PTRACE_GETSIGINFO, lwpid_of (thread),
		     (PTRACE_TYPE_ARG3) 0, &info) == 0)
	info_p = &info;
      else
	info_p = NULL;

      enqueue_pending_signal (lwp, lwp->resume->sig, info_p);
    }

  if (!leave_pending)
    {
      threads_debug_printf ("resuming LWP %ld", lwpid_of (thread));

      proceed_one_lwp (thread, NULL);
    }
  else
    threads_debug_printf ("leaving LWP %ld stopped", lwpid_of (thread));

  thread->last_status.set_ignore ();
  lwp->resume = NULL;
}
/* Implementation of the resume target op: record the resume requests
   in every thread, then either (a) leave everything stopped because
   some thread has a pending status to report, (b) start a step-over
   for a thread stopped on a breakpoint, or (c) resume the threads
   per their requests.  */

void
linux_process_target::resume (thread_resume *resume_info, size_t n)
{
  struct thread_info *need_step_over = NULL;

  THREADS_SCOPED_DEBUG_ENTER_EXIT;

  for_each_thread ([&] (thread_info *thread)
    {
      linux_set_resume_request (thread, resume_info, n);
    });

  /* If there is a thread which would otherwise be resumed, which has
     a pending status, then don't resume any threads - we can just
     report the pending status.  Make sure to queue any signals that
     would otherwise be sent.  In non-stop mode, we'll apply this
     logic to each thread individually.  We consume all pending events
     before considering to start a step-over (in all-stop).  */
  bool any_pending = false;
  if (!non_stop)
    any_pending = find_thread ([this] (thread_info *thread)
		    {
		      return resume_status_pending (thread);
		    }) != nullptr;

  /* If there is a thread which would otherwise be resumed, which is
     stopped at a breakpoint that needs stepping over, then don't
     resume any threads - have it step over the breakpoint with all
     other threads stopped, then resume all threads again.  Make sure
     to queue any signals that would otherwise be delivered or
     queued.  */
  if (!any_pending && low_supports_breakpoints ())
    need_step_over = find_thread ([this] (thread_info *thread)
		       {
			 return thread_needs_step_over (thread);
		       });

  bool leave_all_stopped = (need_step_over != NULL || any_pending);

  if (need_step_over != NULL)
    threads_debug_printf ("Not resuming all, need step over");
  else if (any_pending)
    threads_debug_printf ("Not resuming, all-stop and found "
			  "an LWP with pending status");
  else
    threads_debug_printf ("Resuming, no pending status or step over needed");

  /* Even if we're leaving threads stopped, queue all signals we'd
     otherwise deliver.  */
  for_each_thread ([&] (thread_info *thread)
    {
      resume_one_thread (thread, leave_all_stopped);
    });

  if (need_step_over)
    start_step_over (get_thread_lwp (need_step_over));

  /* We may have events that were pending that can/should be sent to
     the client now.  Trigger a linux_wait call.  */
  if (target_is_async_p ())
    async_file_mark ();
}
/* Resume THREAD (continue or step, per its last resume kind), unless
   it is EXCEPT, already running, wanted stopped by the client, has a
   pending status, or is suspended.  Requeues a SIGSTOP when the
   client wants the thread stopped but no stop has been reported
   yet.  */

void
linux_process_target::proceed_one_lwp (thread_info *thread, lwp_info *except)
{
  struct lwp_info *lwp = get_thread_lwp (thread);
  int step;

  if (lwp == except)
    return;

  threads_debug_printf ("lwp %ld", lwpid_of (thread));

  if (!lwp->stopped)
    {
      threads_debug_printf (" LWP %ld already running", lwpid_of (thread));
      return;
    }

  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind () != TARGET_WAITKIND_IGNORE)
    {
      threads_debug_printf (" client wants LWP to remain %ld stopped",
			    lwpid_of (thread));
      return;
    }

  if (lwp->status_pending_p)
    {
      threads_debug_printf (" LWP %ld has pending status, leaving stopped",
			    lwpid_of (thread));
      return;
    }

  gdb_assert (lwp->suspended >= 0);

  if (lwp->suspended)
    {
      threads_debug_printf (" LWP %ld is suspended", lwpid_of (thread));
      return;
    }

  if (thread->last_resume_kind == resume_stop
      && lwp->pending_signals_to_report.empty ()
      && (lwp->collecting_fast_tracepoint
	  == fast_tpoint_collect_result::not_collecting))
    {
      /* We haven't reported this LWP as stopped yet (otherwise, the
	 last_status.kind check above would catch it, and we wouldn't
	 reach here.  This LWP may have been momentarily paused by a
	 stop_all_lwps call while handling for example, another LWP's
	 step-over.  In that case, the pending expected SIGSTOP signal
	 that was queued at vCont;t handling time will have already
	 been consumed by wait_for_sigstop, and so we need to requeue
	 another one here.  Note that if the LWP already has a SIGSTOP
	 pending, this is a no-op.  */

      threads_debug_printf
	("Client wants LWP %ld to stop. Making sure it has a SIGSTOP pending",
	 lwpid_of (thread));

      send_sigstop (lwp);
    }

  if (thread->last_resume_kind == resume_step)
    {
      threads_debug_printf (" stepping LWP %ld, client wants it stepping",
			    lwpid_of (thread));

      /* If resume_step is requested by GDB, install single-step
	 breakpoints when the thread is about to be actually resumed if
	 the single-step breakpoints weren't removed.  */
      if (supports_software_single_step ()
	  && !has_single_step_breakpoints (thread))
	install_software_single_step_breakpoints (lwp);

      step = maybe_hw_step (thread);
    }
  else if (lwp->bp_reinsert != 0)
    {
      threads_debug_printf (" stepping LWP %ld, reinsert set",
			    lwpid_of (thread));

      step = maybe_hw_step (thread);
    }
  else
    step = 0;

  resume_one_lwp (lwp, step, 0, NULL);
}
4912 void
4913 linux_process_target::unsuspend_and_proceed_one_lwp (thread_info *thread,
4914 lwp_info *except)
4916 struct lwp_info *lwp = get_thread_lwp (thread);
4918 if (lwp == except)
4919 return;
4921 lwp_suspended_decr (lwp);
4923 proceed_one_lwp (thread, except);
4926 void
4927 linux_process_target::proceed_all_lwps ()
4929 struct thread_info *need_step_over;
4931 /* If there is a thread which would otherwise be resumed, which is
4932 stopped at a breakpoint that needs stepping over, then don't
4933 resume any threads - have it step over the breakpoint with all
4934 other threads stopped, then resume all threads again. */
4936 if (low_supports_breakpoints ())
4938 need_step_over = find_thread ([this] (thread_info *thread)
4940 return thread_needs_step_over (thread);
4943 if (need_step_over != NULL)
4945 threads_debug_printf ("found thread %ld needing a step-over",
4946 lwpid_of (need_step_over));
4948 start_step_over (get_thread_lwp (need_step_over));
4949 return;
4953 threads_debug_printf ("Proceeding, no step-over needed");
4955 for_each_thread ([this] (thread_info *thread)
4957 proceed_one_lwp (thread, NULL);
4961 void
4962 linux_process_target::unstop_all_lwps (int unsuspend, lwp_info *except)
4964 THREADS_SCOPED_DEBUG_ENTER_EXIT;
4966 if (except)
4967 threads_debug_printf ("except=(LWP %ld)",
4968 lwpid_of (get_lwp_thread (except)));
4969 else
4970 threads_debug_printf ("except=nullptr");
4972 if (unsuspend)
4973 for_each_thread ([&] (thread_info *thread)
4975 unsuspend_and_proceed_one_lwp (thread, except);
4977 else
4978 for_each_thread ([&] (thread_info *thread)
4980 proceed_one_lwp (thread, except);
4985 #ifdef HAVE_LINUX_REGSETS
4987 #define use_linux_regsets 1
4989 /* Returns true if REGSET has been disabled. */
4991 static int
4992 regset_disabled (struct regsets_info *info, struct regset_info *regset)
4994 return (info->disabled_regsets != NULL
4995 && info->disabled_regsets[regset - info->regsets]);
4998 /* Disable REGSET. */
5000 static void
5001 disable_regset (struct regsets_info *info, struct regset_info *regset)
5003 int dr_offset;
5005 dr_offset = regset - info->regsets;
5006 if (info->disabled_regsets == NULL)
5007 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
5008 info->disabled_regsets[dr_offset] = 1;
5011 static int
5012 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
5013 struct regcache *regcache)
5015 struct regset_info *regset;
5016 int saw_general_regs = 0;
5017 int pid;
5018 struct iovec iov;
5020 pid = lwpid_of (current_thread);
5021 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5023 void *buf, *data;
5024 int nt_type, res;
5026 if (regset->size == 0 || regset_disabled (regsets_info, regset))
5027 continue;
5029 buf = xmalloc (regset->size);
5031 nt_type = regset->nt_type;
5032 if (nt_type)
5034 iov.iov_base = buf;
5035 iov.iov_len = regset->size;
5036 data = (void *) &iov;
5038 else
5039 data = buf;
5041 #ifndef __sparc__
5042 res = ptrace (regset->get_request, pid,
5043 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5044 #else
5045 res = ptrace (regset->get_request, pid, data, nt_type);
5046 #endif
5047 if (res < 0)
5049 if (errno == EIO
5050 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
5052 /* If we get EIO on a regset, or an EINVAL and the regset is
5053 optional, do not try it again for this process mode. */
5054 disable_regset (regsets_info, regset);
5056 else if (errno == ENODATA)
5058 /* ENODATA may be returned if the regset is currently
5059 not "active". This can happen in normal operation,
5060 so suppress the warning in this case. */
5062 else if (errno == ESRCH)
5064 /* At this point, ESRCH should mean the process is
5065 already gone, in which case we simply ignore attempts
5066 to read its registers. */
5068 else
5070 char s[256];
5071 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
5072 pid);
5073 perror (s);
5076 else
5078 if (regset->type == GENERAL_REGS)
5079 saw_general_regs = 1;
5080 regset->store_function (regcache, buf);
5082 free (buf);
5084 if (saw_general_regs)
5085 return 0;
5086 else
5087 return 1;
/* Write all regsets described by REGSETS_INFO from REGCACHE back to
   the current thread's inferior using ptrace regset requests.
   Returns 0 if the general registers were among the stored regsets
   (or the process is already gone), 1 otherwise.  */

static int
regsets_store_inferior_registers (struct regsets_info *regsets_info,
				  struct regcache *regcache)
{
  struct regset_info *regset;
  int saw_general_regs = 0;
  int pid;
  struct iovec iov;

  pid = lwpid_of (current_thread);
  for (regset = regsets_info->regsets; regset->size >= 0; regset++)
    {
      void *buf, *data;
      int nt_type, res;

      if (regset->size == 0 || regset_disabled (regsets_info, regset)
	  || regset->fill_function == NULL)
	continue;

      buf = xmalloc (regset->size);

      /* First fill the buffer with the current register set contents,
	 in case there are any items in the kernel's regset that are
	 not in gdbserver's regcache.  */

      nt_type = regset->nt_type;
      if (nt_type)
	{
	  /* PTRACE_GETREGSET/SETREGSET take an iovec describing the
	     buffer.  */
	  iov.iov_base = buf;
	  iov.iov_len = regset->size;
	  data = (void *) &iov;
	}
      else
	data = buf;

#ifndef __sparc__
      res = ptrace (regset->get_request, pid,
		    (PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
      res = ptrace (regset->get_request, pid, data, nt_type);
#endif

      if (res == 0)
	{
	  /* Then overlay our cached registers on that.  */
	  regset->fill_function (regcache, buf);

	  /* Only now do we write the register set.  */
#ifndef __sparc__
	  res = ptrace (regset->set_request, pid,
			(PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
	  res = ptrace (regset->set_request, pid, data, nt_type);
#endif
	}

      if (res < 0)
	{
	  if (errno == EIO
	      || (errno == EINVAL && regset->type == OPTIONAL_REGS))
	    {
	      /* If we get EIO on a regset, or an EINVAL and the regset is
		 optional, do not try it again for this process mode.  */
	      disable_regset (regsets_info, regset);
	    }
	  else if (errno == ESRCH)
	    {
	      /* At this point, ESRCH should mean the process is
		 already gone, in which case we simply ignore attempts
		 to change its registers.  See also the related
		 comment in resume_one_lwp.  */
	      free (buf);
	      return 0;
	    }
	  else
	    {
	      perror ("Warning: ptrace(regsets_store_inferior_registers)");
	    }
	}
      else if (regset->type == GENERAL_REGS)
	saw_general_regs = 1;
      free (buf);
    }
  if (saw_general_regs)
    return 0;
  else
    return 1;
}
5179 #else /* !HAVE_LINUX_REGSETS */
5181 #define use_linux_regsets 0
5182 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5183 #define regsets_store_inferior_registers(regsets_info, regcache) 1
5185 #endif
5187 /* Return 1 if register REGNO is supported by one of the regset ptrace
5188 calls or 0 if it has to be transferred individually. */
5190 static int
5191 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
5193 unsigned char mask = 1 << (regno % 8);
5194 size_t index = regno / 8;
5196 return (use_linux_regsets
5197 && (regs_info->regset_bitmap == NULL
5198 || (regs_info->regset_bitmap[index] & mask) != 0));
5201 #ifdef HAVE_LINUX_USRREGS
5203 static int
5204 register_addr (const struct usrregs_info *usrregs, int regnum)
5206 int addr;
5208 if (regnum < 0 || regnum >= usrregs->num_regs)
5209 error ("Invalid register number %d.", regnum);
5211 addr = usrregs->regmap[regnum];
5213 return addr;
/* Fetch register REGNO from the inferior's USER area with
   PTRACE_PEEKUSER, one word at a time, and supply it to REGCACHE.
   Supplies NULL (register unavailable) on a ptrace error.  */

void
linux_process_target::fetch_register (const usrregs_info *usrregs,
				      regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= usrregs->num_regs)
    return;
  if (low_cannot_fetch_register (regno))
    return;

  regaddr = register_addr (usrregs, regno);
  if (regaddr == -1)
    return;

  /* Round the register size up to a whole number of ptrace words.  */
  size = ((register_size (regcache->tdesc, regno)
	   + sizeof (PTRACE_XFER_TYPE) - 1)
	  & -sizeof (PTRACE_XFER_TYPE));
  buf = (char *) alloca (size);

  pid = lwpid_of (current_thread);
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      *(PTRACE_XFER_TYPE *) (buf + i) =
	ptrace (PTRACE_PEEKUSER, pid,
		/* Coerce to a uintptr_t first to avoid potential gcc warning
		   of coercing an 8 byte integer to a 4 byte pointer.  */
		(PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
      regaddr += sizeof (PTRACE_XFER_TYPE);
      if (errno != 0)
	{
	  /* Mark register REGNO unavailable.  */
	  supply_register (regcache, regno, NULL);
	  return;
	}
    }

  low_supply_ptrace_register (regcache, regno, buf);
}
/* Store register REGNO from REGCACHE into the inferior's USER area
   with PTRACE_POKEUSER, one word at a time.  Silently ignores ESRCH
   (process already gone); errors out on other ptrace failures.  */

void
linux_process_target::store_register (const usrregs_info *usrregs,
				      regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= usrregs->num_regs)
    return;
  if (low_cannot_store_register (regno))
    return;

  regaddr = register_addr (usrregs, regno);
  if (regaddr == -1)
    return;

  /* Round the register size up to a whole number of ptrace words.  */
  size = ((register_size (regcache->tdesc, regno)
	   + sizeof (PTRACE_XFER_TYPE) - 1)
	  & -sizeof (PTRACE_XFER_TYPE));
  buf = (char *) alloca (size);
  memset (buf, 0, size);

  low_collect_ptrace_register (regcache, regno, buf);

  pid = lwpid_of (current_thread);
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      ptrace (PTRACE_POKEUSER, pid,
	      /* Coerce to a uintptr_t first to avoid potential gcc warning
		 about coercing an 8 byte integer to a 4 byte pointer.  */
	      (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
	      (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
      if (errno != 0)
	{
	  /* At this point, ESRCH should mean the process is
	     already gone, in which case we simply ignore attempts
	     to change its registers.  See also the related
	     comment in resume_one_lwp.  */
	  if (errno == ESRCH)
	    return;

	  if (!low_cannot_store_register (regno))
	    error ("writing register %d: %s", regno, safe_strerror (errno));
	}
      regaddr += sizeof (PTRACE_XFER_TYPE);
    }
}
5312 #endif /* HAVE_LINUX_USRREGS */
/* Default implementation of the low_collect_ptrace_register hook: no
   arch-specific massaging, just copy the register out of REGCACHE
   into BUF.  */

void
linux_process_target::low_collect_ptrace_register (regcache *regcache,
						   int regno, char *buf)
{
  collect_register (regcache, regno, buf);
}
/* Default implementation of the low_supply_ptrace_register hook: no
   arch-specific massaging, just supply BUF to REGCACHE as the value
   of register REGNO.  */

void
linux_process_target::low_supply_ptrace_register (regcache *regcache,
						  int regno, const char *buf)
{
  supply_register (regcache, regno, buf);
}
5328 void
5329 linux_process_target::usr_fetch_inferior_registers (const regs_info *regs_info,
5330 regcache *regcache,
5331 int regno, int all)
5333 #ifdef HAVE_LINUX_USRREGS
5334 struct usrregs_info *usr = regs_info->usrregs;
5336 if (regno == -1)
5338 for (regno = 0; regno < usr->num_regs; regno++)
5339 if (all || !linux_register_in_regsets (regs_info, regno))
5340 fetch_register (usr, regcache, regno);
5342 else
5343 fetch_register (usr, regcache, regno);
5344 #endif
5347 void
5348 linux_process_target::usr_store_inferior_registers (const regs_info *regs_info,
5349 regcache *regcache,
5350 int regno, int all)
5352 #ifdef HAVE_LINUX_USRREGS
5353 struct usrregs_info *usr = regs_info->usrregs;
5355 if (regno == -1)
5357 for (regno = 0; regno < usr->num_regs; regno++)
5358 if (all || !linux_register_in_regsets (regs_info, regno))
5359 store_register (usr, regcache, regno);
5361 else
5362 store_register (usr, regcache, regno);
5363 #endif
/* Implementation of the fetch_registers target op: fetch REGNO (or
   all registers when REGNO is -1) into REGCACHE, trying the
   low_fetch_register hook first, then regsets, then the USER area
   for anything the regsets did not cover.  */

void
linux_process_target::fetch_registers (regcache *regcache, int regno)
{
  int use_regsets;
  int all = 0;
  const regs_info *regs_info = get_regs_info ();

  if (regno == -1)
    {
      if (regs_info->usrregs != NULL)
	for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
	  low_fetch_register (regcache, regno);

      /* ALL is nonzero when regsets did not include the general
	 registers, in which case the USER area must supply them.  */
      all = regsets_fetch_inferior_registers (regs_info->regsets_info,
					      regcache);
      if (regs_info->usrregs != NULL)
	usr_fetch_inferior_registers (regs_info, regcache, -1, all);
    }
  else
    {
      if (low_fetch_register (regcache, regno))
	return;

      use_regsets = linux_register_in_regsets (regs_info, regno);
      if (use_regsets)
	all = regsets_fetch_inferior_registers (regs_info->regsets_info,
						regcache);
      if ((!use_regsets || all) && regs_info->usrregs != NULL)
	usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
    }
}
/* Implementation of the store_registers target op: write REGNO (or
   all registers when REGNO is -1) from REGCACHE to the inferior,
   using regsets first and the USER area for whatever they did not
   cover.  */

void
linux_process_target::store_registers (regcache *regcache, int regno)
{
  int use_regsets;
  int all = 0;
  const regs_info *regs_info = get_regs_info ();

  if (regno == -1)
    {
      /* ALL is nonzero when regsets did not include the general
	 registers, in which case the USER area must write them.  */
      all = regsets_store_inferior_registers (regs_info->regsets_info,
					      regcache);
      if (regs_info->usrregs != NULL)
	usr_store_inferior_registers (regs_info, regcache, regno, all);
    }
  else
    {
      use_regsets = linux_register_in_regsets (regs_info, regno);
      if (use_regsets)
	all = regsets_store_inferior_registers (regs_info->regsets_info,
						regcache);
      if ((!use_regsets || all) && regs_info->usrregs != NULL)
	usr_store_inferior_registers (regs_info, regcache, regno, 1);
    }
}
/* Default implementation of the low_fetch_register hook: returns
   false, meaning the generic code should fetch REGNO itself.
   Architectures override this to fetch special registers.  */

bool
linux_process_target::low_fetch_register (regcache *regcache, int regno)
{
  return false;
}
/* A wrapper for the read_memory target op, for use as a plain
   function pointer.  Returns 0 on success or an errno value.  */

static int
linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
{
  return the_target->read_memory (memaddr, myaddr, len);
}
/* Helper for read_memory/write_memory using /proc/PID/mem.  Because
   we can use a single read/write call, this can be much more
   efficient than banging away at PTRACE_PEEKTEXT.  Also, unlike
   PTRACE_PEEKTEXT/PTRACE_POKETEXT, this works with running threads.
   One and only one of READBUF and WRITEBUF is non-null.  If READBUF is
   not null, then we're reading, otherwise we're writing.  Returns 0
   on success, or an errno value on failure.  */

static int
proc_xfer_memory (CORE_ADDR memaddr, unsigned char *readbuf,
		  const gdb_byte *writebuf, int len)
{
  gdb_assert ((readbuf == nullptr) != (writebuf == nullptr));

  process_info *proc = current_process ();

  int fd = proc->priv->mem_fd;
  if (fd == -1)
    return EIO;

  /* Transfer in a loop; read/write may do partial transfers.  */
  while (len > 0)
    {
      int bytes;

      /* Use pread64/pwrite64 if available, since they save a syscall
	 and can handle 64-bit offsets even on 32-bit platforms (for
	 instance, SPARC debugging a SPARC64 application).  But only
	 use them if the offset isn't so high that when cast to off_t
	 it'd be negative, as seen on SPARC64.  pread64/pwrite64
	 outright reject such offsets.  lseek does not.  */
#ifdef HAVE_PREAD64
      if ((off_t) memaddr >= 0)
	bytes = (readbuf != nullptr
		 ? pread64 (fd, readbuf, len, memaddr)
		 : pwrite64 (fd, writebuf, len, memaddr));
      else
#endif
	{
	  bytes = -1;
	  if (lseek (fd, memaddr, SEEK_SET) != -1)
	    bytes = (readbuf != nullptr
		     ? read (fd, readbuf, len)
		     : write (fd, writebuf, len));
	}

      if (bytes < 0)
	return errno;
      else if (bytes == 0)
	{
	  /* EOF means the address space is gone, the whole process
	     exited or execed.  */
	  return EIO;
	}

      /* Advance past the bytes actually transferred.  */
      memaddr += bytes;
      if (readbuf != nullptr)
	readbuf += bytes;
      else
	writebuf += bytes;
      len -= bytes;
    }

  return 0;
}
/* Copy LEN bytes from the inferior's memory at MEMADDR to debugger
   memory at MYADDR.  Returns 0 on success, or an errno value on
   failure.  */

int
linux_process_target::read_memory (CORE_ADDR memaddr,
				   unsigned char *myaddr, int len)
{
  return proc_xfer_memory (memaddr, myaddr, nullptr, len);
}
/* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
   memory at MEMADDR.  On failure (cannot write to the inferior)
   returns the value of errno.  Always succeeds if LEN is zero.  */

int
linux_process_target::write_memory (CORE_ADDR memaddr,
				    const unsigned char *myaddr, int len)
{
  if (debug_threads)
    {
      /* Dump up to four bytes.  */
      char str[4 * 2 + 1];
      char *p = str;
      int dump = len < 4 ? len : 4;

      for (int i = 0; i < dump; i++)
	{
	  sprintf (p, "%02x", myaddr[i]);
	  p += 2;
	}
      *p = '\0';

      threads_debug_printf ("Writing %s to 0x%08lx in process %d",
			    str, (long) memaddr, current_process ()->pid);
    }

  return proc_xfer_memory (memaddr, nullptr, myaddr, len);
}
5537 void
5538 linux_process_target::look_up_symbols ()
5540 #ifdef USE_THREAD_DB
5541 struct process_info *proc = current_process ();
5543 if (proc->priv->thread_db != NULL)
5544 return;
5546 thread_db_init ();
5547 #endif
5550 void
5551 linux_process_target::request_interrupt ()
5553 /* Send a SIGINT to the process group. This acts just like the user
5554 typed a ^C on the controlling terminal. */
5555 int res = ::kill (-signal_pid, SIGINT);
5556 if (res == -1)
5557 warning (_("Sending SIGINT to process group of pid %ld failed: %s"),
5558 signal_pid, safe_strerror (errno));
/* The Linux target supports reading the inferior's auxiliary vector
   (via /proc/PID/auxv, see read_auxv below).  */

bool
linux_process_target::supports_read_auxv ()
{
  return true;
}
5567 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5568 to debugger memory starting at MYADDR. */
5571 linux_process_target::read_auxv (int pid, CORE_ADDR offset,
5572 unsigned char *myaddr, unsigned int len)
5574 char filename[PATH_MAX];
5575 int fd, n;
5577 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5579 fd = open (filename, O_RDONLY);
5580 if (fd < 0)
5581 return -1;
5583 if (offset != (CORE_ADDR) 0
5584 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5585 n = -1;
5586 else
5587 n = read (fd, myaddr, len);
5589 close (fd);
5591 return n;
/* Insert a breakpoint/watchpoint of kind TYPE at ADDR.  Software
   breakpoints are handled generically by writing a trap instruction;
   everything else is delegated to the architecture low target.  */

int
linux_process_target::insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
				    int size, raw_breakpoint *bp)
{
  if (type == raw_bkpt_type_sw)
    return insert_memory_breakpoint (bp);
  else
    return low_insert_point (type, addr, size, bp);
}

int
linux_process_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
					int size, raw_breakpoint *bp)
{
  /* Unsupported (see target.h).  */
  return 1;
}
/* Remove a breakpoint/watchpoint of kind TYPE at ADDR; the mirror of
   insert_point above.  */

int
linux_process_target::remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
				    int size, raw_breakpoint *bp)
{
  if (type == raw_bkpt_type_sw)
    return remove_memory_breakpoint (bp);
  else
    return low_remove_point (type, addr, size, bp);
}

int
linux_process_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
					int size, raw_breakpoint *bp)
{
  /* Unsupported (see target.h).  */
  return 1;
}
/* Implement the stopped_by_sw_breakpoint target_ops
   method.  */

bool
linux_process_target::stopped_by_sw_breakpoint ()
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
}

/* Implement the supports_stopped_by_sw_breakpoint target_ops
   method.  */

bool
linux_process_target::supports_stopped_by_sw_breakpoint ()
{
  /* Only available when the kernel identifies the trap via siginfo
     (USE_SIGTRAP_SIGINFO, from nat/linux-ptrace.h).  */
  return USE_SIGTRAP_SIGINFO;
}

/* Implement the stopped_by_hw_breakpoint target_ops
   method.  */

bool
linux_process_target::stopped_by_hw_breakpoint ()
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
}

/* Implement the supports_stopped_by_hw_breakpoint target_ops
   method.  */

bool
linux_process_target::supports_stopped_by_hw_breakpoint ()
{
  return USE_SIGTRAP_SIGINFO;
}

/* Implement the supports_hardware_single_step target_ops method.  */

bool
linux_process_target::supports_hardware_single_step ()
{
  return true;
}

bool
linux_process_target::stopped_by_watchpoint ()
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
}

CORE_ADDR
linux_process_target::stopped_data_address ()
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  /* Address recorded for the LWP when the watchpoint stop was
     processed.  */
  return lwp->stopped_data_address;
}
/* This is only used for targets that define PT_TEXT_ADDR,
   PT_DATA_ADDR and PT_TEXT_END_ADDR.  If those are not defined, supposedly
   the target has different ways of acquiring this information, like
   loadmaps.  */

bool
linux_process_target::supports_read_offsets ()
{
#ifdef SUPPORTS_READ_OFFSETS
  return true;
#else
  return false;
#endif
}

/* Under uClinux, programs are loaded at non-zero offsets, which we need
   to tell gdb about.  Returns 1 on success, 0 on failure.  */

int
linux_process_target::read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
{
#ifdef SUPPORTS_READ_OFFSETS
  unsigned long text, text_end, data;
  int pid = lwpid_of (current_thread);

  /* PTRACE_PEEKUSER returns the value directly, so errno must be
     cleared first to distinguish failure from a legitimate -1.  */
  errno = 0;

  text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
		 (PTRACE_TYPE_ARG4) 0);
  text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
		     (PTRACE_TYPE_ARG4) 0);
  data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
		 (PTRACE_TYPE_ARG4) 0);

  if (errno == 0)
    {
      /* Both text and data offsets produced at compile-time (and so
	 used by gdb) are relative to the beginning of the program,
	 with the data segment immediately following the text segment.
	 However, the actual runtime layout in memory may put the data
	 somewhere else, so when we send gdb a data base-address, we
	 use the real data base address and subtract the compile-time
	 data base-address from it (which is just the length of the
	 text segment).  BSS immediately follows data in both
	 cases.  */
      *text_p = text;
      *data_p = data - (text_end - text);

      return 1;
    }
  return 0;
#else
  gdb_assert_not_reached ("target op read_offsets not supported");
#endif
}
bool
linux_process_target::supports_get_tls_address ()
{
  /* TLS address lookup is provided through libthread_db.  */
#ifdef USE_THREAD_DB
  return true;
#else
  return false;
#endif
}

/* Compute the thread-local storage address of (LOAD_MODULE, OFFSET)
   for THREAD; result goes into *ADDRESS.  Returns 0 on success.  */

int
linux_process_target::get_tls_address (thread_info *thread,
				       CORE_ADDR offset,
				       CORE_ADDR load_module,
				       CORE_ADDR *address)
{
#ifdef USE_THREAD_DB
  return thread_db_get_tls_address (thread, offset, load_module, address);
#else
  return -1;
#endif
}
bool
linux_process_target::supports_qxfer_osdata ()
{
  return true;
}

/* Handle qXfer:osdata:read.  WRITEBUF is not passed through: the
   osdata object is read-only.  */

int
linux_process_target::qxfer_osdata (const char *annex,
				    unsigned char *readbuf,
				    unsigned const char *writebuf,
				    CORE_ADDR offset, int len)
{
  return linux_common_xfer_osdata (annex, readbuf, offset, len);
}
5788 void
5789 linux_process_target::siginfo_fixup (siginfo_t *siginfo,
5790 gdb_byte *inf_siginfo, int direction)
5792 bool done = low_siginfo_fixup (siginfo, inf_siginfo, direction);
5794 /* If there was no callback, or the callback didn't do anything,
5795 then just do a straight memcpy. */
5796 if (!done)
5798 if (direction == 1)
5799 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
5800 else
5801 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
/* Default low-target implementation: no conversion performed (see
   siginfo_fixup, which falls back to memcpy when this returns
   false).  */

bool
linux_process_target::low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
					 int direction)
{
  return false;
}

bool
linux_process_target::supports_qxfer_siginfo ()
{
  return true;
}
/* Handle qXfer:siginfo:read/write: read or write the siginfo of the
   current LWP through PTRACE_GETSIGINFO/PTRACE_SETSIGINFO, at byte
   OFFSET for LEN bytes.  Returns the number of bytes transferred, or
   -1 on error.  */

int
linux_process_target::qxfer_siginfo (const char *annex,
				     unsigned char *readbuf,
				     unsigned const char *writebuf,
				     CORE_ADDR offset, int len)
{
  int pid;
  siginfo_t siginfo;
  gdb_byte inf_siginfo[sizeof (siginfo_t)];

  if (current_thread == NULL)
    return -1;

  pid = lwpid_of (current_thread);

  threads_debug_printf ("%s siginfo for lwp %d.",
			readbuf != NULL ? "Reading" : "Writing",
			pid);

  if (offset >= sizeof (siginfo))
    return -1;

  /* Even a write needs the current siginfo first, since only LEN
     bytes starting at OFFSET are replaced.  */
  if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
    return -1;

  /* When GDBSERVER is built as a 64-bit application, ptrace writes into
     SIGINFO an object with 64-bit layout.  Since debugging a 32-bit
     inferior with a 64-bit GDBSERVER should look the same as debugging it
     with a 32-bit GDBSERVER, we need to convert it.  */
  siginfo_fixup (&siginfo, inf_siginfo, 0);

  /* Clamp the transfer to the end of the object.  */
  if (offset + len > sizeof (siginfo))
    len = sizeof (siginfo) - offset;

  if (readbuf != NULL)
    memcpy (readbuf, inf_siginfo + offset, len);
  else
    {
      memcpy (inf_siginfo + offset, writebuf, len);

      /* Convert back to ptrace layout before flushing it out.  */
      siginfo_fixup (&siginfo, inf_siginfo, 1);

      if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
	return -1;
    }

  return len;
}
/* SIGCHLD handler that serves two purposes: In non-stop/async mode,
   so we notice when children change state; as the handler for the
   sigsuspend in my_waitpid.  Only async-signal-safe functions may be
   called from here.  */

static void
sigchld_handler (int signo)
{
  int old_errno = errno;

  if (debug_threads)
    {
      do
	{
	  /* Use the async signal safe debug function.  */
	  if (debug_write ("sigchld_handler\n",
			   sizeof ("sigchld_handler\n") - 1) < 0)
	    break; /* just ignore */
	} while (0);
    }

  if (target_is_async_p ())
    async_file_mark (); /* trigger a linux_wait */

  /* Restore errno so the interrupted code does not observe it being
     clobbered by the calls above.  */
  errno = old_errno;
}
bool
linux_process_target::supports_non_stop ()
{
  return true;
}
/* Enable or disable async (non-stop) mode.  Returns the previous
   async state.  */

bool
linux_process_target::async (bool enable)
{
  bool previous = target_is_async_p ();

  threads_debug_printf ("async (%d), previous=%d",
			enable, previous);

  if (previous != enable)
    {
      sigset_t mask;
      sigemptyset (&mask);
      sigaddset (&mask, SIGCHLD);

      /* Block SIGCHLD while flipping the async machinery, so
	 sigchld_handler (which uses the event pipe via
	 async_file_mark) cannot run against a half-set-up state.  */
      gdb_sigmask (SIG_BLOCK, &mask, NULL);

      if (enable)
	{
	  if (!linux_event_pipe.open_pipe ())
	    {
	      gdb_sigmask (SIG_UNBLOCK, &mask, NULL);

	      warning ("creating event pipe failed.");
	      return previous;
	    }

	  /* Register the event loop handler.  */
	  add_file_handler (linux_event_pipe.event_fd (),
			    handle_target_event, NULL,
			    "linux-low");

	  /* Always trigger a linux_wait.  */
	  async_file_mark ();
	}
      else
	{
	  delete_file_handler (linux_event_pipe.event_fd ());

	  linux_event_pipe.close_pipe ();
	}

      gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
    }

  return previous;
}
5948 linux_process_target::start_non_stop (bool nonstop)
5950 /* Register or unregister from event-loop accordingly. */
5951 target_async (nonstop);
5953 if (target_is_async_p () != (nonstop != false))
5954 return -1;
5956 return 0;
bool
linux_process_target::supports_multi_process ()
{
  return true;
}

/* Check if fork events are supported.  */

bool
linux_process_target::supports_fork_events ()
{
  return true;
}

/* Check if vfork events are supported.  */

bool
linux_process_target::supports_vfork_events ()
{
  return true;
}

/* Return the set of supported thread options.  */

gdb_thread_options
linux_process_target::supported_thread_options ()
{
  return GDB_THREAD_OPTION_CLONE | GDB_THREAD_OPTION_EXIT;
}

/* Check if exec events are supported.  */

bool
linux_process_target::supports_exec_events ()
{
  return true;
}
/* Target hook for 'handle_new_gdb_connection'.  Causes a reset of the
   ptrace flags for all inferiors.  This is in case the new GDB connection
   doesn't support the same set of events that the previous one did.  */

void
linux_process_target::handle_new_gdb_connection ()
{
  /* Request that all the lwps reset their ptrace options.  */
  for_each_thread ([] (thread_info *thread)
    {
      struct lwp_info *lwp = get_thread_lwp (thread);

      if (!lwp->stopped)
	{
	  /* Stop the lwp so we can modify its ptrace options; the
	     must_set_ptrace_flags flag marks it for the actual update
	     (presumably applied when the stop is processed — see the
	     stop handling code).  */
	  lwp->must_set_ptrace_flags = 1;
	  linux_stop_lwp (lwp);
	}
      else
	{
	  /* Already stopped; go ahead and set the ptrace options.  */
	  struct process_info *proc = find_process_pid (pid_of (thread));
	  int options = linux_low_ptrace_options (proc->attached);

	  linux_enable_event_reporting (lwpid_of (thread), options);
	  lwp->must_set_ptrace_flags = 0;
	}
    });
}
/* Handle a "monitor" command from GDB; delegated to thread_db when
   available, otherwise reported as unhandled (0).  */

int
linux_process_target::handle_monitor_command (char *mon)
{
#ifdef USE_THREAD_DB
  return thread_db_handle_monitor_command (mon);
#else
  return 0;
#endif
}

/* Return the CPU core PTID last ran on.  */

int
linux_process_target::core_of_thread (ptid_t ptid)
{
  return linux_common_core_of_thread (ptid);
}

bool
linux_process_target::supports_disable_randomization ()
{
  return true;
}

bool
linux_process_target::supports_agent ()
{
  return true;
}
/* Range stepping is available either via software single-step or
   when the low target implements it in hardware.  */

bool
linux_process_target::supports_range_stepping ()
{
  if (supports_software_single_step ())
    return true;

  return low_supports_range_stepping ();
}

/* Default low-target implementation: no hardware range stepping.  */

bool
linux_process_target::low_supports_range_stepping ()
{
  return false;
}

bool
linux_process_target::supports_pid_to_exec_file ()
{
  return true;
}
const char *
linux_process_target::pid_to_exec_file (int pid)
{
  return linux_proc_pid_to_exec_file (pid);
}

bool
linux_process_target::supports_multifs ()
{
  return true;
}

/* The multifs operations delegate to the linux_mntns_* helpers, which
   resolve FILENAME relative to process PID's mount namespace.  */

int
linux_process_target::multifs_open (int pid, const char *filename,
				    int flags, mode_t mode)
{
  return linux_mntns_open_cloexec (pid, filename, flags, mode);
}

int
linux_process_target::multifs_unlink (int pid, const char *filename)
{
  return linux_mntns_unlink (pid, filename);
}

ssize_t
linux_process_target::multifs_readlink (int pid, const char *filename,
					char *buf, size_t bufsiz)
{
  return linux_mntns_readlink (pid, filename, buf, bufsiz);
}
#if defined PT_GETDSBT || defined PTRACE_GETFDPIC
/* One segment of a loadmap, as filled in by the kernel for the
   PT_GETDSBT/PTRACE_GETFDPIC ptrace requests below.  */
struct target_loadseg
{
  /* Core address to which the segment is mapped.  */
  Elf32_Addr addr;
  /* VMA recorded in the program header.  */
  Elf32_Addr p_vaddr;
  /* Size of this segment in memory.  */
  Elf32_Word p_memsz;
};

# if defined PT_GETDSBT
struct target_loadmap
{
  /* Protocol version number, must be zero.  */
  Elf32_Word version;
  /* Pointer to the DSBT table, its size, and the DSBT index.  */
  unsigned *dsbt_table;
  unsigned dsbt_size, dsbt_index;
  /* Number of segments in this map.  */
  Elf32_Word nsegs;
  /* The actual memory map.  */
  struct target_loadseg segs[/*nsegs*/];
};
#  define LINUX_LOADMAP		PT_GETDSBT
#  define LINUX_LOADMAP_EXEC	PTRACE_GETDSBT_EXEC
#  define LINUX_LOADMAP_INTERP	PTRACE_GETDSBT_INTERP
# else
struct target_loadmap
{
  /* Protocol version number, must be zero.  */
  Elf32_Half version;
  /* Number of segments in this map.  */
  Elf32_Half nsegs;
  /* The actual memory map.  */
  struct target_loadseg segs[/*nsegs*/];
};
#  define LINUX_LOADMAP		PTRACE_GETFDPIC
#  define LINUX_LOADMAP_EXEC	PTRACE_GETFDPIC_EXEC
#  define LINUX_LOADMAP_INTERP	PTRACE_GETFDPIC_INTERP
# endif

bool
linux_process_target::supports_read_loadmap ()
{
  return true;
}
6157 linux_process_target::read_loadmap (const char *annex, CORE_ADDR offset,
6158 unsigned char *myaddr, unsigned int len)
6160 int pid = lwpid_of (current_thread);
6161 int addr = -1;
6162 struct target_loadmap *data = NULL;
6163 unsigned int actual_length, copy_length;
6165 if (strcmp (annex, "exec") == 0)
6166 addr = (int) LINUX_LOADMAP_EXEC;
6167 else if (strcmp (annex, "interp") == 0)
6168 addr = (int) LINUX_LOADMAP_INTERP;
6169 else
6170 return -1;
6172 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
6173 return -1;
6175 if (data == NULL)
6176 return -1;
6178 actual_length = sizeof (struct target_loadmap)
6179 + sizeof (struct target_loadseg) * data->nsegs;
6181 if (offset < 0 || offset > actual_length)
6182 return -1;
6184 copy_length = actual_length - offset < len ? actual_length - offset : len;
6185 memcpy (myaddr, (char *) data + offset, copy_length);
6186 return copy_length;
6188 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
bool
linux_process_target::supports_catch_syscall ()
{
  return low_supports_catch_syscall ();
}

/* Default low-target implementation: syscall catchpoints are not
   supported unless the low target overrides this.  */

bool
linux_process_target::low_supports_catch_syscall ()
{
  return false;
}

CORE_ADDR
linux_process_target::read_pc (regcache *regcache)
{
  /* Without breakpoint support there is no low_get_pc to call.  */
  if (!low_supports_breakpoints ())
    return 0;

  return low_get_pc (regcache);
}

void
linux_process_target::write_pc (regcache *regcache, CORE_ADDR pc)
{
  gdb_assert (low_supports_breakpoints ());

  low_set_pc (regcache, pc);
}

bool
linux_process_target::supports_thread_stopped ()
{
  return true;
}

bool
linux_process_target::thread_stopped (thread_info *thread)
{
  return get_thread_lwp (thread)->stopped;
}
6231 bool
6232 linux_process_target::any_resumed ()
6234 bool any_resumed;
6236 auto status_pending_p_any = [&] (thread_info *thread)
6238 return status_pending_p_callback (thread, minus_one_ptid);
6241 auto not_stopped = [&] (thread_info *thread)
6243 return not_stopped_callback (thread, minus_one_ptid);
6246 /* Find a resumed LWP, if any. */
6247 if (find_thread (status_pending_p_any) != NULL)
6248 any_resumed = 1;
6249 else if (find_thread (not_stopped) != NULL)
6250 any_resumed = 1;
6251 else
6252 any_resumed = 0;
6254 return any_resumed;
/* This exposes stop-all-threads functionality to other modules.  */

void
linux_process_target::pause_all (bool freeze)
{
  stop_all_lwps (freeze, NULL);
}

/* This exposes unstop-all-threads functionality to other gdbserver
   modules.  */

void
linux_process_target::unpause_all (bool unfreeze)
{
  unstop_all_lwps (unfreeze, NULL);
}
/* Extract &phdr and num_phdr in the inferior.  Return 0 on success,
   1 if /proc/PID/auxv cannot be opened, 2 if AT_PHDR or AT_PHNUM is
   missing.  */

static int
get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
			       CORE_ADDR *phdr_memaddr, int *num_phdr)
{
  char filename[PATH_MAX];
  int fd;
  const int auxv_size = is_elf64
    ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
  char buf[sizeof (Elf64_auxv_t)];  /* The larger of the two.  */

  xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);

  fd = open (filename, O_RDONLY);
  if (fd < 0)
    return 1;

  /* Scan auxv entries until both AT_PHDR and AT_PHNUM have been seen,
     or until a short read (end of the vector).  */
  *phdr_memaddr = 0;
  *num_phdr = 0;
  while (read (fd, buf, auxv_size) == auxv_size
	 && (*phdr_memaddr == 0 || *num_phdr == 0))
    {
      if (is_elf64)
	{
	  Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;

	  switch (aux->a_type)
	    {
	    case AT_PHDR:
	      *phdr_memaddr = aux->a_un.a_val;
	      break;
	    case AT_PHNUM:
	      *num_phdr = aux->a_un.a_val;
	      break;
	    }
	}
      else
	{
	  Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;

	  switch (aux->a_type)
	    {
	    case AT_PHDR:
	      *phdr_memaddr = aux->a_un.a_val;
	      break;
	    case AT_PHNUM:
	      *num_phdr = aux->a_un.a_val;
	      break;
	    }
	}
    }

  close (fd);

  if (*phdr_memaddr == 0 || *num_phdr == 0)
    {
      warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
	       "phdr_memaddr = %ld, phdr_num = %d",
	       (long) *phdr_memaddr, *num_phdr);
      return 2;
    }

  return 0;
}
/* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present.  */

static CORE_ADDR
get_dynamic (const int pid, const int is_elf64)
{
  CORE_ADDR phdr_memaddr, relocation;
  int num_phdr, i;
  unsigned char *phdr_buf;
  const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);

  if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
    return 0;

  gdb_assert (num_phdr < 100);  /* Basic sanity check.  */
  phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);

  if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
    return 0;

  /* Compute relocation: it is expected to be 0 for "regular" executables,
     non-zero for PIE ones.  Derived from PT_PHDR, since AT_PHDR gives the
     runtime address while p_vaddr gives the link-time one.  */
  relocation = -1;
  for (i = 0; relocation == -1 && i < num_phdr; i++)
    if (is_elf64)
      {
	Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);

	if (p->p_type == PT_PHDR)
	  relocation = phdr_memaddr - p->p_vaddr;
      }
    else
      {
	Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);

	if (p->p_type == PT_PHDR)
	  relocation = phdr_memaddr - p->p_vaddr;
      }

  if (relocation == -1)
    {
      /* PT_PHDR is optional, but necessary for PIE in general.  Fortunately
	 any real world executables, including PIE executables, have always
	 PT_PHDR present.  PT_PHDR is not present in some shared libraries or
	 in fpc (Free Pascal 2.4) binaries but neither of those have a need for
	 or present DT_DEBUG anyway (fpc binaries are statically linked).

	 Therefore if there exists DT_DEBUG there is always also PT_PHDR.

	 GDB could find RELOCATION also from AT_ENTRY - e_entry.  */

      return 0;
    }

  for (i = 0; i < num_phdr; i++)
    {
      if (is_elf64)
	{
	  Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);

	  if (p->p_type == PT_DYNAMIC)
	    return p->p_vaddr + relocation;
	}
      else
	{
	  Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);

	  if (p->p_type == PT_DYNAMIC)
	    return p->p_vaddr + relocation;
	}
    }

  return 0;
}
/* Return &_r_debug in the inferior, or -1 if not present.  Return value
   can be 0 if the inferior does not yet have the library list initialized.
   We look for DT_MIPS_RLD_MAP first.  MIPS executables use this instead of
   DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too.  */

static CORE_ADDR
get_r_debug (const int pid, const int is_elf64)
{
  CORE_ADDR dynamic_memaddr;
  const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
  unsigned char buf[sizeof (Elf64_Dyn)];  /* The larger of the two.  */
  CORE_ADDR map = -1;

  dynamic_memaddr = get_dynamic (pid, is_elf64);
  if (dynamic_memaddr == 0)
    return map;

  /* Walk the .dynamic section entry by entry.  The 64-bit and 32-bit
     branches below are identical apart from the ELF types used.  */
  while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
    {
      if (is_elf64)
	{
	  Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
#if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
	  /* Union to reinterpret the bytes read from the inferior as a
	     pointer-sized value.  */
	  union
	    {
	      Elf64_Xword map;
	      unsigned char buf[sizeof (Elf64_Xword)];
	    }
	  rld_map;
#endif
#ifdef DT_MIPS_RLD_MAP
	  if (dyn->d_tag == DT_MIPS_RLD_MAP)
	    {
	      if (linux_read_memory (dyn->d_un.d_val,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP */
#ifdef DT_MIPS_RLD_MAP_REL
	  if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
	    {
	      /* DT_MIPS_RLD_MAP_REL is relative to the dynamic entry
		 itself.  */
	      if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP_REL */

	  if (dyn->d_tag == DT_DEBUG && map == -1)
	    map = dyn->d_un.d_val;

	  if (dyn->d_tag == DT_NULL)
	    break;
	}
      else
	{
	  Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
#if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
	  union
	    {
	      Elf32_Word map;
	      unsigned char buf[sizeof (Elf32_Word)];
	    }
	  rld_map;
#endif
#ifdef DT_MIPS_RLD_MAP
	  if (dyn->d_tag == DT_MIPS_RLD_MAP)
	    {
	      if (linux_read_memory (dyn->d_un.d_val,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP */
#ifdef DT_MIPS_RLD_MAP_REL
	  if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
	    {
	      if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP_REL */

	  if (dyn->d_tag == DT_DEBUG && map == -1)
	    map = dyn->d_un.d_val;

	  if (dyn->d_tag == DT_NULL)
	    break;
	}

      dynamic_memaddr += dyn_size;
    }

  return map;
}
/* Read one pointer from MEMADDR in the inferior.  PTR_SIZE is the
   inferior's pointer size in bytes.  Returns 0 on success (the
   linux_read_memory convention).  */

static int
read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
{
  int ret;

  /* Go through a union so this works on either big or little endian
     hosts, when the inferior's pointer size is smaller than the size
     of CORE_ADDR.  It is assumed the inferior's endianness is the
     same of the superior's.  */
  union
  {
    CORE_ADDR core_addr;
    unsigned int ui;
    unsigned char uc;
  } addr;

  ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
  if (ret == 0)
    {
      if (ptr_size == sizeof (CORE_ADDR))
	*ptr = addr.core_addr;
      else if (ptr_size == sizeof (unsigned int))
	*ptr = addr.ui;
      else
	gdb_assert_not_reached ("unhandled pointer size");
    }
  return ret;
}
bool
linux_process_target::supports_qxfer_libraries_svr4 ()
{
  return true;
}

/* Byte offsets into the inferior's r_debug/link_map structures, for
   one inferior pointer size.  */

struct link_map_offsets
{
  /* Offset and size of r_debug.r_version.  */
  int r_version_offset;

  /* Offset and size of r_debug.r_map.  */
  int r_map_offset;

  /* Offset of r_debug_extended.r_next.  */
  int r_next_offset;

  /* Offset to l_addr field in struct link_map.  */
  int l_addr_offset;

  /* Offset to l_name field in struct link_map.  */
  int l_name_offset;

  /* Offset to l_ld field in struct link_map.  */
  int l_ld_offset;

  /* Offset to l_next field in struct link_map.  */
  int l_next_offset;

  /* Offset to l_prev field in struct link_map.  */
  int l_prev_offset;
};

static const link_map_offsets lmo_32bit_offsets =
  {
    0,     /* r_version offset.  */
    4,     /* r_debug.r_map offset.  */
    20,    /* r_debug_extended.r_next.  */
    0,     /* l_addr offset in link_map.  */
    4,     /* l_name offset in link_map.  */
    8,     /* l_ld offset in link_map.  */
    12,    /* l_next offset in link_map.  */
    16     /* l_prev offset in link_map.  */
  };

static const link_map_offsets lmo_64bit_offsets =
  {
    0,     /* r_version offset.  */
    8,     /* r_debug.r_map offset.  */
    40,    /* r_debug_extended.r_next.  */
    0,     /* l_addr offset in link_map.  */
    8,     /* l_name offset in link_map.  */
    16,    /* l_ld offset in link_map.  */
    24,    /* l_next offset in link_map.  */
    32     /* l_prev offset in link_map.  */
  };
/* Get the loaded shared libraries from one namespace, appending one
   <library> XML element per entry to DOCUMENT.  The walk stops on a
   memory read failure or when the list's back-pointer is
   inconsistent.  */

static void
read_link_map (std::string &document, CORE_ADDR lmid, CORE_ADDR lm_addr,
	       CORE_ADDR lm_prev, int ptr_size, const link_map_offsets *lmo)
{
  CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;

  while (lm_addr
	 && read_one_ptr (lm_addr + lmo->l_name_offset,
			  &l_name, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_addr_offset,
			  &l_addr, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_ld_offset,
			  &l_ld, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_prev_offset,
			  &l_prev, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_next_offset,
			  &l_next, ptr_size) == 0)
    {
      unsigned char libname[PATH_MAX];

      if (lm_prev != l_prev)
	{
	  warning ("Corrupted shared library list: 0x%s != 0x%s",
		   paddress (lm_prev), paddress (l_prev));
	  break;
	}

      /* Not checking for error because reading may stop before we've got
	 PATH_MAX worth of characters.  */
      libname[0] = '\0';
      linux_read_memory (l_name, libname, sizeof (libname) - 1);
      libname[sizeof (libname) - 1] = '\0';
      /* Entries with an empty name (e.g. the main executable) are
	 skipped.  */
      if (libname[0] != '\0')
	{
	  string_appendf (document, "<library name=\"");
	  xml_escape_text_append (document, (char *) libname);
	  string_appendf (document, "\" lm=\"0x%s\" l_addr=\"0x%s\" "
			  "l_ld=\"0x%s\" lmid=\"0x%s\"/>",
			  paddress (lm_addr), paddress (l_addr),
			  paddress (l_ld), paddress (lmid));
	}

      lm_prev = lm_addr;
      lm_addr = l_next;
    }
}
/* Construct qXfer:libraries-svr4:read reply.  ANNEX optionally carries
   "lmid", "start" and "prev" hex values; with "start" only that
   namespace is traversed, otherwise all namespaces reachable from
   r_debug are.  Returns the number of bytes copied into READBUF, -1
   on error, -2 for an unsupported write.  */

int
linux_process_target::qxfer_libraries_svr4 (const char *annex,
					    unsigned char *readbuf,
					    unsigned const char *writebuf,
					    CORE_ADDR offset, int len)
{
  struct process_info_private *const priv = current_process ()->priv;
  char filename[PATH_MAX];
  int pid, is_elf64;
  unsigned int machine;
  CORE_ADDR lmid = 0, lm_addr = 0, lm_prev = 0;

  if (writebuf != NULL)
    return -2;
  if (readbuf == NULL)
    return -1;

  /* The inferior's ELF class (from /proc/PID/exe) selects the
     pointer size and structure offsets used for the traversal.  */
  pid = lwpid_of (current_thread);
  xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
  is_elf64 = elf_64_file_p (filename, &machine);
  const link_map_offsets *lmo;
  int ptr_size;
  if (is_elf64)
    {
      lmo = &lmo_64bit_offsets;
      ptr_size = 8;
    }
  else
    {
      lmo = &lmo_32bit_offsets;
      ptr_size = 4;
    }

  /* Parse the annex: ';'-separated NAME=HEXVALUE pairs; unknown
     names are skipped.  */
  while (annex[0] != '\0')
    {
      const char *sep;
      CORE_ADDR *addrp;
      int name_len;

      sep = strchr (annex, '=');
      if (sep == NULL)
	break;

      name_len = sep - annex;
      if (name_len == 4 && startswith (annex, "lmid"))
	addrp = &lmid;
      else if (name_len == 5 && startswith (annex, "start"))
	addrp = &lm_addr;
      else if (name_len == 4 && startswith (annex, "prev"))
	addrp = &lm_prev;
      else
	{
	  annex = strchr (sep, ';');
	  if (annex == NULL)
	    break;
	  annex++;
	  continue;
	}

      annex = decode_address_to_semicolon (addrp, sep + 1);
    }

  std::string document = "<library-list-svr4 version=\"1.0\"";

  /* When the starting LM_ADDR is passed in the annex, only traverse that
     namespace, which is assumed to be identified by LMID.

     Otherwise, start with R_DEBUG and traverse all namespaces we find.  */
  if (lm_addr != 0)
    {
      document += ">";
      read_link_map (document, lmid, lm_addr, lm_prev, ptr_size, lmo);
    }
  else
    {
      if (lm_prev != 0)
	warning ("ignoring prev=0x%s without start", paddress (lm_prev));

      /* We could interpret LMID as 'provide only the libraries for this
	 namespace' but GDB is currently only providing lmid, start, and
	 prev, or nothing.  */
      if (lmid != 0)
	warning ("ignoring lmid=0x%s without start", paddress (lmid));

      /* &_r_debug is cached per process once found.  */
      CORE_ADDR r_debug = priv->r_debug;
      if (r_debug == 0)
	r_debug = priv->r_debug = get_r_debug (pid, is_elf64);

      /* We failed to find DT_DEBUG.  Such situation will not change
	 for this inferior - do not retry it.  Report it to GDB as
	 E01, see for the reasons at the GDB solib-svr4.c side.  */
      if (r_debug == (CORE_ADDR) -1)
	return -1;

      /* Terminate the header if we end up with an empty list.  */
      if (r_debug == 0)
	document += ">";

      while (r_debug != 0)
	{
	  int r_version = 0;
	  if (linux_read_memory (r_debug + lmo->r_version_offset,
				 (unsigned char *) &r_version,
				 sizeof (r_version)) != 0)
	    {
	      warning ("unable to read r_version from 0x%s",
		       paddress (r_debug + lmo->r_version_offset));
	      break;
	    }

	  if (r_version < 1)
	    {
	      warning ("unexpected r_debug version %d", r_version);
	      break;
	    }

	  if (read_one_ptr (r_debug + lmo->r_map_offset, &lm_addr,
			    ptr_size) != 0)
	    {
	      warning ("unable to read r_map from 0x%s",
		       paddress (r_debug + lmo->r_map_offset));
	      break;
	    }

	  /* We read the entire namespace.  */
	  lm_prev = 0;

	  /* The first entry corresponds to the main executable unless the
	     dynamic loader was loaded late by a static executable.  But
	     in such case the main executable does not have PT_DYNAMIC
	     present and we would not have gotten here.  */
	  if (r_debug == priv->r_debug)
	    {
	      if (lm_addr != 0)
		string_appendf (document, " main-lm=\"0x%s\">",
				paddress (lm_addr));
	      else
		document += ">";

	      lm_prev = lm_addr;
	      if (read_one_ptr (lm_addr + lmo->l_next_offset,
				&lm_addr, ptr_size) != 0)
		{
		  warning ("unable to read l_next from 0x%s",
			   paddress (lm_addr + lmo->l_next_offset));
		  break;
		}
	    }

	  read_link_map (document, r_debug, lm_addr, lm_prev, ptr_size, lmo);

	  /* r_debug_extended.r_next only exists from version 2 on.  */
	  if (r_version < 2)
	    break;

	  if (read_one_ptr (r_debug + lmo->r_next_offset, &r_debug,
			    ptr_size) != 0)
	    {
	      warning ("unable to read r_next from 0x%s",
		       paddress (r_debug + lmo->r_next_offset));
	      break;
	    }
	}
    }

  document += "</library-list-svr4>";

  /* Return the requested window [OFFSET, OFFSET+LEN) of the document.  */
  int document_len = document.length ();
  if (offset < document_len)
    document_len -= offset;
  else
    document_len = 0;
  if (len > document_len)
    len = document_len;

  memcpy (readbuf, document.data () + offset, len);

  return len;
}
#ifdef HAVE_LINUX_BTRACE

bool
linux_process_target::supports_btrace ()
{
  return true;
}

/* Enable branch tracing for thread TP with configuration CONF.  */

btrace_target_info *
linux_process_target::enable_btrace (thread_info *tp,
				     const btrace_config *conf)
{
  return linux_enable_btrace (tp->id, conf);
}

/* See to_disable_btrace target method.  */

int
linux_process_target::disable_btrace (btrace_target_info *tinfo)
{
  enum btrace_error err;

  err = linux_disable_btrace (tinfo);
  /* Map the btrace error code onto this method's 0/-1 convention.  */
  return (err == BTRACE_ERR_NONE ? 0 : -1);
}
6860 /* Encode an Intel Processor Trace configuration. */
6862 static void
6863 linux_low_encode_pt_config (std::string *buffer,
6864 const struct btrace_data_pt_config *config)
6866 *buffer += "<pt-config>\n";
6868 switch (config->cpu.vendor)
6870 case CV_INTEL:
6871 string_xml_appendf (*buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
6872 "model=\"%u\" stepping=\"%u\"/>\n",
6873 config->cpu.family, config->cpu.model,
6874 config->cpu.stepping);
6875 break;
6877 default:
6878 break;
6881 *buffer += "</pt-config>\n";
6884 /* Encode a raw buffer. */
6886 static void
6887 linux_low_encode_raw (std::string *buffer, const gdb_byte *data,
6888 unsigned int size)
6890 if (size == 0)
6891 return;
6893 /* We use hex encoding - see gdbsupport/rsp-low.h. */
6894 *buffer += "<raw>\n";
6896 while (size-- > 0)
6898 char elem[2];
6900 elem[0] = tohex ((*data >> 4) & 0xf);
6901 elem[1] = tohex (*data++ & 0xf);
6903 buffer->append (elem, 2);
6906 *buffer += "</raw>\n";
/* See to_read_btrace target method.  Appends the trace as XML to
   BUFFER; returns 0 on success, -1 on error (with an "E." message
   appended instead).  */

int
linux_process_target::read_btrace (btrace_target_info *tinfo,
				   std::string *buffer,
				   enum btrace_read_type type)
{
  struct btrace_data btrace;
  enum btrace_error err;

  err = linux_read_btrace (&btrace, tinfo, type);
  if (err != BTRACE_ERR_NONE)
    {
      if (err == BTRACE_ERR_OVERFLOW)
	*buffer += "E.Overflow.";
      else
	*buffer += "E.Generic Error.";

      return -1;
    }

  switch (btrace.format)
    {
    case BTRACE_FORMAT_NONE:
      *buffer += "E.No Trace.";
      return -1;

    case BTRACE_FORMAT_BTS:
      *buffer += "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n";
      *buffer += "<btrace version=\"1.0\">\n";

      /* One <block> element per branch trace block.  */
      for (const btrace_block &block : *btrace.variant.bts.blocks)
	string_xml_appendf (*buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
			    paddress (block.begin), paddress (block.end));

      *buffer += "</btrace>\n";
      break;

    case BTRACE_FORMAT_PT:
      *buffer += "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n";
      *buffer += "<btrace version=\"1.0\">\n";
      *buffer += "<pt>\n";

      linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);

      /* The PT payload is opaque; encoded as hex.  */
      linux_low_encode_raw (buffer, btrace.variant.pt.data,
			    btrace.variant.pt.size);

      *buffer += "</pt>\n";
      *buffer += "</btrace>\n";
      break;

    default:
      *buffer += "E.Unsupported Trace Format.";
      return -1;
    }

  return 0;
}
6969 /* See to_btrace_conf target method. */
6972 linux_process_target::read_btrace_conf (const btrace_target_info *tinfo,
6973 std::string *buffer)
6975 const struct btrace_config *conf;
6977 *buffer += "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n";
6978 *buffer += "<btrace-conf version=\"1.0\">\n";
6980 conf = linux_btrace_conf (tinfo);
6981 if (conf != NULL)
6983 switch (conf->format)
6985 case BTRACE_FORMAT_NONE:
6986 break;
6988 case BTRACE_FORMAT_BTS:
6989 string_xml_appendf (*buffer, "<bts");
6990 string_xml_appendf (*buffer, " size=\"0x%x\"", conf->bts.size);
6991 string_xml_appendf (*buffer, " />\n");
6992 break;
6994 case BTRACE_FORMAT_PT:
6995 string_xml_appendf (*buffer, "<pt");
6996 string_xml_appendf (*buffer, " size=\"0x%x\"", conf->pt.size);
6997 string_xml_appendf (*buffer, "/>\n");
6998 break;
7002 *buffer += "</btrace-conf>\n";
7003 return 0;
7005 #endif /* HAVE_LINUX_BTRACE */
7007 /* See nat/linux-nat.h. */
7009 ptid_t
7010 current_lwp_ptid (void)
7012 return ptid_of (current_thread);
7015 /* A helper function that copies NAME to DEST, replacing non-printable
7016 characters with '?'. Returns DEST as a convenience. */
7018 static const char *
7019 replace_non_ascii (char *dest, const char *name)
7021 while (*name != '\0')
7023 if (!ISPRINT (*name))
7024 *dest++ = '?';
7025 else
7026 *dest++ = *name;
7027 ++name;
7029 return dest;
7032 const char *
7033 linux_process_target::thread_name (ptid_t thread)
7035 static char dest[100];
7037 const char *name = linux_proc_tid_get_name (thread);
7038 if (name == nullptr)
7039 return nullptr;
7041 /* Linux limits the comm file to 16 bytes (including the trailing
7042 \0. If the program or thread name is set when using a multi-byte
7043 encoding, this might cause it to be truncated mid-character. In
7044 this situation, sending the truncated form in an XML <thread>
7045 response will cause a parse error in gdb. So, instead convert
7046 from the locale's encoding (we can't be sure this is the correct
7047 encoding, but it's as good a guess as we have) to UTF-8, but in a
7048 way that ignores any encoding errors. See PR remote/30618. */
7049 const char *cset = nl_langinfo (CODESET);
7050 iconv_t handle = iconv_open ("UTF-8//IGNORE", cset);
7051 if (handle == (iconv_t) -1)
7052 return replace_non_ascii (dest, name);
7054 size_t inbytes = strlen (name);
7055 char *inbuf = const_cast<char *> (name);
7056 size_t outbytes = sizeof (dest);
7057 char *outbuf = dest;
7058 size_t result = iconv (handle, &inbuf, &inbytes, &outbuf, &outbytes);
7060 if (result == (size_t) -1)
7062 if (errno == E2BIG)
7063 outbuf = &dest[sizeof (dest) - 1];
7064 else if ((errno == EILSEQ || errno == EINVAL)
7065 && outbuf < &dest[sizeof (dest) - 2])
7066 *outbuf++ = '?';
7067 *outbuf = '\0';
7070 iconv_close (handle);
7071 return *dest == '\0' ? nullptr : dest;
#if USE_THREAD_DB
/* Fetch the thread-library handle for PTID into *HANDLE /
   *HANDLE_LEN.  Delegates to the thread_db layer.  */

bool
linux_process_target::thread_handle (ptid_t ptid, gdb_byte **handle,
				     int *handle_len)
{
  return thread_db_thread_handle (ptid, handle, handle_len);
}
#endif
7083 thread_info *
7084 linux_process_target::thread_pending_parent (thread_info *thread)
7086 lwp_info *parent = get_thread_lwp (thread)->pending_parent ();
7088 if (parent == nullptr)
7089 return nullptr;
7091 return get_lwp_thread (parent);
7094 thread_info *
7095 linux_process_target::thread_pending_child (thread_info *thread,
7096 target_waitkind *kind)
7098 lwp_info *child = get_thread_lwp (thread)->pending_child (kind);
7100 if (child == nullptr)
7101 return nullptr;
7103 return get_lwp_thread (child);
7106 /* Default implementation of linux_target_ops method "set_pc" for
7107 32-bit pc register which is literally named "pc". */
7109 void
7110 linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
7112 uint32_t newpc = pc;
7114 supply_register_by_name (regcache, "pc", &newpc);
7117 /* Default implementation of linux_target_ops method "get_pc" for
7118 32-bit pc register which is literally named "pc". */
7120 CORE_ADDR
7121 linux_get_pc_32bit (struct regcache *regcache)
7123 uint32_t pc;
7125 collect_register_by_name (regcache, "pc", &pc);
7126 threads_debug_printf ("stop pc is 0x%" PRIx32, pc);
7127 return pc;
7130 /* Default implementation of linux_target_ops method "set_pc" for
7131 64-bit pc register which is literally named "pc". */
7133 void
7134 linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
7136 uint64_t newpc = pc;
7138 supply_register_by_name (regcache, "pc", &newpc);
7141 /* Default implementation of linux_target_ops method "get_pc" for
7142 64-bit pc register which is literally named "pc". */
7144 CORE_ADDR
7145 linux_get_pc_64bit (struct regcache *regcache)
7147 uint64_t pc;
7149 collect_register_by_name (regcache, "pc", &pc);
7150 threads_debug_printf ("stop pc is 0x%" PRIx64, pc);
7151 return pc;
7154 /* See linux-low.h. */
7157 linux_get_auxv (int pid, int wordsize, CORE_ADDR match, CORE_ADDR *valp)
7159 gdb_byte *data = (gdb_byte *) alloca (2 * wordsize);
7160 int offset = 0;
7162 gdb_assert (wordsize == 4 || wordsize == 8);
7164 while (the_target->read_auxv (pid, offset, data, 2 * wordsize)
7165 == 2 * wordsize)
7167 if (wordsize == 4)
7169 uint32_t *data_p = (uint32_t *) data;
7170 if (data_p[0] == match)
7172 *valp = data_p[1];
7173 return 1;
7176 else
7178 uint64_t *data_p = (uint64_t *) data;
7179 if (data_p[0] == match)
7181 *valp = data_p[1];
7182 return 1;
7186 offset += 2 * wordsize;
7189 return 0;
7192 /* See linux-low.h. */
7194 CORE_ADDR
7195 linux_get_hwcap (int pid, int wordsize)
7197 CORE_ADDR hwcap = 0;
7198 linux_get_auxv (pid, wordsize, AT_HWCAP, &hwcap);
7199 return hwcap;
7202 /* See linux-low.h. */
7204 CORE_ADDR
7205 linux_get_hwcap2 (int pid, int wordsize)
7207 CORE_ADDR hwcap2 = 0;
7208 linux_get_auxv (pid, wordsize, AT_HWCAP2, &hwcap2);
7209 return hwcap2;
#ifdef HAVE_LINUX_REGSETS
/* Count the entries of INFO's regset table and record the count in
   INFO->num_regsets.  The table ends with a sentinel entry whose size
   is negative.  */

void
initialize_regsets_info (struct regsets_info *info)
{
  int count = 0;

  while (info->regsets[count].size >= 0)
    ++count;

  info->num_regsets = count;
}
#endif
7223 void
7224 initialize_low (void)
7226 struct sigaction sigchld_action;
7228 memset (&sigchld_action, 0, sizeof (sigchld_action));
7229 set_target_ops (the_linux_target);
7231 linux_ptrace_init_warnings ();
7232 linux_proc_init_warnings ();
7234 sigchld_action.sa_handler = sigchld_handler;
7235 sigemptyset (&sigchld_action.sa_mask);
7236 sigchld_action.sa_flags = SA_RESTART;
7237 sigaction (SIGCHLD, &sigchld_action, NULL);
7239 initialize_low_arch ();
7241 linux_check_ptrace_features ();