Automatic date update in version.in
[binutils-gdb.git] / gdb / linux-nat.c
blob55cab41bb61bea276446d92abe0a7b7ef2bdd6f3
1 /* GNU/Linux native-dependent code common to multiple platforms.
3 Copyright (C) 2001-2024 Free Software Foundation, Inc.
5 This file is part of GDB.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20 #include "inferior.h"
21 #include "infrun.h"
22 #include "target.h"
23 #include "nat/linux-nat.h"
24 #include "nat/linux-waitpid.h"
25 #include "gdbsupport/gdb_wait.h"
26 #include <unistd.h>
27 #include <sys/syscall.h>
28 #include "nat/gdb_ptrace.h"
29 #include "linux-nat.h"
30 #include "nat/linux-ptrace.h"
31 #include "nat/linux-procfs.h"
32 #include "nat/linux-personality.h"
33 #include "linux-fork.h"
34 #include "gdbthread.h"
35 #include "gdbcmd.h"
36 #include "regcache.h"
37 #include "regset.h"
38 #include "inf-child.h"
39 #include "inf-ptrace.h"
40 #include "auxv.h"
41 #include <sys/procfs.h>
42 #include "elf-bfd.h"
43 #include "gregset.h"
44 #include "gdbcore.h"
45 #include <ctype.h>
46 #include <sys/stat.h>
47 #include <fcntl.h>
48 #include "inf-loop.h"
49 #include "gdbsupport/event-loop.h"
50 #include "event-top.h"
51 #include <pwd.h>
52 #include <sys/types.h>
53 #include <dirent.h>
54 #include "xml-support.h"
55 #include <sys/vfs.h>
56 #include "solib.h"
57 #include "nat/linux-osdata.h"
58 #include "linux-tdep.h"
59 #include "symfile.h"
60 #include "gdbsupport/agent.h"
61 #include "tracepoint.h"
62 #include "target-descriptions.h"
63 #include "gdbsupport/filestuff.h"
64 #include "objfiles.h"
65 #include "nat/linux-namespaces.h"
66 #include "gdbsupport/block-signals.h"
67 #include "gdbsupport/fileio.h"
68 #include "gdbsupport/scope-exit.h"
69 #include "gdbsupport/gdb-sigmask.h"
70 #include "gdbsupport/common-debug.h"
71 #include <unordered_map>
73 /* This comment documents high-level logic of this file.
75 Waiting for events in sync mode
76 ===============================
78 When waiting for an event in a specific thread, we just use waitpid,
79 passing the specific pid, and not passing WNOHANG.
81 When waiting for an event in all threads, waitpid is not quite good:
83 - If the thread group leader exits while other threads in the thread
84 group still exist, waitpid(TGID, ...) hangs. That waitpid won't
85 return an exit status until the other threads in the group are
86 reaped.
88 - When a non-leader thread execs, that thread just vanishes without
89 reporting an exit (so we'd hang if we waited for it explicitly in
90 that case). The exec event is instead reported to the TGID pid.
92 The solution is to always use -1 and WNOHANG, together with
93 sigsuspend.
95 First, we use non-blocking waitpid to check for events. If nothing is
96 found, we use sigsuspend to wait for SIGCHLD. When SIGCHLD arrives,
97 it means something happened to a child process. As soon as we know
98 there's an event, we get back to calling nonblocking waitpid.
100 Note that SIGCHLD should be blocked between waitpid and sigsuspend
101 calls, so that we don't miss a signal. If SIGCHLD arrives in between,
102 when it's blocked, the signal becomes pending and sigsuspend
103 immediately notices it and returns.
105 Waiting for events in async mode (TARGET_WNOHANG)
106 =================================================
108 In async mode, GDB should always be ready to handle both user input
109 and target events, so neither blocking waitpid nor sigsuspend are
110 viable options. Instead, we should asynchronously notify the GDB main
111 event loop whenever there's an unprocessed event from the target. We
112 detect asynchronous target events by handling SIGCHLD signals. To
113 notify the event loop about target events, an event pipe is used
114 --- the pipe is registered as waitable event source in the event loop,
115 the event loop select/poll's on the read end of this pipe (as well on
116 other event sources, e.g., stdin), and the SIGCHLD handler marks the
117 event pipe to raise an event. This is more portable than relying on
118 pselect/ppoll, since on kernels that lack those syscalls, libc
119 emulates them with select/poll+sigprocmask, and that is racy
120 (a.k.a. plain broken).
122 Obviously, if we fail to notify the event loop if there's a target
123 event, it's bad. OTOH, if we notify the event loop when there's no
124 event from the target, linux_nat_wait will detect that there's no real
125 event to report, and return event of type TARGET_WAITKIND_IGNORE.
126 This is mostly harmless, but it will waste time and is better avoided.
128 The main design point is that every time GDB is outside linux-nat.c,
129 we have a SIGCHLD handler installed that is called when something
130 happens to the target and notifies the GDB event loop. Whenever GDB
131 core decides to handle the event, and calls into linux-nat.c, we
132 process things as in sync mode, except that we never block in
133 sigsuspend.
135 While processing an event, we may end up momentarily blocked in
136 waitpid calls.  Those waitpid calls, while blocking, are guaranteed to
137 return quickly. E.g., in all-stop mode, before reporting to the core
138 that an LWP hit a breakpoint, all LWPs are stopped by sending them
139 SIGSTOP, and synchronously waiting for the SIGSTOP to be reported.
140 Note that this is different from blocking indefinitely waiting for the
141 next event --- here, we're already handling an event.
143 Use of signals
144 ==============
146 We stop threads by sending a SIGSTOP. The use of SIGSTOP instead of another
147 signal is not entirely significant; we just need for a signal to be delivered,
148 so that we can intercept it. SIGSTOP's advantage is that it can not be
149 blocked. A disadvantage is that it is not a real-time signal, so it can only
150 be queued once; we do not keep track of other sources of SIGSTOP.
152 Two other signals that can't be blocked are SIGCONT and SIGKILL. But we can't
153 use them, because they have special behavior when the signal is generated -
154 not when it is delivered. SIGCONT resumes the entire thread group and SIGKILL
155 kills the entire thread group.
157 A delivered SIGSTOP would stop the entire thread group, not just the thread we
158 tkill'd. But we never let the SIGSTOP be delivered; we always intercept and
159 cancel it (by PTRACE_CONT without passing SIGSTOP).
161 We could use a real-time signal instead. This would solve those problems; we
162 could use PTRACE_GETSIGINFO to locate the specific stop signals sent by GDB.
163 But we would still have to have some support for SIGSTOP, since PTRACE_ATTACH
164 generates it, and there are races with trying to find a signal that is not
165 blocked.
167 Exec events
168 ===========
170 The case of a thread group (process) with 3 or more threads, and a
171 thread other than the leader execs is worth detailing:
173 On an exec, the Linux kernel destroys all threads except the execing
174 one in the thread group, and resets the execing thread's tid to the
175 tgid. No exit notification is sent for the execing thread -- from the
176 ptracer's perspective, it appears as though the execing thread just
177 vanishes. Until we reap all other threads except the leader and the
178 execing thread, the leader will be zombie, and the execing thread will
179 be in `D (disc sleep)' state. As soon as all other threads are
180 reaped, the execing thread changes its tid to the tgid, and the
181 previous (zombie) leader vanishes, giving place to the "new"
182 leader.
184 Accessing inferior memory
185 =========================
187 To access inferior memory, we strongly prefer /proc/PID/mem. We
188 fallback to ptrace if and only if /proc/PID/mem is not writable, as a
189 concession for obsolescent kernels (such as found in RHEL6). For
190 modern kernels, the fallback shouldn't trigger. GDBserver does not
191 have the ptrace fallback already, and at some point, we'll consider
192 removing it from native GDB too.
194 /proc/PID/mem has a few advantages over alternatives like
195 PTRACE_PEEKTEXT/PTRACE_POKETEXT or process_vm_readv/process_vm_writev:
197 - Because we can use a single read/write call, /proc/PID/mem can be
198 much more efficient than banging away at
199 PTRACE_PEEKTEXT/PTRACE_POKETEXT, one word at a time.
201 - /proc/PID/mem allows writing to read-only pages, which we need to
202 e.g., plant breakpoint instructions. process_vm_writev does not
203 allow this.
205 - /proc/PID/mem allows memory access even if all threads are running.
206 OTOH, PTRACE_PEEKTEXT/PTRACE_POKETEXT require passing down the tid
207 of a stopped task. This lets us e.g., install breakpoints while the
208 inferior is running, clear a displaced stepping scratch pad when the
209 thread that was displaced stepping exits, print inferior globals,
210 etc., all without having to worry about temporarily pausing some
211 thread.
213 - /proc/PID/mem does not suffer from a race that could cause us to
214 access memory of the wrong address space when the inferior execs.
216 process_vm_readv/process_vm_writev have this problem.
218 E.g., say GDB decides to write to memory just while the inferior
219 execs. In this scenario, GDB could write memory to the post-exec
220 address space thinking it was writing to the pre-exec address space,
221 with high probability of corrupting the inferior. Or if GDB decides
222 instead to read memory just while the inferior execs, it could read
223 bogus contents out of the wrong address space.
225 ptrace used to have this problem too, but no longer has since Linux
226 commit dbb5afad100a ("ptrace: make ptrace() fail if the tracee
227 changed its pid unexpectedly"), in Linux 5.13. (And if ptrace were
228 ever changed to allow access memory via zombie or running threads,
229 it would better not forget to consider this scenario.)
231 We avoid this race with /proc/PID/mem, by opening the file as soon
232 as we start debugging the inferior, when it is known the inferior is
233 stopped, and holding on to the open file descriptor, to be used
234 whenever we need to access inferior memory. If the inferior execs
235 or exits, reading/writing from/to the file returns 0 (EOF),
236 indicating the address space is gone, and so we return
237 TARGET_XFER_EOF to the core. We close the old file and open a new
238 one when we finally see the PTRACE_EVENT_EXEC event. */
240 #ifndef O_LARGEFILE
241 #define O_LARGEFILE 0
242 #endif
244 struct linux_nat_target *linux_target;
246 /* Does the current host support PTRACE_GETREGSET? */
247 enum tribool have_ptrace_getregset = TRIBOOL_UNKNOWN;
249 /* When true, print debug messages relating to the linux native target. */
251 static bool debug_linux_nat;
253 /* Implement 'show debug linux-nat'. */
255 static void
256 show_debug_linux_nat (struct ui_file *file, int from_tty,
257 struct cmd_list_element *c, const char *value)
259 gdb_printf (file, _("Debugging of GNU/Linux native targets is %s.\n"),
260 value);
263 /* Print a linux-nat debug statement. */
265 #define linux_nat_debug_printf(fmt, ...) \
266 debug_prefixed_printf_cond (debug_linux_nat, "linux-nat", fmt, ##__VA_ARGS__)
268 /* Print "linux-nat" enter/exit debug statements. */
270 #define LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT \
271 scoped_debug_enter_exit (debug_linux_nat, "linux-nat")
273 struct simple_pid_list
275 int pid;
276 int status;
277 struct simple_pid_list *next;
279 static struct simple_pid_list *stopped_pids;
281 /* Whether target_thread_events is in effect. */
282 static int report_thread_events;
284 static int kill_lwp (int lwpid, int signo);
286 static int stop_callback (struct lwp_info *lp);
288 static void block_child_signals (sigset_t *prev_mask);
289 static void restore_child_signals_mask (sigset_t *prev_mask);
291 struct lwp_info;
292 static struct lwp_info *add_lwp (ptid_t ptid);
293 static void purge_lwp_list (int pid);
294 static void delete_lwp (ptid_t ptid);
295 static struct lwp_info *find_lwp_pid (ptid_t ptid);
297 static int lwp_status_pending_p (struct lwp_info *lp);
299 static void save_stop_reason (struct lwp_info *lp);
301 static bool proc_mem_file_is_writable ();
302 static void close_proc_mem_file (pid_t pid);
303 static void open_proc_mem_file (ptid_t ptid);
305 /* Return TRUE if LWP is the leader thread of the process. */
307 static bool
308 is_leader (lwp_info *lp)
310 return lp->ptid.pid () == lp->ptid.lwp ();
313 /* Convert an LWP's pending status to a std::string. */
315 static std::string
316 pending_status_str (lwp_info *lp)
318 gdb_assert (lwp_status_pending_p (lp));
320 if (lp->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
321 return lp->waitstatus.to_string ();
322 else
323 return status_to_str (lp->status);
326 /* Return true if we should report exit events for LP. */
328 static bool
329 report_exit_events_for (lwp_info *lp)
331 thread_info *thr = linux_target->find_thread (lp->ptid);
332 gdb_assert (thr != nullptr);
334 return (report_thread_events
335 || (thr->thread_options () & GDB_THREAD_OPTION_EXIT) != 0);
339 /* LWP accessors. */
341 /* See nat/linux-nat.h. */
343 ptid_t
344 ptid_of_lwp (struct lwp_info *lwp)
346 return lwp->ptid;
349 /* See nat/linux-nat.h. */
351 void
352 lwp_set_arch_private_info (struct lwp_info *lwp,
353 struct arch_lwp_info *info)
355 lwp->arch_private = info;
358 /* See nat/linux-nat.h. */
360 struct arch_lwp_info *
361 lwp_arch_private_info (struct lwp_info *lwp)
363 return lwp->arch_private;
366 /* See nat/linux-nat.h. */
369 lwp_is_stopped (struct lwp_info *lwp)
371 return lwp->stopped;
374 /* See nat/linux-nat.h. */
376 enum target_stop_reason
377 lwp_stop_reason (struct lwp_info *lwp)
379 return lwp->stop_reason;
382 /* See nat/linux-nat.h. */
385 lwp_is_stepping (struct lwp_info *lwp)
387 return lwp->step;
391 /* Trivial list manipulation functions to keep track of a list of
392 new stopped processes. */
393 static void
394 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
396 struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);
398 new_pid->pid = pid;
399 new_pid->status = status;
400 new_pid->next = *listp;
401 *listp = new_pid;
404 static int
405 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
407 struct simple_pid_list **p;
409 for (p = listp; *p != NULL; p = &(*p)->next)
410 if ((*p)->pid == pid)
412 struct simple_pid_list *next = (*p)->next;
414 *statusp = (*p)->status;
415 xfree (*p);
416 *p = next;
417 return 1;
419 return 0;
422 /* Return the ptrace options that we want to try to enable. */
424 static int
425 linux_nat_ptrace_options (int attached)
427 int options = 0;
429 if (!attached)
430 options |= PTRACE_O_EXITKILL;
432 options |= (PTRACE_O_TRACESYSGOOD
433 | PTRACE_O_TRACEVFORKDONE
434 | PTRACE_O_TRACEVFORK
435 | PTRACE_O_TRACEFORK
436 | PTRACE_O_TRACEEXEC);
438 return options;
441 /* Initialize ptrace and procfs warnings and check for supported
442 ptrace features given PID.
444 ATTACHED should be nonzero iff we attached to the inferior. */
446 static void
447 linux_init_ptrace_procfs (pid_t pid, int attached)
449 int options = linux_nat_ptrace_options (attached);
451 linux_enable_event_reporting (pid, options);
452 linux_ptrace_init_warnings ();
453 linux_proc_init_warnings ();
454 proc_mem_file_is_writable ();
457 linux_nat_target::~linux_nat_target ()
460 void
461 linux_nat_target::post_attach (int pid)
463 linux_init_ptrace_procfs (pid, 1);
466 /* Implement the virtual inf_ptrace_target::post_startup_inferior method. */
468 void
469 linux_nat_target::post_startup_inferior (ptid_t ptid)
471 linux_init_ptrace_procfs (ptid.pid (), 0);
474 /* Return the number of known LWPs in the tgid given by PID. */
476 static int
477 num_lwps (int pid)
479 int count = 0;
481 for (const lwp_info *lp ATTRIBUTE_UNUSED : all_lwps ())
482 if (lp->ptid.pid () == pid)
483 count++;
485 return count;
488 /* Deleter for lwp_info unique_ptr specialisation. */
490 struct lwp_deleter
492 void operator() (struct lwp_info *lwp) const
494 delete_lwp (lwp->ptid);
498 /* A unique_ptr specialisation for lwp_info. */
500 typedef std::unique_ptr<struct lwp_info, lwp_deleter> lwp_info_up;
502 /* Target hook for follow_fork. */
504 void
505 linux_nat_target::follow_fork (inferior *child_inf, ptid_t child_ptid,
506 target_waitkind fork_kind, bool follow_child,
507 bool detach_fork)
509 inf_ptrace_target::follow_fork (child_inf, child_ptid, fork_kind,
510 follow_child, detach_fork);
512 if (!follow_child)
514 bool has_vforked = fork_kind == TARGET_WAITKIND_VFORKED;
515 ptid_t parent_ptid = inferior_ptid;
516 int parent_pid = parent_ptid.lwp ();
517 int child_pid = child_ptid.lwp ();
519 /* We're already attached to the parent, by default. */
520 lwp_info *child_lp = add_lwp (child_ptid);
521 child_lp->stopped = 1;
522 child_lp->last_resume_kind = resume_stop;
524 /* Detach new forked process? */
525 if (detach_fork)
527 int child_stop_signal = 0;
528 bool detach_child = true;
530 /* Move CHILD_LP into a unique_ptr and clear the source pointer
531 to prevent us doing anything stupid with it. */
532 lwp_info_up child_lp_ptr (child_lp);
533 child_lp = nullptr;
535 linux_target->low_prepare_to_resume (child_lp_ptr.get ());
537 /* When debugging an inferior in an architecture that supports
538 hardware single stepping on a kernel without commit
539 6580807da14c423f0d0a708108e6df6ebc8bc83d, the vfork child
540 process starts with the TIF_SINGLESTEP/X86_EFLAGS_TF bits
541 set if the parent process had them set.
542 To work around this, single step the child process
543 once before detaching to clear the flags. */
545 /* Note that we consult the parent's architecture instead of
546 the child's because there's no inferior for the child at
547 this point. */
548 if (!gdbarch_software_single_step_p (target_thread_architecture
549 (parent_ptid)))
551 int status;
553 linux_disable_event_reporting (child_pid);
554 if (ptrace (PTRACE_SINGLESTEP, child_pid, 0, 0) < 0)
555 perror_with_name (_("Couldn't do single step"));
556 if (my_waitpid (child_pid, &status, 0) < 0)
557 perror_with_name (_("Couldn't wait vfork process"));
558 else
560 detach_child = WIFSTOPPED (status);
561 child_stop_signal = WSTOPSIG (status);
565 if (detach_child)
567 int signo = child_stop_signal;
569 if (signo != 0
570 && !signal_pass_state (gdb_signal_from_host (signo)))
571 signo = 0;
572 ptrace (PTRACE_DETACH, child_pid, 0, signo);
574 close_proc_mem_file (child_pid);
578 if (has_vforked)
580 lwp_info *parent_lp = find_lwp_pid (parent_ptid);
581 linux_nat_debug_printf ("waiting for VFORK_DONE on %d", parent_pid);
582 parent_lp->stopped = 1;
584 /* We'll handle the VFORK_DONE event like any other
585 event, in target_wait. */
588 else
590 struct lwp_info *child_lp;
592 child_lp = add_lwp (child_ptid);
593 child_lp->stopped = 1;
594 child_lp->last_resume_kind = resume_stop;
600 linux_nat_target::insert_fork_catchpoint (int pid)
602 return 0;
606 linux_nat_target::remove_fork_catchpoint (int pid)
608 return 0;
612 linux_nat_target::insert_vfork_catchpoint (int pid)
614 return 0;
618 linux_nat_target::remove_vfork_catchpoint (int pid)
620 return 0;
624 linux_nat_target::insert_exec_catchpoint (int pid)
626 return 0;
630 linux_nat_target::remove_exec_catchpoint (int pid)
632 return 0;
636 linux_nat_target::set_syscall_catchpoint (int pid, bool needed, int any_count,
637 gdb::array_view<const int> syscall_counts)
639 /* On GNU/Linux, we ignore the arguments. It means that we only
640 enable the syscall catchpoints, but do not disable them.
642 Also, we do not use the `syscall_counts' information because we do not
643 filter system calls here. We let GDB do the logic for us. */
644 return 0;
647 /* List of known LWPs, keyed by LWP PID. This speeds up the common
648 case of mapping a PID returned from the kernel to our corresponding
649 lwp_info data structure. */
650 static htab_t lwp_lwpid_htab;
652 /* Calculate a hash from a lwp_info's LWP PID. */
654 static hashval_t
655 lwp_info_hash (const void *ap)
657 const struct lwp_info *lp = (struct lwp_info *) ap;
658 pid_t pid = lp->ptid.lwp ();
660 return iterative_hash_object (pid, 0);
663 /* Equality function for the lwp_info hash table. Compares the LWP's
664 PID. */
666 static int
667 lwp_lwpid_htab_eq (const void *a, const void *b)
669 const struct lwp_info *entry = (const struct lwp_info *) a;
670 const struct lwp_info *element = (const struct lwp_info *) b;
672 return entry->ptid.lwp () == element->ptid.lwp ();
675 /* Create the lwp_lwpid_htab hash table. */
677 static void
678 lwp_lwpid_htab_create (void)
680 lwp_lwpid_htab = htab_create (100, lwp_info_hash, lwp_lwpid_htab_eq, NULL);
683 /* Add LP to the hash table. */
685 static void
686 lwp_lwpid_htab_add_lwp (struct lwp_info *lp)
688 void **slot;
690 slot = htab_find_slot (lwp_lwpid_htab, lp, INSERT);
691 gdb_assert (slot != NULL && *slot == NULL);
692 *slot = lp;
695 /* Head of doubly-linked list of known LWPs. Sorted by reverse
696 creation order. This order is assumed in some cases. E.g.,
697 reaping status after killing all lwps of a process: the leader LWP
698 must be reaped last. */
700 static intrusive_list<lwp_info> lwp_list;
702 /* See linux-nat.h. */
704 lwp_info_range
705 all_lwps ()
707 return lwp_info_range (lwp_list.begin ());
710 /* See linux-nat.h. */
712 lwp_info_safe_range
713 all_lwps_safe ()
715 return lwp_info_safe_range (lwp_list.begin ());
718 /* Add LP to sorted-by-reverse-creation-order doubly-linked list. */
720 static void
721 lwp_list_add (struct lwp_info *lp)
723 lwp_list.push_front (*lp);
726 /* Remove LP from sorted-by-reverse-creation-order doubly-linked
727 list. */
729 static void
730 lwp_list_remove (struct lwp_info *lp)
732 /* Remove from sorted-by-creation-order list. */
733 lwp_list.erase (lwp_list.iterator_to (*lp));
738 /* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
739 _initialize_linux_nat. */
740 static sigset_t suspend_mask;
742 /* Signals to block to make that sigsuspend work. */
743 static sigset_t blocked_mask;
745 /* SIGCHLD action. */
746 static struct sigaction sigchld_action;
748 /* Block child signals (SIGCHLD and linux threads signals), and store
749 the previous mask in PREV_MASK. */
751 static void
752 block_child_signals (sigset_t *prev_mask)
754 /* Make sure SIGCHLD is blocked. */
755 if (!sigismember (&blocked_mask, SIGCHLD))
756 sigaddset (&blocked_mask, SIGCHLD);
758 gdb_sigmask (SIG_BLOCK, &blocked_mask, prev_mask);
761 /* Restore child signals mask, previously returned by
762 block_child_signals. */
764 static void
765 restore_child_signals_mask (sigset_t *prev_mask)
767 gdb_sigmask (SIG_SETMASK, prev_mask, NULL);
770 /* Mask of signals to pass directly to the inferior. */
771 static sigset_t pass_mask;
773 /* Update signals to pass to the inferior. */
774 void
775 linux_nat_target::pass_signals
776 (gdb::array_view<const unsigned char> pass_signals)
778 int signo;
780 sigemptyset (&pass_mask);
782 for (signo = 1; signo < NSIG; signo++)
784 int target_signo = gdb_signal_from_host (signo);
785 if (target_signo < pass_signals.size () && pass_signals[target_signo])
786 sigaddset (&pass_mask, signo);
792 /* Prototypes for local functions. */
793 static int stop_wait_callback (struct lwp_info *lp);
794 static int resume_stopped_resumed_lwps (struct lwp_info *lp, const ptid_t wait_ptid);
795 static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);
799 /* Destroy and free LP. */
801 lwp_info::~lwp_info ()
803 /* Let the arch specific bits release arch_lwp_info. */
804 linux_target->low_delete_thread (this->arch_private);
807 /* Traversal function for purge_lwp_list. */
809 static int
810 lwp_lwpid_htab_remove_pid (void **slot, void *info)
812 struct lwp_info *lp = (struct lwp_info *) *slot;
813 int pid = *(int *) info;
815 if (lp->ptid.pid () == pid)
817 htab_clear_slot (lwp_lwpid_htab, slot);
818 lwp_list_remove (lp);
819 delete lp;
822 return 1;
825 /* Remove all LWPs belong to PID from the lwp list. */
827 static void
828 purge_lwp_list (int pid)
830 htab_traverse_noresize (lwp_lwpid_htab, lwp_lwpid_htab_remove_pid, &pid);
833 /* Add the LWP specified by PTID to the list. PTID is the first LWP
834 in the process. Return a pointer to the structure describing the
835 new LWP.
837 This differs from add_lwp in that we don't let the arch specific
838 bits know about this new thread. Current clients of this callback
839 take the opportunity to install watchpoints in the new thread, and
840 we shouldn't do that for the first thread. If we're spawning a
841 child ("run"), the thread executes the shell wrapper first, and we
842 shouldn't touch it until it execs the program we want to debug.
843 For "attach", it'd be okay to call the callback, but it's not
844 necessary, because watchpoints can't yet have been inserted into
845 the inferior. */
847 static struct lwp_info *
848 add_initial_lwp (ptid_t ptid)
850 gdb_assert (ptid.lwp_p ());
852 lwp_info *lp = new lwp_info (ptid);
855 /* Add to sorted-by-reverse-creation-order list. */
856 lwp_list_add (lp);
858 /* Add to keyed-by-pid htab. */
859 lwp_lwpid_htab_add_lwp (lp);
861 return lp;
864 /* Add the LWP specified by PID to the list. Return a pointer to the
865 structure describing the new LWP. The LWP should already be
866 stopped. */
868 static struct lwp_info *
869 add_lwp (ptid_t ptid)
871 struct lwp_info *lp;
873 lp = add_initial_lwp (ptid);
875 /* Let the arch specific bits know about this new thread. Current
876 clients of this callback take the opportunity to install
877 watchpoints in the new thread. We don't do this for the first
878 thread though. See add_initial_lwp. */
879 linux_target->low_new_thread (lp);
881 return lp;
884 /* Remove the LWP specified by PID from the list. */
886 static void
887 delete_lwp (ptid_t ptid)
889 lwp_info dummy (ptid);
891 void **slot = htab_find_slot (lwp_lwpid_htab, &dummy, NO_INSERT);
892 if (slot == NULL)
893 return;
895 lwp_info *lp = *(struct lwp_info **) slot;
896 gdb_assert (lp != NULL);
898 htab_clear_slot (lwp_lwpid_htab, slot);
900 /* Remove from sorted-by-creation-order list. */
901 lwp_list_remove (lp);
903 /* Release. */
904 delete lp;
907 /* Return a pointer to the structure describing the LWP corresponding
908 to PID. If no corresponding LWP could be found, return NULL. */
910 static struct lwp_info *
911 find_lwp_pid (ptid_t ptid)
913 int lwp;
915 if (ptid.lwp_p ())
916 lwp = ptid.lwp ();
917 else
918 lwp = ptid.pid ();
920 lwp_info dummy (ptid_t (0, lwp));
921 return (struct lwp_info *) htab_find (lwp_lwpid_htab, &dummy);
924 /* See nat/linux-nat.h. */
926 struct lwp_info *
927 iterate_over_lwps (ptid_t filter,
928 gdb::function_view<iterate_over_lwps_ftype> callback)
930 for (lwp_info *lp : all_lwps_safe ())
932 if (lp->ptid.matches (filter))
934 if (callback (lp) != 0)
935 return lp;
939 return NULL;
942 /* Update our internal state when changing from one checkpoint to
943 another indicated by NEW_PTID. We can only switch single-threaded
944 applications, so we only create one new LWP, and the previous list
945 is discarded. */
947 void
948 linux_nat_switch_fork (ptid_t new_ptid)
950 struct lwp_info *lp;
952 purge_lwp_list (inferior_ptid.pid ());
954 lp = add_lwp (new_ptid);
955 lp->stopped = 1;
957 /* This changes the thread's ptid while preserving the gdb thread
958 num. Also changes the inferior pid, while preserving the
959 inferior num. */
960 thread_change_ptid (linux_target, inferior_ptid, new_ptid);
962 /* We've just told GDB core that the thread changed target id, but,
963 in fact, it really is a different thread, with different register
964 contents. */
965 registers_changed ();
968 /* Handle the exit of a single thread LP. If DEL_THREAD is true,
969 delete the thread_info associated to LP, if it exists. */
971 static void
972 exit_lwp (struct lwp_info *lp, bool del_thread = true)
974 struct thread_info *th = linux_target->find_thread (lp->ptid);
976 if (th != nullptr && del_thread)
977 delete_thread (th);
979 delete_lwp (lp->ptid);
982 /* Wait for the LWP specified by LP, which we have just attached to.
983 Returns a wait status for that LWP, to cache. */
985 static int
986 linux_nat_post_attach_wait (ptid_t ptid, int *signalled)
988 pid_t new_pid, pid = ptid.lwp ();
989 int status;
991 if (linux_proc_pid_is_stopped (pid))
993 linux_nat_debug_printf ("Attaching to a stopped process");
995 /* The process is definitely stopped. It is in a job control
996 stop, unless the kernel predates the TASK_STOPPED /
997 TASK_TRACED distinction, in which case it might be in a
998 ptrace stop. Make sure it is in a ptrace stop; from there we
999 can kill it, signal it, et cetera.
1001 First make sure there is a pending SIGSTOP. Since we are
1002 already attached, the process can not transition from stopped
1003 to running without a PTRACE_CONT; so we know this signal will
1004 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
1005 probably already in the queue (unless this kernel is old
1006 enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP
1007 is not an RT signal, it can only be queued once. */
1008 kill_lwp (pid, SIGSTOP);
1010 /* Finally, resume the stopped process. This will deliver the SIGSTOP
1011 (or a higher priority signal, just like normal PTRACE_ATTACH). */
1012 ptrace (PTRACE_CONT, pid, 0, 0);
1015 /* Make sure the initial process is stopped. The user-level threads
1016 layer might want to poke around in the inferior, and that won't
1017 work if things haven't stabilized yet. */
1018 new_pid = my_waitpid (pid, &status, __WALL);
1019 gdb_assert (pid == new_pid);
1021 if (!WIFSTOPPED (status))
1023 /* The pid we tried to attach has apparently just exited. */
1024 linux_nat_debug_printf ("Failed to stop %d: %s", pid,
1025 status_to_str (status).c_str ());
1026 return status;
1029 if (WSTOPSIG (status) != SIGSTOP)
1031 *signalled = 1;
1032 linux_nat_debug_printf ("Received %s after attaching",
1033 status_to_str (status).c_str ());
1036 return status;
1039 void
1040 linux_nat_target::create_inferior (const char *exec_file,
1041 const std::string &allargs,
1042 char **env, int from_tty)
1044 maybe_disable_address_space_randomization restore_personality
1045 (disable_randomization);
1047 /* The fork_child mechanism is synchronous and calls target_wait, so
1048 we have to mask the async mode. */
1050 /* Make sure we report all signals during startup. */
1051 pass_signals ({});
1053 inf_ptrace_target::create_inferior (exec_file, allargs, env, from_tty);
1055 open_proc_mem_file (inferior_ptid);
1058 /* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
1059 already attached. Returns true if a new LWP is found, false
1060 otherwise. */
1062 static int
1063 attach_proc_task_lwp_callback (ptid_t ptid)
1065 struct lwp_info *lp;
1067 /* Ignore LWPs we're already attached to. */
1068 lp = find_lwp_pid (ptid);
1069 if (lp == NULL)
1071 int lwpid = ptid.lwp ();
1073 if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) < 0)
1075 int err = errno;
1077 /* Be quiet if we simply raced with the thread exiting.
1078 EPERM is returned if the thread's task still exists, and
1079 is marked as exited or zombie, as well as other
1080 conditions, so in that case, confirm the status in
1081 /proc/PID/status. */
1082 if (err == ESRCH
1083 || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
1085 linux_nat_debug_printf
1086 ("Cannot attach to lwp %d: thread is gone (%d: %s)",
1087 lwpid, err, safe_strerror (err));
1090 else
1092 std::string reason
1093 = linux_ptrace_attach_fail_reason_string (ptid, err);
1095 error (_("Cannot attach to lwp %d: %s"),
1096 lwpid, reason.c_str ());
1099 else
1101 linux_nat_debug_printf ("PTRACE_ATTACH %s, 0, 0 (OK)",
1102 ptid.to_string ().c_str ());
1104 lp = add_lwp (ptid);
1106 /* The next time we wait for this LWP we'll see a SIGSTOP as
1107 PTRACE_ATTACH brings it to a halt. */
1108 lp->signalled = 1;
1110 /* We need to wait for a stop before being able to make the
1111 next ptrace call on this LWP. */
1112 lp->must_set_ptrace_flags = 1;
1114 /* So that wait collects the SIGSTOP. */
1115 lp->resumed = 1;
1118 return 1;
1120 return 0;
/* target_ops::attach implementation: attach to the process specified
   by ARGS (see parse_pid_to_attach), then attach to every LWP of that
   process.  Throws on failure.  */
1123 void
1124 linux_nat_target::attach (const char *args, int from_tty)
1126 struct lwp_info *lp;
1127 int status;
1128 ptid_t ptid;
1130 /* Make sure we report all signals during attach. */
1131 pass_signals ({});
/* The base-class attach below runs inside a try block (its 'catch'
   follows); on failure the exception text is augmented with a
   Linux-specific reason, when one can be determined.  */
1135 inf_ptrace_target::attach (args, from_tty);
1137 catch (const gdb_exception_error &ex)
1139 pid_t pid = parse_pid_to_attach (args);
1140 std::string reason = linux_ptrace_attach_fail_reason (pid);
1142 if (!reason.empty ())
1143 throw_error (ex.error, "warning: %s\n%s", reason.c_str (),
1144 ex.what ());
1145 else
1146 throw_error (ex.error, "%s", ex.what ());
1149 /* The ptrace base target adds the main thread with (pid,0,0)
1150 format. Decorate it with lwp info. */
1151 ptid = ptid_t (inferior_ptid.pid (),
1152 inferior_ptid.pid ());
1153 thread_change_ptid (linux_target, inferior_ptid, ptid);
1155 /* Add the initial process as the first LWP to the list. */
1156 lp = add_initial_lwp (ptid);
1158 status = linux_nat_post_attach_wait (lp->ptid, &lp->signalled);
1159 if (!WIFSTOPPED (status))
1161 if (WIFEXITED (status))
1163 int exit_code = WEXITSTATUS (status);
1165 target_terminal::ours ();
1166 target_mourn_inferior (inferior_ptid);
1167 if (exit_code == 0)
1168 error (_("Unable to attach: program exited normally."));
1169 else
1170 error (_("Unable to attach: program exited with code %d."),
1171 exit_code);
1173 else if (WIFSIGNALED (status))
1175 enum gdb_signal signo;
1177 target_terminal::ours ();
1178 target_mourn_inferior (inferior_ptid);
1180 signo = gdb_signal_from_host (WTERMSIG (status));
1181 error (_("Unable to attach: program terminated with signal "
1182 "%s, %s."),
1183 gdb_signal_to_name (signo),
1184 gdb_signal_to_string (signo));
1187 internal_error (_("unexpected status %d for PID %ld"),
1188 status, (long) ptid.lwp ());
1191 lp->stopped = 1;
1193 open_proc_mem_file (lp->ptid);
1195 /* Save the wait status to report later. */
1196 lp->resumed = 1;
1197 linux_nat_debug_printf ("waitpid %ld, saving status %s",
1198 (long) lp->ptid.pid (),
1199 status_to_str (status).c_str ());
1201 lp->status = status;
1203 /* We must attach to every LWP. If /proc is mounted, use that to
1204 find them now. The inferior may be using raw clone instead of
1205 using pthreads. But even if it is using pthreads, thread_db
1206 walks structures in the inferior's address space to find the list
1207 of threads/LWPs, and those structures may well be corrupted.
1208 Note that once thread_db is loaded, we'll still use it to list
1209 threads and associate pthread info with each LWP. */
/* This call, too, runs inside a try block (its 'catch' follows):
   failing to attach to any one LWP rolls back all the attachments
   made so far.  */
1212 linux_proc_attach_tgid_threads (lp->ptid.pid (),
1213 attach_proc_task_lwp_callback);
1215 catch (const gdb_exception_error &)
1217 /* Failed to attach to some LWP. Detach any we've already
1218 attached to. */
1219 iterate_over_lwps (ptid_t (ptid.pid ()),
1220 [] (struct lwp_info *lwp) -> int
1222 /* Ignore errors when detaching. */
1223 ptrace (PTRACE_DETACH, lwp->ptid.lwp (), 0, 0);
1224 delete_lwp (lwp->ptid);
1225 return 0;
1228 target_terminal::ours ();
1229 target_mourn_inferior (inferior_ptid);
1231 throw;
1234 /* Add all the LWPs to gdb's thread list. */
1235 iterate_over_lwps (ptid_t (ptid.pid ()),
1236 [] (struct lwp_info *lwp) -> int
1238 if (lwp->ptid.pid () != lwp->ptid.lwp ())
1240 add_thread (linux_target, lwp->ptid);
1241 set_running (linux_target, lwp->ptid, true);
1242 set_executing (linux_target, lwp->ptid, true);
1244 return 0;
1248 /* Ptrace-detach the thread with pid PID. */
1250 static void
1251 detach_one_pid (int pid, int signo)
1253 if (ptrace (PTRACE_DETACH, pid, 0, signo) < 0)
1255 int save_errno = errno;
1257 /* We know the thread exists, so ESRCH must mean the lwp is
1258 zombie. This can happen if one of the already-detached
1259 threads exits the whole thread group. In that case we're
1260 still attached, and must reap the lwp. */
1261 if (save_errno == ESRCH)
1263 int ret, status;
1265 ret = my_waitpid (pid, &status, __WALL);
1266 if (ret == -1)
1268 warning (_("Couldn't reap LWP %d while detaching: %s"),
1269 pid, safe_strerror (errno));
1271 else if (!WIFEXITED (status) && !WIFSIGNALED (status))
1273 warning (_("Reaping LWP %d while detaching "
1274 "returned unexpected status 0x%x"),
1275 pid, status);
1278 else
1279 error (_("Can't detach %d: %s"),
1280 pid, safe_strerror (save_errno));
1282 else
1283 linux_nat_debug_printf ("PTRACE_DETACH (%d, %s, 0) (OK)",
1284 pid, strsignal (signo));
1287 /* Get pending signal of THREAD as a host signal number, for detaching
1288 purposes. This is the signal the thread last stopped for, which we
1289 need to deliver to the thread when detaching, otherwise, it'd be
1290 suppressed/lost. */
/* Returns the host signal number to deliver on detach, or 0 when no
   signal should be passed (no pending signal, or the signal is not in
   "pass" state).  */
1292 static int
1293 get_detach_signal (struct lwp_info *lp)
1295 enum gdb_signal signo = GDB_SIGNAL_0;
1297 /* If we paused threads momentarily, we may have stored pending
1298 events in lp->status or lp->waitstatus (see stop_wait_callback),
1299 and GDB core hasn't seen any signal for those threads.
1300 Otherwise, the last signal reported to the core is found in the
1301 thread object's stop_signal.
1303 There's a corner case that isn't handled here at present. Only
1304 if the thread stopped with a TARGET_WAITKIND_STOPPED does
1305 stop_signal make sense as a real signal to pass to the inferior.
1306 Some catchpoint related events, like
1307 TARGET_WAITKIND_(V)FORK|EXEC|SYSCALL, have their stop_signal set
1308 to GDB_SIGNAL_SIGTRAP when the catchpoint triggers. But,
1309 those traps are debug API (ptrace in our case) related and
1310 induced; the inferior wouldn't see them if it wasn't being
1311 traced. Hence, we should never pass them to the inferior, even
1312 when set to pass state. Since this corner case isn't handled by
1313 infrun.c when proceeding with a signal, for consistency, neither
1314 do we handle it here (or elsewhere in the file we check for
1315 signal pass state). Normally SIGTRAP isn't set to pass state, so
1316 this is really a corner case. */
1318 if (lp->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
1319 signo = GDB_SIGNAL_0; /* a pending ptrace event, not a real signal. */
1320 else if (lp->status)
1321 signo = gdb_signal_from_host (WSTOPSIG (lp->status));
1322 else
1324 thread_info *tp = linux_target->find_thread (lp->ptid);
/* Non-stop, thread not executing: the thread's own pending status or
   last stop signal is authoritative.  */
1326 if (target_is_non_stop_p () && !tp->executing ())
1328 if (tp->has_pending_waitstatus ())
1330 /* If the thread has a pending event, and it was stopped with a
1331 signal, use that signal to resume it. If it has a pending
1332 event of another kind, it was not stopped with a signal, so
1333 resume it without a signal. */
1334 if (tp->pending_waitstatus ().kind () == TARGET_WAITKIND_STOPPED)
1335 signo = tp->pending_waitstatus ().sig ();
1336 else
1337 signo = GDB_SIGNAL_0;
1339 else
1340 signo = tp->stop_signal ();
/* All-stop: only the thread that last reported an event to the core
   has a meaningful stop_signal, hence the last-target-status check.  */
1342 else if (!target_is_non_stop_p ())
1344 ptid_t last_ptid;
1345 process_stratum_target *last_target;
1347 get_last_target_status (&last_target, &last_ptid, nullptr);
1349 if (last_target == linux_target
1350 && lp->ptid.lwp () == last_ptid.lwp ())
1351 signo = tp->stop_signal ();
1355 if (signo == GDB_SIGNAL_0)
1357 linux_nat_debug_printf ("lwp %s has no pending signal",
1358 lp->ptid.to_string ().c_str ());
1360 else if (!signal_pass_state (signo))
1362 linux_nat_debug_printf
1363 ("lwp %s had signal %s but it is in no pass state",
1364 lp->ptid.to_string ().c_str (), gdb_signal_to_string (signo));
1366 else
1368 linux_nat_debug_printf ("lwp %s has pending signal %s",
1369 lp->ptid.to_string ().c_str (),
1370 gdb_signal_to_string (signo));
1372 return gdb_signal_to_host (signo);
1375 return 0;
1378 /* If LP has a pending fork/vfork/clone status, return it. */
1380 static std::optional<target_waitstatus>
1381 get_pending_child_status (lwp_info *lp)
1383 LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT;
1385 linux_nat_debug_printf ("lwp %s (stopped = %d)",
1386 lp->ptid.to_string ().c_str (), lp->stopped);
1388 /* Check in lwp_info::status. */
1389 if (WIFSTOPPED (lp->status) && linux_is_extended_waitstatus (lp->status))
1391 int event = linux_ptrace_get_extended_event (lp->status);
1393 if (event == PTRACE_EVENT_FORK
1394 || event == PTRACE_EVENT_VFORK
1395 || event == PTRACE_EVENT_CLONE)
1397 unsigned long child_pid;
1398 int ret = ptrace (PTRACE_GETEVENTMSG, lp->ptid.lwp (), 0, &child_pid);
1399 if (ret == 0)
1401 target_waitstatus ws;
1403 if (event == PTRACE_EVENT_FORK)
1404 ws.set_forked (ptid_t (child_pid, child_pid));
1405 else if (event == PTRACE_EVENT_VFORK)
1406 ws.set_vforked (ptid_t (child_pid, child_pid));
1407 else if (event == PTRACE_EVENT_CLONE)
1408 ws.set_thread_cloned (ptid_t (lp->ptid.pid (), child_pid));
1409 else
1410 gdb_assert_not_reached ("unhandled");
1412 return ws;
1414 else
1416 perror_warning_with_name (_("Failed to retrieve event msg"));
1417 return {};
1422 /* Check in lwp_info::waitstatus. */
1423 if (is_new_child_status (lp->waitstatus.kind ()))
1424 return lp->waitstatus;
1426 thread_info *tp = linux_target->find_thread (lp->ptid);
1428 /* Check in thread_info::pending_waitstatus. */
1429 if (tp->has_pending_waitstatus ()
1430 && is_new_child_status (tp->pending_waitstatus ().kind ()))
1431 return tp->pending_waitstatus ();
1433 /* Check in thread_info::pending_follow. */
1434 if (is_new_child_status (tp->pending_follow.kind ()))
1435 return tp->pending_follow;
1437 return {};
1440 /* Detach from LP. If SIGNO_P is non-NULL, then it points to the
1441 signal number that should be passed to the LWP when detaching.
1442 Otherwise pass any pending signal the LWP may have, if any. */
1444 static void
1445 detach_one_lwp (struct lwp_info *lp, int *signo_p)
1447 int lwpid = lp->ptid.lwp ();
1448 int signo;
1450 /* If the lwp/thread we are about to detach has a pending fork/clone
1451 event, there is a process/thread GDB is attached to that the core
1452 of GDB doesn't know about. Detach from it. */
1454 std::optional<target_waitstatus> ws = get_pending_child_status (lp);
1455 if (ws.has_value ())
1456 detach_one_pid (ws->child_ptid ().lwp (), 0);
1458 /* If there is a pending SIGSTOP, get rid of it. */
1459 if (lp->signalled)
1461 linux_nat_debug_printf ("Sending SIGCONT to %s",
1462 lp->ptid.to_string ().c_str ());
1464 kill_lwp (lwpid, SIGCONT);
1465 lp->signalled = 0;
1468 /* If the lwp has exited or was terminated due to a signal, there's
1469 nothing left to do. */
1470 if (lp->waitstatus.kind () == TARGET_WAITKIND_EXITED
1471 || lp->waitstatus.kind () == TARGET_WAITKIND_THREAD_EXITED
1472 || lp->waitstatus.kind () == TARGET_WAITKIND_SIGNALLED)
1474 linux_nat_debug_printf
1475 ("Can't detach %s - it has exited or was terminated: %s.",
1476 lp->ptid.to_string ().c_str (),
1477 lp->waitstatus.to_string ().c_str ());
1478 delete_lwp (lp->ptid);
1479 return;
1482 if (signo_p == NULL)
1484 /* Pass on any pending signal for this LWP. */
1485 signo = get_detach_signal (lp);
1487 else
1488 signo = *signo_p;
1490 linux_nat_debug_printf ("preparing to resume lwp %s (stopped = %d)",
1491 lp->ptid.to_string ().c_str (),
1492 lp->stopped);
1494 /* Preparing to resume may try to write registers, and fail if the
1495 lwp is zombie. If that happens, ignore the error. We'll handle
1496 it below, when detach fails with ESRCH. */
1499 linux_target->low_prepare_to_resume (lp);
1501 catch (const gdb_exception_error &ex)
1503 if (!check_ptrace_stopped_lwp_gone (lp))
1504 throw;
1507 detach_one_pid (lwpid, signo);
1509 delete_lwp (lp->ptid);
1512 static int
1513 detach_callback (struct lwp_info *lp)
1515 /* We don't actually detach from the thread group leader just yet.
1516 If the thread group exits, we must reap the zombie clone lwps
1517 before we're able to reap the leader. */
1518 if (lp->ptid.lwp () != lp->ptid.pid ())
1519 detach_one_lwp (lp, NULL);
1520 return 0;
/* target_ops::detach implementation: stop every LWP of INF, detach
   from each of them, and finally from the main thread.  */
1523 void
1524 linux_nat_target::detach (inferior *inf, int from_tty)
1526 LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT;
1528 struct lwp_info *main_lwp;
1529 int pid = inf->pid;
1531 /* Don't unregister from the event loop, as there may be other
1532 inferiors running. */
1534 /* Stop all threads before detaching. ptrace requires that the
1535 thread is stopped to successfully detach. */
1536 iterate_over_lwps (ptid_t (pid), stop_callback);
1537 /* ... and wait until all of them have reported back that
1538 they're no longer running. */
1539 iterate_over_lwps (ptid_t (pid), stop_wait_callback);
1541 /* We can now safely remove breakpoints. We don't do this earlier
1542 in common code because this target doesn't currently support
1543 writing memory while the inferior is running. */
1544 remove_breakpoints_inf (current_inferior ());
1546 iterate_over_lwps (ptid_t (pid), detach_callback);
1548 /* We have detached from everything except the main thread now, so
1549 should only have one thread left. However, in non-stop mode the
1550 main thread might have exited, in which case we'll have no threads
1551 left. */
1552 gdb_assert (num_lwps (pid) == 1
1553 || (target_is_non_stop_p () && num_lwps (pid) == 0));
1555 if (pid == inferior_ptid.pid () && forks_exist_p ())
1557 /* Multi-fork case. The current inferior_ptid is being detached
1558 from, but there are other viable forks to debug. Detach from
1559 the current fork, and context-switch to the first
1560 available. */
1561 linux_fork_detach (from_tty, find_lwp_pid (ptid_t (pid)));
1563 else
1565 target_announce_detach (from_tty);
1567 /* In non-stop mode it is possible that the main thread has exited,
1568 in which case we don't try to detach. */
1569 main_lwp = find_lwp_pid (ptid_t (pid));
1570 if (main_lwp != nullptr)
1572 /* Pass on any pending signal for the last LWP. */
1573 int signo = get_detach_signal (main_lwp);
1575 detach_one_lwp (main_lwp, &signo);
1577 else
1578 gdb_assert (target_is_non_stop_p ());
1580 detach_success (inf);
1583 close_proc_mem_file (pid);
1586 /* Resume execution of the inferior process. If STEP is nonzero,
1587 single-step it. If SIGNAL is nonzero, give it that signal. */
1589 static void
1590 linux_resume_one_lwp_throw (struct lwp_info *lp, int step,
1591 enum gdb_signal signo)
1593 lp->step = step;
1595 /* stop_pc doubles as the PC the LWP had when it was last resumed.
1596 We only presently need that if the LWP is stepped though (to
1597 handle the case of stepping a breakpoint instruction). */
1598 if (step)
1600 struct regcache *regcache = get_thread_regcache (linux_target, lp->ptid);
1602 lp->stop_pc = regcache_read_pc (regcache);
1604 else
1605 lp->stop_pc = 0;
1607 linux_target->low_prepare_to_resume (lp);
1608 linux_target->low_resume (lp->ptid, step, signo);
1610 /* Successfully resumed. Clear state that no longer makes sense,
1611 and mark the LWP as running. Must not do this before resuming
1612 otherwise if that fails other code will be confused. E.g., we'd
1613 later try to stop the LWP and hang forever waiting for a stop
1614 status. Note that we must not throw after this is cleared,
1615 otherwise handle_zombie_lwp_error would get confused. */
1616 lp->stopped = 0;
1617 lp->core = -1;
1618 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
1619 registers_changed_ptid (linux_target, lp->ptid);
1622 /* Called when we try to resume a stopped LWP and that errors out. If
1623 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
1624 or about to become), discard the error, clear any pending status
1625 the LWP may have, and return true (we'll collect the exit status
1626 soon enough). Otherwise, return false. */
1628 static int
1629 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
1631 /* If we get an error after resuming the LWP successfully, we'd
1632 confuse !T state for the LWP being gone. */
1633 gdb_assert (lp->stopped);
1635 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
1636 because even if ptrace failed with ESRCH, the tracee may be "not
1637 yet fully dead", but already refusing ptrace requests. In that
1638 case the tracee has 'R (Running)' state for a little bit
1639 (observed in Linux 3.18). See also the note on ESRCH in the
1640 ptrace(2) man page. Instead, check whether the LWP has any state
1641 other than ptrace-stopped. */
1643 /* Don't assume anything if /proc/PID/status can't be read. */
1644 if (linux_proc_pid_is_trace_stopped_nowarn (lp->ptid.lwp ()) == 0)
1646 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
1647 lp->status = 0;
1648 lp->waitstatus.set_ignore ();
1649 return 1;
1651 return 0;
1654 /* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
1655 disappears while we try to resume it. */
1657 static void
1658 linux_resume_one_lwp (struct lwp_info *lp, int step, enum gdb_signal signo)
1662 linux_resume_one_lwp_throw (lp, step, signo);
1664 catch (const gdb_exception_error &ex)
1666 if (!check_ptrace_stopped_lwp_gone (lp))
1667 throw;
1671 /* Resume LP. */
1673 static void
1674 resume_lwp (struct lwp_info *lp, int step, enum gdb_signal signo)
1676 if (lp->stopped)
1678 struct inferior *inf = find_inferior_ptid (linux_target, lp->ptid);
1680 if (inf->vfork_child != NULL)
1682 linux_nat_debug_printf ("Not resuming sibling %s (vfork parent)",
1683 lp->ptid.to_string ().c_str ());
1685 else if (!lwp_status_pending_p (lp))
1687 linux_nat_debug_printf ("Resuming sibling %s, %s, %s",
1688 lp->ptid.to_string ().c_str (),
1689 (signo != GDB_SIGNAL_0
1690 ? strsignal (gdb_signal_to_host (signo))
1691 : "0"),
1692 step ? "step" : "resume");
1694 linux_resume_one_lwp (lp, step, signo);
1696 else
1698 linux_nat_debug_printf ("Not resuming sibling %s (has pending)",
1699 lp->ptid.to_string ().c_str ());
1702 else
1703 linux_nat_debug_printf ("Not resuming sibling %s (not stopped)",
1704 lp->ptid.to_string ().c_str ());
1707 /* Callback for iterate_over_lwps. If LWP is EXCEPT, do nothing.
1708 Resume LWP with the last stop signal, if it is in pass state. */
1710 static int
1711 linux_nat_resume_callback (struct lwp_info *lp, struct lwp_info *except)
1713 enum gdb_signal signo = GDB_SIGNAL_0;
1715 if (lp == except)
1716 return 0;
1718 if (lp->stopped)
1720 struct thread_info *thread;
1722 thread = linux_target->find_thread (lp->ptid);
1723 if (thread != NULL)
1725 signo = thread->stop_signal ();
1726 thread->set_stop_signal (GDB_SIGNAL_0);
1730 resume_lwp (lp, 0, signo);
1731 return 0;
1734 static int
1735 resume_clear_callback (struct lwp_info *lp)
1737 lp->resumed = 0;
1738 lp->last_resume_kind = resume_stop;
1739 return 0;
1742 static int
1743 resume_set_callback (struct lwp_info *lp)
1745 lp->resumed = 1;
1746 lp->last_resume_kind = resume_continue;
1747 return 0;
/* target_ops::resume implementation: resume (or step) the LWPs
   matching SCOPE_PTID, delivering SIGNO to the event thread.  */
1750 void
1751 linux_nat_target::resume (ptid_t scope_ptid, int step, enum gdb_signal signo)
1753 struct lwp_info *lp;
1755 linux_nat_debug_printf ("Preparing to %s %s, %s, inferior_ptid %s",
1756 step ? "step" : "resume",
1757 scope_ptid.to_string ().c_str (),
1758 (signo != GDB_SIGNAL_0
1759 ? strsignal (gdb_signal_to_host (signo)) : "0"),
1760 inferior_ptid.to_string ().c_str ());
1762 /* Mark the lwps we're resuming as resumed and update their
1763 last_resume_kind to resume_continue. */
1764 iterate_over_lwps (scope_ptid, resume_set_callback);
1766 lp = find_lwp_pid (inferior_ptid);
1767 gdb_assert (lp != NULL);
1769 /* Remember if we're stepping. */
1770 lp->last_resume_kind = step ? resume_step : resume_continue;
1772 /* If we have a pending wait status for this thread, there is no
1773 point in resuming the process. But first make sure that
1774 linux_nat_wait won't preemptively handle the event - we
1775 should never take this short-circuit if we are going to
1776 leave LP running, since we have skipped resuming all the
1777 other threads. This bit of code needs to be synchronized
1778 with linux_nat_wait. */
/* A pending stop for a signal in pass state is consumed here: the
   signal is re-delivered via the resume below instead of being
   reported as an event.  */
1780 if (lp->status && WIFSTOPPED (lp->status))
1782 if (!lp->step
1783 && WSTOPSIG (lp->status)
1784 && sigismember (&pass_mask, WSTOPSIG (lp->status)))
1786 linux_nat_debug_printf
1787 ("Not short circuiting for ignored status 0x%x", lp->status);
1789 /* FIXME: What should we do if we are supposed to continue
1790 this thread with a signal? */
1791 gdb_assert (signo == GDB_SIGNAL_0);
1792 signo = gdb_signal_from_host (WSTOPSIG (lp->status));
1793 lp->status = 0;
1797 if (lwp_status_pending_p (lp))
1799 /* FIXME: What should we do if we are supposed to continue
1800 this thread with a signal? */
1801 gdb_assert (signo == GDB_SIGNAL_0);
1803 linux_nat_debug_printf ("Short circuiting for status %s",
1804 pending_status_str (lp).c_str ());
1806 if (target_can_async_p ())
1808 target_async (true);
1809 /* Tell the event loop we have something to process. */
1810 async_file_mark ();
1812 return;
1815 /* No use iterating unless we're resuming other threads. */
1816 if (scope_ptid != lp->ptid)
1817 iterate_over_lwps (scope_ptid, [=] (struct lwp_info *info)
1819 return linux_nat_resume_callback (info, lp);
1822 linux_nat_debug_printf ("%s %s, %s (resume event thread)",
1823 step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
1824 lp->ptid.to_string ().c_str (),
1825 (signo != GDB_SIGNAL_0
1826 ? strsignal (gdb_signal_to_host (signo)) : "0"));
1828 linux_resume_one_lwp (lp, step, signo);
1831 /* Send a signal to an LWP. */
1833 static int
1834 kill_lwp (int lwpid, int signo)
1836 int ret;
1838 errno = 0;
1839 ret = syscall (__NR_tkill, lwpid, signo);
1840 if (errno == ENOSYS)
1842 /* If tkill fails, then we are not using nptl threads, a
1843 configuration we no longer support. */
1844 perror_with_name (("tkill"));
1846 return ret;
1849 /* Handle a GNU/Linux syscall trap wait response. If we see a syscall
1850 event, check if the core is interested in it: if not, ignore the
1851 event, and keep waiting; otherwise, we need to toggle the LWP's
1852 syscall entry/exit status, since the ptrace event itself doesn't
1853 indicate it, and report the trap to higher layers. */
/* Returns 1 when the event was consumed here (the LWP was re-resumed
   and the caller should keep waiting); 0 when the event has been
   recorded in LP->waitstatus and should be reported to the core.  */
1855 static int
1856 linux_handle_syscall_trap (struct lwp_info *lp, int stopping)
1858 struct target_waitstatus *ourstatus = &lp->waitstatus;
1859 struct gdbarch *gdbarch = target_thread_architecture (lp->ptid);
1860 thread_info *thread = linux_target->find_thread (lp->ptid);
1861 int syscall_number = (int) gdbarch_get_syscall_number (gdbarch, thread);
1863 if (stopping)
1865 /* If we're stopping threads, there's a SIGSTOP pending, which
1866 makes it so that the LWP reports an immediate syscall return,
1867 followed by the SIGSTOP. Skip seeing that "return" using
1868 PTRACE_CONT directly, and let stop_wait_callback collect the
1869 SIGSTOP. Later when the thread is resumed, a new syscall
1870 entry event. If we didn't do this (and returned 0), we'd
1871 leave a syscall entry pending, and our caller, by using
1872 PTRACE_CONT to collect the SIGSTOP, skips the syscall return
1873 itself. Later, when the user re-resumes this LWP, we'd see
1874 another syscall entry event and we'd mistake it for a return.
1876 If stop_wait_callback didn't force the SIGSTOP out of the LWP
1877 (leaving immediately with LWP->signalled set, without issuing
1878 a PTRACE_CONT), it would still be problematic to leave this
1879 syscall enter pending, as later when the thread is resumed,
1880 it would then see the same syscall exit mentioned above,
1881 followed by the delayed SIGSTOP, while the syscall didn't
1882 actually get to execute. It seems it would be even more
1883 confusing to the user. */
1885 linux_nat_debug_printf
1886 ("ignoring syscall %d for LWP %ld (stopping threads), resuming with "
1887 "PTRACE_CONT for SIGSTOP", syscall_number, lp->ptid.lwp ());
1889 lp->syscall_state = TARGET_WAITKIND_IGNORE;
1890 ptrace (PTRACE_CONT, lp->ptid.lwp (), 0, 0);
1891 lp->stopped = 0;
1892 return 1;
1895 /* Always update the entry/return state, even if this particular
1896 syscall isn't interesting to the core now. In async mode,
1897 the user could install a new catchpoint for this syscall
1898 between syscall enter/return, and we'll need to know to
1899 report a syscall return if that happens. */
1900 lp->syscall_state = (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
1901 ? TARGET_WAITKIND_SYSCALL_RETURN
1902 : TARGET_WAITKIND_SYSCALL_ENTRY);
1904 if (catch_syscall_enabled ())
1906 if (catching_syscall_number (syscall_number))
1908 /* Alright, an event to report. */
1909 if (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY)
1910 ourstatus->set_syscall_entry (syscall_number);
1911 else if (lp->syscall_state == TARGET_WAITKIND_SYSCALL_RETURN)
1912 ourstatus->set_syscall_return (syscall_number);
1913 else
1914 gdb_assert_not_reached ("unexpected syscall state");
1916 linux_nat_debug_printf
1917 ("stopping for %s of syscall %d for LWP %ld",
1918 (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
1919 ? "entry" : "return"), syscall_number, lp->ptid.lwp ());
1921 return 0;
1924 linux_nat_debug_printf
1925 ("ignoring %s of syscall %d for LWP %ld",
1926 (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
1927 ? "entry" : "return"), syscall_number, lp->ptid.lwp ());
1929 else
1931 /* If we had been syscall tracing, and hence used PT_SYSCALL
1932 before on this LWP, it could happen that the user removes all
1933 syscall catchpoints before we get to process this event.
1934 There are two noteworthy issues here:
1936 - When stopped at a syscall entry event, resuming with
1937 PT_STEP still resumes executing the syscall and reports a
1938 syscall return.
1940 - Only PT_SYSCALL catches syscall enters. If we last
1941 single-stepped this thread, then this event can't be a
1942 syscall enter. If we last single-stepped this thread, this
1943 has to be a syscall exit.
1945 The points above mean that the next resume, be it PT_STEP or
1946 PT_CONTINUE, can not trigger a syscall trace event. */
1947 linux_nat_debug_printf
1948 ("caught syscall event with no syscall catchpoints. %d for LWP %ld, "
1949 "ignoring", syscall_number, lp->ptid.lwp ());
1950 lp->syscall_state = TARGET_WAITKIND_IGNORE;
1953 /* The core isn't interested in this event. For efficiency, avoid
1954 stopping all threads only to have the core resume them all again.
1955 Since we're not stopping threads, if we're still syscall tracing
1956 and not stepping, we can't use PTRACE_CONT here, as we'd miss any
1957 subsequent syscall. Simply resume using the inf-ptrace layer,
1958 which knows when to use PT_SYSCALL or PT_CONTINUE. */
1960 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
1961 return 1;
1964 /* See target.h. */
1966 void
1967 linux_nat_target::follow_clone (ptid_t child_ptid)
1969 lwp_info *new_lp = add_lwp (child_ptid);
1970 new_lp->stopped = 1;
1972 /* If the thread_db layer is active, let it record the user
1973 level thread id and status, and add the thread to GDB's
1974 list. */
1975 if (!thread_db_notice_clone (inferior_ptid, new_lp->ptid))
1977 /* The process is not using thread_db. Add the LWP to
1978 GDB's list. */
1979 add_thread (linux_target, new_lp->ptid);
1982 /* We just created NEW_LP so it cannot yet contain STATUS. */
1983 gdb_assert (new_lp->status == 0);
1985 if (!pull_pid_from_list (&stopped_pids, child_ptid.lwp (), &new_lp->status))
1986 internal_error (_("no saved status for clone lwp"));
1988 if (WSTOPSIG (new_lp->status) != SIGSTOP)
1990 /* This can happen if someone starts sending signals to
1991 the new thread before it gets a chance to run, which
1992 have a lower number than SIGSTOP (e.g. SIGUSR1).
1993 This is an unlikely case, and harder to handle for
1994 fork / vfork than for clone, so we do not try - but
1995 we handle it for clone events here. */
1997 new_lp->signalled = 1;
1999 /* Save the wait status to report later. */
2000 linux_nat_debug_printf
2001 ("waitpid of new LWP %ld, saving status %s",
2002 (long) new_lp->ptid.lwp (), status_to_str (new_lp->status).c_str ());
2004 else
2006 new_lp->status = 0;
2008 if (report_thread_events)
2009 new_lp->waitstatus.set_thread_created ();
2013 /* Handle a GNU/Linux extended wait response found in STATUS for LWP
2014 LP. Decodes the PTRACE_EVENT_* stop, updating LP and
2015 LP->waitstatus accordingly. A non-zero return means the event
2016 should be ignored and we should wait again; note that every path
2017 visible below returns 0, i.e. reports the event to the core. */
2019 static int
2020 linux_handle_extended_wait (struct lwp_info *lp, int status)
2022 int pid = lp->ptid.lwp ();
2023 struct target_waitstatus *ourstatus = &lp->waitstatus;
2024 int event = linux_ptrace_get_extended_event (status);
2026 /* All extended events we currently use are mid-syscall. Only
2027 PTRACE_EVENT_STOP is delivered more like a signal-stop, but
2028 you have to be using PTRACE_SEIZE to get that. */
2029 lp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
2031 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
2032 || event == PTRACE_EVENT_CLONE)
2034 unsigned long new_pid;
2035 int ret;
/* NOTE(review): the PTRACE_GETEVENTMSG return value is not checked
   here; on failure NEW_PID would be used uninitialized.  */
2037 ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
2039 /* If we haven't already seen the new PID stop, wait for it now. */
2040 if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
2042 /* The new child has a pending SIGSTOP. We can't affect it until it
2043 hits the SIGSTOP, but we're already attached. */
2044 ret = my_waitpid (new_pid, &status, __WALL);
2045 if (ret == -1)
2046 perror_with_name (_("waiting for new child"));
2047 else if (ret != new_pid)
2048 internal_error (_("wait returned unexpected PID %d"), ret);
2049 else if (!WIFSTOPPED (status))
2050 internal_error (_("wait returned unexpected status 0x%x"), status);
2053 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
2055 open_proc_mem_file (ptid_t (new_pid, new_pid));
2057 /* The arch-specific native code may need to know about new
2058 forks even if those end up never mapped to an
2059 inferior. */
2060 linux_target->low_new_fork (lp, new_pid);
2062 else if (event == PTRACE_EVENT_CLONE)
2064 linux_target->low_new_clone (lp, new_pid);
2067 if (event == PTRACE_EVENT_FORK
2068 && linux_fork_checkpointing_p (lp->ptid.pid ()))
2070 /* Handle checkpointing by linux-fork.c here as a special
2071 case. We don't want the follow-fork-mode or 'catch fork'
2072 to interfere with this. */
2074 /* This won't actually modify the breakpoint list, but will
2075 physically remove the breakpoints from the child. */
2076 detach_breakpoints (ptid_t (new_pid, new_pid));
2078 /* Retain child fork in ptrace (stopped) state. */
2079 if (!find_fork_pid (new_pid))
2080 add_fork (new_pid);
2082 /* Report as spurious, so that infrun doesn't want to follow
2083 this fork. We're actually doing an infcall in
2084 linux-fork.c. */
2085 ourstatus->set_spurious ();
2087 /* Report the stop to the core. */
2088 return 0;
2091 if (event == PTRACE_EVENT_FORK)
2092 ourstatus->set_forked (ptid_t (new_pid, new_pid));
2093 else if (event == PTRACE_EVENT_VFORK)
2094 ourstatus->set_vforked (ptid_t (new_pid, new_pid));
2095 else if (event == PTRACE_EVENT_CLONE)
2097 linux_nat_debug_printf
2098 ("Got clone event from LWP %d, new child is LWP %ld", pid, new_pid);
2100 /* Save the status again, we'll use it in follow_clone. */
2101 add_to_pid_list (&stopped_pids, new_pid, status);
2103 ourstatus->set_thread_cloned (ptid_t (lp->ptid.pid (), new_pid));
2106 return 0;
2109 if (event == PTRACE_EVENT_EXEC)
2111 linux_nat_debug_printf ("Got exec event from LWP %ld", lp->ptid.lwp ());
2113 /* Close the previous /proc/PID/mem file for this inferior,
2114 which was using the address space which is now gone.
2115 Reading/writing from this file would return 0/EOF. */
2116 close_proc_mem_file (lp->ptid.pid ());
2118 /* Open a new file for the new address space. */
2119 open_proc_mem_file (lp->ptid);
2121 ourstatus->set_execd
2122 (make_unique_xstrdup (linux_proc_pid_to_exec_file (pid)));
2124 /* The thread that execed must have been resumed, but, when a
2125 thread execs, it changes its tid to the tgid, and the old
2126 tgid thread might have not been resumed. */
2127 lp->resumed = 1;
2129 /* All other LWPs are gone now. We'll have received a thread
2130 exit notification for all threads other the execing one.
2131 That one, if it wasn't the leader, just silently changes its
2132 tid to the tgid, and the previous leader vanishes. Since
2133 Linux 3.0, the former thread ID can be retrieved with
2134 PTRACE_GETEVENTMSG, but since we support older kernels, don't
2135 bother with it, and just walk the LWP list. Even with
2136 PTRACE_GETEVENTMSG, we'd still need to lookup the
2137 corresponding LWP object, and it would be an extra ptrace
2138 syscall, so this way may even be more efficient. */
2139 for (lwp_info *other_lp : all_lwps_safe ())
2140 if (other_lp != lp && other_lp->ptid.pid () == lp->ptid.pid ())
2141 exit_lwp (other_lp);
2143 return 0;
2146 if (event == PTRACE_EVENT_VFORK_DONE)
2148 linux_nat_debug_printf
2149 ("Got PTRACE_EVENT_VFORK_DONE from LWP %ld",
2150 lp->ptid.lwp ());
2151 ourstatus->set_vfork_done ();
2152 return 0;
2155 internal_error (_("unknown ptrace event %d"), event);
2158 /* Suspend waiting for a signal. We're mostly interested in
2159 SIGCHLD/SIGINT. */
2161 static void
2162 wait_for_signal ()
2164 linux_nat_debug_printf ("about to sigsuspend");
2165 sigsuspend (&suspend_mask);
2167 /* If the quit flag is set, it means that the user pressed Ctrl-C
2168 and we're debugging a process that is running on a separate
2169 terminal, so we must forward the Ctrl-C to the inferior. (If the
2170 inferior is sharing GDB's terminal, then the Ctrl-C reaches the
2171 inferior directly.) We must do this here because functions that
2172 need to block waiting for a signal loop forever until there's an
2173 event to report before returning back to the event loop. */
2174 if (!target_terminal::is_ours ())
2176 if (check_quit_flag ())
2177 target_pass_ctrlc ();
2181 /* Mark LWP dead, with STATUS as exit status pending to report
2182 later. */
2184 static void
2185 mark_lwp_dead (lwp_info *lp, int status)
2187 /* Store the exit status lp->waitstatus, because lp->status would be
2188 ambiguous (W_EXITCODE(0,0) == 0). */
2189 lp->waitstatus = host_status_to_waitstatus (status);
2191 /* If we're processing LP's status, there should be no other event
2192 already recorded as pending. */
2193 gdb_assert (lp->status == 0);
2195 /* Dead LWPs aren't expected to report a pending sigstop. */
2196 lp->signalled = 0;
2198 /* Prevent trying to stop it. */
2199 lp->stopped = 1;
/* Wait for LP to stop.  Returns the wait status, or 0 if the LWP has
   exited (in which case LP has been deleted or marked dead).  */

static int
wait_lwp (struct lwp_info *lp)
{
  pid_t pid;
  int status = 0;
  int thread_dead = 0;
  sigset_t prev_mask;

  /* LP must be running from our point of view, with no event already
     collected for it.  */
  gdb_assert (!lp->stopped);
  gdb_assert (lp->status == 0);

  /* Make sure SIGCHLD is blocked for sigsuspend avoiding a race below.  */
  block_child_signals (&prev_mask);

  for (;;)
    {
      /* Poll rather than block: the zombie-leader workaround below
	 depends on WNOHANG.  */
      pid = my_waitpid (lp->ptid.lwp (), &status, __WALL | WNOHANG);
      if (pid == -1 && errno == ECHILD)
	{
	  /* The thread has previously exited.  We need to delete it
	     now because if this was a non-leader thread execing, we
	     won't get an exit event.  See comments on exec events at
	     the top of the file.  */
	  thread_dead = 1;
	  linux_nat_debug_printf ("%s vanished.",
				  lp->ptid.to_string ().c_str ());
	}
      if (pid != 0)
	break;

      /* Bugs 10970, 12702.
	 Thread group leader may have exited in which case we'll lock up in
	 waitpid if there are other threads, even if they are all zombies too.
	 Basically, we're not supposed to use waitpid this way.
	 tkill(pid,0) cannot be used here as it gets ESRCH for both
	 for zombie and running processes.

	 As a workaround, check if we're waiting for the thread group leader and
	 if it's a zombie, and avoid calling waitpid if it is.

	 This is racy, what if the tgl becomes a zombie right after we check?
	 Therefore always use WNOHANG with sigsuspend - it is equivalent to
	 waiting waitpid but linux_proc_pid_is_zombie is safe this way.  */

      if (lp->ptid.pid () == lp->ptid.lwp ()
	  && linux_proc_pid_is_zombie (lp->ptid.lwp ()))
	{
	  thread_dead = 1;
	  linux_nat_debug_printf ("Thread group leader %s vanished.",
				  lp->ptid.to_string ().c_str ());
	  break;
	}

      /* Wait for next SIGCHLD and try again.  This may let SIGCHLD handlers
	 get invoked despite our caller had them intentionally blocked by
	 block_child_signals.  This is sensitive only to the loop of
	 linux_nat_wait_1 and there if we get called my_waitpid gets called
	 again before it gets to sigsuspend so we can safely let the handlers
	 get executed here.  */
      wait_for_signal ();
    }

  restore_child_signals_mask (&prev_mask);

  if (!thread_dead)
    {
      gdb_assert (pid == lp->ptid.lwp ());

      linux_nat_debug_printf ("waitpid %s received %s",
			      lp->ptid.to_string ().c_str (),
			      status_to_str (status).c_str ());

      /* Check if the thread has exited.  */
      if (WIFEXITED (status) || WIFSIGNALED (status))
	{
	  if (report_exit_events_for (lp) || is_leader (lp))
	    {
	      linux_nat_debug_printf ("LWP %d exited.", lp->ptid.pid ());

	      /* If this is the leader exiting, it means the whole
		 process is gone.  Store the status to report to the
		 core.  */
	      mark_lwp_dead (lp, status);
	      return 0;
	    }

	  thread_dead = 1;
	  linux_nat_debug_printf ("%s exited.",
				  lp->ptid.to_string ().c_str ());
	}
    }

  if (thread_dead)
    {
      exit_lwp (lp);
      return 0;
    }

  gdb_assert (WIFSTOPPED (status));
  lp->stopped = 1;

  /* If enabling ptrace event reporting for this LWP was deferred,
     do it now that the LWP is stopped.  */
  if (lp->must_set_ptrace_flags)
    {
      inferior *inf = find_inferior_pid (linux_target, lp->ptid.pid ());
      int options = linux_nat_ptrace_options (inf->attach_flag);

      linux_enable_event_reporting (lp->ptid.lwp (), options);
      lp->must_set_ptrace_flags = 0;
    }

  /* Handle GNU/Linux's syscall SIGTRAPs.  */
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
    {
      /* No longer need the sysgood bit.  The ptrace event ends up
	 recorded in lp->waitstatus if we care for it.  We can carry
	 on handling the event like a regular SIGTRAP from here
	 on.  */
      status = W_STOPCODE (SIGTRAP);
      if (linux_handle_syscall_trap (lp, 1))
	return wait_lwp (lp);
    }
  else
    {
      /* Almost all other ptrace-stops are known to be outside of system
	 calls, with further exceptions in linux_handle_extended_wait.  */
      lp->syscall_state = TARGET_WAITKIND_IGNORE;
    }

  /* Handle GNU/Linux's extended waitstatus for trace events.  */
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
      && linux_is_extended_waitstatus (status))
    {
      linux_nat_debug_printf ("Handling extended status 0x%06x", status);
      linux_handle_extended_wait (lp, status);
      return 0;
    }

  return status;
}
2345 /* Send a SIGSTOP to LP. */
2347 static int
2348 stop_callback (struct lwp_info *lp)
2350 if (!lp->stopped && !lp->signalled)
2352 int ret;
2354 linux_nat_debug_printf ("kill %s **<SIGSTOP>**",
2355 lp->ptid.to_string ().c_str ());
2357 errno = 0;
2358 ret = kill_lwp (lp->ptid.lwp (), SIGSTOP);
2359 linux_nat_debug_printf ("lwp kill %d %s", ret,
2360 errno ? safe_strerror (errno) : "ERRNO-OK");
2362 lp->signalled = 1;
2363 gdb_assert (lp->status == 0);
2366 return 0;
2369 /* Request a stop on LWP. */
2371 void
2372 linux_stop_lwp (struct lwp_info *lwp)
2374 stop_callback (lwp);
2377 /* See linux-nat.h */
2379 void
2380 linux_stop_and_wait_all_lwps (void)
2382 /* Stop all LWP's ... */
2383 iterate_over_lwps (minus_one_ptid, stop_callback);
2385 /* ... and wait until all of them have reported back that
2386 they're no longer running. */
2387 iterate_over_lwps (minus_one_ptid, stop_wait_callback);
2390 /* See linux-nat.h */
2392 void
2393 linux_unstop_all_lwps (void)
2395 iterate_over_lwps (minus_one_ptid,
2396 [] (struct lwp_info *info)
2398 return resume_stopped_resumed_lwps (info, minus_one_ptid);
2402 /* Return non-zero if LWP PID has a pending SIGINT. */
2404 static int
2405 linux_nat_has_pending_sigint (int pid)
2407 sigset_t pending, blocked, ignored;
2409 linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
2411 if (sigismember (&pending, SIGINT)
2412 && !sigismember (&ignored, SIGINT))
2413 return 1;
2415 return 0;
2418 /* Set a flag in LP indicating that we should ignore its next SIGINT. */
2420 static int
2421 set_ignore_sigint (struct lwp_info *lp)
2423 /* If a thread has a pending SIGINT, consume it; otherwise, set a
2424 flag to consume the next one. */
2425 if (lp->stopped && lp->status != 0 && WIFSTOPPED (lp->status)
2426 && WSTOPSIG (lp->status) == SIGINT)
2427 lp->status = 0;
2428 else
2429 lp->ignore_sigint = 1;
2431 return 0;
2434 /* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
2435 This function is called after we know the LWP has stopped; if the LWP
2436 stopped before the expected SIGINT was delivered, then it will never have
2437 arrived. Also, if the signal was delivered to a shared queue and consumed
2438 by a different thread, it will never be delivered to this LWP. */
2440 static void
2441 maybe_clear_ignore_sigint (struct lwp_info *lp)
2443 if (!lp->ignore_sigint)
2444 return;
2446 if (!linux_nat_has_pending_sigint (lp->ptid.lwp ()))
2448 linux_nat_debug_printf ("Clearing bogus flag for %s",
2449 lp->ptid.to_string ().c_str ());
2450 lp->ignore_sigint = 0;
2454 /* Fetch the possible triggered data watchpoint info and store it in
2457 On some archs, like x86, that use debug registers to set
2458 watchpoints, it's possible that the way to know which watched
2459 address trapped, is to check the register that is used to select
2460 which address to watch. Problem is, between setting the watchpoint
2461 and reading back which data address trapped, the user may change
2462 the set of watchpoints, and, as a consequence, GDB changes the
2463 debug registers in the inferior. To avoid reading back a stale
2464 stopped-data-address when that happens, we cache in LP the fact
2465 that a watchpoint trapped, and the corresponding data address, as
2466 soon as we see LP stop with a SIGTRAP. If GDB changes the debug
2467 registers meanwhile, we have the cached data we can rely on. */
2469 static int
2470 check_stopped_by_watchpoint (struct lwp_info *lp)
2472 scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid);
2473 inferior_ptid = lp->ptid;
2475 if (linux_target->low_stopped_by_watchpoint ())
2477 lp->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2478 lp->stopped_data_address_p
2479 = linux_target->low_stopped_data_address (&lp->stopped_data_address);
2482 return lp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2485 /* Returns true if the LWP had stopped for a watchpoint. */
2487 bool
2488 linux_nat_target::stopped_by_watchpoint ()
2490 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2492 gdb_assert (lp != NULL);
2494 return lp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2497 bool
2498 linux_nat_target::stopped_data_address (CORE_ADDR *addr_p)
2500 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2502 gdb_assert (lp != NULL);
2504 *addr_p = lp->stopped_data_address;
2506 return lp->stopped_data_address_p;
2509 /* Commonly any breakpoint / watchpoint generate only SIGTRAP. */
2511 bool
2512 linux_nat_target::low_status_is_event (int status)
2514 return WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP;
/* Wait until LP is stopped.  Callback for iterate_over_lwps; always
   returns 0 so iteration continues over all LWPs.  */

static int
stop_wait_callback (struct lwp_info *lp)
{
  inferior *inf = find_inferior_ptid (linux_target, lp->ptid);

  /* If this is a vfork parent, bail out, it is not going to report
     any SIGSTOP until the vfork is done with.  */
  if (inf->vfork_child != NULL)
    return 0;

  if (!lp->stopped)
    {
      int status;

      status = wait_lwp (lp);
      if (status == 0)
	return 0;

      if (lp->ignore_sigint && WIFSTOPPED (status)
	  && WSTOPSIG (status) == SIGINT)
	{
	  /* A SIGINT we were told to ignore (see set_ignore_sigint):
	     discard it by continuing the LWP, then wait for its stop
	     again.  */
	  lp->ignore_sigint = 0;

	  errno = 0;
	  ptrace (PTRACE_CONT, lp->ptid.lwp (), 0, 0);
	  lp->stopped = 0;
	  linux_nat_debug_printf
	    ("PTRACE_CONT %s, 0, 0 (%s) (discarding SIGINT)",
	     lp->ptid.to_string ().c_str (),
	     errno ? safe_strerror (errno) : "OK");

	  return stop_wait_callback (lp);
	}

      maybe_clear_ignore_sigint (lp);

      if (WSTOPSIG (status) != SIGSTOP)
	{
	  /* The thread was stopped with a signal other than SIGSTOP.
	     Record it as a pending event; the SIGSTOP we sent is
	     still queued and will be consumed later.  */

	  linux_nat_debug_printf ("Pending event %s in %s",
				  status_to_str ((int) status).c_str (),
				  lp->ptid.to_string ().c_str ());

	  /* Save the sigtrap event.  */
	  lp->status = status;
	  gdb_assert (lp->signalled);
	  save_stop_reason (lp);
	}
      else
	{
	  /* We caught the SIGSTOP that we intended to catch.  */

	  linux_nat_debug_printf ("Expected SIGSTOP caught for %s.",
				  lp->ptid.to_string ().c_str ());

	  lp->signalled = 0;

	  /* If we are waiting for this stop so we can report the thread
	     stopped then we need to record this status.  Otherwise, we can
	     now discard this stop event.  */
	  if (lp->last_resume_kind == resume_stop)
	    {
	      lp->status = status;
	      save_stop_reason (lp);
	    }
	}
    }

  return 0;
}
2591 /* Get the inferior associated to LWP. Must be called with an LWP that has
2592 an associated inferior. Always return non-nullptr. */
2594 static inferior *
2595 lwp_inferior (const lwp_info *lwp)
2597 inferior *inf = find_inferior_ptid (linux_target, lwp->ptid);
2598 gdb_assert (inf != nullptr);
2599 return inf;
2602 /* Return non-zero if LP has a wait status pending. Discard the
2603 pending event and resume the LWP if the event that originally
2604 caused the stop became uninteresting. */
2606 static int
2607 status_callback (struct lwp_info *lp)
2609 /* Only report a pending wait status if we pretend that this has
2610 indeed been resumed. */
2611 if (!lp->resumed)
2612 return 0;
2614 if (!lwp_status_pending_p (lp))
2615 return 0;
2617 if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
2618 || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
2620 struct regcache *regcache = get_thread_regcache (linux_target, lp->ptid);
2621 CORE_ADDR pc;
2622 int discard = 0;
2624 pc = regcache_read_pc (regcache);
2626 if (pc != lp->stop_pc)
2628 linux_nat_debug_printf ("PC of %s changed. was=%s, now=%s",
2629 lp->ptid.to_string ().c_str (),
2630 paddress (current_inferior ()->arch (),
2631 lp->stop_pc),
2632 paddress (current_inferior ()->arch (), pc));
2633 discard = 1;
2636 if (discard)
2638 linux_nat_debug_printf ("pending event of %s cancelled.",
2639 lp->ptid.to_string ().c_str ());
2641 lp->status = 0;
2642 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
2643 return 0;
2647 return 1;
2650 /* Count the LWP's that have had events. */
2652 static int
2653 count_events_callback (struct lwp_info *lp, int *count)
2655 gdb_assert (count != NULL);
2657 /* Select only resumed LWPs that have an event pending. */
2658 if (lp->resumed && lwp_status_pending_p (lp))
2659 (*count)++;
2661 return 0;
2664 /* Select the LWP (if any) that is currently being single-stepped. */
2666 static int
2667 select_singlestep_lwp_callback (struct lwp_info *lp)
2669 if (lp->last_resume_kind == resume_step
2670 && lp->status != 0)
2671 return 1;
2672 else
2673 return 0;
2676 /* Returns true if LP has a status pending. */
2678 static int
2679 lwp_status_pending_p (struct lwp_info *lp)
2681 /* We check for lp->waitstatus in addition to lp->status, because we
2682 can have pending process exits recorded in lp->status and
2683 W_EXITCODE(0,0) happens to be 0. */
2684 return lp->status != 0 || lp->waitstatus.kind () != TARGET_WAITKIND_IGNORE;
2687 /* Select the Nth LWP that has had an event. */
2689 static int
2690 select_event_lwp_callback (struct lwp_info *lp, int *selector)
2692 gdb_assert (selector != NULL);
2694 /* Select only resumed LWPs that have an event pending. */
2695 if (lp->resumed && lwp_status_pending_p (lp))
2696 if ((*selector)-- == 0)
2697 return 1;
2699 return 0;
/* Called when the LWP stopped for a signal/trap.  If it stopped for a
   trap check what caused it (breakpoint, watchpoint, trace, etc.),
   and save the result in the LWP's stop_reason field.  If it stopped
   for a breakpoint, decrement the PC if necessary on the lwp's
   architecture.  */

static void
save_stop_reason (struct lwp_info *lp)
{
  struct regcache *regcache;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  CORE_ADDR sw_bp_pc;
  siginfo_t siginfo;

  /* We must be the first to classify this stop, and there must be an
     event recorded to classify.  */
  gdb_assert (lp->stop_reason == TARGET_STOPPED_BY_NO_REASON);
  gdb_assert (lp->status != 0);

  if (!linux_target->low_status_is_event (lp->status))
    return;

  inferior *inf = lwp_inferior (lp);
  if (inf->starting_up)
    return;

  regcache = get_thread_regcache (linux_target, lp->ptid);
  gdbarch = regcache->arch ();

  pc = regcache_read_pc (regcache);
  /* Where the PC would be after backing it up over a software
     breakpoint, on archs where the trap reports a PC past the
     breakpoint instruction.  */
  sw_bp_pc = pc - gdbarch_decr_pc_after_break (gdbarch);

  if (linux_nat_get_siginfo (lp->ptid, &siginfo))
    {
      if (siginfo.si_signo == SIGTRAP)
	{
	  if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
	      && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* The si_code is ambiguous on this arch -- check debug
		 registers.  */
	      if (!check_stopped_by_watchpoint (lp))
		lp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
	    {
	      /* If we determine the LWP stopped for a SW breakpoint,
		 trust it.  Particularly don't check watchpoint
		 registers, because, at least on s390, we'd find
		 stopped-by-watchpoint as long as there's a watchpoint
		 set.  */
	      lp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* This can indicate either a hardware breakpoint or
		 hardware watchpoint.  Check debug registers.  */
	      if (!check_stopped_by_watchpoint (lp))
		lp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
	    }
	  else if (siginfo.si_code == TRAP_TRACE)
	    {
	      linux_nat_debug_printf ("%s stopped by trace",
				      lp->ptid.to_string ().c_str ());

	      /* We may have single stepped an instruction that
		 triggered a watchpoint.  In that case, on some
		 architectures (such as x86), instead of TRAP_HWBKPT,
		 si_code indicates TRAP_TRACE, and we need to check
		 the debug registers separately.  */
	      check_stopped_by_watchpoint (lp);
	    }
	}
    }

  if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
    {
      linux_nat_debug_printf ("%s stopped by software breakpoint",
			      lp->ptid.to_string ().c_str ());

      /* Back up the PC if necessary.  */
      if (pc != sw_bp_pc)
	regcache_write_pc (regcache, sw_bp_pc);

      /* Update this so we record the correct stop PC below.  */
      pc = sw_bp_pc;
    }
  else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
    {
      linux_nat_debug_printf ("%s stopped by hardware breakpoint",
			      lp->ptid.to_string ().c_str ());
    }
  else if (lp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
    {
      linux_nat_debug_printf ("%s stopped by hardware watchpoint",
			      lp->ptid.to_string ().c_str ());
    }

  lp->stop_pc = pc;
}
2803 /* Returns true if the LWP had stopped for a software breakpoint. */
2805 bool
2806 linux_nat_target::stopped_by_sw_breakpoint ()
2808 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2810 gdb_assert (lp != NULL);
2812 return lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
/* Implement the supports_stopped_by_sw_breakpoint method.  This
   target always knows whether a stop was caused by a software
   breakpoint (see save_stop_reason).  */

bool
linux_nat_target::supports_stopped_by_sw_breakpoint ()
{
  return true;
}
2823 /* Returns true if the LWP had stopped for a hardware
2824 breakpoint/watchpoint. */
2826 bool
2827 linux_nat_target::stopped_by_hw_breakpoint ()
2829 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2831 gdb_assert (lp != NULL);
2833 return lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
/* Implement the supports_stopped_by_hw_breakpoint method.  This
   target always knows whether a stop was caused by a hardware
   breakpoint (see save_stop_reason).  */

bool
linux_nat_target::supports_stopped_by_hw_breakpoint ()
{
  return true;
}
/* Select one LWP out of those that have events pending, and make it
   the new event LWP by updating *ORIG_LP/*STATUS.  FILTER restricts
   which LWPs are considered.  */

static void
select_event_lwp (ptid_t filter, struct lwp_info **orig_lp, int *status)
{
  int num_events = 0;
  int random_selector;
  struct lwp_info *event_lp = NULL;

  /* Record the wait status for the original LWP.  */
  (*orig_lp)->status = *status;

  /* In all-stop, give preference to the LWP that is being
     single-stepped.  There will be at most one, and it will be the
     LWP that the core is most interested in.  If we didn't do this,
     then we'd have to handle pending step SIGTRAPs somehow in case
     the core later continues the previously-stepped thread, as
     otherwise we'd report the pending SIGTRAP then, and the core, not
     having stepped the thread, wouldn't understand what the trap was
     for, and therefore would report it to the user as a random
     signal.  */
  if (!target_is_non_stop_p ())
    {
      event_lp = iterate_over_lwps (filter, select_singlestep_lwp_callback);
      if (event_lp != NULL)
	{
	  linux_nat_debug_printf ("Select single-step %s",
				  event_lp->ptid.to_string ().c_str ());
	}
    }

  if (event_lp == NULL)
    {
      /* Pick one at random, out of those which have had events.
	 Randomness avoids starving any particular thread.  */

      /* First see how many events we have.  */
      iterate_over_lwps (filter,
			 [&] (struct lwp_info *info)
			 {
			   return count_events_callback (info, &num_events);
			 });
      gdb_assert (num_events > 0);

      /* Now randomly pick a LWP out of those that have had
	 events.  */
      random_selector = (int)
	((num_events * (double) rand ()) / (RAND_MAX + 1.0));

      if (num_events > 1)
	linux_nat_debug_printf ("Found %d events, selecting #%d",
				num_events, random_selector);

      event_lp
	= (iterate_over_lwps
	   (filter,
	    [&] (struct lwp_info *info)
	    {
	      return select_event_lwp_callback (info,
						&random_selector);
	    }));
    }

  if (event_lp != NULL)
    {
      /* Switch the event LWP.  */
      *orig_lp = event_lp;
      *status = event_lp->status;
    }

  /* Flush the wait status for the event LWP.  */
  (*orig_lp)->status = 0;
}
2917 /* Return non-zero if LP has been resumed. */
2919 static int
2920 resumed_callback (struct lwp_info *lp)
2922 return lp->resumed;
/* Check if we should go on and pass this event to common code.

   If so, save the status to the lwp_info structure associated to LWPID.
   Otherwise the event is consumed here (the LWP may be resumed as a
   side effect).  */

static void
linux_nat_filter_event (int lwpid, int status)
{
  struct lwp_info *lp;
  int event = linux_ptrace_get_extended_event (status);

  lp = find_lwp_pid (ptid_t (lwpid));

  /* Check for events reported by anything not in our LWP list.  */
  if (lp == nullptr)
    {
      if (WIFSTOPPED (status))
	{
	  if (WSTOPSIG (status) == SIGTRAP && event == PTRACE_EVENT_EXEC)
	    {
	      /* A non-leader thread exec'ed after we've seen the
		 leader zombie, and removed it from our lists (in
		 check_zombie_leaders).  The non-leader thread changes
		 its tid to the tgid.  */
	      linux_nat_debug_printf
		("Re-adding thread group leader LWP %d after exec.",
		 lwpid);

	      lp = add_lwp (ptid_t (lwpid, lwpid));
	      lp->stopped = 1;
	      lp->resumed = 1;
	      add_thread (linux_target, lp->ptid);
	    }
	  else
	    {
	      /* A process we are controlling has forked and the new
		 child's stop was reported to us by the kernel.  Save
		 its PID and go back to waiting for the fork event to
		 be reported - the stopped process might be returned
		 from waitpid before or after the fork event is.  */
	      linux_nat_debug_printf
		("Saving LWP %d status %s in stopped_pids list",
		 lwpid, status_to_str (status).c_str ());
	      add_to_pid_list (&stopped_pids, lwpid, status);
	    }
	}
      else
	{
	  /* Don't report an event for the exit of an LWP not in our
	     list, i.e. not part of any inferior we're debugging.
	     This can happen if we detach from a program we originally
	     forked and then it exits.  However, note that we may have
	     earlier deleted a leader of an inferior we're debugging,
	     in check_zombie_leaders.  Re-add it back here if so.  */
	  for (inferior *inf : all_inferiors (linux_target))
	    {
	      if (inf->pid == lwpid)
		{
		  linux_nat_debug_printf
		    ("Re-adding thread group leader LWP %d after exit.",
		     lwpid);

		  lp = add_lwp (ptid_t (lwpid, lwpid));
		  lp->resumed = 1;
		  add_thread (linux_target, lp->ptid);
		  break;
		}
	    }
	}

      if (lp == nullptr)
	return;
    }

  /* This LWP is stopped now.  (And if dead, this prevents it from
     ever being continued.)  */
  lp->stopped = 1;

  /* Enable ptrace event reporting if it was deferred for this LWP.  */
  if (WIFSTOPPED (status) && lp->must_set_ptrace_flags)
    {
      inferior *inf = find_inferior_pid (linux_target, lp->ptid.pid ());
      int options = linux_nat_ptrace_options (inf->attach_flag);

      linux_enable_event_reporting (lp->ptid.lwp (), options);
      lp->must_set_ptrace_flags = 0;
    }

  /* Handle GNU/Linux's syscall SIGTRAPs.  */
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
    {
      /* No longer need the sysgood bit.  The ptrace event ends up
	 recorded in lp->waitstatus if we care for it.  We can carry
	 on handling the event like a regular SIGTRAP from here
	 on.  */
      status = W_STOPCODE (SIGTRAP);
      if (linux_handle_syscall_trap (lp, 0))
	return;
    }
  else
    {
      /* Almost all other ptrace-stops are known to be outside of system
	 calls, with further exceptions in linux_handle_extended_wait.  */
      lp->syscall_state = TARGET_WAITKIND_IGNORE;
    }

  /* Handle GNU/Linux's extended waitstatus for trace events.  */
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
      && linux_is_extended_waitstatus (status))
    {
      linux_nat_debug_printf ("Handling extended status 0x%06x", status);

      if (linux_handle_extended_wait (lp, status))
	return;
    }

  /* Check if the thread has exited.  */
  if (WIFEXITED (status) || WIFSIGNALED (status))
    {
      if (!report_exit_events_for (lp) && !is_leader (lp))
	{
	  linux_nat_debug_printf ("%s exited.",
				  lp->ptid.to_string ().c_str ());

	  /* If this was not the leader exiting, then the exit signal
	     was not the end of the debugged application and should be
	     ignored.  */
	  exit_lwp (lp);
	  return;
	}

      /* Note that even if the leader was ptrace-stopped, it can still
	 exit, if e.g., some other thread brings down the whole
	 process (calls `exit').  So don't assert that the lwp is
	 resumed.  */
      linux_nat_debug_printf ("LWP %ld exited (resumed=%d)",
			      lp->ptid.lwp (), lp->resumed);

      mark_lwp_dead (lp, status);
      return;
    }

  /* Make sure we don't report a SIGSTOP that we sent ourselves in
     an attempt to stop an LWP.  */
  if (lp->signalled
      && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
    {
      lp->signalled = 0;

      if (lp->last_resume_kind == resume_stop)
	{
	  /* The core asked for this stop; fall through and report
	     it.  */
	  linux_nat_debug_printf ("resume_stop SIGSTOP caught for %s.",
				  lp->ptid.to_string ().c_str ());
	}
      else
	{
	  /* This is a delayed SIGSTOP.  Filter out the event.  */

	  linux_nat_debug_printf
	    ("%s %s, 0, 0 (discard delayed SIGSTOP)",
	     lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
	     lp->ptid.to_string ().c_str ());

	  linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
	  gdb_assert (lp->resumed);
	  return;
	}
    }

  /* Make sure we don't report a SIGINT that we have already displayed
     for another thread.  */
  if (lp->ignore_sigint
      && WIFSTOPPED (status) && WSTOPSIG (status) == SIGINT)
    {
      linux_nat_debug_printf ("Delayed SIGINT caught for %s.",
			      lp->ptid.to_string ().c_str ());

      /* This is a delayed SIGINT.  */
      lp->ignore_sigint = 0;

      linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
      linux_nat_debug_printf ("%s %s, 0, 0 (discard SIGINT)",
			      lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
			      lp->ptid.to_string ().c_str ());
      gdb_assert (lp->resumed);

      /* Discard the event.  */
      return;
    }

  /* Don't report signals that GDB isn't interested in, such as
     signals that are neither printed nor stopped upon.  Stopping all
     threads can be a bit time-consuming, so if we want decent
     performance with heavily multi-threaded programs, especially when
     they're using a high frequency timer, we'd better avoid it if we
     can.  */
  if (WIFSTOPPED (status))
    {
      enum gdb_signal signo = gdb_signal_from_host (WSTOPSIG (status));

      if (!target_is_non_stop_p ())
	{
	  /* Only do the below in all-stop, as we currently use SIGSTOP
	     to implement target_stop (see linux_nat_stop) in
	     non-stop.  */
	  if (signo == GDB_SIGNAL_INT && signal_pass_state (signo) == 0)
	    {
	      /* If ^C/BREAK is typed at the tty/console, SIGINT gets
		 forwarded to the entire process group, that is, all LWPs
		 will receive it - unless they're using CLONE_THREAD to
		 share signals.  Since we only want to report it once, we
		 mark it as ignored for all LWPs except this one.  */
	      iterate_over_lwps (ptid_t (lp->ptid.pid ()), set_ignore_sigint);
	      lp->ignore_sigint = 0;
	    }
	  else
	    maybe_clear_ignore_sigint (lp);
	}

      /* When using hardware single-step, we need to report every signal.
	 Otherwise, signals in pass_mask may be short-circuited
	 except signals that might be caused by a breakpoint, or SIGSTOP
	 if we sent the SIGSTOP and are waiting for it to arrive.  */
      if (!lp->step
	  && WSTOPSIG (status) && sigismember (&pass_mask, WSTOPSIG (status))
	  && (WSTOPSIG (status) != SIGSTOP
	      || !linux_target->find_thread (lp->ptid)->stop_requested)
	  && !linux_wstatus_maybe_breakpoint (status))
	{
	  linux_resume_one_lwp (lp, lp->step, signo);
	  linux_nat_debug_printf
	    ("%s %s, %s (preempt 'handle')",
	     lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
	     lp->ptid.to_string ().c_str (),
	     (signo != GDB_SIGNAL_0
	      ? strsignal (gdb_signal_to_host (signo)) : "0"));
	  return;
	}
    }

  /* An interesting event.  */
  gdb_assert (lp);
  lp->status = status;
  save_stop_reason (lp);
}
/* Detect zombie thread group leaders, and "exit" them.  We can't reap
   their exits until all other threads in the group have exited.  */

static void
check_zombie_leaders (void)
{
  for (inferior *inf : all_inferiors ())
    {
      struct lwp_info *leader_lp;

      /* Skip inferiors not bound to a process.  */
      if (inf->pid == 0)
	continue;

      leader_lp = find_lwp_pid (ptid_t (inf->pid));
      if (leader_lp != NULL
	  /* Check if there are other threads in the group, as we may
	     have raced with the inferior simply exiting.  Note this
	     isn't a watertight check.  If the inferior is
	     multi-threaded and is exiting, it may be we see the
	     leader as zombie before we reap all the non-leader
	     threads.  See comments below.  */
	  && num_lwps (inf->pid) > 1
	  && linux_proc_pid_is_zombie (inf->pid))
	{
	  /* A zombie leader in a multi-threaded program can mean one
	     of three things:

	     #1 - Only the leader exited, not the whole program, e.g.,
	     with pthread_exit.  Since we can't reap the leader's exit
	     status until all other threads are gone and reaped too,
	     we want to delete the zombie leader right away, as it
	     can't be debugged, we can't read its registers, etc.
	     This is the main reason we check for zombie leaders
	     disappearing.

	     #2 - The whole thread-group/process exited (a group exit,
	     via e.g. exit(3), and there is (or will be shortly) an
	     exit reported for each thread in the process, and then
	     finally an exit for the leader once the non-leaders are
	     reaped.

	     #3 - There are 3 or more threads in the group, and a
	     thread other than the leader exec'd.  See comments on
	     exec events at the top of the file.

	     Ideally we would never delete the leader for case #2.
	     Instead, we want to collect the exit status of each
	     non-leader thread, and then finally collect the exit
	     status of the leader as normal and use its exit code as
	     whole-process exit code.  Unfortunately, there's no
	     race-free way to distinguish cases #1 and #2.  We can't
	     assume the exit events for the non-leaders threads are
	     already pending in the kernel, nor can we assume the
	     non-leader threads are in zombie state already.  Between
	     the leader becoming zombie and the non-leaders exiting
	     and becoming zombie themselves, there's a small time
	     window, so such a check would be racy.  Temporarily
	     pausing all threads and checking to see if all threads
	     exit or not before re-resuming them would work in the
	     case that all threads are running right now, but it
	     wouldn't work if some thread is currently already
	     ptrace-stopped, e.g., due to scheduler-locking.

	     So what we do is we delete the leader anyhow, and then
	     later on when we see its exit status, we re-add it back.
	     We also make sure that we only report a whole-process
	     exit when we see the leader exiting, as opposed to when
	     the last LWP in the LWP list exits, which can be a
	     non-leader if we deleted the leader here.  */
	  linux_nat_debug_printf ("Thread group leader %d zombie "
				  "(it exited, or another thread execd), "
				  "deleting it.",
				  inf->pid);
	  exit_lwp (leader_lp);
	}
    }
}
/* Convenience function that is called when we're about to return an
   event to the core.  If the event is an exit or signalled event,
   then this decides whether to report it as process-wide event, as a
   thread exit event, or to suppress it.  All other event kinds are
   passed through unmodified.  Returns the ptid the event should be
   reported for (EVENT_CHILD's own ptid).  */

static ptid_t
filter_exit_event (struct lwp_info *event_child,
		   struct target_waitstatus *ourstatus)
{
  ptid_t ptid = event_child->ptid;

  /* Note we must filter TARGET_WAITKIND_SIGNALLED as well, otherwise
     if a non-leader thread exits with a signal, we'd report it to the
     core which would interpret it as the whole-process exiting.
     There is no TARGET_WAITKIND_THREAD_SIGNALLED event kind.  */
  if (ourstatus->kind () != TARGET_WAITKIND_EXITED
      && ourstatus->kind () != TARGET_WAITKIND_SIGNALLED)
    return ptid;

  /* Only the leader's exit is reported as a whole-process exit; a
     non-leader exit is either downgraded to a thread-exit event or
     suppressed entirely.  */
  if (!is_leader (event_child))
    {
      if (report_exit_events_for (event_child))
	{
	  ourstatus->set_thread_exited (0);
	  /* Delete lwp, but not thread_info, infrun will need it to
	     process the event.  */
	  exit_lwp (event_child, false);
	}
      else
	{
	  ourstatus->set_ignore ();
	  exit_lwp (event_child);
	}
    }

  return ptid;
}
/* Worker for linux_nat_target::wait.  Waits for an event on PTID (or
   any LWP, for minus_one_ptid), fills in OURSTATUS, and returns the
   ptid of the LWP the event is for.  TARGET_OPTIONS may include
   TARGET_WNOHANG.  Runs with SIGCHLD blocked between
   block_child_signals/restore_child_signals_mask so that events can
   only be delivered via the sigsuspend in wait_for_signal.  */

static ptid_t
linux_nat_wait_1 (ptid_t ptid, struct target_waitstatus *ourstatus,
		  target_wait_flags target_options)
{
  LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT;

  sigset_t prev_mask;
  enum resume_kind last_resume_kind;
  struct lwp_info *lp;
  int status;

  /* The first time we get here after starting a new inferior, we may
     not have added it to the LWP list yet - this is the earliest
     moment at which we know its PID.  */
  if (ptid.is_pid () && find_lwp_pid (ptid) == nullptr)
    {
      ptid_t lwp_ptid (ptid.pid (), ptid.pid ());

      /* Upgrade the main thread's ptid.  */
      thread_change_ptid (linux_target, ptid, lwp_ptid);
      lp = add_initial_lwp (lwp_ptid);
      lp->resumed = 1;
    }

  /* Make sure SIGCHLD is blocked until the sigsuspend below.  */
  block_child_signals (&prev_mask);

  /* First check if there is a LWP with a wait status pending.  */
  lp = iterate_over_lwps (ptid, status_callback);
  if (lp != NULL)
    {
      linux_nat_debug_printf ("Using pending wait status %s for %s.",
			      pending_status_str (lp).c_str (),
			      lp->ptid.to_string ().c_str ());
    }

  /* But if we don't find a pending event, we'll have to wait.  Always
     pull all events out of the kernel.  We'll randomly select an
     event LWP out of all that have events, to prevent starvation.  */

  while (lp == NULL)
    {
      pid_t lwpid;

      /* Always use -1 and WNOHANG, due to couple of a kernel/ptrace
	 quirks:

	 - If the thread group leader exits while other threads in the
	   thread group still exist, waitpid(TGID, ...) hangs.  That
	   waitpid won't return an exit status until the other threads
	   in the group are reaped.

	 - When a non-leader thread execs, that thread just vanishes
	   without reporting an exit (so we'd hang if we waited for it
	   explicitly in that case).  The exec event is reported to
	   the TGID pid.  */

      errno = 0;
      lwpid = my_waitpid (-1, &status, __WALL | WNOHANG);

      linux_nat_debug_printf ("waitpid(-1, ...) returned %d, %s",
			      lwpid,
			      errno ? safe_strerror (errno) : "ERRNO-OK");

      if (lwpid > 0)
	{
	  linux_nat_debug_printf ("waitpid %ld received %s",
				  (long) lwpid,
				  status_to_str (status).c_str ());

	  linux_nat_filter_event (lwpid, status);
	  /* Retry until nothing comes out of waitpid.  A single
	     SIGCHLD can indicate more than one child stopped.  */
	  continue;
	}

      /* Now that we've pulled all events out of the kernel, resume
	 LWPs that don't have an interesting event to report.  */
      iterate_over_lwps (minus_one_ptid,
			 [] (struct lwp_info *info)
			 {
			   return resume_stopped_resumed_lwps (info,
							       minus_one_ptid);
			 });

      /* ... and find an LWP with a status to report to the core, if
	 any.  */
      lp = iterate_over_lwps (ptid, status_callback);
      if (lp != NULL)
	break;

      /* Check for zombie thread group leaders.  Those can't be reaped
	 until all other threads in the thread group are.  */
      check_zombie_leaders ();

      /* If there are no resumed children left, bail.  We'd be stuck
	 forever in the sigsuspend call below otherwise.  */
      if (iterate_over_lwps (ptid, resumed_callback) == NULL)
	{
	  linux_nat_debug_printf ("exit (no resumed LWP)");

	  ourstatus->set_no_resumed ();

	  restore_child_signals_mask (&prev_mask);
	  return minus_one_ptid;
	}

      /* No interesting event to report to the core.  */

      if (target_options & TARGET_WNOHANG)
	{
	  linux_nat_debug_printf ("no interesting events found");

	  ourstatus->set_ignore ();
	  restore_child_signals_mask (&prev_mask);
	  return minus_one_ptid;
	}

      /* We shouldn't end up here unless we want to try again.  */
      gdb_assert (lp == NULL);

      /* Block until we get an event reported with SIGCHLD.  */
      wait_for_signal ();
    }

  gdb_assert (lp);
  gdb_assert (lp->stopped);

  status = lp->status;
  lp->status = 0;

  if (!target_is_non_stop_p ())
    {
      /* Now stop all other LWP's ...  */
      iterate_over_lwps (minus_one_ptid, stop_callback);

      /* ... and wait until all of them have reported back that
	 they're no longer running.  */
      iterate_over_lwps (minus_one_ptid, stop_wait_callback);
    }

  /* If we're not waiting for a specific LWP, choose an event LWP from
     among those that have had events.  Giving equal priority to all
     LWPs that have had events helps prevent starvation.  */
  if (ptid == minus_one_ptid || ptid.is_pid ())
    select_event_lwp (ptid, &lp, &status);

  gdb_assert (lp != NULL);

  /* We'll need this to determine whether to report a SIGSTOP as
     GDB_SIGNAL_0.  Need to take a copy because resume_clear_callback
     clears it.  */
  last_resume_kind = lp->last_resume_kind;

  if (!target_is_non_stop_p ())
    {
      /* In all-stop, from the core's perspective, all LWPs are now
	 stopped until a new resume action is sent over.  */
      iterate_over_lwps (minus_one_ptid, resume_clear_callback);
    }
  else
    resume_clear_callback (lp);

  if (linux_target->low_status_is_event (status))
    {
      linux_nat_debug_printf ("trap ptid is %s.",
			      lp->ptid.to_string ().c_str ());
    }

  /* An extended waitstatus recorded on the LWP (e.g., for a fork or
     exec event) takes precedence over the raw waitpid status.  */
  if (lp->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
    {
      *ourstatus = lp->waitstatus;
      lp->waitstatus.set_ignore ();
    }
  else
    *ourstatus = host_status_to_waitstatus (status);

  linux_nat_debug_printf ("event found");

  restore_child_signals_mask (&prev_mask);

  if (last_resume_kind == resume_stop
      && ourstatus->kind () == TARGET_WAITKIND_STOPPED
      && WSTOPSIG (status) == SIGSTOP)
    {
      /* A thread that has been requested to stop by GDB with
	 target_stop, and it stopped cleanly, so report as SIG0.  The
	 use of SIGSTOP is an implementation detail.  */
      ourstatus->set_stopped (GDB_SIGNAL_0);
    }

  if (ourstatus->kind () == TARGET_WAITKIND_EXITED
      || ourstatus->kind () == TARGET_WAITKIND_SIGNALLED)
    lp->core = -1;
  else
    lp->core = linux_common_core_of_thread (lp->ptid);

  return filter_exit_event (lp, ourstatus);
}
/* Resume LWPs that are currently stopped without any pending status
   to report, but are resumed from the core's perspective.  LWPs get
   in this state when we stopped them for an event the core turned
   out not to be interested in.  WAIT_PTID is the ptid the caller is
   about to wait on; LWPs not matching it are left stopped if they'd
   immediately re-trip a breakpoint.  Always returns 0 so it can be
   used as an iterate_over_lwps callback that visits every LWP.  */

static int
resume_stopped_resumed_lwps (struct lwp_info *lp, const ptid_t wait_ptid)
{
  inferior *inf = lwp_inferior (lp);

  if (!lp->stopped)
    {
      linux_nat_debug_printf ("NOT resuming LWP %s, not stopped",
			      lp->ptid.to_string ().c_str ());
    }
  else if (!lp->resumed)
    {
      linux_nat_debug_printf ("NOT resuming LWP %s, not resumed",
			      lp->ptid.to_string ().c_str ());
    }
  else if (lwp_status_pending_p (lp))
    {
      linux_nat_debug_printf ("NOT resuming LWP %s, has pending status",
			      lp->ptid.to_string ().c_str ());
    }
  else if (inf->vfork_child != nullptr)
    {
      /* A vfork parent must stay stopped until the child releases
	 it.  */
      linux_nat_debug_printf ("NOT resuming LWP %s (vfork parent)",
			      lp->ptid.to_string ().c_str ());
    }
  else
    {
      struct regcache *regcache = get_thread_regcache (linux_target, lp->ptid);
      struct gdbarch *gdbarch = regcache->arch ();

      try
	{
	  CORE_ADDR pc = regcache_read_pc (regcache);
	  int leave_stopped = 0;

	  /* Don't bother if there's a breakpoint at PC that we'd hit
	     immediately, and we're not waiting for this LWP.  */
	  if (!lp->ptid.matches (wait_ptid))
	    {
	      if (breakpoint_inserted_here_p (inf->aspace.get (), pc))
		leave_stopped = 1;
	    }

	  if (!leave_stopped)
	    {
	      linux_nat_debug_printf
		("resuming stopped-resumed LWP %s at %s: step=%d",
		 lp->ptid.to_string ().c_str (), paddress (gdbarch, pc),
		 lp->step);

	      linux_resume_one_lwp_throw (lp, lp->step, GDB_SIGNAL_0);
	    }
	}
      catch (const gdb_exception_error &ex)
	{
	  /* The LWP may have been killed behind our back; that's not
	     an error, just swallow the exception in that case.  */
	  if (!check_ptrace_stopped_lwp_gone (lp))
	    throw;
	}
    }

  return 0;
}
/* Implementation of the "wait" target method.  Drains the async event
   pipe, re-resumes stragglers in non-stop mode, delegates to
   linux_nat_wait_1, and re-marks the async pipe if more events may be
   pending.  */

ptid_t
linux_nat_target::wait (ptid_t ptid, struct target_waitstatus *ourstatus,
			target_wait_flags target_options)
{
  LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT;

  ptid_t event_ptid;

  linux_nat_debug_printf ("[%s], [%s]", ptid.to_string ().c_str (),
			  target_options_to_string (target_options).c_str ());

  /* Flush the async file first.  */
  if (target_is_async_p ())
    async_file_flush ();

  /* Resume LWPs that are currently stopped without any pending status
     to report, but are resumed from the core's perspective.  LWPs get
     in this state if we find them stopping at a time we're not
     interested in reporting the event (target_wait on a
     specific_process, for example, see linux_nat_wait_1), and
     meanwhile the event became uninteresting.  Don't bother resuming
     LWPs we're not going to wait for if they'd stop immediately.  */
  if (target_is_non_stop_p ())
    iterate_over_lwps (minus_one_ptid,
		       [=] (struct lwp_info *info)
		       {
			 return resume_stopped_resumed_lwps (info, ptid);
		       });

  event_ptid = linux_nat_wait_1 (ptid, ourstatus, target_options);

  /* If we requested any event, and something came out, assume there
     may be more.  If we requested a specific lwp or process, also
     assume there may be more.  */
  if (target_is_async_p ()
      && ((ourstatus->kind () != TARGET_WAITKIND_IGNORE
	   && ourstatus->kind () != TARGET_WAITKIND_NO_RESUMED)
	  || ptid != minus_one_ptid))
    async_file_mark ();

  return event_ptid;
}
3596 /* Kill one LWP. */
3598 static void
3599 kill_one_lwp (pid_t pid)
3601 /* PTRACE_KILL may resume the inferior. Send SIGKILL first. */
3603 errno = 0;
3604 kill_lwp (pid, SIGKILL);
3606 if (debug_linux_nat)
3608 int save_errno = errno;
3610 linux_nat_debug_printf
3611 ("kill (SIGKILL) %ld, 0, 0 (%s)", (long) pid,
3612 save_errno != 0 ? safe_strerror (save_errno) : "OK");
3615 /* Some kernels ignore even SIGKILL for processes under ptrace. */
3617 errno = 0;
3618 ptrace (PTRACE_KILL, pid, 0, 0);
3619 if (debug_linux_nat)
3621 int save_errno = errno;
3623 linux_nat_debug_printf
3624 ("PTRACE_KILL %ld, 0, 0 (%s)", (long) pid,
3625 save_errno ? safe_strerror (save_errno) : "OK");
/* Wait for an LWP to die.  PID is the kernel LWP id previously passed
   to kill_one_lwp.  Loops until waitpid reports the LWP is fully
   gone (returns -1/ECHILD).  */

static void
kill_wait_one_lwp (pid_t pid)
{
  pid_t res;

  /* We must make sure that there are no pending events (delayed
     SIGSTOPs, pending SIGTRAPs, etc.) to make sure the current
     program doesn't interfere with any following debugging session.  */

  do
    {
      res = my_waitpid (pid, NULL, __WALL);
      if (res != (pid_t) -1)
	{
	  linux_nat_debug_printf ("wait %ld received unknown.", (long) pid);

	  /* The Linux kernel sometimes fails to kill a thread
	     completely after PTRACE_KILL; that goes from the stop
	     point in do_fork out to the one in get_signal_to_deliver
	     and waits again.  So kill it again.  */
	  kill_one_lwp (pid);
	}
    }
  while (res == pid);

  /* The loop only exits once waitpid fails; by then the only
     acceptable failure is "no such child".  Note this relies on
     errno from the final my_waitpid call.  */
  gdb_assert (res == -1 && errno == ECHILD);
}
3659 /* Callback for iterate_over_lwps. */
3661 static int
3662 kill_callback (struct lwp_info *lp)
3664 kill_one_lwp (lp->ptid.lwp ());
3665 return 0;
3668 /* Callback for iterate_over_lwps. */
3670 static int
3671 kill_wait_callback (struct lwp_info *lp)
3673 kill_wait_one_lwp (lp->ptid.lwp ());
3674 return 0;
/* Kill the fork/clone child of LP if it has an unfollowed child.
   Callback for iterate_over_lwps; always returns 0 so every LWP is
   visited.  */

static int
kill_unfollowed_child_callback (lwp_info *lp)
{
  std::optional<target_waitstatus> ws = get_pending_child_status (lp);
  if (ws.has_value ())
    {
      ptid_t child_ptid = ws->child_ptid ();
      int child_pid = child_ptid.pid ();
      int child_lwp = child_ptid.lwp ();

      kill_one_lwp (child_lwp);
      kill_wait_one_lwp (child_lwp);

      /* Let the arch-specific native code know this process is
	 gone.  A cloned child shares its parent's process, so only
	 forget the process for fork/vfork children.  */
      if (ws->kind () != TARGET_WAITKIND_THREAD_CLONED)
	linux_target->low_forget_process (child_pid);
    }

  return 0;
}
/* Implementation of the "kill" target method: kill every LWP of the
   current inferior, then mourn it.  */

void
linux_nat_target::kill ()
{
  ptid_t pid_ptid (inferior_ptid.pid ());

  /* If we're stopped while forking/cloning and we haven't followed
     yet, kill the child task.  We need to do this first because the
     parent will be sleeping if this is a vfork.  */
  iterate_over_lwps (pid_ptid, kill_unfollowed_child_callback);

  if (forks_exist_p ())
    linux_fork_killall ();
  else
    {
      /* Stop all threads before killing them, since ptrace requires
	 that the thread is stopped to successfully PTRACE_KILL.  */
      iterate_over_lwps (pid_ptid, stop_callback);
      /* ... and wait until all of them have reported back that
	 they're no longer running.  */
      iterate_over_lwps (pid_ptid, stop_wait_callback);

      /* Kill all LWP's ...  */
      iterate_over_lwps (pid_ptid, kill_callback);

      /* ... and wait until we've flushed all events.  */
      iterate_over_lwps (pid_ptid, kill_wait_callback);
    }

  target_mourn_inferior (inferior_ptid);
}
/* Implementation of the "mourn_inferior" target method: clean up
   after the current inferior has exited or been killed.  */

void
linux_nat_target::mourn_inferior ()
{
  LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT;

  int pid = inferior_ptid.pid ();

  /* Drop all LWPs belonging to the dead process from our list.  */
  purge_lwp_list (pid);

  /* Release the /proc/PID/mem file descriptor.  */
  close_proc_mem_file (pid);

  if (! forks_exist_p ())
    /* Normal case, no other forks available.  */
    inf_ptrace_target::mourn_inferior ();
  else
    /* Multi-fork case.  The current inferior_ptid has exited, but
       there are other viable forks to debug.  Delete the exiting
       one and context-switch to the first available.  */
    linux_fork_mourn_inferior ();

  /* Let the arch-specific native code know this process is gone.  */
  linux_target->low_forget_process (pid);
}
3756 /* Convert a native/host siginfo object, into/from the siginfo in the
3757 layout of the inferiors' architecture. */
3759 static void
3760 siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
3762 /* If the low target didn't do anything, then just do a straight
3763 memcpy. */
3764 if (!linux_target->low_siginfo_fixup (siginfo, inf_siginfo, direction))
3766 if (direction == 1)
3767 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
3768 else
3769 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
/* Transfer (read or write) the siginfo of the thread PTID.  Helper
   for linux_nat_target::xfer_partial with TARGET_OBJECT_SIGNAL_INFO.
   Exactly one of READBUF/WRITEBUF is non-NULL.  OFFSET/LEN select a
   byte range within the (architecture-layout) siginfo.  On success
   stores the transferred byte count in *XFERED_LEN.  */

static enum target_xfer_status
linux_xfer_siginfo (ptid_t ptid, enum target_object object,
		    const char *annex, gdb_byte *readbuf,
		    const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
		    ULONGEST *xfered_len)
{
  siginfo_t siginfo;
  gdb_byte inf_siginfo[sizeof (siginfo_t)];

  gdb_assert (object == TARGET_OBJECT_SIGNAL_INFO);
  gdb_assert (readbuf || writebuf);

  if (offset > sizeof (siginfo))
    return TARGET_XFER_E_IO;

  if (!linux_nat_get_siginfo (ptid, &siginfo))
    return TARGET_XFER_E_IO;

  /* When GDB is built as a 64-bit application, ptrace writes into
     SIGINFO an object with 64-bit layout.  Since debugging a 32-bit
     inferior with a 64-bit GDB should look the same as debugging it
     with a 32-bit GDB, we need to convert it.  GDB core always sees
     the converted layout, so any read/write will have to be done
     post-conversion.  */
  siginfo_fixup (&siginfo, inf_siginfo, 0);

  /* Clamp the requested range to the siginfo size.  */
  if (offset + len > sizeof (siginfo))
    len = sizeof (siginfo) - offset;

  if (readbuf != NULL)
    memcpy (readbuf, inf_siginfo + offset, len);
  else
    {
      memcpy (inf_siginfo + offset, writebuf, len);

      /* Convert back to ptrace layout before flushing it out.  */
      siginfo_fixup (&siginfo, inf_siginfo, 1);

      int pid = get_ptrace_pid (ptid);
      errno = 0;
      ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
      if (errno != 0)
	return TARGET_XFER_E_IO;
    }

  *xfered_len = len;
  return TARGET_XFER_OK;
}
/* Forward declarations; the definitions appear further below, but
   xfer_partial needs to call them.  */

static enum target_xfer_status
linux_nat_xfer_osdata (enum target_object object,
		       const char *annex, gdb_byte *readbuf,
		       const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
		       ULONGEST *xfered_len);

static enum target_xfer_status
linux_proc_xfer_memory_partial (int pid, gdb_byte *readbuf,
				const gdb_byte *writebuf, ULONGEST offset,
				LONGEST len, ULONGEST *xfered_len);
/* Implementation of the "xfer_partial" target method.  Dispatches on
   OBJECT: siginfo, auxv and osdata are handled specially; memory goes
   through /proc/PID/mem when possible; everything else falls back to
   the inf-ptrace implementation.  */

enum target_xfer_status
linux_nat_target::xfer_partial (enum target_object object,
				const char *annex, gdb_byte *readbuf,
				const gdb_byte *writebuf,
				ULONGEST offset, ULONGEST len, ULONGEST *xfered_len)
{
  if (object == TARGET_OBJECT_SIGNAL_INFO)
    return linux_xfer_siginfo (inferior_ptid, object, annex, readbuf, writebuf,
			       offset, len, xfered_len);

  /* The target is connected but no live inferior is selected.  Pass
     this request down to a lower stratum (e.g., the executable
     file).  */
  if (object == TARGET_OBJECT_MEMORY && inferior_ptid == null_ptid)
    return TARGET_XFER_EOF;

  if (object == TARGET_OBJECT_AUXV)
    return memory_xfer_auxv (this, object, annex, readbuf, writebuf,
			     offset, len, xfered_len);

  if (object == TARGET_OBJECT_OSDATA)
    return linux_nat_xfer_osdata (object, annex, readbuf, writebuf,
				  offset, len, xfered_len);

  if (object == TARGET_OBJECT_MEMORY)
    {
      /* GDB calculates all addresses in the largest possible address
	 width.  The address width must be masked before its final use
	 by linux_proc_xfer_partial.

	 Compare ADDR_BIT first to avoid a compiler warning on shift overflow.  */
      int addr_bit = gdbarch_addr_bit (current_inferior ()->arch ());

      if (addr_bit < (sizeof (ULONGEST) * HOST_CHAR_BIT))
	offset &= ((ULONGEST) 1 << addr_bit) - 1;

      /* If /proc/pid/mem is writable, don't fallback to ptrace.  If
	 the write via /proc/pid/mem fails because the inferior execed
	 (and we haven't seen the exec event yet), a subsequent ptrace
	 poke would incorrectly write memory to the post-exec address
	 space, while the core was trying to write to the pre-exec
	 address space.  */
      if (proc_mem_file_is_writable ())
	return linux_proc_xfer_memory_partial (inferior_ptid.pid (), readbuf,
					       writebuf, offset, len,
					       xfered_len);
    }

  return inf_ptrace_target::xfer_partial (object, annex, readbuf, writebuf,
					  offset, len, xfered_len);
}
/* Implementation of the "thread_alive" target method.  */

bool
linux_nat_target::thread_alive (ptid_t ptid)
{
  /* As long as a PTID is in lwp list, consider it alive.  The list
     is kept in sync as clone/exit events are processed.  */
  return find_lwp_pid (ptid) != NULL;
}
/* Implement the to_update_thread_list target method for this
   target.  */

void
linux_nat_target::update_thread_list ()
{
  /* We add/delete threads from the list as clone/exit events are
     processed, so just try deleting exited threads still in the
     thread list.  */
  delete_exited_threads ();

  /* Update the processor core that each lwp/thread was last seen
     running on.  */
  for (lwp_info *lwp : all_lwps ())
    {
      /* Avoid accessing /proc if the thread hasn't run since we last
	 time we fetched the thread's core.  Accessing /proc becomes
	 noticeably expensive when we have thousands of LWPs.  */
      if (lwp->core == -1)
	lwp->core = linux_common_core_of_thread (lwp->ptid);
    }
}
3915 std::string
3916 linux_nat_target::pid_to_str (ptid_t ptid)
3918 if (ptid.lwp_p ()
3919 && (ptid.pid () != ptid.lwp ()
3920 || num_lwps (ptid.pid ()) > 1))
3921 return string_printf ("LWP %ld", ptid.lwp ());
3923 return normal_pid_to_str (ptid);
/* Implementation of the "thread_name" target method.  Reads the name
   from /proc via linux_proc_tid_get_name.  */

const char *
linux_nat_target::thread_name (struct thread_info *thr)
{
  return linux_proc_tid_get_name (thr->ptid);
}
/* Accepts an integer PID; Returns a string representing a file that
   can be opened to get the symbols for the child process.  */

const char *
linux_nat_target::pid_to_exec_file (int pid)
{
  return linux_proc_pid_to_exec_file (pid);
}
/* Object representing an /proc/PID/mem open file.  We keep one such
   file open per inferior.

   It might be tempting to think about only ever opening one file at
   most for all inferiors, closing/reopening the file as we access
   memory of different inferiors, to minimize number of file
   descriptors open, which can otherwise run into resource limits.
   However, that does not work correctly -- if the inferior execs and
   we haven't processed the exec event yet, and, we opened a
   /proc/PID/mem file, we will get a mem file accessing the post-exec
   address space, thinking we're opening it for the pre-exec address
   space.  That is dangerous as we can poke memory (e.g. clearing
   breakpoints) in the post-exec memory by mistake, corrupting the
   inferior.  For that reason, we open the mem file as early as
   possible, right after spawning, forking or attaching to the
   inferior, when the inferior is stopped and thus before it has a
   chance of execing.

   Note that after opening the file, even if the thread we opened it
   for subsequently exits, the open file is still usable for accessing
   memory.  It's only when the whole process exits or execs that the
   file becomes invalid, at which point reads/writes return EOF.  */

class proc_mem_file
{
public:
  /* Take ownership of the already-open descriptor FD, opened for the
     LWP identified by PTID.  */
  proc_mem_file (ptid_t ptid, int fd)
    : m_ptid (ptid), m_fd (fd)
  {
    gdb_assert (m_fd != -1);
  }

  /* Closing the object closes the underlying file descriptor.  */
  ~proc_mem_file ()
  {
    linux_nat_debug_printf ("closing fd %d for /proc/%d/task/%ld/mem",
			    m_fd, m_ptid.pid (), m_ptid.lwp ());
    close (m_fd);
  }

  /* Not copyable; the destructor closes M_FD, so a copy would
     double-close.  */
  DISABLE_COPY_AND_ASSIGN (proc_mem_file);

  int fd ()
  {
    return m_fd;
  }

private:
  /* The LWP this file was opened for.  Just for debugging
     purposes.  */
  ptid_t m_ptid;

  /* The file descriptor.  */
  int m_fd = -1;
};
/* The map between an inferior process id, and the open /proc/PID/mem
   file.  This is stored in a map instead of in a per-inferior
   structure because we need to be able to access memory of processes
   which don't have a corresponding struct inferior object.  E.g.,
   with "detach-on-fork on" (the default), and "follow-fork parent"
   (also default), we don't create an inferior for the fork child, but
   we still need to remove breakpoints from the fork child's
   memory.  */
static std::unordered_map<int, proc_mem_file> proc_mem_file_map;
4006 /* Close the /proc/PID/mem file for PID. */
4008 static void
4009 close_proc_mem_file (pid_t pid)
4011 proc_mem_file_map.erase (pid);
/* Open the /proc/PID/mem file for the process (thread group) of PTID.
   We actually open /proc/PID/task/LWP/mem, as that's the LWP we know
   exists and is stopped right now.  We prefer the
   /proc/PID/task/LWP/mem form over /proc/LWP/mem to avoid tid-reuse
   races, just in case this is ever called on an already-waited
   LWP.  */

static void
open_proc_mem_file (ptid_t ptid)
{
  /* Only one mem file per process; the caller must not have opened
     one already.  */
  auto iter = proc_mem_file_map.find (ptid.pid ());
  gdb_assert (iter == proc_mem_file_map.end ());

  char filename[64];
  xsnprintf (filename, sizeof filename,
	     "/proc/%d/task/%ld/mem", ptid.pid (), ptid.lwp ());

  int fd = gdb_open_cloexec (filename, O_RDWR | O_LARGEFILE, 0).release ();

  if (fd == -1)
    {
      /* Non-fatal: memory access will fall back to other methods or
	 report EOF.  */
      warning (_("opening /proc/PID/mem file for lwp %d.%ld failed: %s (%d)"),
	       ptid.pid (), ptid.lwp (),
	       safe_strerror (errno), errno);
      return;
    }

  /* Construct the proc_mem_file in place; it is not copyable, so
     emplace with piecewise construction.  */
  proc_mem_file_map.emplace (std::piecewise_construct,
			     std::forward_as_tuple (ptid.pid ()),
			     std::forward_as_tuple (ptid, fd));

  linux_nat_debug_printf ("opened fd %d for lwp %d.%ld",
			  fd, ptid.pid (), ptid.lwp ());
}
/* Helper for linux_proc_xfer_memory_partial and
   proc_mem_file_is_writable.  FD is the already opened /proc/pid/mem
   file, and PID is the pid of the corresponding process.  The rest of
   the arguments are like linux_proc_xfer_memory_partial's.  */

static enum target_xfer_status
linux_proc_xfer_memory_partial_fd (int fd, int pid,
				   gdb_byte *readbuf, const gdb_byte *writebuf,
				   ULONGEST offset, LONGEST len,
				   ULONGEST *xfered_len)
{
  ssize_t ret;

  gdb_assert (fd != -1);

  /* Use pread64/pwrite64 if available, since they save a syscall and
     can handle 64-bit offsets even on 32-bit platforms (for instance,
     SPARC debugging a SPARC64 application).  But only use them if the
     offset isn't so high that when cast to off_t it'd be negative, as
     seen on SPARC64.  pread64/pwrite64 outright reject such offsets.
     lseek does not.  */
#ifdef HAVE_PREAD64
  if ((off_t) offset >= 0)
    ret = (readbuf != nullptr
	   ? pread64 (fd, readbuf, len, offset)
	   : pwrite64 (fd, writebuf, len, offset));
  else
#endif
    {
      ret = lseek (fd, offset, SEEK_SET);
      if (ret != -1)
	ret = (readbuf != nullptr
	       ? read (fd, readbuf, len)
	       : write (fd, writebuf, len));
    }

  if (ret == -1)
    {
      linux_nat_debug_printf ("accessing fd %d for pid %d failed: %s (%d)",
			      fd, pid, safe_strerror (errno), errno);
      return TARGET_XFER_E_IO;
    }
  else if (ret == 0)
    {
      /* EOF means the address space is gone, the whole process exited
	 or execed.  */
      linux_nat_debug_printf ("accessing fd %d for pid %d got EOF",
			      fd, pid);
      return TARGET_XFER_EOF;
    }
  else
    {
      /* Partial transfers are fine; the caller retries for the
	 rest.  */
      *xfered_len = ret;
      return TARGET_XFER_OK;
    }
}
4106 /* Implement the to_xfer_partial target method using /proc/PID/mem.
4107 Because we can use a single read/write call, this can be much more
4108 efficient than banging away at PTRACE_PEEKTEXT. Also, unlike
4109 PTRACE_PEEKTEXT/PTRACE_POKETEXT, this works with running
4110 threads. */
4112 static enum target_xfer_status
4113 linux_proc_xfer_memory_partial (int pid, gdb_byte *readbuf,
4114 const gdb_byte *writebuf, ULONGEST offset,
4115 LONGEST len, ULONGEST *xfered_len)
4117 auto iter = proc_mem_file_map.find (pid);
4118 if (iter == proc_mem_file_map.end ())
4119 return TARGET_XFER_EOF;
4121 int fd = iter->second.fd ();
4123 return linux_proc_xfer_memory_partial_fd (fd, pid, readbuf, writebuf, offset,
4124 len, xfered_len);
/* Check whether /proc/pid/mem is writable in the current kernel, and
   return true if so.  It wasn't writable before Linux 2.6.39, but
   there's no way to know whether the feature was backported to older
   kernels.  So we check to see if it works.  The result is cached,
   and this is guaranteed to be called once early during inferior
   startup, so that any warning is printed out consistently between
   GDB invocations.  Note we don't call it during GDB startup instead
   though, because then we might warn with e.g. just "gdb --version"
   on sandboxed systems.  See PR gdb/29907.  */

static bool
proc_mem_file_is_writable ()
{
  /* Cached result of the probe; computed at most once per GDB
     session.  */
  static std::optional<bool> writable;

  if (writable.has_value ())
    return *writable;

  /* Pessimistic default; flipped to true only on a successful
     probe.  */
  writable.emplace (false);

  /* We check whether /proc/pid/mem is writable by trying to write to
     one of our variables via /proc/self/mem.  */

  int fd = gdb_open_cloexec ("/proc/self/mem", O_RDWR | O_LARGEFILE, 0).release ();

  if (fd == -1)
    {
      warning (_("opening /proc/self/mem file failed: %s (%d)"),
	       safe_strerror (errno), errno);
      return *writable;
    }

  SCOPE_EXIT { close (fd); };

  /* This is the variable we try to write to.  Note OFFSET below.  */
  volatile gdb_byte test_var = 0;

  gdb_byte writebuf[] = {0x55};
  ULONGEST offset = (uintptr_t) &test_var;
  ULONGEST xfered_len;

  enum target_xfer_status res
    = linux_proc_xfer_memory_partial_fd (fd, getpid (), nullptr, writebuf,
					 offset, 1, &xfered_len);

  if (res == TARGET_XFER_OK)
    {
      gdb_assert (xfered_len == 1);
      gdb_assert (test_var == 0x55);
      /* Success.  */
      *writable = true;
    }

  return *writable;
}
4183 /* Parse LINE as a signal set and add its set bits to SIGS. */
4185 static void
4186 add_line_to_sigset (const char *line, sigset_t *sigs)
4188 int len = strlen (line) - 1;
4189 const char *p;
4190 int signum;
4192 if (line[len] != '\n')
4193 error (_("Could not parse signal set: %s"), line);
4195 p = line;
4196 signum = len * 4;
4197 while (len-- > 0)
4199 int digit;
4201 if (*p >= '0' && *p <= '9')
4202 digit = *p - '0';
4203 else if (*p >= 'a' && *p <= 'f')
4204 digit = *p - 'a' + 10;
4205 else
4206 error (_("Could not parse signal set: %s"), line);
4208 signum -= 4;
4210 if (digit & 1)
4211 sigaddset (sigs, signum + 1);
4212 if (digit & 2)
4213 sigaddset (sigs, signum + 2);
4214 if (digit & 4)
4215 sigaddset (sigs, signum + 3);
4216 if (digit & 8)
4217 sigaddset (sigs, signum + 4);
4219 p++;
/* Find process PID's pending, blocked and ignored signals from
   /proc/pid/status, and set PENDING, BLOCKED and IGNORED to match.
   All three sets are cleared first.  Calls error() if the status file
   can't be opened or a mask line can't be parsed.  */

void
linux_proc_pending_signals (int pid, sigset_t *pending,
			    sigset_t *blocked, sigset_t *ignored)
{
  char buffer[PATH_MAX], fname[PATH_MAX];

  sigemptyset (pending);
  sigemptyset (blocked);
  sigemptyset (ignored);
  xsnprintf (fname, sizeof fname, "/proc/%d/status", pid);
  gdb_file_up procfile = gdb_fopen_cloexec (fname, "r");
  if (procfile == NULL)
    error (_("Could not open %s"), fname);

  while (fgets (buffer, PATH_MAX, procfile.get ()) != NULL)
    {
      /* Normal queued signals are on the SigPnd line in the status
	 file.  However, 2.6 kernels also have a "shared" pending
	 queue for delivering signals to a thread group, so check for
	 a ShdPnd line also.

	 Unfortunately some Red Hat kernels include the shared pending
	 queue but not the ShdPnd status field.  */

      if (startswith (buffer, "SigPnd:\t"))
	add_line_to_sigset (buffer + 8, pending);
      else if (startswith (buffer, "ShdPnd:\t"))
	add_line_to_sigset (buffer + 8, pending);
      else if (startswith (buffer, "SigBlk:\t"))
	add_line_to_sigset (buffer + 8, blocked);
      else if (startswith (buffer, "SigIgn:\t"))
	add_line_to_sigset (buffer + 8, ignored);
    }
}
4261 static enum target_xfer_status
4262 linux_nat_xfer_osdata (enum target_object object,
4263 const char *annex, gdb_byte *readbuf,
4264 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
4265 ULONGEST *xfered_len)
4267 gdb_assert (object == TARGET_OBJECT_OSDATA);
4269 *xfered_len = linux_common_xfer_osdata (annex, readbuf, offset, len);
4270 if (*xfered_len == 0)
4271 return TARGET_XFER_EOF;
4272 else
4273 return TARGET_XFER_OK;
/* Implementation of the "static_tracepoint_markers_by_strid" target
   method.  Queries the in-process agent for its marker list (qTfSTM /
   qTsSTM packets) and returns the markers whose string id matches
   STRID (or all markers if STRID is NULL).  The process is paused
   around the query.  */

std::vector<static_tracepoint_marker>
linux_nat_target::static_tracepoint_markers_by_strid (const char *strid)
{
  char s[IPA_CMD_BUF_SIZE];
  int pid = inferior_ptid.pid ();
  std::vector<static_tracepoint_marker> markers;
  const char *p = s;
  ptid_t ptid = ptid_t (pid, 0);
  static_tracepoint_marker marker;

  /* Pause all */
  target_stop (ptid);

  strcpy (s, "qTfSTM");
  agent_run_command (pid, s, strlen (s) + 1);

  /* Unpause all.  */
  SCOPE_EXIT { target_continue_no_signal (ptid); };

  /* Each 'm' reply carries one or more comma-separated marker
     definitions; keep asking with qTsSTM until the agent has no
     more.  */
  while (*p++ == 'm')
    {
      do
	{
	  parse_static_tracepoint_marker_definition (p, &p, &marker);

	  if (strid == NULL || marker.str_id == strid)
	    markers.push_back (std::move (marker));
	}
      while (*p++ == ',');	/* comma-separated list */

      strcpy (s, "qTsSTM");
      agent_run_command (pid, s, strlen (s) + 1);
      p = s;
    }

  return markers;
}
/* target_can_async_p implementation.  */

bool
linux_nat_target::can_async_p ()
{
  /* This flag should be checked in the common target.c code.  */
  gdb_assert (target_async_permitted);

  /* Otherwise, this target is always able to support async mode.  */
  return true;
}
/* target_supports_non_stop implementation: the native Linux target
   can always run in non-stop mode.  */

bool
linux_nat_target::supports_non_stop ()
{
  return true;
}
/* to_always_non_stop_p implementation.  */

bool
linux_nat_target::always_non_stop_p ()
{
  return true;
}
/* target_supports_multi_process implementation: this target can
   debug multiple inferiors simultaneously.  */

bool
linux_nat_target::supports_multi_process ()
{
  return true;
}
/* target_supports_disable_randomization implementation: address
   space randomization can be disabled for new inferiors.  */

bool
linux_nat_target::supports_disable_randomization ()
{
  return true;
}
/* SIGCHLD handler.  It serves two purposes: in non-stop/async mode
   it lets us notice when any child changes state and notify the
   event loop; and it allows linux_nat_wait_1 above to use sigsuspend
   to wait for the arrival of a SIGCHLD.  Only async-signal-safe
   operations may be performed here.  */

static void
sigchld_handler (int signo)
{
  /* Preserve errno: the interrupted code may be in the middle of
     inspecting it.  */
  int old_errno = errno;

  if (debug_linux_nat)
    gdb_stdlog->write_async_safe ("sigchld\n", sizeof ("sigchld\n") - 1);

  if (signo == SIGCHLD)
    {
      /* Let the event loop know that there are events to handle.  */
      linux_nat_target::async_file_mark_if_open ();
    }

  errno = old_errno;
}
/* Callback registered with the target events file descriptor.
   ERROR and CLIENT_DATA are part of the event-loop callback
   signature but are not needed here.  */

static void
handle_target_event (int error, gdb_client_data client_data)
{
  inferior_event_handler (INF_REG_EVENT);
}
4382 /* target_async implementation. */
4384 void
4385 linux_nat_target::async (bool enable)
4387 if (enable == is_async_p ())
4388 return;
4390 /* Block child signals while we create/destroy the pipe, as their
4391 handler writes to it. */
4392 gdb::block_signals blocker;
4394 if (enable)
4396 if (!async_file_open ())
4397 internal_error ("creating event pipe failed.");
4399 add_file_handler (async_wait_fd (), handle_target_event, NULL,
4400 "linux-nat");
4402 /* There may be pending events to handle. Tell the event loop
4403 to poll them. */
4404 async_file_mark ();
4406 else
4408 delete_file_handler (async_wait_fd ());
4409 async_file_close ();
4413 /* Stop an LWP, and push a GDB_SIGNAL_0 stop status if no other
4414 event came out. */
4416 static int
4417 linux_nat_stop_lwp (struct lwp_info *lwp)
4419 if (!lwp->stopped)
4421 linux_nat_debug_printf ("running -> suspending %s",
4422 lwp->ptid.to_string ().c_str ());
4425 if (lwp->last_resume_kind == resume_stop)
4427 linux_nat_debug_printf ("already stopping LWP %ld at GDB's request",
4428 lwp->ptid.lwp ());
4429 return 0;
4432 stop_callback (lwp);
4433 lwp->last_resume_kind = resume_stop;
4435 else
4437 /* Already known to be stopped; do nothing. */
4439 if (debug_linux_nat)
4441 if (linux_target->find_thread (lwp->ptid)->stop_requested)
4442 linux_nat_debug_printf ("already stopped/stop_requested %s",
4443 lwp->ptid.to_string ().c_str ());
4444 else
4445 linux_nat_debug_printf ("already stopped/no stop_requested yet %s",
4446 lwp->ptid.to_string ().c_str ());
4449 return 0;
/* target_stop implementation: request a stop for every LWP matching
   PTID.  */

void
linux_nat_target::stop (ptid_t ptid)
{
  LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT;
  iterate_over_lwps (ptid, linux_nat_stop_lwp);
}
4459 /* Return the cached value of the processor core for thread PTID. */
4462 linux_nat_target::core_of_thread (ptid_t ptid)
4464 struct lwp_info *info = find_lwp_pid (ptid);
4466 if (info)
4467 return info->core;
4468 return -1;
4471 /* Implementation of to_filesystem_is_local. */
4473 bool
4474 linux_nat_target::filesystem_is_local ()
4476 struct inferior *inf = current_inferior ();
4478 if (inf->fake_pid_p || inf->pid == 0)
4479 return true;
4481 return linux_ns_same (inf->pid, LINUX_NS_MNT);
4484 /* Convert the INF argument passed to a to_fileio_* method
4485 to a process ID suitable for passing to its corresponding
4486 linux_mntns_* function. If INF is non-NULL then the
4487 caller is requesting the filesystem seen by INF. If INF
4488 is NULL then the caller is requesting the filesystem seen
4489 by the GDB. We fall back to GDB's filesystem in the case
4490 that INF is non-NULL but its PID is unknown. */
4492 static pid_t
4493 linux_nat_fileio_pid_of (struct inferior *inf)
4495 if (inf == NULL || inf->fake_pid_p || inf->pid == 0)
4496 return getpid ();
4497 else
4498 return inf->pid;
4501 /* Implementation of to_fileio_open. */
4504 linux_nat_target::fileio_open (struct inferior *inf, const char *filename,
4505 int flags, int mode, int warn_if_slow,
4506 fileio_error *target_errno)
4508 int nat_flags;
4509 mode_t nat_mode;
4510 int fd;
4512 if (fileio_to_host_openflags (flags, &nat_flags) == -1
4513 || fileio_to_host_mode (mode, &nat_mode) == -1)
4515 *target_errno = FILEIO_EINVAL;
4516 return -1;
4519 fd = linux_mntns_open_cloexec (linux_nat_fileio_pid_of (inf),
4520 filename, nat_flags, nat_mode);
4521 if (fd == -1)
4522 *target_errno = host_to_fileio_error (errno);
4524 return fd;
4527 /* Implementation of to_fileio_readlink. */
4529 std::optional<std::string>
4530 linux_nat_target::fileio_readlink (struct inferior *inf, const char *filename,
4531 fileio_error *target_errno)
4533 char buf[PATH_MAX];
4534 int len;
4536 len = linux_mntns_readlink (linux_nat_fileio_pid_of (inf),
4537 filename, buf, sizeof (buf));
4538 if (len < 0)
4540 *target_errno = host_to_fileio_error (errno);
4541 return {};
4544 return std::string (buf, len);
4547 /* Implementation of to_fileio_unlink. */
4550 linux_nat_target::fileio_unlink (struct inferior *inf, const char *filename,
4551 fileio_error *target_errno)
4553 int ret;
4555 ret = linux_mntns_unlink (linux_nat_fileio_pid_of (inf),
4556 filename);
4557 if (ret == -1)
4558 *target_errno = host_to_fileio_error (errno);
4560 return ret;
/* Implementation of the to_thread_events method.  Record whether the
   core wants thread creation/exit events reported.  */

void
linux_nat_target::thread_events (int enable)
{
  report_thread_events = enable;
}
4571 bool
4572 linux_nat_target::supports_set_thread_options (gdb_thread_options options)
4574 constexpr gdb_thread_options supported_options
4575 = GDB_THREAD_OPTION_CLONE | GDB_THREAD_OPTION_EXIT;
4576 return ((options & supported_options) == options);
linux_nat_target::linux_nat_target ()
{
  /* We don't change the stratum; this target will sit at
     process_stratum and thread_db will sit at thread_stratum.  This
     is a little strange, since this is a multi-threaded-capable
     target, but we want to be on the stack below thread_db, and we
     also want to be used for single-threaded processes.  */
}
4588 /* See linux-nat.h. */
4590 bool
4591 linux_nat_get_siginfo (ptid_t ptid, siginfo_t *siginfo)
4593 int pid = get_ptrace_pid (ptid);
4594 return ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, siginfo) == 0;
/* See nat/linux-nat.h.  */

ptid_t
current_lwp_ptid (void)
{
  /* Only valid when the current ptid identifies an LWP.  */
  gdb_assert (inferior_ptid.lwp_p ());
  return inferior_ptid;
}
4606 /* Implement 'maintenance info linux-lwps'. Displays some basic
4607 information about all the current lwp_info objects. */
4609 static void
4610 maintenance_info_lwps (const char *arg, int from_tty)
4612 if (all_lwps ().size () == 0)
4614 gdb_printf ("No Linux LWPs\n");
4615 return;
4618 /* Start the width at 8 to match the column heading below, then
4619 figure out the widest ptid string. We'll use this to build our
4620 output table below. */
4621 size_t ptid_width = 8;
4622 for (lwp_info *lp : all_lwps ())
4623 ptid_width = std::max (ptid_width, lp->ptid.to_string ().size ());
4625 /* Setup the table headers. */
4626 struct ui_out *uiout = current_uiout;
4627 ui_out_emit_table table_emitter (uiout, 2, -1, "linux-lwps");
4628 uiout->table_header (ptid_width, ui_left, "lwp-ptid", _("LWP Ptid"));
4629 uiout->table_header (9, ui_left, "thread-info", _("Thread ID"));
4630 uiout->table_body ();
4632 /* Display one table row for each lwp_info. */
4633 for (lwp_info *lp : all_lwps ())
4635 ui_out_emit_tuple tuple_emitter (uiout, "lwp-entry");
4637 thread_info *th = linux_target->find_thread (lp->ptid);
4639 uiout->field_string ("lwp-ptid", lp->ptid.to_string ().c_str ());
4640 if (th == nullptr)
4641 uiout->field_string ("thread-info", "None");
4642 else
4643 uiout->field_string ("thread-info", print_full_thread_id (th));
4645 uiout->message ("\n");
/* Module initialization: register debug settings and maintenance
   commands, install the SIGCHLD handler, and set up the signal masks
   used by the wait machinery.  */

void _initialize_linux_nat ();
void
_initialize_linux_nat ()
{
  add_setshow_boolean_cmd ("linux-nat", class_maintenance,
			   &debug_linux_nat, _("\
Set debugging of GNU/Linux native target."), _("\
Show debugging of GNU/Linux native target."), _("\
When on, print debug messages relating to the GNU/Linux native target."),
			   nullptr,
			   show_debug_linux_nat,
			   &setdebuglist, &showdebuglist);

  add_setshow_boolean_cmd ("linux-namespaces", class_maintenance,
			   &debug_linux_namespaces, _("\
Set debugging of GNU/Linux namespaces module."), _("\
Show debugging of GNU/Linux namespaces module."), _("\
Enables printf debugging output."),
			   NULL,
			   NULL,
			   &setdebuglist, &showdebuglist);

  /* Install a SIGCHLD handler.  SA_RESTART keeps interrupted
     syscalls restarting transparently.  */
  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  sigchld_action.sa_flags = SA_RESTART;

  /* Make it the default.  */
  sigaction (SIGCHLD, &sigchld_action, NULL);

  /* Make sure we don't block SIGCHLD during a sigsuspend.  */
  gdb_sigmask (SIG_SETMASK, NULL, &suspend_mask);
  sigdelset (&suspend_mask, SIGCHLD);

  sigemptyset (&blocked_mask);

  /* Initialize the LWP-id hash table used by find_lwp_pid.  */
  lwp_lwpid_htab_create ();

  add_cmd ("linux-lwps", class_maintenance, maintenance_info_lwps,
	   _("List the Linux LWPS."), &maintenanceinfolist);
}
/* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
   the GNU/Linux Threads library and therefore doesn't really belong
   here.  */

/* NPTL reserves the first two RT signals, but does not provide any
   way for the debugger to query the signal numbers - fortunately
   they don't change.  __SIGRTMIN is a glibc-internal macro naming
   the lowest real-time signal number.  */

static int lin_thread_signals[] = { __SIGRTMIN, __SIGRTMIN + 1 };
/* See linux-nat.h.  */

unsigned int
lin_thread_get_thread_signal_num (void)
{
  /* Number of entries in the lin_thread_signals table above.  */
  return sizeof (lin_thread_signals) / sizeof (lin_thread_signals[0]);
}
/* See linux-nat.h.  */

int
lin_thread_get_thread_signal (unsigned int i)
{
  /* I must index a valid entry of lin_thread_signals.  */
  gdb_assert (i < lin_thread_get_thread_signal_num ());
  return lin_thread_signals[i];
}