gdb/linux-nat.c (binutils-gdb.git)
1 /* GNU/Linux native-dependent code common to multiple platforms.
3 Copyright (C) 2001-2023 Free Software Foundation, Inc.
5 This file is part of GDB.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20 #include "defs.h"
21 #include "inferior.h"
22 #include "infrun.h"
23 #include "target.h"
24 #include "nat/linux-nat.h"
25 #include "nat/linux-waitpid.h"
26 #include "gdbsupport/gdb_wait.h"
27 #include <unistd.h>
28 #include <sys/syscall.h>
29 #include "nat/gdb_ptrace.h"
30 #include "linux-nat.h"
31 #include "nat/linux-ptrace.h"
32 #include "nat/linux-procfs.h"
33 #include "nat/linux-personality.h"
34 #include "linux-fork.h"
35 #include "gdbthread.h"
36 #include "gdbcmd.h"
37 #include "regcache.h"
38 #include "regset.h"
39 #include "inf-child.h"
40 #include "inf-ptrace.h"
41 #include "auxv.h"
42 #include <sys/procfs.h> /* for elf_gregset etc. */
43 #include "elf-bfd.h" /* for elfcore_write_* */
44 #include "gregset.h" /* for gregset */
45 #include "gdbcore.h" /* for get_exec_file */
46 #include <ctype.h> /* for isdigit */
47 #include <sys/stat.h> /* for struct stat */
48 #include <fcntl.h> /* for O_RDONLY */
49 #include "inf-loop.h"
50 #include "gdbsupport/event-loop.h"
51 #include "event-top.h"
52 #include <pwd.h>
53 #include <sys/types.h>
54 #include <dirent.h>
55 #include "xml-support.h"
56 #include <sys/vfs.h>
57 #include "solib.h"
58 #include "nat/linux-osdata.h"
59 #include "linux-tdep.h"
60 #include "symfile.h"
61 #include "gdbsupport/agent.h"
62 #include "tracepoint.h"
63 #include "target-descriptions.h"
64 #include "gdbsupport/filestuff.h"
65 #include "objfiles.h"
66 #include "nat/linux-namespaces.h"
67 #include "gdbsupport/block-signals.h"
68 #include "gdbsupport/fileio.h"
69 #include "gdbsupport/scope-exit.h"
70 #include "gdbsupport/gdb-sigmask.h"
71 #include "gdbsupport/common-debug.h"
72 #include <unordered_map>
74 /* This comment documents high-level logic of this file.
76 Waiting for events in sync mode
77 ===============================
79 When waiting for an event in a specific thread, we just use waitpid,
80 passing the specific pid, and not passing WNOHANG.
82 When waiting for an event in all threads, waitpid is not quite good:
84 - If the thread group leader exits while other threads in the thread
85 group still exist, waitpid(TGID, ...) hangs. That waitpid won't
86 return an exit status until the other threads in the group are
87 reaped.
89 - When a non-leader thread execs, that thread just vanishes without
90 reporting an exit (so we'd hang if we waited for it explicitly in
91 that case). The exec event is instead reported to the TGID pid.
93 The solution is to always use -1 and WNOHANG, together with
94 sigsuspend.
96 First, we use non-blocking waitpid to check for events. If nothing is
97 found, we use sigsuspend to wait for SIGCHLD. When SIGCHLD arrives,
98 it means something happened to a child process. As soon as we know
99 there's an event, we get back to calling nonblocking waitpid.
101 Note that SIGCHLD should be blocked between waitpid and sigsuspend
102 calls, so that we don't miss a signal. If SIGCHLD arrives in between,
103 when it's blocked, the signal becomes pending and sigsuspend
104 immediately notices it and returns. (A minimal illustrative sketch of this loop follows this comment block.)
106 Waiting for events in async mode (TARGET_WNOHANG)
107 =================================================
109 In async mode, GDB should always be ready to handle both user input
110 and target events, so neither blocking waitpid nor sigsuspend are
111 viable options. Instead, we should asynchronously notify the GDB main
112 event loop whenever there's an unprocessed event from the target. We
113 detect asynchronous target events by handling SIGCHLD signals. To
114 notify the event loop about target events, an event pipe is used
115 --- the pipe is registered as waitable event source in the event loop,
116 the event loop select/poll's on the read end of this pipe (as well on
117 other event sources, e.g., stdin), and the SIGCHLD handler marks the
118 event pipe to raise an event. This is more portable than relying on
119 pselect/ppoll, since on kernels that lack those syscalls, libc
120 emulates them with select/poll+sigprocmask, and that is racy
121 (a.k.a. plain broken). (An illustrative self-pipe sketch of this scheme also follows this comment block.)
123 Obviously, if we fail to notify the event loop when there's a target
124 event, it's bad. OTOH, if we notify the event loop when there's no
125 event from the target, linux_nat_wait will detect that there's no real
126 event to report, and return an event of type TARGET_WAITKIND_IGNORE.
127 This is mostly harmless, but it will waste time and is better avoided.
129 The main design point is that every time GDB is outside linux-nat.c,
130 we have a SIGCHLD handler installed that is called when something
131 happens to the target and notifies the GDB event loop. Whenever GDB
132 core decides to handle the event, and calls into linux-nat.c, we
133 process things as in sync mode, except that we never block in
134 sigsuspend.
136 While processing an event, we may end up momentarily blocked in
137 waitpid calls. Those waitpid calls, while blocking, are guaranteed to
138 return quickly. E.g., in all-stop mode, before reporting to the core
139 that an LWP hit a breakpoint, all LWPs are stopped by sending them
140 SIGSTOP, and synchronously waiting for the SIGSTOP to be reported.
141 Note that this is different from blocking indefinitely waiting for the
142 next event --- here, we're already handling an event.
144 Use of signals
145 ==============
147 We stop threads by sending a SIGSTOP. The use of SIGSTOP instead of another
148 signal is not entirely significant; we just need a signal to be delivered,
149 so that we can intercept it. SIGSTOP's advantage is that it can not be
150 blocked. A disadvantage is that it is not a real-time signal, so it can only
151 be queued once; we do not keep track of other sources of SIGSTOP.
153 Two other signals that can't be blocked are SIGCONT and SIGKILL. But we can't
154 use them, because they have special behavior when the signal is generated -
155 not when it is delivered. SIGCONT resumes the entire thread group and SIGKILL
156 kills the entire thread group.
158 A delivered SIGSTOP would stop the entire thread group, not just the thread we
159 tkill'd. But we never let the SIGSTOP be delivered; we always intercept and
160 cancel it (by PTRACE_CONT without passing SIGSTOP).
162 We could use a real-time signal instead. This would solve those problems; we
163 could use PTRACE_GETSIGINFO to locate the specific stop signals sent by GDB.
164 But we would still have to have some support for SIGSTOP, since PTRACE_ATTACH
165 generates it, and there are races with trying to find a signal that is not
166 blocked.
168 Exec events
169 ===========
171 The case of a thread group (process) with 3 or more threads, and a
172 thread other than the leader execs, is worth detailing:
174 On an exec, the Linux kernel destroys all threads except the execing
175 one in the thread group, and resets the execing thread's tid to the
176 tgid. No exit notification is sent for the execing thread -- from the
177 ptracer's perspective, it appears as though the execing thread just
178 vanishes. Until we reap all other threads except the leader and the
179 execing thread, the leader will be zombie, and the execing thread will
180 be in `D (disc sleep)' state. As soon as all other threads are
181 reaped, the execing thread changes its tid to the tgid, and the
182 previous (zombie) leader vanishes, giving place to the "new"
183 leader. */
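/* Editorial note: below is a minimal, self-contained sketch of the
   sync-mode wait loop described above (non-blocking waitpid plus
   sigsuspend, with SIGCHLD blocked in between so no wakeup is lost).
   It is illustrative only: the name wait_for_child_event_sketch is
   hypothetical, it assumes a SIGCHLD handler has already been
   installed (otherwise the default "ignored" disposition would not
   interrupt sigsuspend), and GDB's real loop additionally passes
   __WALL and uses the dedicated suspend_mask that is set up in
   _initialize_linux_nat.  */

#include <signal.h>
#include <sys/types.h>
#include <sys/wait.h>

static pid_t
wait_for_child_event_sketch (int *statusp)
{
  sigset_t chld_mask, prev_mask;
  pid_t pid;

  /* Block SIGCHLD so it cannot be delivered between the waitpid check
     and the sigsuspend call.  If it arrives while blocked, it stays
     pending and sigsuspend notices it immediately.  */
  sigemptyset (&chld_mask);
  sigaddset (&chld_mask, SIGCHLD);
  sigprocmask (SIG_BLOCK, &chld_mask, &prev_mask);

  for (;;)
    {
      /* Poll all children without blocking.  */
      pid = waitpid (-1, statusp, WNOHANG);
      if (pid != 0)
	break;	/* Either an event (pid > 0) or an error such as
		   ECHILD (pid == -1).  */

      /* No event yet: sleep with the previous mask (SIGCHLD unblocked)
	 until a signal arrives, then poll with waitpid again.  */
      sigsuspend (&prev_mask);
    }

  sigprocmask (SIG_SETMASK, &prev_mask, NULL);
  return pid;
}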
185 #ifndef O_LARGEFILE
186 #define O_LARGEFILE 0
187 #endif
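/* Editorial note: below is a minimal sketch of the async-mode
   notification scheme described in the comment above: a SIGCHLD
   handler marks an "event pipe" whose read end the event loop polls
   alongside its other event sources.  The names event_pipe_sketch,
   sigchld_marker_sketch and install_sigchld_marker_sketch are
   hypothetical and not GDB's; the real implementation lives in GDB's
   event-loop code and in the async support further down in this
   file.  */

#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <string.h>
#include <unistd.h>

static int event_pipe_sketch[2] = { -1, -1 };

/* Async-signal-safe SIGCHLD handler: just mark the pipe; the event
   loop does the real work later, outside of signal context.  */
static void
sigchld_marker_sketch (int sig)
{
  int saved_errno = errno;
  ssize_t n = write (event_pipe_sketch[1], "+", 1);
  (void) sig;
  (void) n;
  errno = saved_errno;
}

static int
install_sigchld_marker_sketch (void)
{
  struct sigaction sa;

  if (pipe (event_pipe_sketch) != 0)
    return -1;
  /* Non-blocking, so a full pipe can never wedge the handler.  */
  fcntl (event_pipe_sketch[0], F_SETFL, O_NONBLOCK);
  fcntl (event_pipe_sketch[1], F_SETFL, O_NONBLOCK);

  memset (&sa, 0, sizeof sa);
  sa.sa_handler = sigchld_marker_sketch;
  sigemptyset (&sa.sa_mask);
  sa.sa_flags = SA_RESTART;
  return sigaction (SIGCHLD, &sa, NULL);
}

/* The event loop would then wait on the pipe's read end together with
   its other sources (e.g. stdin) instead of blocking in waitpid; when
   the pipe becomes readable, it drains it and calls the target's wait
   method with TARGET_WNOHANG.  */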
189 struct linux_nat_target *linux_target;
191 /* Does the current host support PTRACE_GETREGSET? */
192 enum tribool have_ptrace_getregset = TRIBOOL_UNKNOWN;
194 /* When true, print debug messages relating to the linux native target. */
196 static bool debug_linux_nat;
198 /* Implement 'show debug linux-nat'. */
200 static void
201 show_debug_linux_nat (struct ui_file *file, int from_tty,
202 struct cmd_list_element *c, const char *value)
204 gdb_printf (file, _("Debugging of GNU/Linux native targets is %s.\n"),
205 value);
208 /* Print a linux-nat debug statement. */
210 #define linux_nat_debug_printf(fmt, ...) \
211 debug_prefixed_printf_cond (debug_linux_nat, "linux-nat", fmt, ##__VA_ARGS__)
213 /* Print "linux-nat" enter/exit debug statements. */
215 #define LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT \
216 scoped_debug_enter_exit (debug_linux_nat, "linux-nat")
218 struct simple_pid_list
220 int pid;
221 int status;
222 struct simple_pid_list *next;
224 static struct simple_pid_list *stopped_pids;
226 /* Whether target_thread_events is in effect. */
227 static int report_thread_events;
229 static int kill_lwp (int lwpid, int signo);
231 static int stop_callback (struct lwp_info *lp);
233 static void block_child_signals (sigset_t *prev_mask);
234 static void restore_child_signals_mask (sigset_t *prev_mask);
236 struct lwp_info;
237 static struct lwp_info *add_lwp (ptid_t ptid);
238 static void purge_lwp_list (int pid);
239 static void delete_lwp (ptid_t ptid);
240 static struct lwp_info *find_lwp_pid (ptid_t ptid);
242 static int lwp_status_pending_p (struct lwp_info *lp);
244 static void save_stop_reason (struct lwp_info *lp);
246 static bool proc_mem_file_is_writable ();
247 static void close_proc_mem_file (pid_t pid);
248 static void open_proc_mem_file (ptid_t ptid);
250 /* Return TRUE if LWP is the leader thread of the process. */
252 static bool
253 is_leader (lwp_info *lp)
255 return lp->ptid.pid () == lp->ptid.lwp ();
258 /* Convert an LWP's pending status to a std::string. */
260 static std::string
261 pending_status_str (lwp_info *lp)
263 gdb_assert (lwp_status_pending_p (lp));
265 if (lp->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
266 return lp->waitstatus.to_string ();
267 else
268 return status_to_str (lp->status);
272 /* LWP accessors. */
274 /* See nat/linux-nat.h. */
276 ptid_t
277 ptid_of_lwp (struct lwp_info *lwp)
279 return lwp->ptid;
282 /* See nat/linux-nat.h. */
284 void
285 lwp_set_arch_private_info (struct lwp_info *lwp,
286 struct arch_lwp_info *info)
288 lwp->arch_private = info;
291 /* See nat/linux-nat.h. */
293 struct arch_lwp_info *
294 lwp_arch_private_info (struct lwp_info *lwp)
296 return lwp->arch_private;
299 /* See nat/linux-nat.h. */
302 lwp_is_stopped (struct lwp_info *lwp)
304 return lwp->stopped;
307 /* See nat/linux-nat.h. */
309 enum target_stop_reason
310 lwp_stop_reason (struct lwp_info *lwp)
312 return lwp->stop_reason;
315 /* See nat/linux-nat.h. */
318 lwp_is_stepping (struct lwp_info *lwp)
320 return lwp->step;
324 /* Trivial list manipulation functions to keep track of a list of
325 new stopped processes. */
326 static void
327 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
329 struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);
331 new_pid->pid = pid;
332 new_pid->status = status;
333 new_pid->next = *listp;
334 *listp = new_pid;
337 static int
338 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
340 struct simple_pid_list **p;
342 for (p = listp; *p != NULL; p = &(*p)->next)
343 if ((*p)->pid == pid)
345 struct simple_pid_list *next = (*p)->next;
347 *statusp = (*p)->status;
348 xfree (*p);
349 *p = next;
350 return 1;
352 return 0;
355 /* Return the ptrace options that we want to try to enable. */
357 static int
358 linux_nat_ptrace_options (int attached)
360 int options = 0;
362 if (!attached)
363 options |= PTRACE_O_EXITKILL;
365 options |= (PTRACE_O_TRACESYSGOOD
366 | PTRACE_O_TRACEVFORKDONE
367 | PTRACE_O_TRACEVFORK
368 | PTRACE_O_TRACEFORK
369 | PTRACE_O_TRACEEXEC);
371 return options;
374 /* Initialize ptrace and procfs warnings and check for supported
375 ptrace features given PID.
377 ATTACHED should be nonzero iff we attached to the inferior. */
379 static void
380 linux_init_ptrace_procfs (pid_t pid, int attached)
382 int options = linux_nat_ptrace_options (attached);
384 linux_enable_event_reporting (pid, options);
385 linux_ptrace_init_warnings ();
386 linux_proc_init_warnings ();
387 proc_mem_file_is_writable ();
390 linux_nat_target::~linux_nat_target ()
393 void
394 linux_nat_target::post_attach (int pid)
396 linux_init_ptrace_procfs (pid, 1);
399 /* Implement the virtual inf_ptrace_target::post_startup_inferior method. */
401 void
402 linux_nat_target::post_startup_inferior (ptid_t ptid)
404 linux_init_ptrace_procfs (ptid.pid (), 0);
407 /* Return the number of known LWPs in the tgid given by PID. */
409 static int
410 num_lwps (int pid)
412 int count = 0;
414 for (const lwp_info *lp ATTRIBUTE_UNUSED : all_lwps ())
415 if (lp->ptid.pid () == pid)
416 count++;
418 return count;
421 /* Deleter for lwp_info unique_ptr specialisation. */
423 struct lwp_deleter
425 void operator() (struct lwp_info *lwp) const
427 delete_lwp (lwp->ptid);
431 /* A unique_ptr specialisation for lwp_info. */
433 typedef std::unique_ptr<struct lwp_info, lwp_deleter> lwp_info_up;
435 /* Target hook for follow_fork. */
437 void
438 linux_nat_target::follow_fork (inferior *child_inf, ptid_t child_ptid,
439 target_waitkind fork_kind, bool follow_child,
440 bool detach_fork)
442 inf_ptrace_target::follow_fork (child_inf, child_ptid, fork_kind,
443 follow_child, detach_fork);
445 if (!follow_child)
447 bool has_vforked = fork_kind == TARGET_WAITKIND_VFORKED;
448 ptid_t parent_ptid = inferior_ptid;
449 int parent_pid = parent_ptid.lwp ();
450 int child_pid = child_ptid.lwp ();
452 /* We're already attached to the parent, by default. */
453 lwp_info *child_lp = add_lwp (child_ptid);
454 child_lp->stopped = 1;
455 child_lp->last_resume_kind = resume_stop;
457 /* Detach new forked process? */
458 if (detach_fork)
460 int child_stop_signal = 0;
461 bool detach_child = true;
463 /* Move CHILD_LP into a unique_ptr and clear the source pointer
464 to prevent us doing anything stupid with it. */
465 lwp_info_up child_lp_ptr (child_lp);
466 child_lp = nullptr;
468 linux_target->low_prepare_to_resume (child_lp_ptr.get ());
470 /* When debugging an inferior in an architecture that supports
471 hardware single stepping on a kernel without commit
472 6580807da14c423f0d0a708108e6df6ebc8bc83d, the vfork child
473 process starts with the TIF_SINGLESTEP/X86_EFLAGS_TF bits
474 set if the parent process had them set.
475 To work around this, single step the child process
476 once before detaching to clear the flags. */
478 /* Note that we consult the parent's architecture instead of
479 the child's because there's no inferior for the child at
480 this point. */
481 if (!gdbarch_software_single_step_p (target_thread_architecture
482 (parent_ptid)))
484 int status;
486 linux_disable_event_reporting (child_pid);
487 if (ptrace (PTRACE_SINGLESTEP, child_pid, 0, 0) < 0)
488 perror_with_name (_("Couldn't do single step"));
489 if (my_waitpid (child_pid, &status, 0) < 0)
490 perror_with_name (_("Couldn't wait vfork process"));
491 else
493 detach_child = WIFSTOPPED (status);
494 child_stop_signal = WSTOPSIG (status);
498 if (detach_child)
500 int signo = child_stop_signal;
502 if (signo != 0
503 && !signal_pass_state (gdb_signal_from_host (signo)))
504 signo = 0;
505 ptrace (PTRACE_DETACH, child_pid, 0, signo);
507 close_proc_mem_file (child_pid);
511 if (has_vforked)
513 lwp_info *parent_lp = find_lwp_pid (parent_ptid);
514 linux_nat_debug_printf ("waiting for VFORK_DONE on %d", parent_pid);
515 parent_lp->stopped = 1;
517 /* We'll handle the VFORK_DONE event like any other
518 event, in target_wait. */
521 else
523 struct lwp_info *child_lp;
525 child_lp = add_lwp (child_ptid);
526 child_lp->stopped = 1;
527 child_lp->last_resume_kind = resume_stop;
533 linux_nat_target::insert_fork_catchpoint (int pid)
535 return 0;
539 linux_nat_target::remove_fork_catchpoint (int pid)
541 return 0;
545 linux_nat_target::insert_vfork_catchpoint (int pid)
547 return 0;
551 linux_nat_target::remove_vfork_catchpoint (int pid)
553 return 0;
557 linux_nat_target::insert_exec_catchpoint (int pid)
559 return 0;
563 linux_nat_target::remove_exec_catchpoint (int pid)
565 return 0;
569 linux_nat_target::set_syscall_catchpoint (int pid, bool needed, int any_count,
570 gdb::array_view<const int> syscall_counts)
572 /* On GNU/Linux, we ignore the arguments. It means that we only
573 enable the syscall catchpoints, but do not disable them.
575 Also, we do not use the `syscall_counts' information because we do not
576 filter system calls here. We let GDB do the logic for us. */
577 return 0;
580 /* List of known LWPs, keyed by LWP PID. This speeds up the common
581 case of mapping a PID returned from the kernel to our corresponding
582 lwp_info data structure. */
583 static htab_t lwp_lwpid_htab;
585 /* Calculate a hash from a lwp_info's LWP PID. */
587 static hashval_t
588 lwp_info_hash (const void *ap)
590 const struct lwp_info *lp = (struct lwp_info *) ap;
591 pid_t pid = lp->ptid.lwp ();
593 return iterative_hash_object (pid, 0);
596 /* Equality function for the lwp_info hash table. Compares the LWP's
597 PID. */
599 static int
600 lwp_lwpid_htab_eq (const void *a, const void *b)
602 const struct lwp_info *entry = (const struct lwp_info *) a;
603 const struct lwp_info *element = (const struct lwp_info *) b;
605 return entry->ptid.lwp () == element->ptid.lwp ();
608 /* Create the lwp_lwpid_htab hash table. */
610 static void
611 lwp_lwpid_htab_create (void)
613 lwp_lwpid_htab = htab_create (100, lwp_info_hash, lwp_lwpid_htab_eq, NULL);
616 /* Add LP to the hash table. */
618 static void
619 lwp_lwpid_htab_add_lwp (struct lwp_info *lp)
621 void **slot;
623 slot = htab_find_slot (lwp_lwpid_htab, lp, INSERT);
624 gdb_assert (slot != NULL && *slot == NULL);
625 *slot = lp;
628 /* Head of doubly-linked list of known LWPs. Sorted by reverse
629 creation order. This order is assumed in some cases. E.g.,
630 reaping status after killing all lwps of a process: the leader LWP
631 must be reaped last. */
633 static intrusive_list<lwp_info> lwp_list;
635 /* See linux-nat.h. */
637 lwp_info_range
638 all_lwps ()
640 return lwp_info_range (lwp_list.begin ());
643 /* See linux-nat.h. */
645 lwp_info_safe_range
646 all_lwps_safe ()
648 return lwp_info_safe_range (lwp_list.begin ());
651 /* Add LP to sorted-by-reverse-creation-order doubly-linked list. */
653 static void
654 lwp_list_add (struct lwp_info *lp)
656 lwp_list.push_front (*lp);
659 /* Remove LP from sorted-by-reverse-creation-order doubly-linked
660 list. */
662 static void
663 lwp_list_remove (struct lwp_info *lp)
665 /* Remove from sorted-by-creation-order list. */
666 lwp_list.erase (lwp_list.iterator_to (*lp));
671 /* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
672 _initialize_linux_nat. */
673 static sigset_t suspend_mask;
675 /* Signals to block to make sigsuspend work. */
676 static sigset_t blocked_mask;
678 /* SIGCHLD action. */
679 static struct sigaction sigchld_action;
681 /* Block child signals (SIGCHLD and linux threads signals), and store
682 the previous mask in PREV_MASK. */
684 static void
685 block_child_signals (sigset_t *prev_mask)
687 /* Make sure SIGCHLD is blocked. */
688 if (!sigismember (&blocked_mask, SIGCHLD))
689 sigaddset (&blocked_mask, SIGCHLD);
691 gdb_sigmask (SIG_BLOCK, &blocked_mask, prev_mask);
694 /* Restore child signals mask, previously returned by
695 block_child_signals. */
697 static void
698 restore_child_signals_mask (sigset_t *prev_mask)
700 gdb_sigmask (SIG_SETMASK, prev_mask, NULL);
703 /* Mask of signals to pass directly to the inferior. */
704 static sigset_t pass_mask;
706 /* Update signals to pass to the inferior. */
707 void
708 linux_nat_target::pass_signals
709 (gdb::array_view<const unsigned char> pass_signals)
711 int signo;
713 sigemptyset (&pass_mask);
715 for (signo = 1; signo < NSIG; signo++)
717 int target_signo = gdb_signal_from_host (signo);
718 if (target_signo < pass_signals.size () && pass_signals[target_signo])
719 sigaddset (&pass_mask, signo);
725 /* Prototypes for local functions. */
726 static int stop_wait_callback (struct lwp_info *lp);
727 static int resume_stopped_resumed_lwps (struct lwp_info *lp, const ptid_t wait_ptid);
728 static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);
732 /* Destroy and free LP. */
734 lwp_info::~lwp_info ()
736 /* Let the arch specific bits release arch_lwp_info. */
737 linux_target->low_delete_thread (this->arch_private);
740 /* Traversal function for purge_lwp_list. */
742 static int
743 lwp_lwpid_htab_remove_pid (void **slot, void *info)
745 struct lwp_info *lp = (struct lwp_info *) *slot;
746 int pid = *(int *) info;
748 if (lp->ptid.pid () == pid)
750 htab_clear_slot (lwp_lwpid_htab, slot);
751 lwp_list_remove (lp);
752 delete lp;
755 return 1;
758 /* Remove all LWPs belonging to PID from the lwp list. */
760 static void
761 purge_lwp_list (int pid)
763 htab_traverse_noresize (lwp_lwpid_htab, lwp_lwpid_htab_remove_pid, &pid);
766 /* Add the LWP specified by PTID to the list. PTID is the first LWP
767 in the process. Return a pointer to the structure describing the
768 new LWP.
770 This differs from add_lwp in that we don't let the arch specific
771 bits know about this new thread. Current clients of this callback
772 take the opportunity to install watchpoints in the new thread, and
773 we shouldn't do that for the first thread. If we're spawning a
774 child ("run"), the thread executes the shell wrapper first, and we
775 shouldn't touch it until it execs the program we want to debug.
776 For "attach", it'd be okay to call the callback, but it's not
777 necessary, because watchpoints can't yet have been inserted into
778 the inferior. */
780 static struct lwp_info *
781 add_initial_lwp (ptid_t ptid)
783 gdb_assert (ptid.lwp_p ());
785 lwp_info *lp = new lwp_info (ptid);
788 /* Add to sorted-by-reverse-creation-order list. */
789 lwp_list_add (lp);
791 /* Add to keyed-by-pid htab. */
792 lwp_lwpid_htab_add_lwp (lp);
794 return lp;
797 /* Add the LWP specified by PID to the list. Return a pointer to the
798 structure describing the new LWP. The LWP should already be
799 stopped. */
801 static struct lwp_info *
802 add_lwp (ptid_t ptid)
804 struct lwp_info *lp;
806 lp = add_initial_lwp (ptid);
808 /* Let the arch specific bits know about this new thread. Current
809 clients of this callback take the opportunity to install
810 watchpoints in the new thread. We don't do this for the first
811 thread though. See add_initial_lwp. */
812 linux_target->low_new_thread (lp);
814 return lp;
817 /* Remove the LWP specified by PID from the list. */
819 static void
820 delete_lwp (ptid_t ptid)
822 lwp_info dummy (ptid);
824 void **slot = htab_find_slot (lwp_lwpid_htab, &dummy, NO_INSERT);
825 if (slot == NULL)
826 return;
828 lwp_info *lp = *(struct lwp_info **) slot;
829 gdb_assert (lp != NULL);
831 htab_clear_slot (lwp_lwpid_htab, slot);
833 /* Remove from sorted-by-creation-order list. */
834 lwp_list_remove (lp);
836 /* Release. */
837 delete lp;
840 /* Return a pointer to the structure describing the LWP corresponding
841 to PID. If no corresponding LWP could be found, return NULL. */
843 static struct lwp_info *
844 find_lwp_pid (ptid_t ptid)
846 int lwp;
848 if (ptid.lwp_p ())
849 lwp = ptid.lwp ();
850 else
851 lwp = ptid.pid ();
853 lwp_info dummy (ptid_t (0, lwp));
854 return (struct lwp_info *) htab_find (lwp_lwpid_htab, &dummy);
857 /* See nat/linux-nat.h. */
859 struct lwp_info *
860 iterate_over_lwps (ptid_t filter,
861 gdb::function_view<iterate_over_lwps_ftype> callback)
863 for (lwp_info *lp : all_lwps_safe ())
865 if (lp->ptid.matches (filter))
867 if (callback (lp) != 0)
868 return lp;
872 return NULL;
875 /* Update our internal state when changing from one checkpoint to
876 another indicated by NEW_PTID. We can only switch single-threaded
877 applications, so we only create one new LWP, and the previous list
878 is discarded. */
880 void
881 linux_nat_switch_fork (ptid_t new_ptid)
883 struct lwp_info *lp;
885 purge_lwp_list (inferior_ptid.pid ());
887 lp = add_lwp (new_ptid);
888 lp->stopped = 1;
890 /* This changes the thread's ptid while preserving the gdb thread
891 num. Also changes the inferior pid, while preserving the
892 inferior num. */
893 thread_change_ptid (linux_target, inferior_ptid, new_ptid);
895 /* We've just told GDB core that the thread changed target id, but,
896 in fact, it really is a different thread, with different register
897 contents. */
898 registers_changed ();
901 /* Handle the exit of a single thread LP. */
903 static void
904 exit_lwp (struct lwp_info *lp)
906 struct thread_info *th = linux_target->find_thread (lp->ptid);
908 if (th)
909 delete_thread (th);
911 delete_lwp (lp->ptid);
914 /* Wait for the LWP specified by LP, which we have just attached to.
915 Returns a wait status for that LWP, to cache. */
917 static int
918 linux_nat_post_attach_wait (ptid_t ptid, int *signalled)
920 pid_t new_pid, pid = ptid.lwp ();
921 int status;
923 if (linux_proc_pid_is_stopped (pid))
925 linux_nat_debug_printf ("Attaching to a stopped process");
927 /* The process is definitely stopped. It is in a job control
928 stop, unless the kernel predates the TASK_STOPPED /
929 TASK_TRACED distinction, in which case it might be in a
930 ptrace stop. Make sure it is in a ptrace stop; from there we
931 can kill it, signal it, et cetera.
933 First make sure there is a pending SIGSTOP. Since we are
934 already attached, the process can not transition from stopped
935 to running without a PTRACE_CONT; so we know this signal will
936 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
937 probably already in the queue (unless this kernel is old
938 enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP
939 is not an RT signal, it can only be queued once. */
940 kill_lwp (pid, SIGSTOP);
942 /* Finally, resume the stopped process. This will deliver the SIGSTOP
943 (or a higher priority signal, just like normal PTRACE_ATTACH). */
944 ptrace (PTRACE_CONT, pid, 0, 0);
947 /* Make sure the initial process is stopped. The user-level threads
948 layer might want to poke around in the inferior, and that won't
949 work if things haven't stabilized yet. */
950 new_pid = my_waitpid (pid, &status, __WALL);
951 gdb_assert (pid == new_pid);
953 if (!WIFSTOPPED (status))
955 /* The pid we tried to attach has apparently just exited. */
956 linux_nat_debug_printf ("Failed to stop %d: %s", pid,
957 status_to_str (status).c_str ());
958 return status;
961 if (WSTOPSIG (status) != SIGSTOP)
963 *signalled = 1;
964 linux_nat_debug_printf ("Received %s after attaching",
965 status_to_str (status).c_str ());
968 return status;
971 void
972 linux_nat_target::create_inferior (const char *exec_file,
973 const std::string &allargs,
974 char **env, int from_tty)
976 maybe_disable_address_space_randomization restore_personality
977 (disable_randomization);
979 /* The fork_child mechanism is synchronous and calls target_wait, so
980 we have to mask the async mode. */
982 /* Make sure we report all signals during startup. */
983 pass_signals ({});
985 inf_ptrace_target::create_inferior (exec_file, allargs, env, from_tty);
987 open_proc_mem_file (inferior_ptid);
990 /* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
991 already attached. Returns true if a new LWP is found, false
992 otherwise. */
994 static int
995 attach_proc_task_lwp_callback (ptid_t ptid)
997 struct lwp_info *lp;
999 /* Ignore LWPs we're already attached to. */
1000 lp = find_lwp_pid (ptid);
1001 if (lp == NULL)
1003 int lwpid = ptid.lwp ();
1005 if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) < 0)
1007 int err = errno;
1009 /* Be quiet if we simply raced with the thread exiting.
1010 EPERM is returned if the thread's task still exists, and
1011 is marked as exited or zombie, as well as other
1012 conditions, so in that case, confirm the status in
1013 /proc/PID/status. */
1014 if (err == ESRCH
1015 || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
1017 linux_nat_debug_printf
1018 ("Cannot attach to lwp %d: thread is gone (%d: %s)",
1019 lwpid, err, safe_strerror (err));
1022 else
1024 std::string reason
1025 = linux_ptrace_attach_fail_reason_string (ptid, err);
1027 warning (_("Cannot attach to lwp %d: %s"),
1028 lwpid, reason.c_str ());
1031 else
1033 linux_nat_debug_printf ("PTRACE_ATTACH %s, 0, 0 (OK)",
1034 ptid.to_string ().c_str ());
1036 lp = add_lwp (ptid);
1038 /* The next time we wait for this LWP we'll see a SIGSTOP as
1039 PTRACE_ATTACH brings it to a halt. */
1040 lp->signalled = 1;
1042 /* We need to wait for a stop before being able to make the
1043 next ptrace call on this LWP. */
1044 lp->must_set_ptrace_flags = 1;
1046 /* So that wait collects the SIGSTOP. */
1047 lp->resumed = 1;
1049 /* Also add the LWP to gdb's thread list, in case a
1050 matching libthread_db is not found (or the process uses
1051 raw clone). */
1052 add_thread (linux_target, lp->ptid);
1053 set_running (linux_target, lp->ptid, true);
1054 set_executing (linux_target, lp->ptid, true);
1057 return 1;
1059 return 0;
1062 void
1063 linux_nat_target::attach (const char *args, int from_tty)
1065 struct lwp_info *lp;
1066 int status;
1067 ptid_t ptid;
1069 /* Make sure we report all signals during attach. */
1070 pass_signals ({});
1074 inf_ptrace_target::attach (args, from_tty);
1076 catch (const gdb_exception_error &ex)
1078 pid_t pid = parse_pid_to_attach (args);
1079 std::string reason = linux_ptrace_attach_fail_reason (pid);
1081 if (!reason.empty ())
1082 throw_error (ex.error, "warning: %s\n%s", reason.c_str (),
1083 ex.what ());
1084 else
1085 throw_error (ex.error, "%s", ex.what ());
1088 /* The ptrace base target adds the main thread with (pid,0,0)
1089 format. Decorate it with lwp info. */
1090 ptid = ptid_t (inferior_ptid.pid (),
1091 inferior_ptid.pid ());
1092 thread_change_ptid (linux_target, inferior_ptid, ptid);
1094 /* Add the initial process as the first LWP to the list. */
1095 lp = add_initial_lwp (ptid);
1097 status = linux_nat_post_attach_wait (lp->ptid, &lp->signalled);
1098 if (!WIFSTOPPED (status))
1100 if (WIFEXITED (status))
1102 int exit_code = WEXITSTATUS (status);
1104 target_terminal::ours ();
1105 target_mourn_inferior (inferior_ptid);
1106 if (exit_code == 0)
1107 error (_("Unable to attach: program exited normally."));
1108 else
1109 error (_("Unable to attach: program exited with code %d."),
1110 exit_code);
1112 else if (WIFSIGNALED (status))
1114 enum gdb_signal signo;
1116 target_terminal::ours ();
1117 target_mourn_inferior (inferior_ptid);
1119 signo = gdb_signal_from_host (WTERMSIG (status));
1120 error (_("Unable to attach: program terminated with signal "
1121 "%s, %s."),
1122 gdb_signal_to_name (signo),
1123 gdb_signal_to_string (signo));
1126 internal_error (_("unexpected status %d for PID %ld"),
1127 status, (long) ptid.lwp ());
1130 lp->stopped = 1;
1132 open_proc_mem_file (lp->ptid);
1134 /* Save the wait status to report later. */
1135 lp->resumed = 1;
1136 linux_nat_debug_printf ("waitpid %ld, saving status %s",
1137 (long) lp->ptid.pid (),
1138 status_to_str (status).c_str ());
1140 lp->status = status;
1142 /* We must attach to every LWP. If /proc is mounted, use that to
1143 find them now. The inferior may be using raw clone instead of
1144 using pthreads. But even if it is using pthreads, thread_db
1145 walks structures in the inferior's address space to find the list
1146 of threads/LWPs, and those structures may well be corrupted.
1147 Note that once thread_db is loaded, we'll still use it to list
1148 threads and associate pthread info with each LWP. */
1149 linux_proc_attach_tgid_threads (lp->ptid.pid (),
1150 attach_proc_task_lwp_callback);
1153 /* Ptrace-detach the thread with pid PID. */
1155 static void
1156 detach_one_pid (int pid, int signo)
1158 if (ptrace (PTRACE_DETACH, pid, 0, signo) < 0)
1160 int save_errno = errno;
1162 /* We know the thread exists, so ESRCH must mean the lwp is
1163 zombie. This can happen if one of the already-detached
1164 threads exits the whole thread group. In that case we're
1165 still attached, and must reap the lwp. */
1166 if (save_errno == ESRCH)
1168 int ret, status;
1170 ret = my_waitpid (pid, &status, __WALL);
1171 if (ret == -1)
1173 warning (_("Couldn't reap LWP %d while detaching: %s"),
1174 pid, safe_strerror (errno));
1176 else if (!WIFEXITED (status) && !WIFSIGNALED (status))
1178 warning (_("Reaping LWP %d while detaching "
1179 "returned unexpected status 0x%x"),
1180 pid, status);
1183 else
1184 error (_("Can't detach %d: %s"),
1185 pid, safe_strerror (save_errno));
1187 else
1188 linux_nat_debug_printf ("PTRACE_DETACH (%d, %s, 0) (OK)",
1189 pid, strsignal (signo));
1192 /* Get pending signal of THREAD as a host signal number, for detaching
1193 purposes. This is the signal the thread last stopped for, which we
1194 need to deliver to the thread when detaching, otherwise, it'd be
1195 suppressed/lost. */
1197 static int
1198 get_detach_signal (struct lwp_info *lp)
1200 enum gdb_signal signo = GDB_SIGNAL_0;
1202 /* If we paused threads momentarily, we may have stored pending
1203 events in lp->status or lp->waitstatus (see stop_wait_callback),
1204 and GDB core hasn't seen any signal for those threads.
1205 Otherwise, the last signal reported to the core is found in the
1206 thread object's stop_signal.
1208 There's a corner case that isn't handled here at present. Only
1209 if the thread stopped with a TARGET_WAITKIND_STOPPED does
1210 stop_signal make sense as a real signal to pass to the inferior.
1211 Some catchpoint related events, like
1212 TARGET_WAITKIND_(V)FORK|EXEC|SYSCALL, have their stop_signal set
1213 to GDB_SIGNAL_SIGTRAP when the catchpoint triggers. But,
1214 those traps are debug API (ptrace in our case) related and
1215 induced; the inferior wouldn't see them if it wasn't being
1216 traced. Hence, we should never pass them to the inferior, even
1217 when set to pass state. Since this corner case isn't handled by
1218 infrun.c when proceeding with a signal, for consistency, neither
1219 do we handle it here (or elsewhere in the file we check for
1220 signal pass state). Normally SIGTRAP isn't set to pass state, so
1221 this is really a corner case. */
1223 if (lp->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
1224 signo = GDB_SIGNAL_0; /* a pending ptrace event, not a real signal. */
1225 else if (lp->status)
1226 signo = gdb_signal_from_host (WSTOPSIG (lp->status));
1227 else
1229 thread_info *tp = linux_target->find_thread (lp->ptid);
1231 if (target_is_non_stop_p () && !tp->executing ())
1233 if (tp->has_pending_waitstatus ())
1235 /* If the thread has a pending event, and it was stopped with a
1236 signal, use that signal to resume it. If it has a pending
1237 event of another kind, it was not stopped with a signal, so
1238 resume it without a signal. */
1239 if (tp->pending_waitstatus ().kind () == TARGET_WAITKIND_STOPPED)
1240 signo = tp->pending_waitstatus ().sig ();
1241 else
1242 signo = GDB_SIGNAL_0;
1244 else
1245 signo = tp->stop_signal ();
1247 else if (!target_is_non_stop_p ())
1249 ptid_t last_ptid;
1250 process_stratum_target *last_target;
1252 get_last_target_status (&last_target, &last_ptid, nullptr);
1254 if (last_target == linux_target
1255 && lp->ptid.lwp () == last_ptid.lwp ())
1256 signo = tp->stop_signal ();
1260 if (signo == GDB_SIGNAL_0)
1262 linux_nat_debug_printf ("lwp %s has no pending signal",
1263 lp->ptid.to_string ().c_str ());
1265 else if (!signal_pass_state (signo))
1267 linux_nat_debug_printf
1268 ("lwp %s had signal %s but it is in no pass state",
1269 lp->ptid.to_string ().c_str (), gdb_signal_to_string (signo));
1271 else
1273 linux_nat_debug_printf ("lwp %s has pending signal %s",
1274 lp->ptid.to_string ().c_str (),
1275 gdb_signal_to_string (signo));
1277 return gdb_signal_to_host (signo);
1280 return 0;
1283 /* Detach from LP. If SIGNO_P is non-NULL, then it points to the
1284 signal number that should be passed to the LWP when detaching.
1285 Otherwise pass any pending signal the LWP may have, if any. */
1287 static void
1288 detach_one_lwp (struct lwp_info *lp, int *signo_p)
1290 LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT;
1292 linux_nat_debug_printf ("lwp %s (stopped = %d)",
1293 lp->ptid.to_string ().c_str (), lp->stopped);
1295 int lwpid = lp->ptid.lwp ();
1296 int signo;
1298 gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));
1300 /* If the lwp/thread we are about to detach has a pending fork event,
1301 there is a process GDB is attached to that the core of GDB doesn't know
1302 about. Detach from it. */
1304 /* Check in lwp_info::status. */
1305 if (WIFSTOPPED (lp->status) && linux_is_extended_waitstatus (lp->status))
1307 int event = linux_ptrace_get_extended_event (lp->status);
1309 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
1311 unsigned long child_pid;
1312 int ret = ptrace (PTRACE_GETEVENTMSG, lp->ptid.lwp (), 0, &child_pid);
1313 if (ret == 0)
1314 detach_one_pid (child_pid, 0);
1315 else
1316 perror_warning_with_name (_("Failed to detach fork child"));
1320 /* Check in lwp_info::waitstatus. */
1321 if (lp->waitstatus.kind () == TARGET_WAITKIND_VFORKED
1322 || lp->waitstatus.kind () == TARGET_WAITKIND_FORKED)
1323 detach_one_pid (lp->waitstatus.child_ptid ().pid (), 0);
1326 /* Check in thread_info::pending_waitstatus. */
1327 thread_info *tp = linux_target->find_thread (lp->ptid);
1328 if (tp->has_pending_waitstatus ())
1330 const target_waitstatus &ws = tp->pending_waitstatus ();
1332 if (ws.kind () == TARGET_WAITKIND_VFORKED
1333 || ws.kind () == TARGET_WAITKIND_FORKED)
1334 detach_one_pid (ws.child_ptid ().pid (), 0);
1337 /* Check in thread_info::pending_follow. */
1338 if (tp->pending_follow.kind () == TARGET_WAITKIND_VFORKED
1339 || tp->pending_follow.kind () == TARGET_WAITKIND_FORKED)
1340 detach_one_pid (tp->pending_follow.child_ptid ().pid (), 0);
1342 if (lp->status != 0)
1343 linux_nat_debug_printf ("Pending %s for %s on detach.",
1344 strsignal (WSTOPSIG (lp->status)),
1345 lp->ptid.to_string ().c_str ());
1347 /* If there is a pending SIGSTOP, get rid of it. */
1348 if (lp->signalled)
1350 linux_nat_debug_printf ("Sending SIGCONT to %s",
1351 lp->ptid.to_string ().c_str ());
1353 kill_lwp (lwpid, SIGCONT);
1354 lp->signalled = 0;
1357 if (signo_p == NULL)
1359 /* Pass on any pending signal for this LWP. */
1360 signo = get_detach_signal (lp);
1362 else
1363 signo = *signo_p;
1365 linux_nat_debug_printf ("preparing to resume lwp %s (stopped = %d)",
1366 lp->ptid.to_string ().c_str (),
1367 lp->stopped);
1369 /* Preparing to resume may try to write registers, and fail if the
1370 lwp is zombie. If that happens, ignore the error. We'll handle
1371 it below, when detach fails with ESRCH. */
1374 linux_target->low_prepare_to_resume (lp);
1376 catch (const gdb_exception_error &ex)
1378 if (!check_ptrace_stopped_lwp_gone (lp))
1379 throw;
1382 detach_one_pid (lwpid, signo);
1384 delete_lwp (lp->ptid);
1387 static int
1388 detach_callback (struct lwp_info *lp)
1390 /* We don't actually detach from the thread group leader just yet.
1391 If the thread group exits, we must reap the zombie clone lwps
1392 before we're able to reap the leader. */
1393 if (lp->ptid.lwp () != lp->ptid.pid ())
1394 detach_one_lwp (lp, NULL);
1395 return 0;
1398 void
1399 linux_nat_target::detach (inferior *inf, int from_tty)
1401 LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT;
1403 struct lwp_info *main_lwp;
1404 int pid = inf->pid;
1406 /* Don't unregister from the event loop, as there may be other
1407 inferiors running. */
1409 /* Stop all threads before detaching. ptrace requires that the
1410 thread is stopped to successfully detach. */
1411 iterate_over_lwps (ptid_t (pid), stop_callback);
1412 /* ... and wait until all of them have reported back that
1413 they're no longer running. */
1414 iterate_over_lwps (ptid_t (pid), stop_wait_callback);
1416 /* We can now safely remove breakpoints. We don't do this earlier
1417 in common code because this target doesn't currently support
1418 writing memory while the inferior is running. */
1419 remove_breakpoints_inf (current_inferior ());
1421 iterate_over_lwps (ptid_t (pid), detach_callback);
1423 /* Only the initial process should be left right now. */
1424 gdb_assert (num_lwps (pid) == 1);
1426 main_lwp = find_lwp_pid (ptid_t (pid));
1428 if (forks_exist_p ())
1430 /* Multi-fork case. The current inferior_ptid is being detached
1431 from, but there are other viable forks to debug. Detach from
1432 the current fork, and context-switch to the first
1433 available. */
1434 linux_fork_detach (from_tty);
1436 else
1438 target_announce_detach (from_tty);
1440 /* Pass on any pending signal for the last LWP. */
1441 int signo = get_detach_signal (main_lwp);
1443 detach_one_lwp (main_lwp, &signo);
1445 detach_success (inf);
1448 close_proc_mem_file (pid);
1451 /* Resume execution of the inferior process. If STEP is nonzero,
1452 single-step it. If SIGNAL is nonzero, give it that signal. */
1454 static void
1455 linux_resume_one_lwp_throw (struct lwp_info *lp, int step,
1456 enum gdb_signal signo)
1458 lp->step = step;
1460 /* stop_pc doubles as the PC the LWP had when it was last resumed.
1461 We only presently need that if the LWP is stepped, though (to
1462 handle the case of stepping a breakpoint instruction). */
1463 if (step)
1465 struct regcache *regcache = get_thread_regcache (linux_target, lp->ptid);
1467 lp->stop_pc = regcache_read_pc (regcache);
1469 else
1470 lp->stop_pc = 0;
1472 linux_target->low_prepare_to_resume (lp);
1473 linux_target->low_resume (lp->ptid, step, signo);
1475 /* Successfully resumed. Clear state that no longer makes sense,
1476 and mark the LWP as running. Must not do this before resuming
1477 otherwise if that fails other code will be confused. E.g., we'd
1478 later try to stop the LWP and hang forever waiting for a stop
1479 status. Note that we must not throw after this is cleared,
1480 otherwise handle_zombie_lwp_error would get confused. */
1481 lp->stopped = 0;
1482 lp->core = -1;
1483 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
1484 registers_changed_ptid (linux_target, lp->ptid);
1487 /* Called when we try to resume a stopped LWP and that errors out. If
1488 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
1489 or about to become), discard the error, clear any pending status
1490 the LWP may have, and return true (we'll collect the exit status
1491 soon enough). Otherwise, return false. */
1493 static int
1494 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
1496 /* If we get an error after resuming the LWP successfully, we'd
1497 confuse !T state for the LWP being gone. */
1498 gdb_assert (lp->stopped);
1500 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
1501 because even if ptrace failed with ESRCH, the tracee may be "not
1502 yet fully dead", but already refusing ptrace requests. In that
1503 case the tracee has 'R (Running)' state for a little bit
1504 (observed in Linux 3.18). See also the note on ESRCH in the
1505 ptrace(2) man page. Instead, check whether the LWP has any state
1506 other than ptrace-stopped. */
1508 /* Don't assume anything if /proc/PID/status can't be read. */
1509 if (linux_proc_pid_is_trace_stopped_nowarn (lp->ptid.lwp ()) == 0)
1511 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
1512 lp->status = 0;
1513 lp->waitstatus.set_ignore ();
1514 return 1;
1516 return 0;
1519 /* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
1520 disappears while we try to resume it. */
1522 static void
1523 linux_resume_one_lwp (struct lwp_info *lp, int step, enum gdb_signal signo)
1527 linux_resume_one_lwp_throw (lp, step, signo);
1529 catch (const gdb_exception_error &ex)
1531 if (!check_ptrace_stopped_lwp_gone (lp))
1532 throw;
1536 /* Resume LP. */
1538 static void
1539 resume_lwp (struct lwp_info *lp, int step, enum gdb_signal signo)
1541 if (lp->stopped)
1543 struct inferior *inf = find_inferior_ptid (linux_target, lp->ptid);
1545 if (inf->vfork_child != NULL)
1547 linux_nat_debug_printf ("Not resuming sibling %s (vfork parent)",
1548 lp->ptid.to_string ().c_str ());
1550 else if (!lwp_status_pending_p (lp))
1552 linux_nat_debug_printf ("Resuming sibling %s, %s, %s",
1553 lp->ptid.to_string ().c_str (),
1554 (signo != GDB_SIGNAL_0
1555 ? strsignal (gdb_signal_to_host (signo))
1556 : "0"),
1557 step ? "step" : "resume");
1559 linux_resume_one_lwp (lp, step, signo);
1561 else
1563 linux_nat_debug_printf ("Not resuming sibling %s (has pending)",
1564 lp->ptid.to_string ().c_str ());
1567 else
1568 linux_nat_debug_printf ("Not resuming sibling %s (not stopped)",
1569 lp->ptid.to_string ().c_str ());
1572 /* Callback for iterate_over_lwps. If LWP is EXCEPT, do nothing.
1573 Resume LWP with the last stop signal, if it is in pass state. */
1575 static int
1576 linux_nat_resume_callback (struct lwp_info *lp, struct lwp_info *except)
1578 enum gdb_signal signo = GDB_SIGNAL_0;
1580 if (lp == except)
1581 return 0;
1583 if (lp->stopped)
1585 struct thread_info *thread;
1587 thread = linux_target->find_thread (lp->ptid);
1588 if (thread != NULL)
1590 signo = thread->stop_signal ();
1591 thread->set_stop_signal (GDB_SIGNAL_0);
1595 resume_lwp (lp, 0, signo);
1596 return 0;
1599 static int
1600 resume_clear_callback (struct lwp_info *lp)
1602 lp->resumed = 0;
1603 lp->last_resume_kind = resume_stop;
1604 return 0;
1607 static int
1608 resume_set_callback (struct lwp_info *lp)
1610 lp->resumed = 1;
1611 lp->last_resume_kind = resume_continue;
1612 return 0;
1615 void
1616 linux_nat_target::resume (ptid_t scope_ptid, int step, enum gdb_signal signo)
1618 struct lwp_info *lp;
1620 linux_nat_debug_printf ("Preparing to %s %s, %s, inferior_ptid %s",
1621 step ? "step" : "resume",
1622 scope_ptid.to_string ().c_str (),
1623 (signo != GDB_SIGNAL_0
1624 ? strsignal (gdb_signal_to_host (signo)) : "0"),
1625 inferior_ptid.to_string ().c_str ());
1627 /* Mark the lwps we're resuming as resumed and update their
1628 last_resume_kind to resume_continue. */
1629 iterate_over_lwps (scope_ptid, resume_set_callback);
1631 lp = find_lwp_pid (inferior_ptid);
1632 gdb_assert (lp != NULL);
1634 /* Remember if we're stepping. */
1635 lp->last_resume_kind = step ? resume_step : resume_continue;
1637 /* If we have a pending wait status for this thread, there is no
1638 point in resuming the process. But first make sure that
1639 linux_nat_wait won't preemptively handle the event - we
1640 should never take this short-circuit if we are going to
1641 leave LP running, since we have skipped resuming all the
1642 other threads. This bit of code needs to be synchronized
1643 with linux_nat_wait. */
1645 if (lp->status && WIFSTOPPED (lp->status))
1647 if (!lp->step
1648 && WSTOPSIG (lp->status)
1649 && sigismember (&pass_mask, WSTOPSIG (lp->status)))
1651 linux_nat_debug_printf
1652 ("Not short circuiting for ignored status 0x%x", lp->status);
1654 /* FIXME: What should we do if we are supposed to continue
1655 this thread with a signal? */
1656 gdb_assert (signo == GDB_SIGNAL_0);
1657 signo = gdb_signal_from_host (WSTOPSIG (lp->status));
1658 lp->status = 0;
1662 if (lwp_status_pending_p (lp))
1664 /* FIXME: What should we do if we are supposed to continue
1665 this thread with a signal? */
1666 gdb_assert (signo == GDB_SIGNAL_0);
1668 linux_nat_debug_printf ("Short circuiting for status %s",
1669 pending_status_str (lp).c_str ());
1671 if (target_can_async_p ())
1673 target_async (true);
1674 /* Tell the event loop we have something to process. */
1675 async_file_mark ();
1677 return;
1680 /* No use iterating unless we're resuming other threads. */
1681 if (scope_ptid != lp->ptid)
1682 iterate_over_lwps (scope_ptid, [=] (struct lwp_info *info)
1684 return linux_nat_resume_callback (info, lp);
1687 linux_nat_debug_printf ("%s %s, %s (resume event thread)",
1688 step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
1689 lp->ptid.to_string ().c_str (),
1690 (signo != GDB_SIGNAL_0
1691 ? strsignal (gdb_signal_to_host (signo)) : "0"));
1693 linux_resume_one_lwp (lp, step, signo);
1696 /* Send a signal to an LWP. */
1698 static int
1699 kill_lwp (int lwpid, int signo)
1701 int ret;
1703 errno = 0;
1704 ret = syscall (__NR_tkill, lwpid, signo);
1705 if (errno == ENOSYS)
1707 /* If tkill fails, then we are not using nptl threads, a
1708 configuration we no longer support. */
1709 perror_with_name (("tkill"));
1711 return ret;
1714 /* Handle a GNU/Linux syscall trap wait response. If we see a syscall
1715 event, check if the core is interested in it: if not, ignore the
1716 event, and keep waiting; otherwise, we need to toggle the LWP's
1717 syscall entry/exit status, since the ptrace event itself doesn't
1718 indicate it, and report the trap to higher layers. */
1720 static int
1721 linux_handle_syscall_trap (struct lwp_info *lp, int stopping)
1723 struct target_waitstatus *ourstatus = &lp->waitstatus;
1724 struct gdbarch *gdbarch = target_thread_architecture (lp->ptid);
1725 thread_info *thread = linux_target->find_thread (lp->ptid);
1726 int syscall_number = (int) gdbarch_get_syscall_number (gdbarch, thread);
1728 if (stopping)
1730 /* If we're stopping threads, there's a SIGSTOP pending, which
1731 makes it so that the LWP reports an immediate syscall return,
1732 followed by the SIGSTOP. Skip seeing that "return" using
1733 PTRACE_CONT directly, and let stop_wait_callback collect the
1734 SIGSTOP. Later when the thread is resumed, a new syscall
1735 entry event. If we didn't do this (and returned 0), we'd
1736 leave a syscall entry pending, and our caller, by using
1737 PTRACE_CONT to collect the SIGSTOP, skips the syscall return
1738 itself. Later, when the user re-resumes this LWP, we'd see
1739 another syscall entry event and we'd mistake it for a return.
1741 If stop_wait_callback didn't force the SIGSTOP out of the LWP
1742 (leaving immediately with LWP->signalled set, without issuing
1743 a PTRACE_CONT), it would still be problematic to leave this
1744 syscall enter pending, as later when the thread is resumed,
1745 it would then see the same syscall exit mentioned above,
1746 followed by the delayed SIGSTOP, while the syscall didn't
1747 actually get to execute. It seems it would be even more
1748 confusing to the user. */
1750 linux_nat_debug_printf
1751 ("ignoring syscall %d for LWP %ld (stopping threads), resuming with "
1752 "PTRACE_CONT for SIGSTOP", syscall_number, lp->ptid.lwp ());
1754 lp->syscall_state = TARGET_WAITKIND_IGNORE;
1755 ptrace (PTRACE_CONT, lp->ptid.lwp (), 0, 0);
1756 lp->stopped = 0;
1757 return 1;
1760 /* Always update the entry/return state, even if this particular
1761 syscall isn't interesting to the core now. In async mode,
1762 the user could install a new catchpoint for this syscall
1763 between syscall enter/return, and we'll need to know to
1764 report a syscall return if that happens. */
1765 lp->syscall_state = (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
1766 ? TARGET_WAITKIND_SYSCALL_RETURN
1767 : TARGET_WAITKIND_SYSCALL_ENTRY);
1769 if (catch_syscall_enabled ())
1771 if (catching_syscall_number (syscall_number))
1773 /* Alright, an event to report. */
1774 if (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY)
1775 ourstatus->set_syscall_entry (syscall_number);
1776 else if (lp->syscall_state == TARGET_WAITKIND_SYSCALL_RETURN)
1777 ourstatus->set_syscall_return (syscall_number);
1778 else
1779 gdb_assert_not_reached ("unexpected syscall state");
1781 linux_nat_debug_printf
1782 ("stopping for %s of syscall %d for LWP %ld",
1783 (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
1784 ? "entry" : "return"), syscall_number, lp->ptid.lwp ());
1786 return 0;
1789 linux_nat_debug_printf
1790 ("ignoring %s of syscall %d for LWP %ld",
1791 (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
1792 ? "entry" : "return"), syscall_number, lp->ptid.lwp ());
1794 else
1796 /* If we had been syscall tracing, and hence used PT_SYSCALL
1797 before on this LWP, it could happen that the user removes all
1798 syscall catchpoints before we get to process this event.
1799 There are two noteworthy issues here:
1801 - When stopped at a syscall entry event, resuming with
1802 PT_STEP still resumes executing the syscall and reports a
1803 syscall return.
1805 - Only PT_SYSCALL catches syscall enters. If we last
1806 single-stepped this thread, then this event can't be a
1807 syscall enter. If we last single-stepped this thread, this
1808 has to be a syscall exit.
1810 The points above mean that the next resume, be it PT_STEP or
1811 PT_CONTINUE, can not trigger a syscall trace event. */
1812 linux_nat_debug_printf
1813 ("caught syscall event with no syscall catchpoints. %d for LWP %ld, "
1814 "ignoring", syscall_number, lp->ptid.lwp ());
1815 lp->syscall_state = TARGET_WAITKIND_IGNORE;
1818 /* The core isn't interested in this event. For efficiency, avoid
1819 stopping all threads only to have the core resume them all again.
1820 Since we're not stopping threads, if we're still syscall tracing
1821 and not stepping, we can't use PTRACE_CONT here, as we'd miss any
1822 subsequent syscall. Simply resume using the inf-ptrace layer,
1823 which knows when to use PT_SYSCALL or PT_CONTINUE. */
1825 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
1826 return 1;
1829 /* Handle a GNU/Linux extended wait response. If we see a clone
1830 event, we need to add the new LWP to our list (and not report the
1831 trap to higher layers). This function returns non-zero if the
1832 event should be ignored and we should wait again. */
1835 static int
1836 linux_handle_extended_wait (struct lwp_info *lp, int status)
1838 int pid = lp->ptid.lwp ();
1839 struct target_waitstatus *ourstatus = &lp->waitstatus;
1840 int event = linux_ptrace_get_extended_event (status);
1842 /* All extended events we currently use are mid-syscall. Only
1843 PTRACE_EVENT_STOP is delivered more like a signal-stop, but
1844 you have to be using PTRACE_SEIZE to get that. */
1845 lp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
1847 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
1848 || event == PTRACE_EVENT_CLONE)
1850 unsigned long new_pid;
1851 int ret;
1853 ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
1855 /* If we haven't already seen the new PID stop, wait for it now. */
1856 if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
1858 /* The new child has a pending SIGSTOP. We can't affect it until it
1859 hits the SIGSTOP, but we're already attached. */
1860 ret = my_waitpid (new_pid, &status, __WALL);
1861 if (ret == -1)
1862 perror_with_name (_("waiting for new child"));
1863 else if (ret != new_pid)
1864 internal_error (_("wait returned unexpected PID %d"), ret);
1865 else if (!WIFSTOPPED (status))
1866 internal_error (_("wait returned unexpected status 0x%x"), status);
1869 ptid_t child_ptid (new_pid, new_pid);
1871 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
1873 open_proc_mem_file (child_ptid);
1875 /* The arch-specific native code may need to know about new
1876 forks even if those end up never mapped to an
1877 inferior. */
1878 linux_target->low_new_fork (lp, new_pid);
1880 else if (event == PTRACE_EVENT_CLONE)
1882 linux_target->low_new_clone (lp, new_pid);
1885 if (event == PTRACE_EVENT_FORK
1886 && linux_fork_checkpointing_p (lp->ptid.pid ()))
1888 /* Handle checkpointing by linux-fork.c here as a special
1889 case. We don't want the follow-fork-mode or 'catch fork'
1890 to interfere with this. */
1892 /* This won't actually modify the breakpoint list, but will
1893 physically remove the breakpoints from the child. */
1894 detach_breakpoints (ptid_t (new_pid, new_pid));
1896 /* Retain child fork in ptrace (stopped) state. */
1897 if (!find_fork_pid (new_pid))
1898 add_fork (new_pid);
1900 /* Report as spurious, so that infrun doesn't want to follow
1901 this fork. We're actually doing an infcall in
1902 linux-fork.c. */
1903 ourstatus->set_spurious ();
1905 /* Report the stop to the core. */
1906 return 0;
1909 if (event == PTRACE_EVENT_FORK)
1910 ourstatus->set_forked (child_ptid);
1911 else if (event == PTRACE_EVENT_VFORK)
1912 ourstatus->set_vforked (child_ptid);
1913 else if (event == PTRACE_EVENT_CLONE)
1915 struct lwp_info *new_lp;
1917 ourstatus->set_ignore ();
1919 linux_nat_debug_printf
1920 ("Got clone event from LWP %d, new child is LWP %ld", pid, new_pid);
1922 new_lp = add_lwp (ptid_t (lp->ptid.pid (), new_pid));
1923 new_lp->stopped = 1;
1924 new_lp->resumed = 1;
1926 /* If the thread_db layer is active, let it record the user
1927 level thread id and status, and add the thread to GDB's
1928 list. */
1929 if (!thread_db_notice_clone (lp->ptid, new_lp->ptid))
1931 /* The process is not using thread_db. Add the LWP to
1932 GDB's list. */
1933 add_thread (linux_target, new_lp->ptid);
1936 /* Even if we're stopping the thread for some reason
1937 internal to this module, from the perspective of infrun
1938 and the user/frontend, this new thread is running until
1939 it next reports a stop. */
1940 set_running (linux_target, new_lp->ptid, true);
1941 set_executing (linux_target, new_lp->ptid, true);
1943 if (WSTOPSIG (status) != SIGSTOP)
1945 /* This can happen if someone starts sending signals with
1946 a lower number than SIGSTOP (e.g. SIGUSR1) to the new
1947 thread before it gets a chance to run. This is an
1948 unlikely case, and harder to handle for fork / vfork
1949 than for clone, so we do not try - but we handle it
1950 for clone events here. */
1952 new_lp->signalled = 1;
1954 /* We created NEW_LP so it cannot yet contain STATUS. */
1955 gdb_assert (new_lp->status == 0);
1957 /* Save the wait status to report later. */
1958 linux_nat_debug_printf
1959 ("waitpid of new LWP %ld, saving status %s",
1960 (long) new_lp->ptid.lwp (), status_to_str (status).c_str ());
1961 new_lp->status = status;
1963 else if (report_thread_events)
1965 new_lp->waitstatus.set_thread_created ();
1966 new_lp->status = status;
1969 return 1;
1972 return 0;
1975 if (event == PTRACE_EVENT_EXEC)
1977 linux_nat_debug_printf ("Got exec event from LWP %ld", lp->ptid.lwp ());
1979 /* Close the previous /proc/PID/mem file for this inferior,
1980 which was using the address space that is now gone.
1981 Reading from or writing to this file would return 0/EOF. */
1982 close_proc_mem_file (lp->ptid.pid ());
1984 /* Open a new file for the new address space. */
1985 open_proc_mem_file (lp->ptid);
1987 ourstatus->set_execd
1988 (make_unique_xstrdup (linux_proc_pid_to_exec_file (pid)));
1990 /* The thread that execed must have been resumed, but, when a
1991 thread execs, it changes its tid to the tgid, and the old
1992 tgid thread might not have been resumed. */
1993 lp->resumed = 1;
1994 return 0;
1997 if (event == PTRACE_EVENT_VFORK_DONE)
1999 linux_nat_debug_printf
2000 ("Got PTRACE_EVENT_VFORK_DONE from LWP %ld",
2001 lp->ptid.lwp ());
2002 ourstatus->set_vfork_done ();
2003 return 0;
2006 internal_error (_("unknown ptrace event %d"), event);
2009 /* Suspend waiting for a signal. We're mostly interested in
2010 SIGCHLD/SIGINT. */
2012 static void
2013 wait_for_signal ()
2015 linux_nat_debug_printf ("about to sigsuspend");
2016 sigsuspend (&suspend_mask);
2018 /* If the quit flag is set, it means that the user pressed Ctrl-C
2019 and we're debugging a process that is running on a separate
2020 terminal, so we must forward the Ctrl-C to the inferior. (If the
2021 inferior is sharing GDB's terminal, then the Ctrl-C reaches the
2022 inferior directly.) We must do this here because functions that
2023 need to block waiting for a signal loop forever until there's an
2024 event to report before returning back to the event loop. */
2025 if (!target_terminal::is_ours ())
2027 if (check_quit_flag ())
2028 target_pass_ctrlc ();
2032 /* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
2033 exited. */
2035 static int
2036 wait_lwp (struct lwp_info *lp)
2038 pid_t pid;
2039 int status = 0;
2040 int thread_dead = 0;
2041 sigset_t prev_mask;
2043 gdb_assert (!lp->stopped);
2044 gdb_assert (lp->status == 0);
2046 /* Make sure SIGCHLD is blocked for sigsuspend avoiding a race below. */
2047 block_child_signals (&prev_mask);
2049 for (;;)
2051 pid = my_waitpid (lp->ptid.lwp (), &status, __WALL | WNOHANG);
2052 if (pid == -1 && errno == ECHILD)
2054 /* The thread has previously exited. We need to delete it
2055 now because if this was a non-leader thread execing, we
2056 won't get an exit event. See comments on exec events at
2057 the top of the file. */
2058 thread_dead = 1;
2059 linux_nat_debug_printf ("%s vanished.",
2060 lp->ptid.to_string ().c_str ());
2062 if (pid != 0)
2063 break;
2065 /* Bugs 10970, 12702.
2066 The thread group leader may have exited, in which case we'll lock up
2067 in waitpid if there are other threads, even if they are all zombies
2068 too. Basically, we're not supposed to use waitpid this way.
2069 tkill(pid,0) cannot be used here as it gets ESRCH for both
2070 zombie and running processes.
2072 As a workaround, check whether we're waiting for the thread group
2073 leader and, if it's a zombie, avoid calling waitpid on it.
2075 This is racy: what if the tgl becomes a zombie right after we check?
2076 Therefore always use WNOHANG with sigsuspend - it is equivalent to
2077 waiting in waitpid, but linux_proc_pid_is_zombie is safe this way. */
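/* For illustration only: a /proc-based zombie check along the lines of
   linux_proc_pid_is_zombie could be sketched as below.  This is a
   simplified stand-in, not GDB's actual implementation (which lives in
   nat/linux-procfs.c); it assumes <stdio.h> and <string.h> are
   available.  */
#if 0
static int
pid_is_zombie_sketch (pid_t pid)
{
  char path[64];
  char line[128];
  FILE *f;
  int zombie = 0;

  snprintf (path, sizeof (path), "/proc/%d/status", (int) pid);
  f = fopen (path, "r");
  if (f == NULL)
    return 0;

  /* Find the "State:" line; an uppercase 'Z' there means zombie.  */
  while (fgets (line, sizeof (line), f) != NULL)
    if (strncmp (line, "State:", 6) == 0)
      {
        zombie = (strchr (line + 6, 'Z') != NULL);
        break;
      }

  fclose (f);
  return zombie;
}
#endif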
2079 if (lp->ptid.pid () == lp->ptid.lwp ()
2080 && linux_proc_pid_is_zombie (lp->ptid.lwp ()))
2082 thread_dead = 1;
2083 linux_nat_debug_printf ("Thread group leader %s vanished.",
2084 lp->ptid.to_string ().c_str ());
2085 break;
2088 /* Wait for the next SIGCHLD and try again. This may let SIGCHLD
2089 handlers get invoked even though our caller had them intentionally
2090 blocked by block_child_signals. Only the loop in linux_nat_wait_1 is
2091 sensitive to this, and there, if we get called, my_waitpid is called
2092 again before it gets to sigsuspend, so we can safely let the handlers
2093 run here. */
2094 wait_for_signal ();
2097 restore_child_signals_mask (&prev_mask);
2099 if (!thread_dead)
2101 gdb_assert (pid == lp->ptid.lwp ());
2103 linux_nat_debug_printf ("waitpid %s received %s",
2104 lp->ptid.to_string ().c_str (),
2105 status_to_str (status).c_str ());
2107 /* Check if the thread has exited. */
2108 if (WIFEXITED (status) || WIFSIGNALED (status))
2110 if (report_thread_events
2111 || lp->ptid.pid () == lp->ptid.lwp ())
2113 linux_nat_debug_printf ("LWP %d exited.", lp->ptid.pid ());
2115 /* If this is the leader exiting, it means the whole
2116 process is gone. Store the status to report to the
2117 core. Store it in lp->waitstatus, because lp->status
2118 would be ambiguous (W_EXITCODE(0,0) == 0). */
2119 lp->waitstatus = host_status_to_waitstatus (status);
2120 return 0;
2123 thread_dead = 1;
2124 linux_nat_debug_printf ("%s exited.",
2125 lp->ptid.to_string ().c_str ());
2129 if (thread_dead)
2131 exit_lwp (lp);
2132 return 0;
2135 gdb_assert (WIFSTOPPED (status));
2136 lp->stopped = 1;
2138 if (lp->must_set_ptrace_flags)
2140 inferior *inf = find_inferior_pid (linux_target, lp->ptid.pid ());
2141 int options = linux_nat_ptrace_options (inf->attach_flag);
2143 linux_enable_event_reporting (lp->ptid.lwp (), options);
2144 lp->must_set_ptrace_flags = 0;
2147 /* Handle GNU/Linux's syscall SIGTRAPs. */
2148 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
2150 /* No longer need the sysgood bit. The ptrace event ends up
2151 recorded in lp->waitstatus if we care for it. We can carry
2152 on handling the event like a regular SIGTRAP from here
2153 on. */
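/* Note: on Linux, W_STOPCODE (SIG) is typically ((SIG) << 8 | 0x7f),
   i.e. a wait status for which WIFSTOPPED is true and WSTOPSIG yields
   SIG, so this rewrites the syscall-trap status as a plain SIGTRAP
   stop.  */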
2154 status = W_STOPCODE (SIGTRAP);
2155 if (linux_handle_syscall_trap (lp, 1))
2156 return wait_lwp (lp);
2158 else
2160 /* Almost all other ptrace-stops are known to be outside of system
2161 calls, with further exceptions in linux_handle_extended_wait. */
2162 lp->syscall_state = TARGET_WAITKIND_IGNORE;
2165 /* Handle GNU/Linux's extended waitstatus for trace events. */
2166 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
2167 && linux_is_extended_waitstatus (status))
2169 linux_nat_debug_printf ("Handling extended status 0x%06x", status);
2170 linux_handle_extended_wait (lp, status);
2171 return 0;
2174 return status;
2177 /* Send a SIGSTOP to LP. */
2179 static int
2180 stop_callback (struct lwp_info *lp)
2182 if (!lp->stopped && !lp->signalled)
2184 int ret;
2186 linux_nat_debug_printf ("kill %s **<SIGSTOP>**",
2187 lp->ptid.to_string ().c_str ());
2189 errno = 0;
2190 ret = kill_lwp (lp->ptid.lwp (), SIGSTOP);
2191 linux_nat_debug_printf ("lwp kill %d %s", ret,
2192 errno ? safe_strerror (errno) : "ERRNO-OK");
2194 lp->signalled = 1;
2195 gdb_assert (lp->status == 0);
2198 return 0;
2201 /* Request a stop on LWP. */
2203 void
2204 linux_stop_lwp (struct lwp_info *lwp)
2206 stop_callback (lwp);
2209 /* See linux-nat.h */
2211 void
2212 linux_stop_and_wait_all_lwps (void)
2214 /* Stop all LWP's ... */
2215 iterate_over_lwps (minus_one_ptid, stop_callback);
2217 /* ... and wait until all of them have reported back that
2218 they're no longer running. */
2219 iterate_over_lwps (minus_one_ptid, stop_wait_callback);
2222 /* See linux-nat.h */
2224 void
2225 linux_unstop_all_lwps (void)
2227 iterate_over_lwps (minus_one_ptid,
2228 [] (struct lwp_info *info)
2230 return resume_stopped_resumed_lwps (info, minus_one_ptid);
2234 /* Return non-zero if LWP PID has a pending SIGINT. */
2236 static int
2237 linux_nat_has_pending_sigint (int pid)
2239 sigset_t pending, blocked, ignored;
2241 linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
2243 if (sigismember (&pending, SIGINT)
2244 && !sigismember (&ignored, SIGINT))
2245 return 1;
2247 return 0;
2250 /* Set a flag in LP indicating that we should ignore its next SIGINT. */
2252 static int
2253 set_ignore_sigint (struct lwp_info *lp)
2255 /* If a thread has a pending SIGINT, consume it; otherwise, set a
2256 flag to consume the next one. */
2257 if (lp->stopped && lp->status != 0 && WIFSTOPPED (lp->status)
2258 && WSTOPSIG (lp->status) == SIGINT)
2259 lp->status = 0;
2260 else
2261 lp->ignore_sigint = 1;
2263 return 0;
2266 /* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
2267 This function is called after we know the LWP has stopped; if the LWP
2268 stopped before the expected SIGINT was delivered, then it will never have
2269 arrived. Also, if the signal was delivered to a shared queue and consumed
2270 by a different thread, it will never be delivered to this LWP. */
2272 static void
2273 maybe_clear_ignore_sigint (struct lwp_info *lp)
2275 if (!lp->ignore_sigint)
2276 return;
2278 if (!linux_nat_has_pending_sigint (lp->ptid.lwp ()))
2280 linux_nat_debug_printf ("Clearing bogus flag for %s",
2281 lp->ptid.to_string ().c_str ());
2282 lp->ignore_sigint = 0;
2286 /* Fetch the possibly triggered data watchpoint info and store it in LP.
2289 On some archs, like x86, that use debug registers to set
2290 watchpoints, it's possible that the way to know which watched
2291 address trapped, is to check the register that is used to select
2292 which address to watch. Problem is, between setting the watchpoint
2293 and reading back which data address trapped, the user may change
2294 the set of watchpoints, and, as a consequence, GDB changes the
2295 debug registers in the inferior. To avoid reading back a stale
2296 stopped-data-address when that happens, we cache in LP the fact
2297 that a watchpoint trapped, and the corresponding data address, as
2298 soon as we see LP stop with a SIGTRAP. If GDB changes the debug
2299 registers meanwhile, we have the cached data we can rely on. */
2301 static int
2302 check_stopped_by_watchpoint (struct lwp_info *lp)
2304 scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid);
2305 inferior_ptid = lp->ptid;
2307 if (linux_target->low_stopped_by_watchpoint ())
2309 lp->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2310 lp->stopped_data_address_p
2311 = linux_target->low_stopped_data_address (&lp->stopped_data_address);
2314 return lp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2317 /* Returns true if the LWP had stopped for a watchpoint. */
2319 bool
2320 linux_nat_target::stopped_by_watchpoint ()
2322 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2324 gdb_assert (lp != NULL);
2326 return lp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2329 bool
2330 linux_nat_target::stopped_data_address (CORE_ADDR *addr_p)
2332 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2334 gdb_assert (lp != NULL);
2336 *addr_p = lp->stopped_data_address;
2338 return lp->stopped_data_address_p;
2341 /* Commonly, any breakpoint / watchpoint generates only SIGTRAP. */
2343 bool
2344 linux_nat_target::low_status_is_event (int status)
2346 return WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP;
2349 /* Wait until LP is stopped. */
2351 static int
2352 stop_wait_callback (struct lwp_info *lp)
2354 inferior *inf = find_inferior_ptid (linux_target, lp->ptid);
2356 /* If this is a vfork parent, bail out; it is not going to report
2357 any SIGSTOP until the vfork is done. */
2358 if (inf->vfork_child != NULL)
2359 return 0;
2361 if (!lp->stopped)
2363 int status;
2365 status = wait_lwp (lp);
2366 if (status == 0)
2367 return 0;
2369 if (lp->ignore_sigint && WIFSTOPPED (status)
2370 && WSTOPSIG (status) == SIGINT)
2372 lp->ignore_sigint = 0;
2374 errno = 0;
2375 ptrace (PTRACE_CONT, lp->ptid.lwp (), 0, 0);
2376 lp->stopped = 0;
2377 linux_nat_debug_printf
2378 ("PTRACE_CONT %s, 0, 0 (%s) (discarding SIGINT)",
2379 lp->ptid.to_string ().c_str (),
2380 errno ? safe_strerror (errno) : "OK");
2382 return stop_wait_callback (lp);
2385 maybe_clear_ignore_sigint (lp);
2387 if (WSTOPSIG (status) != SIGSTOP)
2389 /* The thread was stopped with a signal other than SIGSTOP. */
2391 linux_nat_debug_printf ("Pending event %s in %s",
2392 status_to_str ((int) status).c_str (),
2393 lp->ptid.to_string ().c_str ());
2395 /* Save the sigtrap event. */
2396 lp->status = status;
2397 gdb_assert (lp->signalled);
2398 save_stop_reason (lp);
2400 else
2402 /* We caught the SIGSTOP that we intended to catch. */
2404 linux_nat_debug_printf ("Expected SIGSTOP caught for %s.",
2405 lp->ptid.to_string ().c_str ());
2407 lp->signalled = 0;
2409 /* If we are waiting for this stop so we can report the thread
2410 stopped then we need to record this status. Otherwise, we can
2411 now discard this stop event. */
2412 if (lp->last_resume_kind == resume_stop)
2414 lp->status = status;
2415 save_stop_reason (lp);
2420 return 0;
2423 /* Return non-zero if LP has a wait status pending. Discard the
2424 pending event and resume the LWP if the event that originally
2425 caused the stop became uninteresting. */
2427 static int
2428 status_callback (struct lwp_info *lp)
2430 /* Only report a pending wait status if we pretend that this has
2431 indeed been resumed. */
2432 if (!lp->resumed)
2433 return 0;
2435 if (!lwp_status_pending_p (lp))
2436 return 0;
2438 if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
2439 || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
2441 struct regcache *regcache = get_thread_regcache (linux_target, lp->ptid);
2442 CORE_ADDR pc;
2443 int discard = 0;
2445 pc = regcache_read_pc (regcache);
2447 if (pc != lp->stop_pc)
2449 linux_nat_debug_printf ("PC of %s changed. was=%s, now=%s",
2450 lp->ptid.to_string ().c_str (),
2451 paddress (target_gdbarch (), lp->stop_pc),
2452 paddress (target_gdbarch (), pc));
2453 discard = 1;
2456 #if !USE_SIGTRAP_SIGINFO
2457 else if (!breakpoint_inserted_here_p (regcache->aspace (), pc))
2459 linux_nat_debug_printf ("previous breakpoint of %s, at %s gone",
2460 lp->ptid.to_string ().c_str (),
2461 paddress (target_gdbarch (), lp->stop_pc));
2463 discard = 1;
2465 #endif
2467 if (discard)
2469 linux_nat_debug_printf ("pending event of %s cancelled.",
2470 lp->ptid.to_string ().c_str ());
2472 lp->status = 0;
2473 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
2474 return 0;
2478 return 1;
2481 /* Count the LWP's that have had events. */
2483 static int
2484 count_events_callback (struct lwp_info *lp, int *count)
2486 gdb_assert (count != NULL);
2488 /* Select only resumed LWPs that have an event pending. */
2489 if (lp->resumed && lwp_status_pending_p (lp))
2490 (*count)++;
2492 return 0;
2495 /* Select the LWP (if any) that is currently being single-stepped. */
2497 static int
2498 select_singlestep_lwp_callback (struct lwp_info *lp)
2500 if (lp->last_resume_kind == resume_step
2501 && lp->status != 0)
2502 return 1;
2503 else
2504 return 0;
2507 /* Returns true if LP has a status pending. */
2509 static int
2510 lwp_status_pending_p (struct lwp_info *lp)
2512 /* We check for lp->waitstatus in addition to lp->status, because we
2513 can have pending process exits recorded in lp->status and
2514 W_EXITCODE(0,0) happens to be 0. */
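/* That is, W_EXITCODE (RET, SIG) is typically ((RET) << 8 | (SIG)), so a
   normal exit with code 0 produces wait status 0, which is
   indistinguishable from "no status pending" in lp->status alone.  */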
2515 return lp->status != 0 || lp->waitstatus.kind () != TARGET_WAITKIND_IGNORE;
2518 /* Select the Nth LWP that has had an event. */
2520 static int
2521 select_event_lwp_callback (struct lwp_info *lp, int *selector)
2523 gdb_assert (selector != NULL);
2525 /* Select only resumed LWPs that have an event pending. */
2526 if (lp->resumed && lwp_status_pending_p (lp))
2527 if ((*selector)-- == 0)
2528 return 1;
2530 return 0;
2533 /* Called when the LWP stopped for a signal/trap. If it stopped for a
2534 trap check what caused it (breakpoint, watchpoint, trace, etc.),
2535 and save the result in the LWP's stop_reason field. If it stopped
2536 for a breakpoint, decrement the PC if necessary on the lwp's
2537 architecture. */
2539 static void
2540 save_stop_reason (struct lwp_info *lp)
2542 struct regcache *regcache;
2543 struct gdbarch *gdbarch;
2544 CORE_ADDR pc;
2545 CORE_ADDR sw_bp_pc;
2546 #if USE_SIGTRAP_SIGINFO
2547 siginfo_t siginfo;
2548 #endif
2550 gdb_assert (lp->stop_reason == TARGET_STOPPED_BY_NO_REASON);
2551 gdb_assert (lp->status != 0);
2553 if (!linux_target->low_status_is_event (lp->status))
2554 return;
2556 inferior *inf = find_inferior_ptid (linux_target, lp->ptid);
2557 if (inf->starting_up)
2558 return;
2560 regcache = get_thread_regcache (linux_target, lp->ptid);
2561 gdbarch = regcache->arch ();
2563 pc = regcache_read_pc (regcache);
2564 sw_bp_pc = pc - gdbarch_decr_pc_after_break (gdbarch);
2566 #if USE_SIGTRAP_SIGINFO
2567 if (linux_nat_get_siginfo (lp->ptid, &siginfo))
2569 if (siginfo.si_signo == SIGTRAP)
2571 if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
2572 && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
2574 /* The si_code is ambiguous on this arch -- check debug
2575 registers. */
2576 if (!check_stopped_by_watchpoint (lp))
2577 lp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
2579 else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
2581 /* If we determine the LWP stopped for a SW breakpoint,
2582 trust it. Particularly don't check watchpoint
2583 registers, because, at least on s390, we'd find
2584 stopped-by-watchpoint as long as there's a watchpoint
2585 set. */
2586 lp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
2588 else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
2590 /* This can indicate either a hardware breakpoint or
2591 hardware watchpoint. Check debug registers. */
2592 if (!check_stopped_by_watchpoint (lp))
2593 lp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
2595 else if (siginfo.si_code == TRAP_TRACE)
2597 linux_nat_debug_printf ("%s stopped by trace",
2598 lp->ptid.to_string ().c_str ());
2600 /* We may have single stepped an instruction that
2601 triggered a watchpoint. In that case, on some
2602 architectures (such as x86), instead of TRAP_HWBKPT,
2603 si_code indicates TRAP_TRACE, and we need to check
2604 the debug registers separately. */
2605 check_stopped_by_watchpoint (lp);
2609 #else
2610 if ((!lp->step || lp->stop_pc == sw_bp_pc)
2611 && software_breakpoint_inserted_here_p (regcache->aspace (),
2612 sw_bp_pc))
2614 /* The LWP was either continued, or stepped a software
2615 breakpoint instruction. */
2616 lp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
2619 if (hardware_breakpoint_inserted_here_p (regcache->aspace (), pc))
2620 lp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
2622 if (lp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
2623 check_stopped_by_watchpoint (lp);
2624 #endif
2626 if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
2628 linux_nat_debug_printf ("%s stopped by software breakpoint",
2629 lp->ptid.to_string ().c_str ());
2631 /* Back up the PC if necessary. */
2632 if (pc != sw_bp_pc)
2633 regcache_write_pc (regcache, sw_bp_pc);
2635 /* Update this so we record the correct stop PC below. */
2636 pc = sw_bp_pc;
2638 else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
2640 linux_nat_debug_printf ("%s stopped by hardware breakpoint",
2641 lp->ptid.to_string ().c_str ());
2643 else if (lp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
2645 linux_nat_debug_printf ("%s stopped by hardware watchpoint",
2646 lp->ptid.to_string ().c_str ());
2649 lp->stop_pc = pc;
2653 /* Returns true if the LWP had stopped for a software breakpoint. */
2655 bool
2656 linux_nat_target::stopped_by_sw_breakpoint ()
2658 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2660 gdb_assert (lp != NULL);
2662 return lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2665 /* Implement the supports_stopped_by_sw_breakpoint method. */
2667 bool
2668 linux_nat_target::supports_stopped_by_sw_breakpoint ()
2670 return USE_SIGTRAP_SIGINFO;
2673 /* Returns true if the LWP had stopped for a hardware
2674 breakpoint/watchpoint. */
2676 bool
2677 linux_nat_target::stopped_by_hw_breakpoint ()
2679 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2681 gdb_assert (lp != NULL);
2683 return lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2686 /* Implement the supports_stopped_by_hw_breakpoint method. */
2688 bool
2689 linux_nat_target::supports_stopped_by_hw_breakpoint ()
2691 return USE_SIGTRAP_SIGINFO;
2694 /* Select one LWP out of those that have events pending. */
2696 static void
2697 select_event_lwp (ptid_t filter, struct lwp_info **orig_lp, int *status)
2699 int num_events = 0;
2700 int random_selector;
2701 struct lwp_info *event_lp = NULL;
2703 /* Record the wait status for the original LWP. */
2704 (*orig_lp)->status = *status;
2706 /* In all-stop, give preference to the LWP that is being
2707 single-stepped. There will be at most one, and it will be the
2708 LWP that the core is most interested in. If we didn't do this,
2709 then we'd have to handle pending step SIGTRAPs somehow in case
2710 the core later continues the previously-stepped thread, as
2711 otherwise we'd report the pending SIGTRAP then, and the core, not
2712 having stepped the thread, wouldn't understand what the trap was
2713 for, and therefore would report it to the user as a random
2714 signal. */
2715 if (!target_is_non_stop_p ())
2717 event_lp = iterate_over_lwps (filter, select_singlestep_lwp_callback);
2718 if (event_lp != NULL)
2720 linux_nat_debug_printf ("Select single-step %s",
2721 event_lp->ptid.to_string ().c_str ());
2725 if (event_lp == NULL)
2727 /* Pick one at random, out of those which have had events. */
2729 /* First see how many events we have. */
2730 iterate_over_lwps (filter,
2731 [&] (struct lwp_info *info)
2733 return count_events_callback (info, &num_events);
2735 gdb_assert (num_events > 0);
2737 /* Now randomly pick a LWP out of those that have had
2738 events. */
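/* rand () / (RAND_MAX + 1.0) is uniform in [0, 1), so multiplying by
   NUM_EVENTS and truncating yields an index in [0, NUM_EVENTS - 1],
   each (roughly) equally likely.  */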
2739 random_selector = (int)
2740 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2742 if (num_events > 1)
2743 linux_nat_debug_printf ("Found %d events, selecting #%d",
2744 num_events, random_selector);
2746 event_lp
2747 = (iterate_over_lwps
2748 (filter,
2749 [&] (struct lwp_info *info)
2751 return select_event_lwp_callback (info,
2752 &random_selector);
2753 }));
2756 if (event_lp != NULL)
2758 /* Switch the event LWP. */
2759 *orig_lp = event_lp;
2760 *status = event_lp->status;
2763 /* Flush the wait status for the event LWP. */
2764 (*orig_lp)->status = 0;
2767 /* Return non-zero if LP has been resumed. */
2769 static int
2770 resumed_callback (struct lwp_info *lp)
2772 return lp->resumed;
2775 /* Check if we should go on and pass this event to common code.
2777 If so, save the status in the lwp_info structure associated with LWPID. */
2779 static void
2780 linux_nat_filter_event (int lwpid, int status)
2782 struct lwp_info *lp;
2783 int event = linux_ptrace_get_extended_event (status);
2785 lp = find_lwp_pid (ptid_t (lwpid));
2787 /* Check for events reported by anything not in our LWP list. */
2788 if (lp == nullptr)
2790 if (WIFSTOPPED (status))
2792 if (WSTOPSIG (status) == SIGTRAP && event == PTRACE_EVENT_EXEC)
2794 /* A non-leader thread exec'ed after we've seen the
2795 leader zombie, and removed it from our lists (in
2796 check_zombie_leaders). The non-leader thread changes
2797 its tid to the tgid. */
2798 linux_nat_debug_printf
2799 ("Re-adding thread group leader LWP %d after exec.",
2800 lwpid);
2802 lp = add_lwp (ptid_t (lwpid, lwpid));
2803 lp->stopped = 1;
2804 lp->resumed = 1;
2805 add_thread (linux_target, lp->ptid);
2807 else
2809 /* A process we are controlling has forked and the new
2810 child's stop was reported to us by the kernel. Save
2811 its PID and go back to waiting for the fork event to
2812 be reported - the stopped process might be returned
2813 from waitpid before or after the fork event is. */
2814 linux_nat_debug_printf
2815 ("Saving LWP %d status %s in stopped_pids list",
2816 lwpid, status_to_str (status).c_str ());
2817 add_to_pid_list (&stopped_pids, lwpid, status);
2820 else
2822 /* Don't report an event for the exit of an LWP not in our
2823 list, i.e. not part of any inferior we're debugging.
2824 This can happen if we detach from a program we originally
2825 forked and then it exits. However, note that we may have
2826 earlier deleted a leader of an inferior we're debugging,
2827 in check_zombie_leaders. Re-add it here if so. */
2828 for (inferior *inf : all_inferiors (linux_target))
2830 if (inf->pid == lwpid)
2832 linux_nat_debug_printf
2833 ("Re-adding thread group leader LWP %d after exit.",
2834 lwpid);
2836 lp = add_lwp (ptid_t (lwpid, lwpid));
2837 lp->resumed = 1;
2838 add_thread (linux_target, lp->ptid);
2839 break;
2844 if (lp == nullptr)
2845 return;
2848 /* This LWP is stopped now. (And if dead, this prevents it from
2849 ever being continued.) */
2850 lp->stopped = 1;
2852 if (WIFSTOPPED (status) && lp->must_set_ptrace_flags)
2854 inferior *inf = find_inferior_pid (linux_target, lp->ptid.pid ());
2855 int options = linux_nat_ptrace_options (inf->attach_flag);
2857 linux_enable_event_reporting (lp->ptid.lwp (), options);
2858 lp->must_set_ptrace_flags = 0;
2861 /* Handle GNU/Linux's syscall SIGTRAPs. */
2862 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
2864 /* No longer need the sysgood bit. The ptrace event ends up
2865 recorded in lp->waitstatus if we care for it. We can carry
2866 on handling the event like a regular SIGTRAP from here
2867 on. */
2868 status = W_STOPCODE (SIGTRAP);
2869 if (linux_handle_syscall_trap (lp, 0))
2870 return;
2872 else
2874 /* Almost all other ptrace-stops are known to be outside of system
2875 calls, with further exceptions in linux_handle_extended_wait. */
2876 lp->syscall_state = TARGET_WAITKIND_IGNORE;
2879 /* Handle GNU/Linux's extended waitstatus for trace events. */
2880 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
2881 && linux_is_extended_waitstatus (status))
2883 linux_nat_debug_printf ("Handling extended status 0x%06x", status);
2885 if (linux_handle_extended_wait (lp, status))
2886 return;
2889 /* Check if the thread has exited. */
2890 if (WIFEXITED (status) || WIFSIGNALED (status))
2892 if (!report_thread_events && !is_leader (lp))
2894 linux_nat_debug_printf ("%s exited.",
2895 lp->ptid.to_string ().c_str ());
2897 /* If this was not the leader exiting, then the exit signal
2898 was not the end of the debugged application and should be
2899 ignored. */
2900 exit_lwp (lp);
2901 return;
2904 /* Note that even if the leader was ptrace-stopped, it can still
2905 exit, if e.g., some other thread brings down the whole
2906 process (calls `exit'). So don't assert that the lwp is
2907 resumed. */
2908 linux_nat_debug_printf ("LWP %ld exited (resumed=%d)",
2909 lp->ptid.lwp (), lp->resumed);
2911 /* Dead LWPs aren't expected to report a pending SIGSTOP. */
2912 lp->signalled = 0;
2914 /* Store the pending event in the waitstatus, because
2915 W_EXITCODE(0,0) == 0. */
2916 lp->waitstatus = host_status_to_waitstatus (status);
2917 return;
2920 /* Make sure we don't report a SIGSTOP that we sent ourselves in
2921 an attempt to stop an LWP. */
2922 if (lp->signalled
2923 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
2925 lp->signalled = 0;
2927 if (lp->last_resume_kind == resume_stop)
2929 linux_nat_debug_printf ("resume_stop SIGSTOP caught for %s.",
2930 lp->ptid.to_string ().c_str ());
2932 else
2934 /* This is a delayed SIGSTOP. Filter out the event. */
2936 linux_nat_debug_printf
2937 ("%s %s, 0, 0 (discard delayed SIGSTOP)",
2938 lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2939 lp->ptid.to_string ().c_str ());
2941 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
2942 gdb_assert (lp->resumed);
2943 return;
2947 /* Make sure we don't report a SIGINT that we have already displayed
2948 for another thread. */
2949 if (lp->ignore_sigint
2950 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGINT)
2952 linux_nat_debug_printf ("Delayed SIGINT caught for %s.",
2953 lp->ptid.to_string ().c_str ());
2955 /* This is a delayed SIGINT. */
2956 lp->ignore_sigint = 0;
2958 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
2959 linux_nat_debug_printf ("%s %s, 0, 0 (discard SIGINT)",
2960 lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2961 lp->ptid.to_string ().c_str ());
2962 gdb_assert (lp->resumed);
2964 /* Discard the event. */
2965 return;
2968 /* Don't report signals that GDB isn't interested in, such as
2969 signals that are neither printed nor stopped upon. Stopping all
2970 threads can be a bit time-consuming, so if we want decent
2971 performance with heavily multi-threaded programs, especially when
2972 they're using a high frequency timer, we'd better avoid it if we
2973 can. */
2974 if (WIFSTOPPED (status))
2976 enum gdb_signal signo = gdb_signal_from_host (WSTOPSIG (status));
2978 if (!target_is_non_stop_p ())
2980 /* Only do the below in all-stop, as we currently use SIGSTOP
2981 to implement target_stop (see linux_nat_stop) in
2982 non-stop. */
2983 if (signo == GDB_SIGNAL_INT && signal_pass_state (signo) == 0)
2985 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
2986 forwarded to the entire process group, that is, all LWPs
2987 will receive it - unless they're using CLONE_THREAD to
2988 share signals. Since we only want to report it once, we
2989 mark it as ignored for all LWPs except this one. */
2990 iterate_over_lwps (ptid_t (lp->ptid.pid ()), set_ignore_sigint);
2991 lp->ignore_sigint = 0;
2993 else
2994 maybe_clear_ignore_sigint (lp);
2997 /* When using hardware single-step, we need to report every signal.
2998 Otherwise, signals in pass_mask may be short-circuited, except for
2999 signals that might be caused by a breakpoint, or for SIGSTOP when we
3000 sent the SIGSTOP ourselves and are waiting for it to arrive. */
3001 if (!lp->step
3002 && WSTOPSIG (status) && sigismember (&pass_mask, WSTOPSIG (status))
3003 && (WSTOPSIG (status) != SIGSTOP
3004 || !linux_target->find_thread (lp->ptid)->stop_requested)
3005 && !linux_wstatus_maybe_breakpoint (status))
3007 linux_resume_one_lwp (lp, lp->step, signo);
3008 linux_nat_debug_printf
3009 ("%s %s, %s (preempt 'handle')",
3010 lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3011 lp->ptid.to_string ().c_str (),
3012 (signo != GDB_SIGNAL_0
3013 ? strsignal (gdb_signal_to_host (signo)) : "0"));
3014 return;
3018 /* An interesting event. */
3019 gdb_assert (lp);
3020 lp->status = status;
3021 save_stop_reason (lp);
3024 /* Detect zombie thread group leaders, and "exit" them. We can't reap
3025 their exits until all other threads in the group have exited. */
3027 static void
3028 check_zombie_leaders (void)
3030 for (inferior *inf : all_inferiors ())
3032 struct lwp_info *leader_lp;
3034 if (inf->pid == 0)
3035 continue;
3037 leader_lp = find_lwp_pid (ptid_t (inf->pid));
3038 if (leader_lp != NULL
3039 /* Check if there are other threads in the group, as we may
3040 have raced with the inferior simply exiting. Note this
3041 isn't a watertight check. If the inferior is
3042 multi-threaded and is exiting, it may be we see the
3043 leader as zombie before we reap all the non-leader
3044 threads. See comments below. */
3045 && num_lwps (inf->pid) > 1
3046 && linux_proc_pid_is_zombie (inf->pid))
3048 /* A zombie leader in a multi-threaded program can mean one
3049 of three things:
3051 #1 - Only the leader exited, not the whole program, e.g.,
3052 with pthread_exit. Since we can't reap the leader's exit
3053 status until all other threads are gone and reaped too,
3054 we want to delete the zombie leader right away, as it
3055 can't be debugged, we can't read its registers, etc.
3056 This is the main reason we check for zombie leaders
3057 disappearing.
3059 #2 - The whole thread-group/process exited (a group exit,
3060 via e.g. exit(3)), and there is (or will be shortly) an
3061 exit reported for each thread in the process, and then
3062 finally an exit for the leader once the non-leaders are
3063 reaped.
3065 #3 - There are 3 or more threads in the group, and a
3066 thread other than the leader exec'd. See comments on
3067 exec events at the top of the file.
3069 Ideally we would never delete the leader for case #2.
3070 Instead, we want to collect the exit status of each
3071 non-leader thread, and then finally collect the exit
3072 status of the leader as normal and use its exit code as
3073 whole-process exit code. Unfortunately, there's no
3074 race-free way to distinguish cases #1 and #2. We can't
3075 assume the exit events for the non-leader threads are
3076 already pending in the kernel, nor can we assume the
3077 non-leader threads are in zombie state already. Between
3078 the leader becoming zombie and the non-leaders exiting
3079 and becoming zombie themselves, there's a small time
3080 window, so such a check would be racy. Temporarily
3081 pausing all threads and checking to see if all threads
3082 exit or not before re-resuming them would work in the
3083 case that all threads are running right now, but it
3084 wouldn't work if some thread is currently already
3085 ptrace-stopped, e.g., due to scheduler-locking.
3087 So what we do is we delete the leader anyhow, and then
3088 later on when we see its exit status, we re-add it.
3089 We also make sure that we only report a whole-process
3090 exit when we see the leader exiting, as opposed to when
3091 the last LWP in the LWP list exits, which can be a
3092 non-leader if we deleted the leader here. */
3093 linux_nat_debug_printf ("Thread group leader %d zombie "
3094 "(it exited, or another thread execd), "
3095 "deleting it.",
3096 inf->pid);
3097 exit_lwp (leader_lp);
3102 /* Convenience function that is called when the kernel reports an exit
3103 event. This decides whether to report the event to GDB as a
3104 process exit event, a thread exit event, or to suppress the
3105 event. */
3107 static ptid_t
3108 filter_exit_event (struct lwp_info *event_child,
3109 struct target_waitstatus *ourstatus)
3111 ptid_t ptid = event_child->ptid;
3113 if (!is_leader (event_child))
3115 if (report_thread_events)
3116 ourstatus->set_thread_exited (0);
3117 else
3118 ourstatus->set_ignore ();
3120 exit_lwp (event_child);
3123 return ptid;
3126 static ptid_t
3127 linux_nat_wait_1 (ptid_t ptid, struct target_waitstatus *ourstatus,
3128 target_wait_flags target_options)
3130 LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT;
3132 sigset_t prev_mask;
3133 enum resume_kind last_resume_kind;
3134 struct lwp_info *lp;
3135 int status;
3137 /* The first time we get here after starting a new inferior, we may
3138 not have added it to the LWP list yet - this is the earliest
3139 moment at which we know its PID. */
3140 if (ptid.is_pid () && find_lwp_pid (ptid) == nullptr)
3142 ptid_t lwp_ptid (ptid.pid (), ptid.pid ());
3144 /* Upgrade the main thread's ptid. */
3145 thread_change_ptid (linux_target, ptid, lwp_ptid);
3146 lp = add_initial_lwp (lwp_ptid);
3147 lp->resumed = 1;
3150 /* Make sure SIGCHLD is blocked until the sigsuspend below. */
3151 block_child_signals (&prev_mask);
3153 /* First check if there is a LWP with a wait status pending. */
3154 lp = iterate_over_lwps (ptid, status_callback);
3155 if (lp != NULL)
3157 linux_nat_debug_printf ("Using pending wait status %s for %s.",
3158 pending_status_str (lp).c_str (),
3159 lp->ptid.to_string ().c_str ());
3162 /* But if we don't find a pending event, we'll have to wait. Always
3163 pull all events out of the kernel. We'll randomly select an
3164 event LWP out of all that have events, to prevent starvation. */
3166 while (lp == NULL)
3168 pid_t lwpid;
3170 /* Always use -1 and WNOHANG, due to couple of a kernel/ptrace
3171 quirks:
3173 - If the thread group leader exits while other threads in the
3174 thread group still exist, waitpid(TGID, ...) hangs. That
3175 waitpid won't return an exit status until the other threads
3176 in the group are reaped.
3178 - When a non-leader thread execs, that thread just vanishes
3179 without reporting an exit (so we'd hang if we waited for it
3180 explicitly in that case). The exec event is reported to
3181 the TGID pid. */
3183 errno = 0;
3184 lwpid = my_waitpid (-1, &status, __WALL | WNOHANG);
3186 linux_nat_debug_printf ("waitpid(-1, ...) returned %d, %s",
3187 lwpid,
3188 errno ? safe_strerror (errno) : "ERRNO-OK");
3190 if (lwpid > 0)
3192 linux_nat_debug_printf ("waitpid %ld received %s",
3193 (long) lwpid,
3194 status_to_str (status).c_str ());
3196 linux_nat_filter_event (lwpid, status);
3197 /* Retry until nothing comes out of waitpid. A single
3198 SIGCHLD can indicate more than one child stopped. */
3199 continue;
3202 /* Now that we've pulled all events out of the kernel, resume
3203 LWPs that don't have an interesting event to report. */
3204 iterate_over_lwps (minus_one_ptid,
3205 [] (struct lwp_info *info)
3207 return resume_stopped_resumed_lwps (info, minus_one_ptid);
3210 /* ... and find an LWP with a status to report to the core, if
3211 any. */
3212 lp = iterate_over_lwps (ptid, status_callback);
3213 if (lp != NULL)
3214 break;
3216 /* Check for zombie thread group leaders. Those can't be reaped
3217 until all other threads in the thread group are. */
3218 check_zombie_leaders ();
3220 /* If there are no resumed children left, bail. We'd be stuck
3221 forever in the sigsuspend call below otherwise. */
3222 if (iterate_over_lwps (ptid, resumed_callback) == NULL)
3224 linux_nat_debug_printf ("exit (no resumed LWP)");
3226 ourstatus->set_no_resumed ();
3228 restore_child_signals_mask (&prev_mask);
3229 return minus_one_ptid;
3232 /* No interesting event to report to the core. */
3234 if (target_options & TARGET_WNOHANG)
3236 linux_nat_debug_printf ("no interesting events found");
3238 ourstatus->set_ignore ();
3239 restore_child_signals_mask (&prev_mask);
3240 return minus_one_ptid;
3243 /* We shouldn't end up here unless we want to try again. */
3244 gdb_assert (lp == NULL);
3246 /* Block until we get an event reported with SIGCHLD. */
3247 wait_for_signal ();
3250 gdb_assert (lp);
3252 status = lp->status;
3253 lp->status = 0;
3255 if (!target_is_non_stop_p ())
3257 /* Now stop all other LWP's ... */
3258 iterate_over_lwps (minus_one_ptid, stop_callback);
3260 /* ... and wait until all of them have reported back that
3261 they're no longer running. */
3262 iterate_over_lwps (minus_one_ptid, stop_wait_callback);
3265 /* If we're not waiting for a specific LWP, choose an event LWP from
3266 among those that have had events. Giving equal priority to all
3267 LWPs that have had events helps prevent starvation. */
3268 if (ptid == minus_one_ptid || ptid.is_pid ())
3269 select_event_lwp (ptid, &lp, &status);
3271 gdb_assert (lp != NULL);
3273 /* Now that we've selected our final event LWP, un-adjust its PC if
3274 it was a software breakpoint, and we can't reliably support the
3275 "stopped by software breakpoint" stop reason. */
3276 if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3277 && !USE_SIGTRAP_SIGINFO)
3279 struct regcache *regcache = get_thread_regcache (linux_target, lp->ptid);
3280 struct gdbarch *gdbarch = regcache->arch ();
3281 int decr_pc = gdbarch_decr_pc_after_break (gdbarch);
3283 if (decr_pc != 0)
3285 CORE_ADDR pc;
3287 pc = regcache_read_pc (regcache);
3288 regcache_write_pc (regcache, pc + decr_pc);
3292 /* We'll need this to determine whether to report a SIGSTOP as
3293 GDB_SIGNAL_0. Need to take a copy because resume_clear_callback
3294 clears it. */
3295 last_resume_kind = lp->last_resume_kind;
3297 if (!target_is_non_stop_p ())
3299 /* In all-stop, from the core's perspective, all LWPs are now
3300 stopped until a new resume action is sent over. */
3301 iterate_over_lwps (minus_one_ptid, resume_clear_callback);
3303 else
3305 resume_clear_callback (lp);
3308 if (linux_target->low_status_is_event (status))
3310 linux_nat_debug_printf ("trap ptid is %s.",
3311 lp->ptid.to_string ().c_str ());
3314 if (lp->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
3316 *ourstatus = lp->waitstatus;
3317 lp->waitstatus.set_ignore ();
3319 else
3320 *ourstatus = host_status_to_waitstatus (status);
3322 linux_nat_debug_printf ("event found");
3324 restore_child_signals_mask (&prev_mask);
3326 if (last_resume_kind == resume_stop
3327 && ourstatus->kind () == TARGET_WAITKIND_STOPPED
3328 && WSTOPSIG (status) == SIGSTOP)
3330 /* A thread that has been requested to stop by GDB with
3331 target_stop, and it stopped cleanly, so report as SIG0. The
3332 use of SIGSTOP is an implementation detail. */
3333 ourstatus->set_stopped (GDB_SIGNAL_0);
3336 if (ourstatus->kind () == TARGET_WAITKIND_EXITED
3337 || ourstatus->kind () == TARGET_WAITKIND_SIGNALLED)
3338 lp->core = -1;
3339 else
3340 lp->core = linux_common_core_of_thread (lp->ptid);
3342 if (ourstatus->kind () == TARGET_WAITKIND_EXITED)
3343 return filter_exit_event (lp, ourstatus);
3345 return lp->ptid;
3348 /* Resume LWPs that are currently stopped without any pending status
3349 to report, but are resumed from the core's perspective. */
3351 static int
3352 resume_stopped_resumed_lwps (struct lwp_info *lp, const ptid_t wait_ptid)
3354 inferior *inf = find_inferior_ptid (linux_target, lp->ptid);
3356 if (!lp->stopped)
3358 linux_nat_debug_printf ("NOT resuming LWP %s, not stopped",
3359 lp->ptid.to_string ().c_str ());
3361 else if (!lp->resumed)
3363 linux_nat_debug_printf ("NOT resuming LWP %s, not resumed",
3364 lp->ptid.to_string ().c_str ());
3366 else if (lwp_status_pending_p (lp))
3368 linux_nat_debug_printf ("NOT resuming LWP %s, has pending status",
3369 lp->ptid.to_string ().c_str ());
3371 else if (inf->vfork_child != nullptr)
3373 linux_nat_debug_printf ("NOT resuming LWP %s (vfork parent)",
3374 lp->ptid.to_string ().c_str ());
3376 else
3378 struct regcache *regcache = get_thread_regcache (linux_target, lp->ptid);
3379 struct gdbarch *gdbarch = regcache->arch ();
3383 CORE_ADDR pc = regcache_read_pc (regcache);
3384 int leave_stopped = 0;
3386 /* Don't bother if there's a breakpoint at PC that we'd hit
3387 immediately, and we're not waiting for this LWP. */
3388 if (!lp->ptid.matches (wait_ptid))
3390 if (breakpoint_inserted_here_p (regcache->aspace (), pc))
3391 leave_stopped = 1;
3394 if (!leave_stopped)
3396 linux_nat_debug_printf
3397 ("resuming stopped-resumed LWP %s at %s: step=%d",
3398 lp->ptid.to_string ().c_str (), paddress (gdbarch, pc),
3399 lp->step);
3401 linux_resume_one_lwp_throw (lp, lp->step, GDB_SIGNAL_0);
3404 catch (const gdb_exception_error &ex)
3406 if (!check_ptrace_stopped_lwp_gone (lp))
3407 throw;
3411 return 0;
3414 ptid_t
3415 linux_nat_target::wait (ptid_t ptid, struct target_waitstatus *ourstatus,
3416 target_wait_flags target_options)
3418 LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT;
3420 ptid_t event_ptid;
3422 linux_nat_debug_printf ("[%s], [%s]", ptid.to_string ().c_str (),
3423 target_options_to_string (target_options).c_str ());
3425 /* Flush the async file first. */
3426 if (target_is_async_p ())
3427 async_file_flush ();
3429 /* Resume LWPs that are currently stopped without any pending status
3430 to report, but are resumed from the core's perspective. LWPs get
3431 in this state if we find them stopping at a time we're not
3432 interested in reporting the event (target_wait on a
3433 specific_process, for example, see linux_nat_wait_1), and
3434 meanwhile the event became uninteresting. Don't bother resuming
3435 LWPs we're not going to wait for if they'd stop immediately. */
3436 if (target_is_non_stop_p ())
3437 iterate_over_lwps (minus_one_ptid,
3438 [=] (struct lwp_info *info)
3440 return resume_stopped_resumed_lwps (info, ptid);
3443 event_ptid = linux_nat_wait_1 (ptid, ourstatus, target_options);
3445 /* If we requested any event, and something came out, assume there
3446 may be more. If we requested a specific lwp or process, also
3447 assume there may be more. */
3448 if (target_is_async_p ()
3449 && ((ourstatus->kind () != TARGET_WAITKIND_IGNORE
3450 && ourstatus->kind () != TARGET_WAITKIND_NO_RESUMED)
3451 || ptid != minus_one_ptid))
3452 async_file_mark ();
3454 return event_ptid;
3457 /* Kill one LWP. */
3459 static void
3460 kill_one_lwp (pid_t pid)
3462 /* PTRACE_KILL may resume the inferior. Send SIGKILL first. */
3464 errno = 0;
3465 kill_lwp (pid, SIGKILL);
3467 if (debug_linux_nat)
3469 int save_errno = errno;
3471 linux_nat_debug_printf
3472 ("kill (SIGKILL) %ld, 0, 0 (%s)", (long) pid,
3473 save_errno != 0 ? safe_strerror (save_errno) : "OK");
3476 /* Some kernels ignore even SIGKILL for processes under ptrace. */
3478 errno = 0;
3479 ptrace (PTRACE_KILL, pid, 0, 0);
3480 if (debug_linux_nat)
3482 int save_errno = errno;
3484 linux_nat_debug_printf
3485 ("PTRACE_KILL %ld, 0, 0 (%s)", (long) pid,
3486 save_errno ? safe_strerror (save_errno) : "OK");
3490 /* Wait for an LWP to die. */
3492 static void
3493 kill_wait_one_lwp (pid_t pid)
3495 pid_t res;
3497 /* We must make sure that there are no pending events (delayed
3498 SIGSTOPs, pending SIGTRAPs, etc.) left behind, so that the current
3499 program doesn't interfere with any following debugging session. */
3503 res = my_waitpid (pid, NULL, __WALL);
3504 if (res != (pid_t) -1)
3506 linux_nat_debug_printf ("wait %ld received unknown.", (long) pid);
3508 /* The Linux kernel sometimes fails to kill a thread
3509 completely after PTRACE_KILL; that goes from the stop
3510 point in do_fork out to the one in get_signal_to_deliver
3511 and waits again. So kill it again. */
3512 kill_one_lwp (pid);
3515 while (res == pid);
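/* At this point waitpid failed; ECHILD means the kernel no longer has a
   child with that PID for us to wait on, i.e. the LWP has been fully
   reaped.  */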
3517 gdb_assert (res == -1 && errno == ECHILD);
3520 /* Callback for iterate_over_lwps. */
3522 static int
3523 kill_callback (struct lwp_info *lp)
3525 kill_one_lwp (lp->ptid.lwp ());
3526 return 0;
3529 /* Callback for iterate_over_lwps. */
3531 static int
3532 kill_wait_callback (struct lwp_info *lp)
3534 kill_wait_one_lwp (lp->ptid.lwp ());
3535 return 0;
3538 /* Kill the fork children of any threads of inferior INF that are
3539 stopped at a fork event. */
3541 static void
3542 kill_unfollowed_fork_children (struct inferior *inf)
3544 for (thread_info *thread : inf->non_exited_threads ())
3546 struct target_waitstatus *ws = &thread->pending_follow;
3548 if (ws->kind () == TARGET_WAITKIND_FORKED
3549 || ws->kind () == TARGET_WAITKIND_VFORKED)
3551 ptid_t child_ptid = ws->child_ptid ();
3552 int child_pid = child_ptid.pid ();
3553 int child_lwp = child_ptid.lwp ();
3555 kill_one_lwp (child_lwp);
3556 kill_wait_one_lwp (child_lwp);
3558 /* Let the arch-specific native code know this process is
3559 gone. */
3560 linux_target->low_forget_process (child_pid);
3565 void
3566 linux_nat_target::kill ()
3568 /* If we're stopped while forking and we haven't followed yet,
3569 kill the other task. We need to do this first because the
3570 parent will be sleeping if this is a vfork. */
3571 kill_unfollowed_fork_children (current_inferior ());
3573 if (forks_exist_p ())
3574 linux_fork_killall ();
3575 else
3577 ptid_t ptid = ptid_t (inferior_ptid.pid ());
3579 /* Stop all threads before killing them, since ptrace requires
3580 that the thread is stopped to successfully PTRACE_KILL. */
3581 iterate_over_lwps (ptid, stop_callback);
3582 /* ... and wait until all of them have reported back that
3583 they're no longer running. */
3584 iterate_over_lwps (ptid, stop_wait_callback);
3586 /* Kill all LWP's ... */
3587 iterate_over_lwps (ptid, kill_callback);
3589 /* ... and wait until we've flushed all events. */
3590 iterate_over_lwps (ptid, kill_wait_callback);
3593 target_mourn_inferior (inferior_ptid);
3596 void
3597 linux_nat_target::mourn_inferior ()
3599 LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT;
3601 int pid = inferior_ptid.pid ();
3603 purge_lwp_list (pid);
3605 close_proc_mem_file (pid);
3607 if (! forks_exist_p ())
3608 /* Normal case, no other forks available. */
3609 inf_ptrace_target::mourn_inferior ();
3610 else
3611 /* Multi-fork case. The current inferior_ptid has exited, but
3612 there are other viable forks to debug. Delete the exiting
3613 one and context-switch to the first available. */
3614 linux_fork_mourn_inferior ();
3616 /* Let the arch-specific native code know this process is gone. */
3617 linux_target->low_forget_process (pid);
3620 /* Convert a native/host siginfo object, into/from the siginfo in the
3621 layout of the inferiors' architecture. */
3623 static void
3624 siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
3626 /* If the low target didn't do anything, then just do a straight
3627 memcpy. */
3628 if (!linux_target->low_siginfo_fixup (siginfo, inf_siginfo, direction))
3630 if (direction == 1)
3631 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
3632 else
3633 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
3637 static enum target_xfer_status
3638 linux_xfer_siginfo (ptid_t ptid, enum target_object object,
3639 const char *annex, gdb_byte *readbuf,
3640 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
3641 ULONGEST *xfered_len)
3643 siginfo_t siginfo;
3644 gdb_byte inf_siginfo[sizeof (siginfo_t)];
3646 gdb_assert (object == TARGET_OBJECT_SIGNAL_INFO);
3647 gdb_assert (readbuf || writebuf);
3649 if (offset > sizeof (siginfo))
3650 return TARGET_XFER_E_IO;
3652 if (!linux_nat_get_siginfo (ptid, &siginfo))
3653 return TARGET_XFER_E_IO;
3655 /* When GDB is built as a 64-bit application, ptrace writes into
3656 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
3657 inferior with a 64-bit GDB should look the same as debugging it
3658 with a 32-bit GDB, we need to convert it. GDB core always sees
3659 the converted layout, so any read/write will have to be done
3660 post-conversion. */
3661 siginfo_fixup (&siginfo, inf_siginfo, 0);
3663 if (offset + len > sizeof (siginfo))
3664 len = sizeof (siginfo) - offset;
3666 if (readbuf != NULL)
3667 memcpy (readbuf, inf_siginfo + offset, len);
3668 else
3670 memcpy (inf_siginfo + offset, writebuf, len);
3672 /* Convert back to ptrace layout before flushing it out. */
3673 siginfo_fixup (&siginfo, inf_siginfo, 1);
3675 int pid = get_ptrace_pid (ptid);
3676 errno = 0;
3677 ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
3678 if (errno != 0)
3679 return TARGET_XFER_E_IO;
3682 *xfered_len = len;
3683 return TARGET_XFER_OK;
3686 static enum target_xfer_status
3687 linux_nat_xfer_osdata (enum target_object object,
3688 const char *annex, gdb_byte *readbuf,
3689 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
3690 ULONGEST *xfered_len);
3692 static enum target_xfer_status
3693 linux_proc_xfer_memory_partial (int pid, gdb_byte *readbuf,
3694 const gdb_byte *writebuf, ULONGEST offset,
3695 LONGEST len, ULONGEST *xfered_len);
3697 enum target_xfer_status
3698 linux_nat_target::xfer_partial (enum target_object object,
3699 const char *annex, gdb_byte *readbuf,
3700 const gdb_byte *writebuf,
3701 ULONGEST offset, ULONGEST len, ULONGEST *xfered_len)
3703 if (object == TARGET_OBJECT_SIGNAL_INFO)
3704 return linux_xfer_siginfo (inferior_ptid, object, annex, readbuf, writebuf,
3705 offset, len, xfered_len);
3707 /* The target is connected but no live inferior is selected. Pass
3708 this request down to a lower stratum (e.g., the executable
3709 file). */
3710 if (object == TARGET_OBJECT_MEMORY && inferior_ptid == null_ptid)
3711 return TARGET_XFER_EOF;
3713 if (object == TARGET_OBJECT_AUXV)
3714 return memory_xfer_auxv (this, object, annex, readbuf, writebuf,
3715 offset, len, xfered_len);
3717 if (object == TARGET_OBJECT_OSDATA)
3718 return linux_nat_xfer_osdata (object, annex, readbuf, writebuf,
3719 offset, len, xfered_len);
3721 if (object == TARGET_OBJECT_MEMORY)
3723 /* GDB calculates all addresses in the largest possible address
3724 width. The address must be masked to the inferior's address width
3725 before its final use by linux_proc_xfer_memory_partial.
3727 Compare ADDR_BIT first to avoid a compiler warning on shift overflow. */
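/* For example, a 32-bit inferior has ADDR_BIT == 32, so the mask below
   is 0xffffffff and a sign-extended address such as 0xffffffffc0001000
   is reduced to 0xc0001000.  */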
3728 int addr_bit = gdbarch_addr_bit (target_gdbarch ());
3730 if (addr_bit < (sizeof (ULONGEST) * HOST_CHAR_BIT))
3731 offset &= ((ULONGEST) 1 << addr_bit) - 1;
3733 /* If /proc/pid/mem is writable, don't fallback to ptrace. If
3734 the write via /proc/pid/mem fails because the inferior execed
3735 (and we haven't seen the exec event yet), a subsequent ptrace
3736 poke would incorrectly write memory to the post-exec address
3737 space, while the core was trying to write to the pre-exec
3738 address space. */
3739 if (proc_mem_file_is_writable ())
3740 return linux_proc_xfer_memory_partial (inferior_ptid.pid (), readbuf,
3741 writebuf, offset, len,
3742 xfered_len);
3745 return inf_ptrace_target::xfer_partial (object, annex, readbuf, writebuf,
3746 offset, len, xfered_len);
3749 bool
3750 linux_nat_target::thread_alive (ptid_t ptid)
3752 /* As long as a PTID is in the LWP list, consider it alive. */
3753 return find_lwp_pid (ptid) != NULL;
3756 /* Implement the to_update_thread_list target method for this
3757 target. */
3759 void
3760 linux_nat_target::update_thread_list ()
3762 /* We add/delete threads from the list as clone/exit events are
3763 processed, so just try deleting exited threads still in the
3764 thread list. */
3765 delete_exited_threads ();
3767 /* Update the processor core that each lwp/thread was last seen
3768 running on. */
3769 for (lwp_info *lwp : all_lwps ())
3771 /* Avoid accessing /proc if the thread hasn't run since the last
3772 time we fetched the thread's core. Accessing /proc becomes
3773 noticeably expensive when we have thousands of LWPs. */
3774 if (lwp->core == -1)
3775 lwp->core = linux_common_core_of_thread (lwp->ptid);
3779 std::string
3780 linux_nat_target::pid_to_str (ptid_t ptid)
3782 if (ptid.lwp_p ()
3783 && (ptid.pid () != ptid.lwp ()
3784 || num_lwps (ptid.pid ()) > 1))
3785 return string_printf ("LWP %ld", ptid.lwp ());
3787 return normal_pid_to_str (ptid);
3790 const char *
3791 linux_nat_target::thread_name (struct thread_info *thr)
3793 return linux_proc_tid_get_name (thr->ptid);
3796 /* Accepts an integer PID; returns a string representing a file that
3797 can be opened to get the symbols for the child process. */
3799 const char *
3800 linux_nat_target::pid_to_exec_file (int pid)
3802 return linux_proc_pid_to_exec_file (pid);
3805 /* Object representing an open /proc/PID/mem file. We keep one such
3806 file open per inferior.
3808 It might be tempting to think about only ever opening one file at
3809 most for all inferiors, closing/reopening the file as we access
3810 memory of different inferiors, to minimize number of file
3811 descriptors open, which can otherwise run into resource limits.
3812 However, that does not work correctly -- if the inferior execs and
3813 we haven't processed the exec event yet, and we opened a
3814 /proc/PID/mem file, we will get a mem file accessing the post-exec
3815 address space, thinking we're opening it for the pre-exec address
3816 space. That is dangerous as we can poke memory (e.g. clearing
3817 breakpoints) in the post-exec memory by mistake, corrupting the
3818 inferior. For that reason, we open the mem file as early as
3819 possible, right after spawning, forking or attaching to the
3820 inferior, when the inferior is stopped and thus before it has a
3821 chance of execing.
3823 Note that after opening the file, even if the thread we opened it
3824 for subsequently exits, the open file is still usable for accessing
3825 memory. It's only when the whole process exits or execs that the
3826 file becomes invalid, at which point reads/writes return EOF. */
3828 class proc_mem_file
3830 public:
3831 proc_mem_file (ptid_t ptid, int fd)
3832 : m_ptid (ptid), m_fd (fd)
3834 gdb_assert (m_fd != -1);
3837 ~proc_mem_file ()
3839 linux_nat_debug_printf ("closing fd %d for /proc/%d/task/%ld/mem",
3840 m_fd, m_ptid.pid (), m_ptid.lwp ());
3841 close (m_fd);
3844 DISABLE_COPY_AND_ASSIGN (proc_mem_file);
3846 int fd ()
3848 return m_fd;
3851 private:
3852 /* The LWP this file was opened for. Just for debugging
3853 purposes. */
3854 ptid_t m_ptid;
3856 /* The file descriptor. */
3857 int m_fd = -1;
3860 /* The map between an inferior process id, and the open /proc/PID/mem
3861 file. This is stored in a map instead of in a per-inferior
3862 structure because we need to be able to access memory of processes
3863 which don't have a corresponding struct inferior object. E.g.,
3864 with "detach-on-fork on" (the default), and "follow-fork parent"
3865 (also default), we don't create an inferior for the fork child, but
3866 we still need to remove breakpoints from the fork child's
3867 memory. */
3868 static std::unordered_map<int, proc_mem_file> proc_mem_file_map;
3870 /* Close the /proc/PID/mem file for PID. */
3872 static void
3873 close_proc_mem_file (pid_t pid)
3875 proc_mem_file_map.erase (pid);
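/* Note: erasing the map entry destroys the proc_mem_file object, and
   its destructor (above) closes the file descriptor, so this single
   erase is all that is needed to release the per-process mem file.  */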
3878 /* Open the /proc/PID/mem file for the process (thread group) of PTID.
3879 We actually open /proc/PID/task/LWP/mem, as that's the LWP we know
3880 exists and is stopped right now. We prefer the
3881 /proc/PID/task/LWP/mem form over /proc/LWP/mem to avoid tid-reuse
3882 races, just in case this is ever called on an already-waited
3883 LWP. */
3885 static void
3886 open_proc_mem_file (ptid_t ptid)
3888 auto iter = proc_mem_file_map.find (ptid.pid ());
3889 gdb_assert (iter == proc_mem_file_map.end ());
3891 char filename[64];
3892 xsnprintf (filename, sizeof filename,
3893 "/proc/%d/task/%ld/mem", ptid.pid (), ptid.lwp ());
3895 int fd = gdb_open_cloexec (filename, O_RDWR | O_LARGEFILE, 0).release ();
3897 if (fd == -1)
3899 warning (_("opening /proc/PID/mem file for lwp %d.%ld failed: %s (%d)"),
3900 ptid.pid (), ptid.lwp (),
3901 safe_strerror (errno), errno);
3902 return;
3905 proc_mem_file_map.emplace (std::piecewise_construct,
3906 std::forward_as_tuple (ptid.pid ()),
3907 std::forward_as_tuple (ptid, fd));
3909 linux_nat_debug_printf ("opened fd %d for lwp %d.%ld",
3910 fd, ptid.pid (), ptid.lwp ());
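/* For example (made-up numbers): for LWP 1235 of process 1234 this
   opens "/proc/1234/task/1235/mem" rather than "/proc/1235/mem", so
   that even if tid 1235 has already been reaped and reused by some
   unrelated process, the path cannot resolve to that other process's
   memory.  */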
3913 /* Helper for linux_proc_xfer_memory_partial and
3914 proc_mem_file_is_writable. FD is the already opened /proc/pid/mem
3915 file, and PID is the pid of the corresponding process. The rest of
3916 the arguments are like linux_proc_xfer_memory_partial's. */
3918 static enum target_xfer_status
3919 linux_proc_xfer_memory_partial_fd (int fd, int pid,
3920 gdb_byte *readbuf, const gdb_byte *writebuf,
3921 ULONGEST offset, LONGEST len,
3922 ULONGEST *xfered_len)
3924 ssize_t ret;
3926 gdb_assert (fd != -1);
3928 /* Use pread64/pwrite64 if available, since they save a syscall and
3929 can handle 64-bit offsets even on 32-bit platforms (for instance,
3930 SPARC debugging a SPARC64 application). But only use them if the
3931 offset isn't so high that when cast to off_t it'd be negative, as
3932 seen on SPARC64. pread64/pwrite64 outright reject such offsets.
3933 lseek does not. */
3934 #ifdef HAVE_PREAD64
3935 if ((off_t) offset >= 0)
3936 ret = (readbuf != nullptr
3937 ? pread64 (fd, readbuf, len, offset)
3938 : pwrite64 (fd, writebuf, len, offset));
3939 else
3940 #endif
3942 ret = lseek (fd, offset, SEEK_SET);
3943 if (ret != -1)
3944 ret = (readbuf != nullptr
3945 ? read (fd, readbuf, len)
3946 : write (fd, writebuf, len));
3949 if (ret == -1)
3951 linux_nat_debug_printf ("accessing fd %d for pid %d failed: %s (%d)",
3952 fd, pid, safe_strerror (errno), errno);
3953 return TARGET_XFER_E_IO;
3955 else if (ret == 0)
3957 /* EOF means the address space is gone, the whole process exited
3958 or execed. */
3959 linux_nat_debug_printf ("accessing fd %d for pid %d got EOF",
3960 fd, pid);
3961 return TARGET_XFER_EOF;
3963 else
3965 *xfered_len = ret;
3966 return TARGET_XFER_OK;
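/* Illustration of the offset check above, with a made-up value: an
   offset whose high bit is set, e.g. 0xfffff80001000000 as can appear
   on SPARC64, becomes negative when cast to a signed off_t, and
   pread64/pwrite64 reject such offsets (typically with EINVAL), so the
   code falls back to lseek followed by read/write in that case.  */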
3970 /* Implement the to_xfer_partial target method using /proc/PID/mem.
3971 Because we can use a single read/write call, this can be much more
3972 efficient than banging away at PTRACE_PEEKTEXT. Also, unlike
3973 PTRACE_PEEKTEXT/PTRACE_POKETEXT, this works with running
3974 threads. */
3976 static enum target_xfer_status
3977 linux_proc_xfer_memory_partial (int pid, gdb_byte *readbuf,
3978 const gdb_byte *writebuf, ULONGEST offset,
3979 LONGEST len, ULONGEST *xfered_len)
3981 auto iter = proc_mem_file_map.find (pid);
3982 if (iter == proc_mem_file_map.end ())
3983 return TARGET_XFER_EOF;
3985 int fd = iter->second.fd ();
3987 return linux_proc_xfer_memory_partial_fd (fd, pid, readbuf, writebuf, offset,
3988 len, xfered_len);
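/* Rough comparison: PTRACE_PEEKTEXT/PTRACE_POKETEXT move one word per
   ptrace call and require the tracee to be ptrace-stopped, whereas a
   single pread64/pwrite64 on the mem file can move the whole LEN-byte
   buffer, and keeps working while threads are running.  */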
3991 /* Check whether /proc/pid/mem is writable in the current kernel, and
3992 return true if so. It wasn't writable before Linux 2.6.39, but
3993 there's no way to know whether the feature was backported to older
3994 kernels. So we check to see if it works. The result is cached,
3995 and this is guaranteed to be called once early during inferior
3996 startup, so that any warning is printed out consistently between
3997 GDB invocations. Note that we don't call this during GDB startup
3998 itself, though, because then we might warn with e.g. just "gdb --version"
3999 on sandboxed systems. See PR gdb/29907. */
4001 static bool
4002 proc_mem_file_is_writable ()
4004 static gdb::optional<bool> writable;
4006 if (writable.has_value ())
4007 return *writable;
4009 writable.emplace (false);
4011 /* We check whether /proc/pid/mem is writable by trying to write to
4012 one of our variables via /proc/self/mem. */
4014 int fd = gdb_open_cloexec ("/proc/self/mem", O_RDWR | O_LARGEFILE, 0).release ();
4016 if (fd == -1)
4018 warning (_("opening /proc/self/mem file failed: %s (%d)"),
4019 safe_strerror (errno), errno);
4020 return *writable;
4023 SCOPE_EXIT { close (fd); };
4025 /* This is the variable we try to write to. Note OFFSET below. */
4026 volatile gdb_byte test_var = 0;
4028 gdb_byte writebuf[] = {0x55};
4029 ULONGEST offset = (uintptr_t) &test_var;
4030 ULONGEST xfered_len;
4032 enum target_xfer_status res
4033 = linux_proc_xfer_memory_partial_fd (fd, getpid (), nullptr, writebuf,
4034 offset, 1, &xfered_len);
4036 if (res == TARGET_XFER_OK)
4038 gdb_assert (xfered_len == 1);
4039 gdb_assert (test_var == 0x55);
4040 /* Success. */
4041 *writable = true;
4044 return *writable;
4047 /* Parse LINE as a signal set and add its set bits to SIGS. */
4049 static void
4050 add_line_to_sigset (const char *line, sigset_t *sigs)
4052 int len = strlen (line) - 1;
4053 const char *p;
4054 int signum;
4056 if (line[len] != '\n')
4057 error (_("Could not parse signal set: %s"), line);
4059 p = line;
4060 signum = len * 4;
4061 while (len-- > 0)
4063 int digit;
4065 if (*p >= '0' && *p <= '9')
4066 digit = *p - '0';
4067 else if (*p >= 'a' && *p <= 'f')
4068 digit = *p - 'a' + 10;
4069 else
4070 error (_("Could not parse signal set: %s"), line);
4072 signum -= 4;
4074 if (digit & 1)
4075 sigaddset (sigs, signum + 1);
4076 if (digit & 2)
4077 sigaddset (sigs, signum + 2);
4078 if (digit & 4)
4079 sigaddset (sigs, signum + 3);
4080 if (digit & 8)
4081 sigaddset (sigs, signum + 4);
4083 p++;
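/* Worked example (made-up mask): for the line "0000000000000102\n",
   the rightmost nibble '2' (bit 2) adds signal 2, and the third nibble
   from the right, '1' (bit 1), adds signal 9; i.e. with the usual
   Linux numbering the resulting set contains SIGINT and SIGKILL.  */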
4087 /* Find process PID's pending, blocked and ignored signals from
4088 /proc/pid/status, and set PENDING, BLOCKED and IGNORED to match. */
4090 void
4091 linux_proc_pending_signals (int pid, sigset_t *pending,
4092 sigset_t *blocked, sigset_t *ignored)
4094 char buffer[PATH_MAX], fname[PATH_MAX];
4096 sigemptyset (pending);
4097 sigemptyset (blocked);
4098 sigemptyset (ignored);
4099 xsnprintf (fname, sizeof fname, "/proc/%d/status", pid);
4100 gdb_file_up procfile = gdb_fopen_cloexec (fname, "r");
4101 if (procfile == NULL)
4102 error (_("Could not open %s"), fname);
4104 while (fgets (buffer, PATH_MAX, procfile.get ()) != NULL)
4106 /* Normal queued signals are on the SigPnd line in the status
4107 file. However, 2.6 kernels also have a "shared" pending
4108 queue for delivering signals to a thread group, so check for
4109 a ShdPnd line also.
4111 Unfortunately some Red Hat kernels include the shared pending
4112 queue but not the ShdPnd status field. */
4114 if (startswith (buffer, "SigPnd:\t"))
4115 add_line_to_sigset (buffer + 8, pending);
4116 else if (startswith (buffer, "ShdPnd:\t"))
4117 add_line_to_sigset (buffer + 8, pending);
4118 else if (startswith (buffer, "SigBlk:\t"))
4119 add_line_to_sigset (buffer + 8, blocked);
4120 else if (startswith (buffer, "SigIgn:\t"))
4121 add_line_to_sigset (buffer + 8, ignored);
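/* For reference, the relevant /proc/PID/status lines look like this
   (values here are illustrative only):
       SigPnd: 0000000000000000
       ShdPnd: 0000000000000100
       SigBlk: 0000000000010000
       SigIgn: 0000000000384004
   each being a hexadecimal signal mask parsed by add_line_to_sigset
   above.  */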
4125 static enum target_xfer_status
4126 linux_nat_xfer_osdata (enum target_object object,
4127 const char *annex, gdb_byte *readbuf,
4128 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
4129 ULONGEST *xfered_len)
4131 gdb_assert (object == TARGET_OBJECT_OSDATA);
4133 *xfered_len = linux_common_xfer_osdata (annex, readbuf, offset, len);
4134 if (*xfered_len == 0)
4135 return TARGET_XFER_EOF;
4136 else
4137 return TARGET_XFER_OK;
4140 std::vector<static_tracepoint_marker>
4141 linux_nat_target::static_tracepoint_markers_by_strid (const char *strid)
4143 char s[IPA_CMD_BUF_SIZE];
4144 int pid = inferior_ptid.pid ();
4145 std::vector<static_tracepoint_marker> markers;
4146 const char *p = s;
4147 ptid_t ptid = ptid_t (pid, 0);
4148 static_tracepoint_marker marker;
4150 /* Pause all */
4151 target_stop (ptid);
4153 strcpy (s, "qTfSTM");
4154 agent_run_command (pid, s, strlen (s) + 1);
4156 /* Unpause all. */
4157 SCOPE_EXIT { target_continue_no_signal (ptid); };
4159 while (*p++ == 'm')
4163 parse_static_tracepoint_marker_definition (p, &p, &marker);
4165 if (strid == NULL || marker.str_id == strid)
4166 markers.push_back (std::move (marker));
4168 while (*p++ == ','); /* comma-separated list */
4170 strcpy (s, "qTsSTM");
4171 agent_run_command (pid, s, strlen (s) + 1);
4172 p = s;
4175 return markers;
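/* The in-process agent's reply to qTfSTM/qTsSTM is expected to start
   with 'm' followed by one or more comma-separated static tracepoint
   marker definitions, or to be "l" once the list is exhausted; that is
   why the loop above keeps parsing and re-issuing qTsSTM for as long
   as the reply starts with 'm'.  */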
4178 /* target_can_async_p implementation. */
4180 bool
4181 linux_nat_target::can_async_p ()
4183 /* This flag should be checked in the common target.c code. */
4184 gdb_assert (target_async_permitted);
4186 /* Otherwise, this target is always able to support async mode. */
4187 return true;
4190 bool
4191 linux_nat_target::supports_non_stop ()
4193 return true;
4196 /* to_always_non_stop_p implementation. */
4198 bool
4199 linux_nat_target::always_non_stop_p ()
4201 return true;
4204 bool
4205 linux_nat_target::supports_multi_process ()
4207 return true;
4210 bool
4211 linux_nat_target::supports_disable_randomization ()
4213 return true;
4216 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
4217 it lets us notice when any child changes state and notify the
4218 event loop; and it allows us to use sigsuspend in linux_nat_wait_1
4219 above to wait for the arrival of a SIGCHLD. */
4221 static void
4222 sigchld_handler (int signo)
4224 int old_errno = errno;
4226 if (debug_linux_nat)
4227 gdb_stdlog->write_async_safe ("sigchld\n", sizeof ("sigchld\n") - 1);
4229 if (signo == SIGCHLD)
4231 /* Let the event loop know that there are events to handle. */
4232 linux_nat_target::async_file_mark_if_open ();
4235 errno = old_errno;
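/* Everything done above is intended to be async-signal-safe: the
   handler only saves/restores errno, writes to gdb_stdlog via
   write_async_safe, and marks the event pipe.  The pipe write is what
   lets the event loop notice the event in async mode, while the mere
   delivery of SIGCHLD is what wakes up the sigsuspend in
   linux_nat_wait_1.  */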
4238 /* Callback registered with the target events file descriptor. */
4240 static void
4241 handle_target_event (int error, gdb_client_data client_data)
4243 inferior_event_handler (INF_REG_EVENT);
4246 /* target_async implementation. */
4248 void
4249 linux_nat_target::async (bool enable)
4251 if (enable == is_async_p ())
4252 return;
4254 /* Block child signals while we create/destroy the pipe, as their
4255 handler writes to it. */
4256 gdb::block_signals blocker;
4258 if (enable)
4260 if (!async_file_open ())
4261 internal_error ("creating event pipe failed.");
4263 add_file_handler (async_wait_fd (), handle_target_event, NULL,
4264 "linux-nat");
4266 /* There may be pending events to handle. Tell the event loop
4267 to poll them. */
4268 async_file_mark ();
4270 else
4272 delete_file_handler (async_wait_fd ());
4273 async_file_close ();
4277 /* Stop an LWP, and push a GDB_SIGNAL_0 stop status if no other
4278 event came out. */
4280 static int
4281 linux_nat_stop_lwp (struct lwp_info *lwp)
4283 if (!lwp->stopped)
4285 linux_nat_debug_printf ("running -> suspending %s",
4286 lwp->ptid.to_string ().c_str ());
4289 if (lwp->last_resume_kind == resume_stop)
4291 linux_nat_debug_printf ("already stopping LWP %ld at GDB's request",
4292 lwp->ptid.lwp ());
4293 return 0;
4296 stop_callback (lwp);
4297 lwp->last_resume_kind = resume_stop;
4299 else
4301 /* Already known to be stopped; do nothing. */
4303 if (debug_linux_nat)
4305 if (linux_target->find_thread (lwp->ptid)->stop_requested)
4306 linux_nat_debug_printf ("already stopped/stop_requested %s",
4307 lwp->ptid.to_string ().c_str ());
4308 else
4309 linux_nat_debug_printf ("already stopped/no stop_requested yet %s",
4310 lwp->ptid.to_string ().c_str ());
4313 return 0;
4316 void
4317 linux_nat_target::stop (ptid_t ptid)
4319 LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT;
4320 iterate_over_lwps (ptid, linux_nat_stop_lwp);
4323 /* When requests are passed down from the linux-nat layer to the
4324 single-threaded inf-ptrace layer, ptids of the (lwpid,0,0) form are
4325 used. The address space pointer is stored in the inferior object,
4326 but the common code that is passed such a ptid can't tell whether
4327 lwpid is a "main" process id or not (it assumes so). Here we do a
4328 reverse lookup of the "main" process id from the lwp. */
4330 struct address_space *
4331 linux_nat_target::thread_address_space (ptid_t ptid)
4333 struct lwp_info *lwp;
4334 struct inferior *inf;
4335 int pid;
4337 if (ptid.lwp () == 0)
4339 /* An (lwpid,0,0) ptid. Look up the lwp object to get at the
4340 tgid. */
4341 lwp = find_lwp_pid (ptid);
4342 pid = lwp->ptid.pid ();
4344 else
4346 /* A (pid,lwpid,0) ptid. */
4347 pid = ptid.pid ();
4350 inf = find_inferior_pid (this, pid);
4351 gdb_assert (inf != NULL);
4352 return inf->aspace;
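/* E.g. (made-up pids): inf-ptrace may pass down ptid (1235, 0, 0) for
   the thread linux-nat knows as (1234, 1235, 0); the lwp lookup above
   recovers 1234, the "main" process id, so the right inferior, and
   thus the right address space, is found.  */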
4355 /* Return the cached value of the processor core for thread PTID. */
4357 int
4358 linux_nat_target::core_of_thread (ptid_t ptid)
4360 struct lwp_info *info = find_lwp_pid (ptid);
4362 if (info)
4363 return info->core;
4364 return -1;
4367 /* Implementation of to_filesystem_is_local. */
4369 bool
4370 linux_nat_target::filesystem_is_local ()
4372 struct inferior *inf = current_inferior ();
4374 if (inf->fake_pid_p || inf->pid == 0)
4375 return true;
4377 return linux_ns_same (inf->pid, LINUX_NS_MNT);
4380 /* Convert the INF argument passed to a to_fileio_* method
4381 to a process ID suitable for passing to its corresponding
4382 linux_mntns_* function. If INF is non-NULL then the
4383 caller is requesting the filesystem seen by INF. If INF
4384 is NULL then the caller is requesting the filesystem seen
4385 by GDB itself. We fall back to GDB's filesystem in the case
4386 that INF is non-NULL but its PID is unknown. */
4388 static pid_t
4389 linux_nat_fileio_pid_of (struct inferior *inf)
4391 if (inf == NULL || inf->fake_pid_p || inf->pid == 0)
4392 return getpid ();
4393 else
4394 return inf->pid;
4397 /* Implementation of to_fileio_open. */
4399 int
4400 linux_nat_target::fileio_open (struct inferior *inf, const char *filename,
4401 int flags, int mode, int warn_if_slow,
4402 fileio_error *target_errno)
4404 int nat_flags;
4405 mode_t nat_mode;
4406 int fd;
4408 if (fileio_to_host_openflags (flags, &nat_flags) == -1
4409 || fileio_to_host_mode (mode, &nat_mode) == -1)
4411 *target_errno = FILEIO_EINVAL;
4412 return -1;
4415 fd = linux_mntns_open_cloexec (linux_nat_fileio_pid_of (inf),
4416 filename, nat_flags, nat_mode);
4417 if (fd == -1)
4418 *target_errno = host_to_fileio_error (errno);
4420 return fd;
4423 /* Implementation of to_fileio_readlink. */
4425 gdb::optional<std::string>
4426 linux_nat_target::fileio_readlink (struct inferior *inf, const char *filename,
4427 fileio_error *target_errno)
4429 char buf[PATH_MAX];
4430 int len;
4432 len = linux_mntns_readlink (linux_nat_fileio_pid_of (inf),
4433 filename, buf, sizeof (buf));
4434 if (len < 0)
4436 *target_errno = host_to_fileio_error (errno);
4437 return {};
4440 return std::string (buf, len);
4443 /* Implementation of to_fileio_unlink. */
4445 int
4446 linux_nat_target::fileio_unlink (struct inferior *inf, const char *filename,
4447 fileio_error *target_errno)
4449 int ret;
4451 ret = linux_mntns_unlink (linux_nat_fileio_pid_of (inf),
4452 filename);
4453 if (ret == -1)
4454 *target_errno = host_to_fileio_error (errno);
4456 return ret;
4459 /* Implementation of the to_thread_events method. */
4461 void
4462 linux_nat_target::thread_events (int enable)
4464 report_thread_events = enable;
4467 linux_nat_target::linux_nat_target ()
4469 /* We don't change the stratum; this target will sit at
4470 process_stratum and thread_db will sit at thread_stratum. This
4471 is a little strange, since this is a multi-threaded-capable
4472 target, but we want to be on the stack below thread_db, and we
4473 also want to be used for single-threaded processes. */
4476 /* See linux-nat.h. */
4478 bool
4479 linux_nat_get_siginfo (ptid_t ptid, siginfo_t *siginfo)
4481 int pid = get_ptrace_pid (ptid);
4482 return ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, siginfo) == 0;
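/* PTRACE_GETSIGINFO retrieves the siginfo_t describing the signal that
   caused PTID's current ptrace stop; if that information cannot be
   obtained (e.g. the LWP is not stopped), the ptrace call fails and we
   return false.  */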
4485 /* See nat/linux-nat.h. */
4487 ptid_t
4488 current_lwp_ptid (void)
4490 gdb_assert (inferior_ptid.lwp_p ());
4491 return inferior_ptid;
4494 void _initialize_linux_nat ();
4495 void
4496 _initialize_linux_nat ()
4498 add_setshow_boolean_cmd ("linux-nat", class_maintenance,
4499 &debug_linux_nat, _("\
4500 Set debugging of GNU/Linux native target."), _("\
4501 Show debugging of GNU/Linux native target."), _("\
4502 When on, print debug messages relating to the GNU/Linux native target."),
4503 nullptr,
4504 show_debug_linux_nat,
4505 &setdebuglist, &showdebuglist);
4507 add_setshow_boolean_cmd ("linux-namespaces", class_maintenance,
4508 &debug_linux_namespaces, _("\
4509 Set debugging of GNU/Linux namespaces module."), _("\
4510 Show debugging of GNU/Linux namespaces module."), _("\
4511 Enables printf debugging output."),
4512 NULL,
4513 NULL,
4514 &setdebuglist, &showdebuglist);
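/* The two registrations above hook into the "set debug"/"show debug"
   prefixes, so the resulting commands are used like this from the GDB
   prompt:
       (gdb) set debug linux-nat on
       (gdb) show debug linux-nat
   which enables the linux_nat_debug_printf output used throughout this
   file (and similarly for "set debug linux-namespaces").  */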
4516 /* Install a SIGCHLD handler. */
4517 sigchld_action.sa_handler = sigchld_handler;
4518 sigemptyset (&sigchld_action.sa_mask);
4519 sigchld_action.sa_flags = SA_RESTART;
4521 /* Make it the default. */
4522 sigaction (SIGCHLD, &sigchld_action, NULL);
4524 /* Make sure we don't block SIGCHLD during a sigsuspend. */
4525 gdb_sigmask (SIG_SETMASK, NULL, &suspend_mask);
4526 sigdelset (&suspend_mask, SIGCHLD);
4528 sigemptyset (&blocked_mask);
4530 lwp_lwpid_htab_create ();
4534 /* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
4535 the GNU/Linux Threads library and therefore doesn't really belong
4536 here. */
4538 /* NPTL reserves the first two RT signals, but does not provide any
4539 way for the debugger to query the signal numbers - fortunately
4540 they don't change. */
4541 static int lin_thread_signals[] = { __SIGRTMIN, __SIGRTMIN + 1 };
4543 /* See linux-nat.h. */
4545 unsigned int
4546 lin_thread_get_thread_signal_num (void)
4548 return sizeof (lin_thread_signals) / sizeof (lin_thread_signals[0]);
4551 /* See linux-nat.h. */
4553 int
4554 lin_thread_get_thread_signal (unsigned int i)
4556 gdb_assert (i < lin_thread_get_thread_signal_num ());
4557 return lin_thread_signals[i];