/* Ada Ravenscar thread support.

   Copyright (C) 2004-2024 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "gdbcore.h"
#include "gdbthread.h"
#include "ada-lang.h"
#include "target.h"
#include "inferior.h"
#include "command.h"
#include "ravenscar-thread.h"
#include "observable.h"
#include "gdbcmd.h"
#include "top.h"
#include "regcache.h"
#include "objfiles.h"
#include <unordered_map>

/* This module provides support for "Ravenscar" tasks (Ada) when
   debugging on bare-metal targets.

   The typical situation is when debugging a bare-metal target over
   the remote protocol.  In that situation, the system does not know
   about high-level concepts such as threads, only about some code
   running on one or more CPUs.  And since the remote protocol does not
   provide any handling for CPUs, the de facto standard for handling
   them is to have one thread per CPU, where the thread's ptid has
   its lwp field set to the CPU number (eg: 1 for the first CPU,
   2 for the second one, etc).  This module will make that assumption.

   This module then creates and maintains the list of threads based
   on the list of Ada tasks, with one thread per Ada task.  The convention
   is that threads corresponding to the CPUs (see assumption above)
   have a ptid_t of the form (PID, LWP, 0), while threads corresponding
   to our Ada tasks have a ptid_t of the form (PID, 0, TID) where TID
   is the Ada task's ID as extracted from Ada runtime information.

   Switching to a given Ada task (or its underlying thread) is performed
   by fetching the registers of that task from the memory area where
   the registers were saved.  For any of the other operations, the
   operation is performed by first finding the CPU on which the task
   is running, switching to its corresponding ptid, and then performing
   the operation on that ptid using the target beneath us.  */
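
/* For example, on a target with two CPUs, the CPU threads would have
   the ptids (PID, 1, 0) and (PID, 2, 0), while an Ada task whose
   runtime task ID happened to be 0x12345 would have the ptid
   (PID, 0, 0x12345).  (The task ID value here is purely
   illustrative.)  */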

/* If true, ravenscar task support is enabled.  */
static bool ravenscar_task_support = true;

static const char running_thread_name[] = "__gnat_running_thread_table";

static const char known_tasks_name[] = "system__tasking__debug__known_tasks";
static const char first_task_name[] = "system__tasking__debug__first_task";

static const char ravenscar_runtime_initializer[]
  = "system__bb__threads__initialize";

static const target_info ravenscar_target_info = {
  "ravenscar",
  N_("Ravenscar tasks."),
  N_("Ravenscar tasks support.")
};

struct ravenscar_thread_target final : public target_ops
{
  ravenscar_thread_target ()
    : m_base_ptid (inferior_ptid)
  {
  }

  const target_info &info () const override
  { return ravenscar_target_info; }

  strata stratum () const override { return thread_stratum; }

  ptid_t wait (ptid_t, struct target_waitstatus *, target_wait_flags) override;
  void resume (ptid_t, int, enum gdb_signal) override;

  void fetch_registers (struct regcache *, int) override;
  void store_registers (struct regcache *, int) override;

  void prepare_to_store (struct regcache *) override;

  bool stopped_by_sw_breakpoint () override;

  bool stopped_by_hw_breakpoint () override;

  bool stopped_by_watchpoint () override;

  bool stopped_data_address (CORE_ADDR *) override;

  enum target_xfer_status xfer_partial (enum target_object object,
					const char *annex,
					gdb_byte *readbuf,
					const gdb_byte *writebuf,
					ULONGEST offset, ULONGEST len,
					ULONGEST *xfered_len) override;

  bool thread_alive (ptid_t ptid) override;

  int core_of_thread (ptid_t ptid) override;

  void update_thread_list () override;

  std::string pid_to_str (ptid_t) override;

  ptid_t get_ada_task_ptid (long lwp, ULONGEST thread) override;

  struct btrace_target_info *enable_btrace (thread_info *tp,
					    const struct btrace_config *conf)
    override
  {
    process_stratum_target *proc_target
      = as_process_stratum_target (this->beneath ());
    ptid_t underlying = get_base_thread_from_ravenscar_task (tp->ptid);
    tp = proc_target->find_thread (underlying);

    return beneath ()->enable_btrace (tp, conf);
  }

  void mourn_inferior () override;

  void close () override
  {
    delete this;
  }

  thread_info *add_active_thread ();

private:

  /* PTID of the last thread that received an event.
     This can be useful to determine the associated task that received
     the event, to make it the current task.  */
  ptid_t m_base_ptid;

  ptid_t active_task (int cpu);
  bool task_is_currently_active (ptid_t ptid);
  bool runtime_initialized ();
  int get_thread_base_cpu (ptid_t ptid);
  ptid_t get_base_thread_from_ravenscar_task (ptid_t ptid);
  void add_thread (struct ada_task_info *task);

  /* Like switch_to_thread, but uses the base ptid for the thread.  */
  void set_base_thread_from_ravenscar_task (ptid_t ptid)
  {
    process_stratum_target *proc_target
      = as_process_stratum_target (this->beneath ());
    ptid_t underlying = get_base_thread_from_ravenscar_task (ptid);
    switch_to_thread (proc_target->find_thread (underlying));
  }

  /* Some targets use lazy FPU initialization.  On these, the FP
     registers for a given task might be uninitialized, or stored in
     the per-task context, or simply be the live registers on the CPU.
     This enum is used to encode this information.  */
  enum fpu_state
  {
    /* This target doesn't do anything special for FP registers -- if
       any exist, they are treated identically to non-FP
       registers.  */
    NOTHING_SPECIAL,
    /* This target uses the lazy FP scheme, and the FP registers are
       taken from the CPU.  This can happen for any task, because if a
       task switch occurs, the registers aren't immediately written to
       the per-task context -- this is deferred until the current task
       causes an FPU trap.  */
    LIVE_FP_REGISTERS,
    /* This target uses the lazy FP scheme, and the FP registers are
       not available.  Maybe this task never initialized the FPU, or
       maybe GDB couldn't find the required symbol.  */
    NO_FP_REGISTERS
  };

  /* Return the FPU state.  */
  fpu_state get_fpu_state (struct regcache *regcache,
			   const ravenscar_arch_ops *arch_ops);

  /* This maps a TID to the CPU on which it was running.  This is
     needed because sometimes the runtime will report an active task
     that hasn't yet been put on the list of tasks that is read by
     ada-tasks.c.  */
  std::unordered_map<ULONGEST, int> m_cpu_map;
};

/* Return true iff PTID corresponds to a ravenscar task.  */

static bool
is_ravenscar_task (ptid_t ptid)
{
  /* By construction, ravenscar tasks have their LWP set to zero.
     Also make sure that the TID is nonzero, as some remotes, when
     asked for the list of threads, will return the first thread
     as having its TID set to zero.  For instance, TSIM version
     2.0.48 for LEON3 sends 'm0' as a reply to the 'qfThreadInfo'
     query, which the remote protocol layer then treats as a thread
     whose TID is 0.  This is obviously not a ravenscar task.  */
  return ptid.lwp () == 0 && ptid.tid () != 0;
}

/* Given PTID, which can be either a ravenscar task or a CPU thread,
   return which CPU that ptid is running on.

   This assumes that PTID is a valid ptid_t.  Otherwise, a gdb_assert
   will be triggered.  */

int
ravenscar_thread_target::get_thread_base_cpu (ptid_t ptid)
{
  int base_cpu;

  if (is_ravenscar_task (ptid))
    {
      /* Prefer to not read inferior memory if possible, to avoid
	 reentrancy problems with xfer_partial.  */
      auto iter = m_cpu_map.find (ptid.tid ());

      if (iter != m_cpu_map.end ())
	base_cpu = iter->second;
      else
	{
	  struct ada_task_info *task_info = ada_get_task_info_from_ptid (ptid);

	  gdb_assert (task_info != NULL);
	  base_cpu = task_info->base_cpu;
	}
    }
  else
    {
      /* We assume that the LWP of the PTID is equal to the CPU number.  */
      base_cpu = ptid.lwp ();
    }

  return base_cpu;
}

/* Given a ravenscar task (identified by its ptid_t PTID), return true
   if this task is the currently active task on the cpu that task is
   running on.

   In other words, this function determines which CPU this task is
   currently running on, and then returns true if the CPU in question
   is executing the code for that task.  If that's the case, then
   that task's registers are in the CPU bank.  Otherwise, the task
   is currently suspended, and its registers have been saved in memory.  */

bool
ravenscar_thread_target::task_is_currently_active (ptid_t ptid)
{
  ptid_t active_task_ptid = active_task (get_thread_base_cpu (ptid));

  return ptid == active_task_ptid;
}

/* Return the CPU thread (as a ptid_t) on which the given ravenscar
   task is running.

   This is the thread that corresponds to the CPU on which the task
   is running.  */

ptid_t
ravenscar_thread_target::get_base_thread_from_ravenscar_task (ptid_t ptid)
{
  int base_cpu;

  if (!is_ravenscar_task (ptid))
    return ptid;

  base_cpu = get_thread_base_cpu (ptid);
  return ptid_t (ptid.pid (), base_cpu);
}

/* Fetch the ravenscar running thread from target memory, make sure
   there's a corresponding thread in the thread list, and return it.
   If the runtime is not initialized, return NULL.  */

thread_info *
ravenscar_thread_target::add_active_thread ()
{
  process_stratum_target *proc_target
    = as_process_stratum_target (this->beneath ());

  int base_cpu;

  gdb_assert (!is_ravenscar_task (m_base_ptid));
  base_cpu = get_thread_base_cpu (m_base_ptid);

  if (!runtime_initialized ())
    return nullptr;

  /* It's possible for runtime_initialized to return true but for it
     not to be fully initialized.  For example, this can happen for a
     breakpoint placed at the task's beginning.  */
  ptid_t active_ptid = active_task (base_cpu);
  if (active_ptid == null_ptid)
    return nullptr;

  /* The running thread may not have been added to
     system.tasking.debug's list yet; so ravenscar_update_thread_list
     may not always add it to the thread list.  Add it here.  */
  thread_info *active_thr = proc_target->find_thread (active_ptid);
  if (active_thr == nullptr)
    {
      active_thr = ::add_thread (proc_target, active_ptid);
      m_cpu_map[active_ptid.tid ()] = base_cpu;
    }
  return active_thr;
}

/* The Ravenscar Runtime exports a symbol which contains the ID of
   the thread that is currently running.  Try to locate that symbol
   and return its associated minimal symbol.
   Return NULL if not found.  */

static struct bound_minimal_symbol
get_running_thread_msymbol ()
{
  struct bound_minimal_symbol msym;

  msym = lookup_minimal_symbol (running_thread_name, NULL, NULL);
  if (!msym.minsym)
    /* Older versions of the GNAT runtime were using a different
       (less ideal) name for the symbol where the active thread ID
       is stored.  If we couldn't find the symbol using the latest
       name, then try the old one.  */
    msym = lookup_minimal_symbol ("running_thread", NULL, NULL);

  return msym;
}

/* Return True if the Ada Ravenscar run-time can be found in the
   application.  */

static bool
has_ravenscar_runtime ()
{
  struct bound_minimal_symbol msym_ravenscar_runtime_initializer
    = lookup_minimal_symbol (ravenscar_runtime_initializer, NULL, NULL);
  struct bound_minimal_symbol msym_known_tasks
    = lookup_minimal_symbol (known_tasks_name, NULL, NULL);
  struct bound_minimal_symbol msym_first_task
    = lookup_minimal_symbol (first_task_name, NULL, NULL);
  struct bound_minimal_symbol msym_running_thread
    = get_running_thread_msymbol ();

  return (msym_ravenscar_runtime_initializer.minsym
	  && (msym_known_tasks.minsym || msym_first_task.minsym)
	  && msym_running_thread.minsym);
}

/* Return True if the Ada Ravenscar run-time can be found in the
   application, and if it has been initialized on target.  */

bool
ravenscar_thread_target::runtime_initialized ()
{
  return active_task (1) != null_ptid;
}

/* Return the ID of the thread that is currently running.
   Return 0 if the ID could not be determined.  */

static CORE_ADDR
get_running_thread_id (int cpu)
{
  struct bound_minimal_symbol object_msym = get_running_thread_msymbol ();
  int object_size;
  int buf_size;
  gdb_byte *buf;
  CORE_ADDR object_addr;
  struct type *builtin_type_void_data_ptr
    = builtin_type (current_inferior ()->arch ())->builtin_data_ptr;

  if (!object_msym.minsym)
    return 0;

  object_size = builtin_type_void_data_ptr->length ();
  object_addr = (object_msym.value_address ()
		 + (cpu - 1) * object_size);
  buf_size = object_size;
  buf = (gdb_byte *) alloca (buf_size);
  read_memory (object_addr, buf, buf_size);
  return extract_typed_address (buf, builtin_type_void_data_ptr);
}
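
/* Implement the target resume method.  */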

void
ravenscar_thread_target::resume (ptid_t ptid, int step,
				 enum gdb_signal siggnal)
{
  /* If we see a wildcard resume, we simply pass that on.  Otherwise,
     arrange to resume the base ptid.  */
  inferior_ptid = m_base_ptid;
  if (ptid.is_pid ())
    {
      /* We only have one process, so resume all threads of it.  */
      ptid = minus_one_ptid;
    }
  else if (ptid != minus_one_ptid)
    ptid = m_base_ptid;
  beneath ()->resume (ptid, step, siggnal);
}
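
/* Implement the target wait method.  Wait on the target beneath;
   then, if the runtime is initialized, update the thread list and
   report the active task rather than the underlying CPU thread.  */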

ptid_t
ravenscar_thread_target::wait (ptid_t ptid,
			       struct target_waitstatus *status,
			       target_wait_flags options)
{
  process_stratum_target *beneath
    = as_process_stratum_target (this->beneath ());
  ptid_t event_ptid;

  if (ptid != minus_one_ptid)
    ptid = m_base_ptid;
  event_ptid = beneath->wait (ptid, status, 0);
  /* Find any new threads that might have been created, and return the
     active thread.

     Only do it if the program is still alive, though.  Otherwise,
     this causes problems when debugging through the remote protocol,
     because we might try switching threads (and thus sending packets)
     after the remote has disconnected.  */
  if (status->kind () != TARGET_WAITKIND_EXITED
      && status->kind () != TARGET_WAITKIND_SIGNALLED
      && runtime_initialized ())
    {
      m_base_ptid = event_ptid;
      this->update_thread_list ();
      thread_info *thr = this->add_active_thread ();
      if (thr != nullptr)
	return thr->ptid;
    }
  return event_ptid;
}

/* Add the thread associated to the given TASK to the thread list
   (if the thread has already been added, this is a no-op).  */

void
ravenscar_thread_target::add_thread (struct ada_task_info *task)
{
  if (current_inferior ()->find_thread (task->ptid) == NULL)
    {
      ::add_thread (current_inferior ()->process_target (), task->ptid);
      m_cpu_map[task->ptid.tid ()] = task->base_cpu;
    }
}
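
/* Implement the target update_thread_list method.  */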

void
ravenscar_thread_target::update_thread_list ()
{
  /* iterate_over_live_ada_tasks requires that inferior_ptid be set,
     but this isn't always the case in target methods.  So, we ensure
     it here.  */
  scoped_restore save_ptid = make_scoped_restore (&inferior_ptid,
						  m_base_ptid);

  /* Do not clear the thread list before adding the Ada tasks, in
     order to keep both the thread that the process stratum has put
     into it (m_base_ptid) and the running thread, which may not have
     been added to system.tasking.debug's list yet.  */

  iterate_over_live_ada_tasks ([this] (struct ada_task_info *task)
			       {
				 this->add_thread (task);
			       });
}
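
/* Return the ptid of the task currently running on CPU, or null_ptid
   if no task is currently running there.  */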

ptid_t
ravenscar_thread_target::active_task (int cpu)
{
  CORE_ADDR tid = get_running_thread_id (cpu);

  if (tid == 0)
    return null_ptid;
  else
    return ptid_t (m_base_ptid.pid (), 0, tid);
}

bool
ravenscar_thread_target::thread_alive (ptid_t ptid)
{
  /* Ravenscar tasks are non-terminating.  */
  return true;
}

std::string
ravenscar_thread_target::pid_to_str (ptid_t ptid)
{
  if (!is_ravenscar_task (ptid))
    return beneath ()->pid_to_str (ptid);

  return string_printf ("Ravenscar Thread 0x%s",
			phex_nz (ptid.tid (), sizeof (ULONGEST)));
}
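
/* Return the stack base for the task whose registers are in
   REGCACHE; this is simply the value of the task's saved stack
   pointer.  */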

CORE_ADDR
ravenscar_arch_ops::get_stack_base (struct regcache *regcache) const
{
  struct gdbarch *gdbarch = regcache->arch ();
  const int sp_regnum = gdbarch_sp_regnum (gdbarch);
  ULONGEST stack_address;
  regcache_cooked_read_unsigned (regcache, sp_regnum, &stack_address);
  return (CORE_ADDR) stack_address;
}
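
/* Helper for fetch_register: read register REGNUM from the task's
   saved context and supply it to REGCACHE.  Registers saved on the
   stack are read relative to STACK_BASE; all other registers are
   read relative to the thread DESCRIPTOR.  */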

void
ravenscar_arch_ops::supply_one_register (struct regcache *regcache,
					 int regnum,
					 CORE_ADDR descriptor,
					 CORE_ADDR stack_base) const
{
  CORE_ADDR addr;
  if (regnum >= first_stack_register && regnum <= last_stack_register)
    addr = stack_base;
  else
    addr = descriptor;
  addr += offsets[regnum];

  struct gdbarch *gdbarch = regcache->arch ();
  int size = register_size (gdbarch, regnum);
  gdb_byte *buf = (gdb_byte *) alloca (size);
  read_memory (addr, buf, size);
  regcache->raw_supply (regnum, buf);
}
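
/* Fetch register REGNUM from the saved context of the task
   designated by REGCACHE's ptid, supplying its value to REGCACHE.  */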

void
ravenscar_arch_ops::fetch_register (struct regcache *regcache,
				    int regnum) const
{
  gdb_assert (regnum != -1);

  struct gdbarch *gdbarch = regcache->arch ();
  /* The tid is the thread_id field, which is a pointer to the thread.  */
  CORE_ADDR thread_descriptor_address
    = (CORE_ADDR) regcache->ptid ().tid ();

  int sp_regno = -1;
  CORE_ADDR stack_address = 0;
  if (regnum >= first_stack_register && regnum <= last_stack_register)
    {
      /* We must supply SP for get_stack_base, so recurse.  */
      sp_regno = gdbarch_sp_regnum (gdbarch);
      gdb_assert (!(sp_regno >= first_stack_register
		    && sp_regno <= last_stack_register));
      fetch_register (regcache, sp_regno);
      stack_address = get_stack_base (regcache);
    }

  if (regnum < offsets.size () && offsets[regnum] != -1)
    supply_one_register (regcache, regnum, thread_descriptor_address,
			 stack_address);
}
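
/* Helper for store_register: collect register REGNUM from REGCACHE
   and write it back to the task's saved context.  As in
   supply_one_register, the register is addressed relative to either
   STACK_BASE or the thread DESCRIPTOR.  */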

void
ravenscar_arch_ops::store_one_register (struct regcache *regcache, int regnum,
					CORE_ADDR descriptor,
					CORE_ADDR stack_base) const
{
  CORE_ADDR addr;
  if (regnum >= first_stack_register && regnum <= last_stack_register)
    addr = stack_base;
  else
    addr = descriptor;
  addr += offsets[regnum];

  struct gdbarch *gdbarch = regcache->arch ();
  int size = register_size (gdbarch, regnum);
  gdb_byte *buf = (gdb_byte *) alloca (size);
  regcache->raw_collect (regnum, buf);
  write_memory (addr, buf, size);
}
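
/* Store register REGNUM from REGCACHE into the saved context of the
   task designated by REGCACHE's ptid.  */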

void
ravenscar_arch_ops::store_register (struct regcache *regcache,
				    int regnum) const
{
  gdb_assert (regnum != -1);

  /* The tid is the thread_id field, which is a pointer to the thread.  */
  CORE_ADDR thread_descriptor_address
    = (CORE_ADDR) regcache->ptid ().tid ();

  CORE_ADDR stack_address = 0;
  if (regnum >= first_stack_register && regnum <= last_stack_register)
    stack_address = get_stack_base (regcache);

  if (regnum < offsets.size () && offsets[regnum] != -1)
    store_one_register (regcache, regnum, thread_descriptor_address,
			stack_address);
}

/* Temporarily set the ptid of a regcache to some other value.  When
   this object is destroyed, the regcache's original ptid is
   restored.  */

class temporarily_change_regcache_ptid
{
public:

  temporarily_change_regcache_ptid (struct regcache *regcache, ptid_t new_ptid)
    : m_regcache (regcache),
      m_save_ptid (regcache->ptid ())
  {
    m_regcache->set_ptid (new_ptid);
  }

  ~temporarily_change_regcache_ptid ()
  {
    m_regcache->set_ptid (m_save_ptid);
  }

private:

  /* The regcache.  */
  struct regcache *m_regcache;
  /* The saved ptid.  */
  ptid_t m_save_ptid;
};

ravenscar_thread_target::fpu_state
ravenscar_thread_target::get_fpu_state (struct regcache *regcache,
					const ravenscar_arch_ops *arch_ops)
{
  /* Special treatment is only needed for targets using the lazy FP
     scheme.  If this target doesn't have lazy FP, then no special
     treatment is ever needed.  */
  if (!arch_ops->on_demand_fp ())
    return NOTHING_SPECIAL;

  bound_minimal_symbol fpu_context
    = lookup_minimal_symbol ("system__bb__cpu_primitives__current_fpu_context",
			     nullptr, nullptr);
  /* If the symbol can't be found, just fall back.  */
  if (fpu_context.minsym == nullptr)
    return NO_FP_REGISTERS;

  type *ptr_type
    = builtin_type (current_inferior ()->arch ())->builtin_data_ptr;
  ptr_type = lookup_pointer_type (ptr_type);
  value *val = value_from_pointer (ptr_type, fpu_context.value_address ());

  int cpu = get_thread_base_cpu (regcache->ptid ());
  /* The array index type has a lower bound of 1 -- it is Ada code --
     so subtract 1 here.  */
  val = value_ptradd (val, cpu - 1);

  val = value_ind (val);
  CORE_ADDR fpu_task = value_as_long (val);

  /* The tid is the thread_id field, which is a pointer to the thread.  */
  CORE_ADDR thread_descriptor_address
    = (CORE_ADDR) regcache->ptid ().tid ();
  if (fpu_task == (thread_descriptor_address
		   + arch_ops->get_fpu_context_offset ()))
    return LIVE_FP_REGISTERS;

  int v_init_offset = arch_ops->get_v_init_offset ();
  gdb_byte init = 0;
  read_memory (thread_descriptor_address + v_init_offset, &init, 1);
  return init ? NOTHING_SPECIAL : NO_FP_REGISTERS;
}
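
/* Implement the target fetch_registers method.  Registers of the
   currently active task (and, under the lazy FP scheme, live FP
   registers) are read via the target beneath; registers of a
   suspended task are read from its saved context in memory.  */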

void
ravenscar_thread_target::fetch_registers (struct regcache *regcache,
					  int regnum)
{
  ptid_t ptid = regcache->ptid ();

  if (runtime_initialized () && is_ravenscar_task (ptid))
    {
      struct gdbarch *gdbarch = regcache->arch ();
      bool is_active = task_is_currently_active (ptid);
      struct ravenscar_arch_ops *arch_ops = gdbarch_ravenscar_ops (gdbarch);
      std::optional<fpu_state> fp_state;

      int low_reg = regnum == -1 ? 0 : regnum;
      int high_reg = regnum == -1 ? gdbarch_num_regs (gdbarch) : regnum + 1;

      ptid_t base = get_base_thread_from_ravenscar_task (ptid);
      for (int i = low_reg; i < high_reg; ++i)
	{
	  bool use_beneath = false;
	  if (arch_ops->is_fp_register (i))
	    {
	      if (!fp_state.has_value ())
		fp_state = get_fpu_state (regcache, arch_ops);
	      if (*fp_state == NO_FP_REGISTERS)
		continue;
	      if (*fp_state == LIVE_FP_REGISTERS
		  || (is_active && *fp_state == NOTHING_SPECIAL))
		use_beneath = true;
	    }
	  else
	    use_beneath = is_active;

	  if (use_beneath)
	    {
	      temporarily_change_regcache_ptid changer (regcache, base);
	      beneath ()->fetch_registers (regcache, i);
	    }
	  else
	    arch_ops->fetch_register (regcache, i);
	}
    }
  else
    beneath ()->fetch_registers (regcache, regnum);
}
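
/* Implement the target store_registers method.  This mirrors
   fetch_registers: registers are written either via the target
   beneath or into the task's saved context in memory.  */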

void
ravenscar_thread_target::store_registers (struct regcache *regcache,
					  int regnum)
{
  ptid_t ptid = regcache->ptid ();

  if (runtime_initialized () && is_ravenscar_task (ptid))
    {
      struct gdbarch *gdbarch = regcache->arch ();
      bool is_active = task_is_currently_active (ptid);
      struct ravenscar_arch_ops *arch_ops = gdbarch_ravenscar_ops (gdbarch);
      std::optional<fpu_state> fp_state;

      int low_reg = regnum == -1 ? 0 : regnum;
      int high_reg = regnum == -1 ? gdbarch_num_regs (gdbarch) : regnum + 1;

      ptid_t base = get_base_thread_from_ravenscar_task (ptid);
      for (int i = low_reg; i < high_reg; ++i)
	{
	  bool use_beneath = false;
	  if (arch_ops->is_fp_register (i))
	    {
	      if (!fp_state.has_value ())
		fp_state = get_fpu_state (regcache, arch_ops);
	      if (*fp_state == NO_FP_REGISTERS)
		continue;
	      if (*fp_state == LIVE_FP_REGISTERS
		  || (is_active && *fp_state == NOTHING_SPECIAL))
		use_beneath = true;
	    }
	  else
	    use_beneath = is_active;

	  if (use_beneath)
	    {
	      temporarily_change_regcache_ptid changer (regcache, base);
	      beneath ()->store_registers (regcache, i);
	    }
	  else
	    arch_ops->store_register (regcache, i);
	}
    }
  else
    beneath ()->store_registers (regcache, regnum);
}
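
/* Implement the target prepare_to_store method.  */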

void
ravenscar_thread_target::prepare_to_store (struct regcache *regcache)
{
  ptid_t ptid = regcache->ptid ();

  if (runtime_initialized () && is_ravenscar_task (ptid))
    {
      if (task_is_currently_active (ptid))
	{
	  ptid_t base = get_base_thread_from_ravenscar_task (ptid);
	  temporarily_change_regcache_ptid changer (regcache, base);
	  beneath ()->prepare_to_store (regcache);
	}
      else
	{
	  /* Nothing.  */
	}
    }
  else
    beneath ()->prepare_to_store (regcache);
}

/* Implement the to_stopped_by_sw_breakpoint target_ops "method".  */

bool
ravenscar_thread_target::stopped_by_sw_breakpoint ()
{
  scoped_restore_current_thread saver;
  set_base_thread_from_ravenscar_task (inferior_ptid);
  return beneath ()->stopped_by_sw_breakpoint ();
}

/* Implement the to_stopped_by_hw_breakpoint target_ops "method".  */

bool
ravenscar_thread_target::stopped_by_hw_breakpoint ()
{
  scoped_restore_current_thread saver;
  set_base_thread_from_ravenscar_task (inferior_ptid);
  return beneath ()->stopped_by_hw_breakpoint ();
}

/* Implement the to_stopped_by_watchpoint target_ops "method".  */

bool
ravenscar_thread_target::stopped_by_watchpoint ()
{
  scoped_restore_current_thread saver;
  set_base_thread_from_ravenscar_task (inferior_ptid);
  return beneath ()->stopped_by_watchpoint ();
}

/* Implement the to_stopped_data_address target_ops "method".  */

bool
ravenscar_thread_target::stopped_data_address (CORE_ADDR *addr_p)
{
  scoped_restore_current_thread saver;
  set_base_thread_from_ravenscar_task (inferior_ptid);
  return beneath ()->stopped_data_address (addr_p);
}
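
/* Implement the target mourn_inferior method.  */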

void
ravenscar_thread_target::mourn_inferior ()
{
  m_base_ptid = null_ptid;
  target_ops *beneath = this->beneath ();
  current_inferior ()->unpush_target (this);
  beneath->mourn_inferior ();
}

/* Implement the to_core_of_thread target_ops "method".  */

int
ravenscar_thread_target::core_of_thread (ptid_t ptid)
{
  scoped_restore_current_thread saver;
  set_base_thread_from_ravenscar_task (inferior_ptid);
  return beneath ()->core_of_thread (inferior_ptid);
}

/* Implement the target xfer_partial method.  */

enum target_xfer_status
ravenscar_thread_target::xfer_partial (enum target_object object,
				       const char *annex,
				       gdb_byte *readbuf,
				       const gdb_byte *writebuf,
				       ULONGEST offset, ULONGEST len,
				       ULONGEST *xfered_len)
{
  scoped_restore save_ptid = make_scoped_restore (&inferior_ptid);
  /* Calling get_base_thread_from_ravenscar_task can read memory from
     the inferior.  However, that function is written to prefer our
     internal map, so it should not result in recursive calls in
     practice.  */
  inferior_ptid = get_base_thread_from_ravenscar_task (inferior_ptid);
  return beneath ()->xfer_partial (object, annex, readbuf, writebuf,
				   offset, len, xfered_len);
}

/* Observer on inferior_created: push ravenscar thread stratum if needed.  */

static void
ravenscar_inferior_created (inferior *inf)
{
  const char *err_msg;

  if (!ravenscar_task_support
      || gdbarch_ravenscar_ops (current_inferior ()->arch ()) == NULL
      || !has_ravenscar_runtime ())
    return;

  err_msg = ada_get_tcb_types_info ();
  if (err_msg != NULL)
    {
      warning (_("%s.  Task/thread support disabled."), err_msg);
      return;
    }

  ravenscar_thread_target *rtarget = new ravenscar_thread_target ();
  inf->push_target (target_ops_up (rtarget));
  thread_info *thr = rtarget->add_active_thread ();
  if (thr != nullptr)
    switch_to_thread (thr);
}
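
/* Implement the target get_ada_task_ptid method.  */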

ptid_t
ravenscar_thread_target::get_ada_task_ptid (long lwp, ULONGEST thread)
{
  return ptid_t (m_base_ptid.pid (), 0, thread);
}

/* Command-list for the "set/show ravenscar" prefix command.  */
static struct cmd_list_element *set_ravenscar_list;
static struct cmd_list_element *show_ravenscar_list;

/* Implement the "show ravenscar task-switching" command.  */

static void
show_ravenscar_task_switching_command (struct ui_file *file, int from_tty,
				       struct cmd_list_element *c,
				       const char *value)
{
  if (ravenscar_task_support)
    gdb_printf (file, _("\
Support for Ravenscar task/thread switching is enabled\n"));
  else
    gdb_printf (file, _("\
Support for Ravenscar task/thread switching is disabled\n"));
}

/* Module startup initialization function, automagically called by
   init.c.  */

void _initialize_ravenscar ();
void
_initialize_ravenscar ()
{
  /* Notice when the inferior is created in order to push the
     ravenscar ops if needed.  */
  gdb::observers::inferior_created.attach (ravenscar_inferior_created,
					   "ravenscar-thread");

  add_setshow_prefix_cmd
    ("ravenscar", no_class,
     _("Prefix command for changing Ravenscar-specific settings."),
     _("Prefix command for showing Ravenscar-specific settings."),
     &set_ravenscar_list, &show_ravenscar_list,
     &setlist, &showlist);

  add_setshow_boolean_cmd ("task-switching", class_obscure,
			   &ravenscar_task_support, _("\
Enable or disable support for GNAT Ravenscar tasks."), _("\
Show whether support for GNAT Ravenscar tasks is enabled."),
			   _("\
Enable or disable support for task/thread switching with the GNAT\n\
Ravenscar run-time library for bareboard configuration."),
			   NULL, show_ravenscar_task_switching_command,
			   &set_ravenscar_list, &show_ravenscar_list);
}