Automatic date update in version.in
[binutils-gdb.git] / gdb / ravenscar-thread.c
blob1718c367ff63c568db8c65c7f2f2d696c7a34b08
1 /* Ada Ravenscar thread support.
3 Copyright (C) 2004-2022 Free Software Foundation, Inc.
5 This file is part of GDB.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20 #include "defs.h"
21 #include "gdbcore.h"
22 #include "gdbthread.h"
23 #include "ada-lang.h"
24 #include "target.h"
25 #include "inferior.h"
26 #include "command.h"
27 #include "ravenscar-thread.h"
28 #include "observable.h"
29 #include "gdbcmd.h"
30 #include "top.h"
31 #include "regcache.h"
32 #include "objfiles.h"
33 #include <unordered_map>
35 /* This module provides support for "Ravenscar" tasks (Ada) when
36 debugging on bare-metal targets.
38 The typical situation is when debugging a bare-metal target over
39 the remote protocol. In that situation, the system does not know
40 about high-level concepts such as threads, only about some code
41 running on one or more CPUs. And since the remote protocol does not
42 provide any handling for CPUs, the de facto standard for handling
43 them is to have one thread per CPU, where the thread's ptid has
44 its lwp field set to the CPU number (eg: 1 for the first CPU,
45 2 for the second one, etc). This module will make that assumption.
47 This module then creates and maintains the list of threads based
48 on the list of Ada tasks, with one thread per Ada task. The convention
49 is that threads corresponding to the CPUs (see assumption above)
50 have a ptid_t of the form (PID, LWP, 0), while threads corresponding
51 to our Ada tasks have a ptid_t of the form (PID, 0, TID) where TID
52 is the Ada task's ID as extracted from Ada runtime information.
54 Switching to a given Ada task (or its underlying thread) is performed
55 by fetching the registers of that task from the memory area where
56 the registers were saved. For any of the other operations, the
57 operation is performed by first finding the CPU on which the task
58 is running, switching to its corresponding ptid, and then performing
59 the operation on that ptid using the target beneath us. */
/* When true, the Ravenscar task layer is active and Ada tasks are
   presented to the user as threads.  Controlled by
   "set ravenscar task-switching".  */
static bool ravenscar_task_support = true;

/* Symbol holding, per CPU, the ID of the thread currently running.  */
static const char running_thread_name[] = "__gnat_running_thread_table";

/* Symbols whose presence indicates the GNAT tasking runtime.  */
static const char known_tasks_name[] = "system__tasking__debug__known_tasks";
static const char first_task_name[] = "system__tasking__debug__first_task";

/* Symbol marking the Ravenscar runtime initializer.  */
static const char ravenscar_runtime_initializer[]
  = "system__bb__threads__initialize";
72 static const target_info ravenscar_target_info = {
73 "ravenscar",
74 N_("Ravenscar tasks."),
75 N_("Ravenscar tasks support.")
78 struct ravenscar_thread_target final : public target_ops
80 ravenscar_thread_target ()
81 : m_base_ptid (inferior_ptid)
85 const target_info &info () const override
86 { return ravenscar_target_info; }
88 strata stratum () const override { return thread_stratum; }
90 ptid_t wait (ptid_t, struct target_waitstatus *, target_wait_flags) override;
91 void resume (ptid_t, int, enum gdb_signal) override;
93 void fetch_registers (struct regcache *, int) override;
94 void store_registers (struct regcache *, int) override;
96 void prepare_to_store (struct regcache *) override;
98 bool stopped_by_sw_breakpoint () override;
100 bool stopped_by_hw_breakpoint () override;
102 bool stopped_by_watchpoint () override;
104 bool stopped_data_address (CORE_ADDR *) override;
106 enum target_xfer_status xfer_partial (enum target_object object,
107 const char *annex,
108 gdb_byte *readbuf,
109 const gdb_byte *writebuf,
110 ULONGEST offset, ULONGEST len,
111 ULONGEST *xfered_len) override;
113 bool thread_alive (ptid_t ptid) override;
115 int core_of_thread (ptid_t ptid) override;
117 void update_thread_list () override;
119 std::string pid_to_str (ptid_t) override;
121 ptid_t get_ada_task_ptid (long lwp, ULONGEST thread) override;
123 struct btrace_target_info *enable_btrace (thread_info *tp,
124 const struct btrace_config *conf)
125 override
127 process_stratum_target *proc_target
128 = as_process_stratum_target (this->beneath ());
129 ptid_t underlying = get_base_thread_from_ravenscar_task (tp->ptid);
130 tp = find_thread_ptid (proc_target, underlying);
132 return beneath ()->enable_btrace (tp, conf);
135 void mourn_inferior () override;
137 void close () override
139 delete this;
142 thread_info *add_active_thread ();
144 private:
146 /* PTID of the last thread that received an event.
147 This can be useful to determine the associated task that received
148 the event, to make it the current task. */
149 ptid_t m_base_ptid;
151 ptid_t active_task (int cpu);
152 bool task_is_currently_active (ptid_t ptid);
153 bool runtime_initialized ();
154 int get_thread_base_cpu (ptid_t ptid);
155 ptid_t get_base_thread_from_ravenscar_task (ptid_t ptid);
156 void add_thread (struct ada_task_info *task);
158 /* Like switch_to_thread, but uses the base ptid for the thread. */
159 void set_base_thread_from_ravenscar_task (ptid_t ptid)
161 process_stratum_target *proc_target
162 = as_process_stratum_target (this->beneath ());
163 ptid_t underlying = get_base_thread_from_ravenscar_task (ptid);
164 switch_to_thread (find_thread_ptid (proc_target, underlying));
167 /* Some targets use lazy FPU initialization. On these, the FP
168 registers for a given task might be uninitialized, or stored in
169 the per-task context, or simply be the live registers on the CPU.
170 This enum is used to encode this information. */
171 enum fpu_state
173 /* This target doesn't do anything special for FP registers -- if
174 any exist, they are treated just identical to non-FP
175 registers. */
176 NOTHING_SPECIAL,
177 /* This target uses the lazy FP scheme, and the FP registers are
178 taken from the CPU. This can happen for any task, because if a
179 task switch occurs, the registers aren't immediately written to
180 the per-task context -- this is deferred until the current task
181 causes an FPU trap. */
182 LIVE_FP_REGISTERS,
183 /* This target uses the lazy FP scheme, and the FP registers are
184 not available. Maybe this task never initialized the FPU, or
185 maybe GDB couldn't find the required symbol. */
186 NO_FP_REGISTERS
189 /* Return the FPU state. */
190 fpu_state get_fpu_state (struct regcache *regcache,
191 const ravenscar_arch_ops *arch_ops);
193 /* This maps a TID to the CPU on which it was running. This is
194 needed because sometimes the runtime will report an active task
195 that hasn't yet been put on the list of tasks that is read by
196 ada-tasks.c. */
197 std::unordered_map<ULONGEST, int> m_cpu_map;
200 /* Return true iff PTID corresponds to a ravenscar task. */
202 static bool
203 is_ravenscar_task (ptid_t ptid)
205 /* By construction, ravenscar tasks have their LWP set to zero.
206 Also make sure that the TID is nonzero, as some remotes, when
207 asked for the list of threads, will return the first thread
208 as having its TID set to zero. For instance, TSIM version
209 2.0.48 for LEON3 sends 'm0' as a reply to the 'qfThreadInfo'
210 query, which the remote protocol layer then treats as a thread
211 whose TID is 0. This is obviously not a ravenscar task. */
212 return ptid.lwp () == 0 && ptid.tid () != 0;
215 /* Given PTID, which can be either a ravenscar task or a CPU thread,
216 return which CPU that ptid is running on.
218 This assume that PTID is a valid ptid_t. Otherwise, a gdb_assert
219 will be triggered. */
222 ravenscar_thread_target::get_thread_base_cpu (ptid_t ptid)
224 int base_cpu;
226 if (is_ravenscar_task (ptid))
228 /* Prefer to not read inferior memory if possible, to avoid
229 reentrancy problems with xfer_partial. */
230 auto iter = m_cpu_map.find (ptid.tid ());
232 if (iter != m_cpu_map.end ())
233 base_cpu = iter->second;
234 else
236 struct ada_task_info *task_info = ada_get_task_info_from_ptid (ptid);
238 gdb_assert (task_info != NULL);
239 base_cpu = task_info->base_cpu;
242 else
244 /* We assume that the LWP of the PTID is equal to the CPU number. */
245 base_cpu = ptid.lwp ();
248 return base_cpu;
251 /* Given a ravenscar task (identified by its ptid_t PTID), return true
252 if this task is the currently active task on the cpu that task is
253 running on.
255 In other words, this function determine which CPU this task is
256 currently running on, and then return nonzero if the CPU in question
257 is executing the code for that task. If that's the case, then
258 that task's registers are in the CPU bank. Otherwise, the task
259 is currently suspended, and its registers have been saved in memory. */
261 bool
262 ravenscar_thread_target::task_is_currently_active (ptid_t ptid)
264 ptid_t active_task_ptid = active_task (get_thread_base_cpu (ptid));
266 return ptid == active_task_ptid;
269 /* Return the CPU thread (as a ptid_t) on which the given ravenscar
270 task is running.
272 This is the thread that corresponds to the CPU on which the task
273 is running. */
275 ptid_t
276 ravenscar_thread_target::get_base_thread_from_ravenscar_task (ptid_t ptid)
278 int base_cpu;
280 if (!is_ravenscar_task (ptid))
281 return ptid;
283 base_cpu = get_thread_base_cpu (ptid);
284 return ptid_t (ptid.pid (), base_cpu);
287 /* Fetch the ravenscar running thread from target memory, make sure
288 there's a corresponding thread in the thread list, and return it.
289 If the runtime is not initialized, return NULL. */
291 thread_info *
292 ravenscar_thread_target::add_active_thread ()
294 process_stratum_target *proc_target
295 = as_process_stratum_target (this->beneath ());
297 int base_cpu;
299 gdb_assert (!is_ravenscar_task (m_base_ptid));
300 base_cpu = get_thread_base_cpu (m_base_ptid);
302 if (!runtime_initialized ())
303 return nullptr;
305 /* Make sure we set m_base_ptid before calling active_task
306 as the latter relies on it. */
307 ptid_t active_ptid = active_task (base_cpu);
308 gdb_assert (active_ptid != null_ptid);
310 /* The running thread may not have been added to
311 system.tasking.debug's list yet; so ravenscar_update_thread_list
312 may not always add it to the thread list. Add it here. */
313 thread_info *active_thr = find_thread_ptid (proc_target, active_ptid);
314 if (active_thr == nullptr)
316 active_thr = ::add_thread (proc_target, active_ptid);
317 m_cpu_map[active_ptid.tid ()] = base_cpu;
319 return active_thr;
322 /* The Ravenscar Runtime exports a symbol which contains the ID of
323 the thread that is currently running. Try to locate that symbol
324 and return its associated minimal symbol.
325 Return NULL if not found. */
327 static struct bound_minimal_symbol
328 get_running_thread_msymbol ()
330 struct bound_minimal_symbol msym;
332 msym = lookup_minimal_symbol (running_thread_name, NULL, NULL);
333 if (!msym.minsym)
334 /* Older versions of the GNAT runtime were using a different
335 (less ideal) name for the symbol where the active thread ID
336 is stored. If we couldn't find the symbol using the latest
337 name, then try the old one. */
338 msym = lookup_minimal_symbol ("running_thread", NULL, NULL);
340 return msym;
343 /* Return True if the Ada Ravenscar run-time can be found in the
344 application. */
346 static bool
347 has_ravenscar_runtime ()
349 struct bound_minimal_symbol msym_ravenscar_runtime_initializer
350 = lookup_minimal_symbol (ravenscar_runtime_initializer, NULL, NULL);
351 struct bound_minimal_symbol msym_known_tasks
352 = lookup_minimal_symbol (known_tasks_name, NULL, NULL);
353 struct bound_minimal_symbol msym_first_task
354 = lookup_minimal_symbol (first_task_name, NULL, NULL);
355 struct bound_minimal_symbol msym_running_thread
356 = get_running_thread_msymbol ();
358 return (msym_ravenscar_runtime_initializer.minsym
359 && (msym_known_tasks.minsym || msym_first_task.minsym)
360 && msym_running_thread.minsym);
363 /* Return True if the Ada Ravenscar run-time can be found in the
364 application, and if it has been initialized on target. */
366 bool
367 ravenscar_thread_target::runtime_initialized ()
369 return active_task (1) != null_ptid;
372 /* Return the ID of the thread that is currently running.
373 Return 0 if the ID could not be determined. */
375 static CORE_ADDR
376 get_running_thread_id (int cpu)
378 struct bound_minimal_symbol object_msym = get_running_thread_msymbol ();
379 int object_size;
380 int buf_size;
381 gdb_byte *buf;
382 CORE_ADDR object_addr;
383 struct type *builtin_type_void_data_ptr
384 = builtin_type (target_gdbarch ())->builtin_data_ptr;
386 if (!object_msym.minsym)
387 return 0;
389 object_size = TYPE_LENGTH (builtin_type_void_data_ptr);
390 object_addr = (object_msym.value_address ()
391 + (cpu - 1) * object_size);
392 buf_size = object_size;
393 buf = (gdb_byte *) alloca (buf_size);
394 read_memory (object_addr, buf, buf_size);
395 return extract_typed_address (buf, builtin_type_void_data_ptr);
398 void
399 ravenscar_thread_target::resume (ptid_t ptid, int step,
400 enum gdb_signal siggnal)
402 /* If we see a wildcard resume, we simply pass that on. Otherwise,
403 arrange to resume the base ptid. */
404 inferior_ptid = m_base_ptid;
405 if (ptid.is_pid ())
407 /* We only have one process, so resume all threads of it. */
408 ptid = minus_one_ptid;
410 else if (ptid != minus_one_ptid)
411 ptid = m_base_ptid;
412 beneath ()->resume (ptid, step, siggnal);
415 ptid_t
416 ravenscar_thread_target::wait (ptid_t ptid,
417 struct target_waitstatus *status,
418 target_wait_flags options)
420 process_stratum_target *beneath
421 = as_process_stratum_target (this->beneath ());
422 ptid_t event_ptid;
424 if (ptid != minus_one_ptid)
425 ptid = m_base_ptid;
426 event_ptid = beneath->wait (ptid, status, 0);
427 /* Find any new threads that might have been created, and return the
428 active thread.
430 Only do it if the program is still alive, though. Otherwise,
431 this causes problems when debugging through the remote protocol,
432 because we might try switching threads (and thus sending packets)
433 after the remote has disconnected. */
434 if (status->kind () != TARGET_WAITKIND_EXITED
435 && status->kind () != TARGET_WAITKIND_SIGNALLED
436 && runtime_initialized ())
438 m_base_ptid = event_ptid;
439 this->update_thread_list ();
440 return this->add_active_thread ()->ptid;
442 return event_ptid;
445 /* Add the thread associated to the given TASK to the thread list
446 (if the thread has already been added, this is a no-op). */
448 void
449 ravenscar_thread_target::add_thread (struct ada_task_info *task)
451 if (find_thread_ptid (current_inferior (), task->ptid) == NULL)
453 ::add_thread (current_inferior ()->process_target (), task->ptid);
454 m_cpu_map[task->ptid.tid ()] = task->base_cpu;
458 void
459 ravenscar_thread_target::update_thread_list ()
461 /* iterate_over_live_ada_tasks requires that inferior_ptid be set,
462 but this isn't always the case in target methods. So, we ensure
463 it here. */
464 scoped_restore save_ptid = make_scoped_restore (&inferior_ptid,
465 m_base_ptid);
467 /* Do not clear the thread list before adding the Ada task, to keep
468 the thread that the process stratum has included into it
469 (m_base_ptid) and the running thread, that may not have been included
470 to system.tasking.debug's list yet. */
472 iterate_over_live_ada_tasks ([=] (struct ada_task_info *task)
474 this->add_thread (task);
478 ptid_t
479 ravenscar_thread_target::active_task (int cpu)
481 CORE_ADDR tid = get_running_thread_id (cpu);
483 if (tid == 0)
484 return null_ptid;
485 else
486 return ptid_t (m_base_ptid.pid (), 0, tid);
489 bool
490 ravenscar_thread_target::thread_alive (ptid_t ptid)
492 /* Ravenscar tasks are non-terminating. */
493 return true;
496 std::string
497 ravenscar_thread_target::pid_to_str (ptid_t ptid)
499 if (!is_ravenscar_task (ptid))
500 return beneath ()->pid_to_str (ptid);
502 return string_printf ("Ravenscar Thread 0x%s",
503 phex_nz (ptid.tid (), sizeof (ULONGEST)));
506 CORE_ADDR
507 ravenscar_arch_ops::get_stack_base (struct regcache *regcache) const
509 struct gdbarch *gdbarch = regcache->arch ();
510 const int sp_regnum = gdbarch_sp_regnum (gdbarch);
511 ULONGEST stack_address;
512 regcache_cooked_read_unsigned (regcache, sp_regnum, &stack_address);
513 return (CORE_ADDR) stack_address;
516 void
517 ravenscar_arch_ops::supply_one_register (struct regcache *regcache,
518 int regnum,
519 CORE_ADDR descriptor,
520 CORE_ADDR stack_base) const
522 CORE_ADDR addr;
523 if (regnum >= first_stack_register && regnum <= last_stack_register)
524 addr = stack_base;
525 else
526 addr = descriptor;
527 addr += offsets[regnum];
529 struct gdbarch *gdbarch = regcache->arch ();
530 int size = register_size (gdbarch, regnum);
531 gdb_byte *buf = (gdb_byte *) alloca (size);
532 read_memory (addr, buf, size);
533 regcache->raw_supply (regnum, buf);
536 void
537 ravenscar_arch_ops::fetch_register (struct regcache *regcache,
538 int regnum) const
540 gdb_assert (regnum != -1);
542 struct gdbarch *gdbarch = regcache->arch ();
543 /* The tid is the thread_id field, which is a pointer to the thread. */
544 CORE_ADDR thread_descriptor_address
545 = (CORE_ADDR) regcache->ptid ().tid ();
547 int sp_regno = -1;
548 CORE_ADDR stack_address = 0;
549 if (regnum >= first_stack_register && regnum <= last_stack_register)
551 /* We must supply SP for get_stack_base, so recurse. */
552 sp_regno = gdbarch_sp_regnum (gdbarch);
553 gdb_assert (!(sp_regno >= first_stack_register
554 && sp_regno <= last_stack_register));
555 fetch_register (regcache, sp_regno);
556 stack_address = get_stack_base (regcache);
559 if (regnum < offsets.size () && offsets[regnum] != -1)
560 supply_one_register (regcache, regnum, thread_descriptor_address,
561 stack_address);
564 void
565 ravenscar_arch_ops::store_one_register (struct regcache *regcache, int regnum,
566 CORE_ADDR descriptor,
567 CORE_ADDR stack_base) const
569 CORE_ADDR addr;
570 if (regnum >= first_stack_register && regnum <= last_stack_register)
571 addr = stack_base;
572 else
573 addr = descriptor;
574 addr += offsets[regnum];
576 struct gdbarch *gdbarch = regcache->arch ();
577 int size = register_size (gdbarch, regnum);
578 gdb_byte *buf = (gdb_byte *) alloca (size);
579 regcache->raw_collect (regnum, buf);
580 write_memory (addr, buf, size);
583 void
584 ravenscar_arch_ops::store_register (struct regcache *regcache,
585 int regnum) const
587 gdb_assert (regnum != -1);
589 /* The tid is the thread_id field, which is a pointer to the thread. */
590 CORE_ADDR thread_descriptor_address
591 = (CORE_ADDR) regcache->ptid ().tid ();
593 CORE_ADDR stack_address = 0;
594 if (regnum >= first_stack_register && regnum <= last_stack_register)
595 stack_address = get_stack_base (regcache);
597 if (regnum < offsets.size () && offsets[regnum] != -1)
598 store_one_register (regcache, regnum, thread_descriptor_address,
599 stack_address);
602 /* Temporarily set the ptid of a regcache to some other value. When
603 this object is destroyed, the regcache's original ptid is
604 restored. */
606 class temporarily_change_regcache_ptid
608 public:
610 temporarily_change_regcache_ptid (struct regcache *regcache, ptid_t new_ptid)
611 : m_regcache (regcache),
612 m_save_ptid (regcache->ptid ())
614 m_regcache->set_ptid (new_ptid);
617 ~temporarily_change_regcache_ptid ()
619 m_regcache->set_ptid (m_save_ptid);
622 private:
624 /* The regcache. */
625 struct regcache *m_regcache;
626 /* The saved ptid. */
627 ptid_t m_save_ptid;
630 ravenscar_thread_target::fpu_state
631 ravenscar_thread_target::get_fpu_state (struct regcache *regcache,
632 const ravenscar_arch_ops *arch_ops)
634 /* We want to return true if the special FP register handling is
635 needed. If this target doesn't have lazy FP, then no special
636 treatment is ever needed. */
637 if (!arch_ops->on_demand_fp ())
638 return NOTHING_SPECIAL;
640 bound_minimal_symbol fpu_context
641 = lookup_minimal_symbol ("system__bb__cpu_primitives__current_fpu_context",
642 nullptr, nullptr);
643 /* If the symbol can't be found, just fall back. */
644 if (fpu_context.minsym == nullptr)
645 return NO_FP_REGISTERS;
647 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
648 ptr_type = lookup_pointer_type (ptr_type);
649 value *val = value_from_pointer (ptr_type, fpu_context.value_address ());
651 int cpu = get_thread_base_cpu (regcache->ptid ());
652 /* The array index type has a lower bound of 1 -- it is Ada code --
653 so subtract 1 here. */
654 val = value_ptradd (val, cpu - 1);
656 val = value_ind (val);
657 CORE_ADDR fpu_task = value_as_long (val);
659 /* The tid is the thread_id field, which is a pointer to the thread. */
660 CORE_ADDR thread_descriptor_address
661 = (CORE_ADDR) regcache->ptid ().tid ();
662 if (fpu_task == (thread_descriptor_address
663 + arch_ops->get_fpu_context_offset ()))
664 return LIVE_FP_REGISTERS;
666 int v_init_offset = arch_ops->get_v_init_offset ();
667 gdb_byte init = 0;
668 read_memory (thread_descriptor_address + v_init_offset, &init, 1);
669 return init ? NOTHING_SPECIAL : NO_FP_REGISTERS;
672 void
673 ravenscar_thread_target::fetch_registers (struct regcache *regcache,
674 int regnum)
676 ptid_t ptid = regcache->ptid ();
678 if (runtime_initialized () && is_ravenscar_task (ptid))
680 struct gdbarch *gdbarch = regcache->arch ();
681 bool is_active = task_is_currently_active (ptid);
682 struct ravenscar_arch_ops *arch_ops = gdbarch_ravenscar_ops (gdbarch);
683 gdb::optional<fpu_state> fp_state;
685 int low_reg = regnum == -1 ? 0 : regnum;
686 int high_reg = regnum == -1 ? gdbarch_num_regs (gdbarch) : regnum + 1;
688 ptid_t base = get_base_thread_from_ravenscar_task (ptid);
689 for (int i = low_reg; i < high_reg; ++i)
691 bool use_beneath = false;
692 if (arch_ops->is_fp_register (i))
694 if (!fp_state.has_value ())
695 fp_state = get_fpu_state (regcache, arch_ops);
696 if (*fp_state == NO_FP_REGISTERS)
697 continue;
698 if (*fp_state == LIVE_FP_REGISTERS
699 || (is_active && *fp_state == NOTHING_SPECIAL))
700 use_beneath = true;
702 else
703 use_beneath = is_active;
705 if (use_beneath)
707 temporarily_change_regcache_ptid changer (regcache, base);
708 beneath ()->fetch_registers (regcache, i);
710 else
711 arch_ops->fetch_register (regcache, i);
714 else
715 beneath ()->fetch_registers (regcache, regnum);
718 void
719 ravenscar_thread_target::store_registers (struct regcache *regcache,
720 int regnum)
722 ptid_t ptid = regcache->ptid ();
724 if (runtime_initialized () && is_ravenscar_task (ptid))
726 struct gdbarch *gdbarch = regcache->arch ();
727 bool is_active = task_is_currently_active (ptid);
728 struct ravenscar_arch_ops *arch_ops = gdbarch_ravenscar_ops (gdbarch);
729 gdb::optional<fpu_state> fp_state;
731 int low_reg = regnum == -1 ? 0 : regnum;
732 int high_reg = regnum == -1 ? gdbarch_num_regs (gdbarch) : regnum + 1;
734 ptid_t base = get_base_thread_from_ravenscar_task (ptid);
735 for (int i = low_reg; i < high_reg; ++i)
737 bool use_beneath = false;
738 if (arch_ops->is_fp_register (i))
740 if (!fp_state.has_value ())
741 fp_state = get_fpu_state (regcache, arch_ops);
742 if (*fp_state == NO_FP_REGISTERS)
743 continue;
744 if (*fp_state == LIVE_FP_REGISTERS
745 || (is_active && *fp_state == NOTHING_SPECIAL))
746 use_beneath = true;
748 else
749 use_beneath = is_active;
751 if (use_beneath)
753 temporarily_change_regcache_ptid changer (regcache, base);
754 beneath ()->store_registers (regcache, i);
756 else
757 arch_ops->store_register (regcache, i);
760 else
761 beneath ()->store_registers (regcache, regnum);
764 void
765 ravenscar_thread_target::prepare_to_store (struct regcache *regcache)
767 ptid_t ptid = regcache->ptid ();
769 if (runtime_initialized () && is_ravenscar_task (ptid))
771 if (task_is_currently_active (ptid))
773 ptid_t base = get_base_thread_from_ravenscar_task (ptid);
774 temporarily_change_regcache_ptid changer (regcache, base);
775 beneath ()->prepare_to_store (regcache);
777 else
779 /* Nothing. */
782 else
783 beneath ()->prepare_to_store (regcache);
786 /* Implement the to_stopped_by_sw_breakpoint target_ops "method". */
788 bool
789 ravenscar_thread_target::stopped_by_sw_breakpoint ()
791 scoped_restore_current_thread saver;
792 set_base_thread_from_ravenscar_task (inferior_ptid);
793 return beneath ()->stopped_by_sw_breakpoint ();
796 /* Implement the to_stopped_by_hw_breakpoint target_ops "method". */
798 bool
799 ravenscar_thread_target::stopped_by_hw_breakpoint ()
801 scoped_restore_current_thread saver;
802 set_base_thread_from_ravenscar_task (inferior_ptid);
803 return beneath ()->stopped_by_hw_breakpoint ();
806 /* Implement the to_stopped_by_watchpoint target_ops "method". */
808 bool
809 ravenscar_thread_target::stopped_by_watchpoint ()
811 scoped_restore_current_thread saver;
812 set_base_thread_from_ravenscar_task (inferior_ptid);
813 return beneath ()->stopped_by_watchpoint ();
816 /* Implement the to_stopped_data_address target_ops "method". */
818 bool
819 ravenscar_thread_target::stopped_data_address (CORE_ADDR *addr_p)
821 scoped_restore_current_thread saver;
822 set_base_thread_from_ravenscar_task (inferior_ptid);
823 return beneath ()->stopped_data_address (addr_p);
826 void
827 ravenscar_thread_target::mourn_inferior ()
829 m_base_ptid = null_ptid;
830 target_ops *beneath = this->beneath ();
831 current_inferior ()->unpush_target (this);
832 beneath->mourn_inferior ();
835 /* Implement the to_core_of_thread target_ops "method". */
838 ravenscar_thread_target::core_of_thread (ptid_t ptid)
840 scoped_restore_current_thread saver;
841 set_base_thread_from_ravenscar_task (inferior_ptid);
842 return beneath ()->core_of_thread (inferior_ptid);
845 /* Implement the target xfer_partial method. */
847 enum target_xfer_status
848 ravenscar_thread_target::xfer_partial (enum target_object object,
849 const char *annex,
850 gdb_byte *readbuf,
851 const gdb_byte *writebuf,
852 ULONGEST offset, ULONGEST len,
853 ULONGEST *xfered_len)
855 scoped_restore save_ptid = make_scoped_restore (&inferior_ptid);
856 /* Calling get_base_thread_from_ravenscar_task can read memory from
857 the inferior. However, that function is written to prefer our
858 internal map, so it should not result in recursive calls in
859 practice. */
860 inferior_ptid = get_base_thread_from_ravenscar_task (inferior_ptid);
861 return beneath ()->xfer_partial (object, annex, readbuf, writebuf,
862 offset, len, xfered_len);
865 /* Observer on inferior_created: push ravenscar thread stratum if needed. */
867 static void
868 ravenscar_inferior_created (inferior *inf)
870 const char *err_msg;
872 if (!ravenscar_task_support
873 || gdbarch_ravenscar_ops (target_gdbarch ()) == NULL
874 || !has_ravenscar_runtime ())
875 return;
877 err_msg = ada_get_tcb_types_info ();
878 if (err_msg != NULL)
880 warning (_("%s. Task/thread support disabled."), err_msg);
881 return;
884 ravenscar_thread_target *rtarget = new ravenscar_thread_target ();
885 inf->push_target (target_ops_up (rtarget));
886 thread_info *thr = rtarget->add_active_thread ();
887 if (thr != nullptr)
888 switch_to_thread (thr);
891 ptid_t
892 ravenscar_thread_target::get_ada_task_ptid (long lwp, ULONGEST thread)
894 return ptid_t (m_base_ptid.pid (), 0, thread);
/* Command-list for the "set/show ravenscar" prefix command.  */
static struct cmd_list_element *set_ravenscar_list;
static struct cmd_list_element *show_ravenscar_list;
901 /* Implement the "show ravenscar task-switching" command. */
903 static void
904 show_ravenscar_task_switching_command (struct ui_file *file, int from_tty,
905 struct cmd_list_element *c,
906 const char *value)
908 if (ravenscar_task_support)
909 gdb_printf (file, _("\
910 Support for Ravenscar task/thread switching is enabled\n"));
911 else
912 gdb_printf (file, _("\
913 Support for Ravenscar task/thread switching is disabled\n"));
916 /* Module startup initialization function, automagically called by
917 init.c. */
919 void _initialize_ravenscar ();
920 void
921 _initialize_ravenscar ()
923 /* Notice when the inferior is created in order to push the
924 ravenscar ops if needed. */
925 gdb::observers::inferior_created.attach (ravenscar_inferior_created,
926 "ravenscar-thread");
928 add_setshow_prefix_cmd
929 ("ravenscar", no_class,
930 _("Prefix command for changing Ravenscar-specific settings."),
931 _("Prefix command for showing Ravenscar-specific settings."),
932 &set_ravenscar_list, &show_ravenscar_list,
933 &setlist, &showlist);
935 add_setshow_boolean_cmd ("task-switching", class_obscure,
936 &ravenscar_task_support, _("\
937 Enable or disable support for GNAT Ravenscar tasks."), _("\
938 Show whether support for GNAT Ravenscar tasks is enabled."),
939 _("\
940 Enable or disable support for task/thread switching with the GNAT\n\
941 Ravenscar run-time library for bareboard configuration."),
942 NULL, show_ravenscar_task_switching_command,
943 &set_ravenscar_list, &show_ravenscar_list);