Automatic date update in version.in
[binutils-gdb.git] / gdb / aarch64-tdep.c
bloba1eab01abb8c3e212ba5bc9eafd48b21501930ad
1 /* Common target dependent code for GDB on AArch64 systems.
3 Copyright (C) 2009-2023 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
6 This file is part of GDB.
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21 #include "defs.h"
23 #include "frame.h"
24 #include "language.h"
25 #include "gdbcmd.h"
26 #include "gdbcore.h"
27 #include "dis-asm.h"
28 #include "regcache.h"
29 #include "reggroups.h"
30 #include "value.h"
31 #include "arch-utils.h"
32 #include "osabi.h"
33 #include "frame-unwind.h"
34 #include "frame-base.h"
35 #include "trad-frame.h"
36 #include "objfiles.h"
37 #include "dwarf2.h"
38 #include "dwarf2/frame.h"
39 #include "gdbtypes.h"
40 #include "prologue-value.h"
41 #include "target-descriptions.h"
42 #include "user-regs.h"
43 #include "ax-gdb.h"
44 #include "gdbsupport/selftest.h"
46 #include "aarch64-tdep.h"
47 #include "aarch64-ravenscar-thread.h"
49 #include "record.h"
50 #include "record-full.h"
51 #include "arch/aarch64-insn.h"
52 #include "gdbarch.h"
54 #include "opcode/aarch64.h"
55 #include <algorithm>
56 #include <unordered_map>
58 /* For inferior_ptid and current_inferior (). */
59 #include "inferior.h"
60 /* For std::sqrt and std::pow. */
61 #include <cmath>
63 /* A Homogeneous Floating-Point or Short-Vector Aggregate may have at most
64 four members. */
65 #define HA_MAX_NUM_FLDS 4
67 /* All possible aarch64 target descriptors. */
68 static std::unordered_map <aarch64_features, target_desc *> tdesc_aarch64_map;
70 /* The standard register names, and all the valid aliases for them.
71 We're not adding fp here, that name is already taken, see
72 _initialize_frame_reg. */
73 static const struct
75 const char *const name;
76 int regnum;
77 } aarch64_register_aliases[] =
79 /* Link register alias for x30. */
80 {"lr", AARCH64_LR_REGNUM},
81 /* SP is the canonical name for x31 according to aarch64_r_register_names,
82 so we're adding an x31 alias for sp. */
83 {"x31", AARCH64_SP_REGNUM},
84 /* specials */
85 {"ip0", AARCH64_X0_REGNUM + 16},
86 {"ip1", AARCH64_X0_REGNUM + 17}
/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};
/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  "fpsr",
  "fpcr"
};
/* The SVE 'Z' and 'P' registers.  */
static const char *const aarch64_sve_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_SVE_Z0_REGNUM!  */
  "z0", "z1", "z2", "z3",
  "z4", "z5", "z6", "z7",
  "z8", "z9", "z10", "z11",
  "z12", "z13", "z14", "z15",
  "z16", "z17", "z18", "z19",
  "z20", "z21", "z22", "z23",
  "z24", "z25", "z26", "z27",
  "z28", "z29", "z30", "z31",
  "fpsr", "fpcr",
  "p0", "p1", "p2", "p3",
  "p4", "p5", "p6", "p7",
  "p8", "p9", "p10", "p11",
  "p12", "p13", "p14", "p15",
  "ffr", "vg"
};
/* Pointer authentication mask registers.  */
static const char *const aarch64_pauth_register_names[] =
{
  /* Authentication mask for data pointer, low half/user pointers.  */
  "pauth_dmask",
  /* Authentication mask for code pointer, low half/user pointers.  */
  "pauth_cmask",
  /* Authentication mask for data pointer, high half / kernel pointers.  */
  "pauth_dmask_high",
  /* Authentication mask for code pointer, high half / kernel pointers.  */
  "pauth_cmask_high"
};
155 static const char *const aarch64_mte_register_names[] =
157 /* Tag Control Register. */
158 "tag_ctl"
161 static int aarch64_stack_frame_destroyed_p (struct gdbarch *, CORE_ADDR);
163 /* AArch64 prologue cache structure. */
164 struct aarch64_prologue_cache
166 /* The program counter at the start of the function. It is used to
167 identify this frame as a prologue frame. */
168 CORE_ADDR func;
170 /* The program counter at the time this frame was created; i.e. where
171 this function was called from. It is used to identify this frame as a
172 stub frame. */
173 CORE_ADDR prev_pc;
175 /* The stack pointer at the time this frame was created; i.e. the
176 caller's stack pointer when this function was called. It is used
177 to identify this frame. */
178 CORE_ADDR prev_sp;
180 /* Is the target available to read from? */
181 int available_p;
183 /* The frame base for this frame is just prev_sp - frame size.
184 FRAMESIZE is the distance from the frame pointer to the
185 initial stack pointer. */
186 int framesize;
188 /* The register used to hold the frame pointer for this frame. */
189 int framereg;
191 /* Saved register offsets. */
192 trad_frame_saved_reg *saved_regs;
/* Holds information used to read/write from/to ZA
   pseudo-registers.

   With this information, the read/write code can be simplified so it
   deals only with the required information to map a ZA pseudo-register
   to the exact bytes into the ZA contents buffer.  Otherwise we'd need
   to use a lot of conditionals.  */

struct za_offsets
{
  /* Offset, into ZA, of the starting byte of the pseudo-register.  */
  size_t starting_offset;
  /* The size of the contiguous chunks of the pseudo-register.  */
  size_t chunk_size;
  /* The number of pseudo-register chunks contained in ZA.  */
  size_t chunks;
  /* The offset between each contiguous chunk.  */
  size_t stride_size;
};
/* Holds data that is helpful to determine the individual fields that make
   up the names of the ZA pseudo-registers.  It is also very helpful to
   determine offsets, stride and sizes for reading ZA tiles and tile
   slices.  */

struct za_pseudo_encoding
{
  /* The slice index (0 ~ svl).  Only used for tile slices.  */
  uint8_t slice_index;
  /* The tile number (0 ~ 15).  */
  uint8_t tile_index;
  /* Direction (horizontal/vertical).  Only used for tile slices.  */
  bool horizontal;
  /* Qualifier index (0 ~ 4).  These map to B, H, S, D and Q.  */
  uint8_t qualifier_index;
};
232 static void
233 show_aarch64_debug (struct ui_file *file, int from_tty,
234 struct cmd_list_element *c, const char *value)
236 gdb_printf (file, _("AArch64 debugging is %s.\n"), value);
239 namespace {
241 /* Abstract instruction reader. */
243 class abstract_instruction_reader
245 public:
246 /* Read in one instruction. */
247 virtual ULONGEST read (CORE_ADDR memaddr, int len,
248 enum bfd_endian byte_order) = 0;
251 /* Instruction reader from real target. */
253 class instruction_reader : public abstract_instruction_reader
255 public:
256 ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
257 override
259 return read_code_unsigned_integer (memaddr, len, byte_order);
263 } // namespace
265 /* If address signing is enabled, mask off the signature bits from the link
266 register, which is passed by value in ADDR, using the register values in
267 THIS_FRAME. */
269 static CORE_ADDR
270 aarch64_frame_unmask_lr (aarch64_gdbarch_tdep *tdep,
271 frame_info_ptr this_frame, CORE_ADDR addr)
273 if (tdep->has_pauth ()
274 && frame_unwind_register_unsigned (this_frame,
275 tdep->ra_sign_state_regnum))
277 /* VA range select (bit 55) tells us whether to use the low half masks
278 or the high half masks. */
279 int cmask_num;
280 if (tdep->pauth_reg_count > 2 && addr & VA_RANGE_SELECT_BIT_MASK)
281 cmask_num = AARCH64_PAUTH_CMASK_HIGH_REGNUM (tdep->pauth_reg_base);
282 else
283 cmask_num = AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base);
285 /* By default, we assume TBI and discard the top 8 bits plus the VA range
286 select bit (55). */
287 CORE_ADDR mask = AARCH64_TOP_BITS_MASK;
288 mask |= frame_unwind_register_unsigned (this_frame, cmask_num);
289 addr = aarch64_remove_top_bits (addr, mask);
291 /* Record in the frame that the link register required unmasking. */
292 set_frame_previous_pc_masked (this_frame);
295 return addr;
298 /* Implement the "get_pc_address_flags" gdbarch method. */
300 static std::string
301 aarch64_get_pc_address_flags (frame_info_ptr frame, CORE_ADDR pc)
303 if (pc != 0 && get_frame_pc_masked (frame))
304 return "PAC";
306 return "";
309 /* Analyze a prologue, looking for a recognizable stack frame
310 and frame pointer. Scan until we encounter a store that could
311 clobber the stack frame unexpectedly, or an unknown instruction. */
313 static CORE_ADDR
314 aarch64_analyze_prologue (struct gdbarch *gdbarch,
315 CORE_ADDR start, CORE_ADDR limit,
316 struct aarch64_prologue_cache *cache,
317 abstract_instruction_reader& reader)
319 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
320 int i;
322 /* Whether the stack has been set. This should be true when we notice a SP
323 to FP move or if we are using the SP as the base register for storing
324 data, in case the FP is omitted. */
325 bool seen_stack_set = false;
327 /* Track X registers and D registers in prologue. */
328 pv_t regs[AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT];
330 for (i = 0; i < AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT; i++)
331 regs[i] = pv_register (i, 0);
332 pv_area stack (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));
334 for (; start < limit; start += 4)
336 uint32_t insn;
337 aarch64_inst inst;
339 insn = reader.read (start, 4, byte_order_for_code);
341 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
342 break;
344 if (inst.opcode->iclass == addsub_imm
345 && (inst.opcode->op == OP_ADD
346 || strcmp ("sub", inst.opcode->name) == 0))
348 unsigned rd = inst.operands[0].reg.regno;
349 unsigned rn = inst.operands[1].reg.regno;
351 gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
352 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
353 gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
354 gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);
356 if (inst.opcode->op == OP_ADD)
358 regs[rd] = pv_add_constant (regs[rn],
359 inst.operands[2].imm.value);
361 else
363 regs[rd] = pv_add_constant (regs[rn],
364 -inst.operands[2].imm.value);
367 /* Did we move SP to FP? */
368 if (rn == AARCH64_SP_REGNUM && rd == AARCH64_FP_REGNUM)
369 seen_stack_set = true;
371 else if (inst.opcode->iclass == addsub_ext
372 && strcmp ("sub", inst.opcode->name) == 0)
374 unsigned rd = inst.operands[0].reg.regno;
375 unsigned rn = inst.operands[1].reg.regno;
376 unsigned rm = inst.operands[2].reg.regno;
378 gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
379 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
380 gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
381 gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_EXT);
383 regs[rd] = pv_subtract (regs[rn], regs[rm]);
385 else if (inst.opcode->iclass == branch_imm)
387 /* Stop analysis on branch. */
388 break;
390 else if (inst.opcode->iclass == condbranch)
392 /* Stop analysis on branch. */
393 break;
395 else if (inst.opcode->iclass == branch_reg)
397 /* Stop analysis on branch. */
398 break;
400 else if (inst.opcode->iclass == compbranch)
402 /* Stop analysis on branch. */
403 break;
405 else if (inst.opcode->op == OP_MOVZ)
407 unsigned rd = inst.operands[0].reg.regno;
409 gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
410 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
411 gdb_assert (inst.operands[1].type == AARCH64_OPND_HALF);
412 gdb_assert (inst.operands[1].shifter.kind == AARCH64_MOD_LSL);
414 /* If this shows up before we set the stack, keep going. Otherwise
415 stop the analysis. */
416 if (seen_stack_set)
417 break;
419 regs[rd] = pv_constant (inst.operands[1].imm.value
420 << inst.operands[1].shifter.amount);
422 else if (inst.opcode->iclass == log_shift
423 && strcmp (inst.opcode->name, "orr") == 0)
425 unsigned rd = inst.operands[0].reg.regno;
426 unsigned rn = inst.operands[1].reg.regno;
427 unsigned rm = inst.operands[2].reg.regno;
429 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
430 gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
431 gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);
433 if (inst.operands[2].shifter.amount == 0
434 && rn == AARCH64_SP_REGNUM)
435 regs[rd] = regs[rm];
436 else
438 aarch64_debug_printf ("prologue analysis gave up "
439 "addr=%s opcode=0x%x (orr x register)",
440 core_addr_to_string_nz (start), insn);
442 break;
445 else if (inst.opcode->op == OP_STUR)
447 unsigned rt = inst.operands[0].reg.regno;
448 unsigned rn = inst.operands[1].addr.base_regno;
449 int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);
451 gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
452 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
453 gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
454 gdb_assert (!inst.operands[1].addr.offset.is_reg);
456 stack.store
457 (pv_add_constant (regs[rn], inst.operands[1].addr.offset.imm),
458 size, regs[rt]);
460 /* Are we storing with SP as a base? */
461 if (rn == AARCH64_SP_REGNUM)
462 seen_stack_set = true;
464 else if ((inst.opcode->iclass == ldstpair_off
465 || (inst.opcode->iclass == ldstpair_indexed
466 && inst.operands[2].addr.preind))
467 && strcmp ("stp", inst.opcode->name) == 0)
469 /* STP with addressing mode Pre-indexed and Base register. */
470 unsigned rt1;
471 unsigned rt2;
472 unsigned rn = inst.operands[2].addr.base_regno;
473 int32_t imm = inst.operands[2].addr.offset.imm;
474 int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);
476 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
477 || inst.operands[0].type == AARCH64_OPND_Ft);
478 gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2
479 || inst.operands[1].type == AARCH64_OPND_Ft2);
480 gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
481 gdb_assert (!inst.operands[2].addr.offset.is_reg);
483 /* If recording this store would invalidate the store area
484 (perhaps because rn is not known) then we should abandon
485 further prologue analysis. */
486 if (stack.store_would_trash (pv_add_constant (regs[rn], imm)))
487 break;
489 if (stack.store_would_trash (pv_add_constant (regs[rn], imm + 8)))
490 break;
492 rt1 = inst.operands[0].reg.regno;
493 rt2 = inst.operands[1].reg.regno;
494 if (inst.operands[0].type == AARCH64_OPND_Ft)
496 rt1 += AARCH64_X_REGISTER_COUNT;
497 rt2 += AARCH64_X_REGISTER_COUNT;
500 stack.store (pv_add_constant (regs[rn], imm), size, regs[rt1]);
501 stack.store (pv_add_constant (regs[rn], imm + size), size, regs[rt2]);
503 if (inst.operands[2].addr.writeback)
504 regs[rn] = pv_add_constant (regs[rn], imm);
506 /* Ignore the instruction that allocates stack space and sets
507 the SP. */
508 if (rn == AARCH64_SP_REGNUM && !inst.operands[2].addr.writeback)
509 seen_stack_set = true;
511 else if ((inst.opcode->iclass == ldst_imm9 /* Signed immediate. */
512 || (inst.opcode->iclass == ldst_pos /* Unsigned immediate. */
513 && (inst.opcode->op == OP_STR_POS
514 || inst.opcode->op == OP_STRF_POS)))
515 && inst.operands[1].addr.base_regno == AARCH64_SP_REGNUM
516 && strcmp ("str", inst.opcode->name) == 0)
518 /* STR (immediate) */
519 unsigned int rt = inst.operands[0].reg.regno;
520 int32_t imm = inst.operands[1].addr.offset.imm;
521 unsigned int rn = inst.operands[1].addr.base_regno;
522 int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);
523 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
524 || inst.operands[0].type == AARCH64_OPND_Ft);
526 if (inst.operands[0].type == AARCH64_OPND_Ft)
527 rt += AARCH64_X_REGISTER_COUNT;
529 stack.store (pv_add_constant (regs[rn], imm), size, regs[rt]);
530 if (inst.operands[1].addr.writeback)
531 regs[rn] = pv_add_constant (regs[rn], imm);
533 /* Are we storing with SP as a base? */
534 if (rn == AARCH64_SP_REGNUM)
535 seen_stack_set = true;
537 else if (inst.opcode->iclass == testbranch)
539 /* Stop analysis on branch. */
540 break;
542 else if (inst.opcode->iclass == ic_system)
544 aarch64_gdbarch_tdep *tdep
545 = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
546 int ra_state_val = 0;
548 if (insn == 0xd503233f /* paciasp. */
549 || insn == 0xd503237f /* pacibsp. */)
551 /* Return addresses are mangled. */
552 ra_state_val = 1;
554 else if (insn == 0xd50323bf /* autiasp. */
555 || insn == 0xd50323ff /* autibsp. */)
557 /* Return addresses are not mangled. */
558 ra_state_val = 0;
560 else if (IS_BTI (insn))
561 /* We don't need to do anything special for a BTI instruction. */
562 continue;
563 else
565 aarch64_debug_printf ("prologue analysis gave up addr=%s"
566 " opcode=0x%x (iclass)",
567 core_addr_to_string_nz (start), insn);
568 break;
571 if (tdep->has_pauth () && cache != nullptr)
573 int regnum = tdep->ra_sign_state_regnum;
574 cache->saved_regs[regnum].set_value (ra_state_val);
577 else
579 aarch64_debug_printf ("prologue analysis gave up addr=%s"
580 " opcode=0x%x",
581 core_addr_to_string_nz (start), insn);
583 break;
587 if (cache == NULL)
588 return start;
590 if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
592 /* Frame pointer is fp. Frame size is constant. */
593 cache->framereg = AARCH64_FP_REGNUM;
594 cache->framesize = -regs[AARCH64_FP_REGNUM].k;
596 else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
598 /* Try the stack pointer. */
599 cache->framesize = -regs[AARCH64_SP_REGNUM].k;
600 cache->framereg = AARCH64_SP_REGNUM;
602 else
604 /* We're just out of luck. We don't know where the frame is. */
605 cache->framereg = -1;
606 cache->framesize = 0;
609 for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
611 CORE_ADDR offset;
613 if (stack.find_reg (gdbarch, i, &offset))
614 cache->saved_regs[i].set_addr (offset);
617 for (i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
619 int regnum = gdbarch_num_regs (gdbarch);
620 CORE_ADDR offset;
622 if (stack.find_reg (gdbarch, i + AARCH64_X_REGISTER_COUNT,
623 &offset))
624 cache->saved_regs[i + regnum + AARCH64_D0_REGNUM].set_addr (offset);
627 return start;
630 static CORE_ADDR
631 aarch64_analyze_prologue (struct gdbarch *gdbarch,
632 CORE_ADDR start, CORE_ADDR limit,
633 struct aarch64_prologue_cache *cache)
635 instruction_reader reader;
637 return aarch64_analyze_prologue (gdbarch, start, limit, cache,
638 reader);
#if GDB_SELF_TEST

namespace selftests {

/* Instruction reader from manually cooked instruction sequences.  */

class instruction_reader_test : public abstract_instruction_reader
{
public:
  template<size_t SIZE>
  explicit instruction_reader_test (const uint32_t (&insns)[SIZE])
  : m_insns (insns), m_insns_size (SIZE)
  {}

  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    SELF_CHECK (len == 4);
    SELF_CHECK (memaddr % 4 == 0);
    SELF_CHECK (memaddr / 4 < m_insns_size);

    return m_insns[memaddr / 4];
  }

private:
  const uint32_t *m_insns;
  size_t m_insns_size;
};

/* Exercise aarch64_analyze_prologue against a collection of canned
   prologue instruction sequences.  */

static void
aarch64_analyze_prologue_test (void)
{
  struct gdbarch_info info;

  info.bfd_arch_info = bfd_scan_arch ("aarch64");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);

  struct aarch64_prologue_cache cache;
  cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);

  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  /* Test the simple prologue in which frame pointer is used.  */
  {
    static const uint32_t insns[] = {
      0xa9af7bfd, /* stp     x29, x30, [sp,#-272]! */
      0x910003fd, /* mov     x29, sp */
      0x97ffffe6, /* bl      0x400580 */
    };
    instruction_reader_test reader (insns);

    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
    SELF_CHECK (end == 4 * 2);

    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 272);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
	if (i == AARCH64_FP_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr () == -272);
	else if (i == AARCH64_LR_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr () == -264);
	else
	  SELF_CHECK (cache.saved_regs[i].is_realreg ()
		      && cache.saved_regs[i].realreg () == i);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
	int num_regs = gdbarch_num_regs (gdbarch);
	int regnum = i + num_regs + AARCH64_D0_REGNUM;

	SELF_CHECK (cache.saved_regs[regnum].is_realreg ()
		    && cache.saved_regs[regnum].realreg () == regnum);
      }
  }

  /* Test a prologue in which STR is used and frame pointer is not
     used.  */
  {
    static const uint32_t insns[] = {
      0xf81d0ff3, /* str	x19, [sp, #-48]! */
      0xb9002fe0, /* str	w0, [sp, #44] */
      0xf90013e1, /* str	x1, [sp, #32]*/
      0xfd000fe0, /* str	d0, [sp, #24] */
      0xaa0203f3, /* mov	x19, x2 */
      0xf94013e0, /* ldr	x0, [sp, #32] */
    };
    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    SELF_CHECK (end == 4 * 5);

    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 48);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
	if (i == 1)
	  SELF_CHECK (cache.saved_regs[i].addr () == -16);
	else if (i == 19)
	  SELF_CHECK (cache.saved_regs[i].addr () == -48);
	else
	  SELF_CHECK (cache.saved_regs[i].is_realreg ()
		      && cache.saved_regs[i].realreg () == i);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
	int num_regs = gdbarch_num_regs (gdbarch);
	int regnum = i + num_regs + AARCH64_D0_REGNUM;

	if (i == 0)
	  SELF_CHECK (cache.saved_regs[regnum].addr () == -24);
	else
	  SELF_CHECK (cache.saved_regs[regnum].is_realreg ()
		      && cache.saved_regs[regnum].realreg () == regnum);
      }
  }

  /* Test handling of movz before setting the frame pointer.  */
  {
    static const uint32_t insns[] = {
      0xa9bf7bfd, /* stp     x29, x30, [sp, #-16]! */
      0x52800020, /* mov     w0, #0x1 */
      0x910003fd, /* mov     x29, sp */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 4th instruction.  */
    SELF_CHECK (end == (4 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 16);
  }

  /* Test handling of movz/stp when using the stack pointer as frame
     pointer.  */
  {
    static const uint32_t insns[] = {
      0xa9bc7bfd, /* stp     x29, x30, [sp, #-64]! */
      0x52800020, /* mov     w0, #0x1 */
      0x290207e0, /* stp     w0, w1, [sp, #16] */
      0xa9018fe2, /* stp     x2, x3, [sp, #24] */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 5th instruction.  */
    SELF_CHECK (end == (5 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 64);
  }

  /* Test handling of movz/str when using the stack pointer as frame
     pointer */
  {
    static const uint32_t insns[] = {
      0xa9bc7bfd, /* stp     x29, x30, [sp, #-64]! */
      0x52800020, /* mov     w0, #0x1 */
      0xb9002be4, /* str     w4, [sp, #40] */
      0xf9001be5, /* str     x5, [sp, #48] */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 5th instruction.  */
    SELF_CHECK (end == (5 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 64);
  }

  /* Test handling of movz/stur when using the stack pointer as frame
     pointer.  */
  {
    static const uint32_t insns[] = {
      0xa9bc7bfd, /* stp     x29, x30, [sp, #-64]! */
      0x52800020, /* mov     w0, #0x1 */
      0xb80343e6, /* stur    w6, [sp, #52] */
      0xf80383e7, /* stur    x7, [sp, #56] */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 5th instruction.  */
    SELF_CHECK (end == (5 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 64);
  }

  /* Test handling of movz when there is no frame pointer set or no stack
     pointer used.  */
  {
    static const uint32_t insns[] = {
      0xa9bf7bfd, /* stp     x29, x30, [sp, #-16]! */
      0x52800020, /* mov     w0, #0x1 */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 4th instruction.  */
    SELF_CHECK (end == (4 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 16);
  }

  /* Test a prologue in which there is a return address signing instruction.  */
  if (tdep->has_pauth ())
    {
      static const uint32_t insns[] = {
	0xd503233f, /* paciasp */
	0xa9bd7bfd, /* stp     x29, x30, [sp, #-48]! */
	0x910003fd, /* mov     x29, sp */
	0xf801c3f3, /* str     x19, [sp, #28] */
	0xb9401fa0, /* ldr     x19, [x29, #28] */
      };
      instruction_reader_test reader (insns);

      trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
      CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache,
						reader);

      SELF_CHECK (end == 4 * 4);
      SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
      SELF_CHECK (cache.framesize == 48);

      for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
	{
	  if (i == 19)
	    SELF_CHECK (cache.saved_regs[i].addr () == -20);
	  else if (i == AARCH64_FP_REGNUM)
	    SELF_CHECK (cache.saved_regs[i].addr () == -48);
	  else if (i == AARCH64_LR_REGNUM)
	    SELF_CHECK (cache.saved_regs[i].addr () == -40);
	  else
	    SELF_CHECK (cache.saved_regs[i].is_realreg ()
			&& cache.saved_regs[i].realreg () == i);
	}

      if (tdep->has_pauth ())
	{
	  int regnum = tdep->ra_sign_state_regnum;
	  SELF_CHECK (cache.saved_regs[regnum].is_value ());
	}
    }

  /* Test a prologue with a BTI instruction.  */
  {
    static const uint32_t insns[] = {
      0xd503245f, /* bti */
      0xa9bd7bfd, /* stp     x29, x30, [sp, #-48]! */
      0x910003fd, /* mov     x29, sp */
      0xf801c3f3, /* str     x19, [sp, #28] */
      0xb9401fa0, /* ldr     x19, [x29, #28] */
    };
    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache,
					      reader);

    SELF_CHECK (end == 4 * 4);
    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 48);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
	if (i == 19)
	  SELF_CHECK (cache.saved_regs[i].addr () == -20);
	else if (i == AARCH64_FP_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr () == -48);
	else if (i == AARCH64_LR_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr () == -40);
	else
	  SELF_CHECK (cache.saved_regs[i].is_realreg ()
		      && cache.saved_regs[i].realreg () == i);
      }
  }
}

} /* namespace selftests */
#endif /* GDB_SELF_TEST */
954 /* Implement the "skip_prologue" gdbarch method. */
956 static CORE_ADDR
957 aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
959 CORE_ADDR func_addr, func_end_addr, limit_pc;
961 /* See if we can determine the end of the prologue via the symbol
962 table. If so, then return either PC, or the PC after the
963 prologue, whichever is greater. */
964 bool func_addr_found
965 = find_pc_partial_function (pc, NULL, &func_addr, &func_end_addr);
967 if (func_addr_found)
969 CORE_ADDR post_prologue_pc
970 = skip_prologue_using_sal (gdbarch, func_addr);
972 if (post_prologue_pc != 0)
973 return std::max (pc, post_prologue_pc);
976 /* Can't determine prologue from the symbol table, need to examine
977 instructions. */
979 /* Find an upper limit on the function prologue using the debug
980 information. If the debug information could not be used to
981 provide that bound, then use an arbitrary large number as the
982 upper bound. */
983 limit_pc = skip_prologue_using_sal (gdbarch, pc);
984 if (limit_pc == 0)
985 limit_pc = pc + 128; /* Magic. */
987 limit_pc
988 = func_end_addr == 0 ? limit_pc : std::min (limit_pc, func_end_addr - 4);
990 /* Try disassembling prologue. */
991 return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
994 /* Scan the function prologue for THIS_FRAME and populate the prologue
995 cache CACHE. */
997 static void
998 aarch64_scan_prologue (frame_info_ptr this_frame,
999 struct aarch64_prologue_cache *cache)
1001 CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
1002 CORE_ADDR prologue_start;
1003 CORE_ADDR prologue_end;
1004 CORE_ADDR prev_pc = get_frame_pc (this_frame);
1005 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1007 cache->prev_pc = prev_pc;
1009 /* Assume we do not find a frame. */
1010 cache->framereg = -1;
1011 cache->framesize = 0;
1013 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
1014 &prologue_end))
1016 struct symtab_and_line sal = find_pc_line (prologue_start, 0);
1018 if (sal.line == 0)
1020 /* No line info so use the current PC. */
1021 prologue_end = prev_pc;
1023 else if (sal.end < prologue_end)
1025 /* The next line begins after the function end. */
1026 prologue_end = sal.end;
1029 prologue_end = std::min (prologue_end, prev_pc);
1030 aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
1032 else
1034 CORE_ADDR frame_loc;
1036 frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
1037 if (frame_loc == 0)
1038 return;
1040 cache->framereg = AARCH64_FP_REGNUM;
1041 cache->framesize = 16;
1042 cache->saved_regs[29].set_addr (0);
1043 cache->saved_regs[30].set_addr (8);
1047 /* Fill in *CACHE with information about the prologue of *THIS_FRAME. This
1048 function may throw an exception if the inferior's registers or memory is
1049 not available. */
1051 static void
1052 aarch64_make_prologue_cache_1 (frame_info_ptr this_frame,
1053 struct aarch64_prologue_cache *cache)
1055 CORE_ADDR unwound_fp;
1056 int reg;
1058 aarch64_scan_prologue (this_frame, cache);
1060 if (cache->framereg == -1)
1061 return;
1063 unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
1064 if (unwound_fp == 0)
1065 return;
1067 cache->prev_sp = unwound_fp;
1068 if (!aarch64_stack_frame_destroyed_p (get_frame_arch (this_frame),
1069 cache->prev_pc))
1070 cache->prev_sp += cache->framesize;
1072 /* Calculate actual addresses of saved registers using offsets
1073 determined by aarch64_analyze_prologue. */
1074 for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
1075 if (cache->saved_regs[reg].is_addr ())
1076 cache->saved_regs[reg].set_addr (cache->saved_regs[reg].addr ()
1077 + cache->prev_sp);
1079 cache->func = get_frame_func (this_frame);
1081 cache->available_p = 1;
1084 /* Allocate and fill in *THIS_CACHE with information about the prologue of
1085 *THIS_FRAME. Do not do this is if *THIS_CACHE was already allocated.
1086 Return a pointer to the current aarch64_prologue_cache in
1087 *THIS_CACHE. */
1089 static struct aarch64_prologue_cache *
1090 aarch64_make_prologue_cache (frame_info_ptr this_frame, void **this_cache)
1092 struct aarch64_prologue_cache *cache;
1094 if (*this_cache != NULL)
1095 return (struct aarch64_prologue_cache *) *this_cache;
1097 cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
1098 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
1099 *this_cache = cache;
1103 aarch64_make_prologue_cache_1 (this_frame, cache);
1105 catch (const gdb_exception_error &ex)
1107 if (ex.error != NOT_AVAILABLE_ERROR)
1108 throw;
1111 return cache;
1114 /* Implement the "stop_reason" frame_unwind method. */
1116 static enum unwind_stop_reason
1117 aarch64_prologue_frame_unwind_stop_reason (frame_info_ptr this_frame,
1118 void **this_cache)
1120 struct aarch64_prologue_cache *cache
1121 = aarch64_make_prologue_cache (this_frame, this_cache);
1123 if (!cache->available_p)
1124 return UNWIND_UNAVAILABLE;
1126 /* Halt the backtrace at "_start". */
1127 gdbarch *arch = get_frame_arch (this_frame);
1128 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (arch);
1129 if (cache->prev_pc <= tdep->lowest_pc)
1130 return UNWIND_OUTERMOST;
1132 /* We've hit a wall, stop. */
1133 if (cache->prev_sp == 0)
1134 return UNWIND_OUTERMOST;
1136 return UNWIND_NO_REASON;
1139 /* Our frame ID for a normal frame is the current function's starting
1140 PC and the caller's SP when we were called. */
1142 static void
1143 aarch64_prologue_this_id (frame_info_ptr this_frame,
1144 void **this_cache, struct frame_id *this_id)
1146 struct aarch64_prologue_cache *cache
1147 = aarch64_make_prologue_cache (this_frame, this_cache);
1149 if (!cache->available_p)
1150 *this_id = frame_id_build_unavailable_stack (cache->func);
1151 else
1152 *this_id = frame_id_build (cache->prev_sp, cache->func);
1155 /* Implement the "prev_register" frame_unwind method. */
1157 static struct value *
1158 aarch64_prologue_prev_register (frame_info_ptr this_frame,
1159 void **this_cache, int prev_regnum)
1161 struct aarch64_prologue_cache *cache
1162 = aarch64_make_prologue_cache (this_frame, this_cache);
1164 /* If we are asked to unwind the PC, then we need to return the LR
1165 instead. The prologue may save PC, but it will point into this
1166 frame's prologue, not the next frame's resume location. */
1167 if (prev_regnum == AARCH64_PC_REGNUM)
1169 CORE_ADDR lr;
1170 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1171 aarch64_gdbarch_tdep *tdep
1172 = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
1174 lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
1176 if (tdep->has_pauth ()
1177 && cache->saved_regs[tdep->ra_sign_state_regnum].is_value ())
1178 lr = aarch64_frame_unmask_lr (tdep, this_frame, lr);
1180 return frame_unwind_got_constant (this_frame, prev_regnum, lr);
1183 /* SP is generally not saved to the stack, but this frame is
1184 identified by the next frame's stack pointer at the time of the
1185 call. The value was already reconstructed into PREV_SP. */
1187 +----------+ ^
1188 | saved lr | |
1189 +->| saved fp |--+
1190 | | |
1191 | | | <- Previous SP
1192 | +----------+
1193 | | saved lr |
1194 +--| saved fp |<- FP
1196 | |<- SP
1197 +----------+ */
1198 if (prev_regnum == AARCH64_SP_REGNUM)
1199 return frame_unwind_got_constant (this_frame, prev_regnum,
1200 cache->prev_sp);
1202 return trad_frame_get_prev_register (this_frame, cache->saved_regs,
1203 prev_regnum);
1206 /* AArch64 prologue unwinder. */
1207 static frame_unwind aarch64_prologue_unwind =
1209 "aarch64 prologue",
1210 NORMAL_FRAME,
1211 aarch64_prologue_frame_unwind_stop_reason,
1212 aarch64_prologue_this_id,
1213 aarch64_prologue_prev_register,
1214 NULL,
1215 default_frame_sniffer
1218 /* Allocate and fill in *THIS_CACHE with information about the prologue of
1219 *THIS_FRAME. Do not do this is if *THIS_CACHE was already allocated.
1220 Return a pointer to the current aarch64_prologue_cache in
1221 *THIS_CACHE. */
1223 static struct aarch64_prologue_cache *
1224 aarch64_make_stub_cache (frame_info_ptr this_frame, void **this_cache)
1226 struct aarch64_prologue_cache *cache;
1228 if (*this_cache != NULL)
1229 return (struct aarch64_prologue_cache *) *this_cache;
1231 cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
1232 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
1233 *this_cache = cache;
1237 cache->prev_sp = get_frame_register_unsigned (this_frame,
1238 AARCH64_SP_REGNUM);
1239 cache->prev_pc = get_frame_pc (this_frame);
1240 cache->available_p = 1;
1242 catch (const gdb_exception_error &ex)
1244 if (ex.error != NOT_AVAILABLE_ERROR)
1245 throw;
1248 return cache;
1251 /* Implement the "stop_reason" frame_unwind method. */
1253 static enum unwind_stop_reason
1254 aarch64_stub_frame_unwind_stop_reason (frame_info_ptr this_frame,
1255 void **this_cache)
1257 struct aarch64_prologue_cache *cache
1258 = aarch64_make_stub_cache (this_frame, this_cache);
1260 if (!cache->available_p)
1261 return UNWIND_UNAVAILABLE;
1263 return UNWIND_NO_REASON;
1266 /* Our frame ID for a stub frame is the current SP and LR. */
1268 static void
1269 aarch64_stub_this_id (frame_info_ptr this_frame,
1270 void **this_cache, struct frame_id *this_id)
1272 struct aarch64_prologue_cache *cache
1273 = aarch64_make_stub_cache (this_frame, this_cache);
1275 if (cache->available_p)
1276 *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
1277 else
1278 *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
1281 /* Implement the "sniffer" frame_unwind method. */
1283 static int
1284 aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
1285 frame_info_ptr this_frame,
1286 void **this_prologue_cache)
1288 CORE_ADDR addr_in_block;
1289 gdb_byte dummy[4];
1291 addr_in_block = get_frame_address_in_block (this_frame);
1292 if (in_plt_section (addr_in_block)
1293 /* We also use the stub winder if the target memory is unreadable
1294 to avoid having the prologue unwinder trying to read it. */
1295 || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
1296 return 1;
1298 return 0;
1301 /* AArch64 stub unwinder. */
1302 static frame_unwind aarch64_stub_unwind =
1304 "aarch64 stub",
1305 NORMAL_FRAME,
1306 aarch64_stub_frame_unwind_stop_reason,
1307 aarch64_stub_this_id,
1308 aarch64_prologue_prev_register,
1309 NULL,
1310 aarch64_stub_unwind_sniffer
1313 /* Return the frame base address of *THIS_FRAME. */
1315 static CORE_ADDR
1316 aarch64_normal_frame_base (frame_info_ptr this_frame, void **this_cache)
1318 struct aarch64_prologue_cache *cache
1319 = aarch64_make_prologue_cache (this_frame, this_cache);
1321 return cache->prev_sp - cache->framesize;
1324 /* AArch64 default frame base information. */
1325 static frame_base aarch64_normal_base =
1327 &aarch64_prologue_unwind,
1328 aarch64_normal_frame_base,
1329 aarch64_normal_frame_base,
1330 aarch64_normal_frame_base
1333 /* Return the value of the REGNUM register in the previous frame of
1334 *THIS_FRAME. */
1336 static struct value *
1337 aarch64_dwarf2_prev_register (frame_info_ptr this_frame,
1338 void **this_cache, int regnum)
1340 gdbarch *arch = get_frame_arch (this_frame);
1341 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (arch);
1342 CORE_ADDR lr;
1344 switch (regnum)
1346 case AARCH64_PC_REGNUM:
1347 lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
1348 lr = aarch64_frame_unmask_lr (tdep, this_frame, lr);
1349 return frame_unwind_got_constant (this_frame, regnum, lr);
1351 default:
1352 internal_error (_("Unexpected register %d"), regnum);
1356 static const unsigned char op_lit0 = DW_OP_lit0;
1357 static const unsigned char op_lit1 = DW_OP_lit1;
1359 /* Implement the "init_reg" dwarf2_frame_ops method. */
1361 static void
1362 aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
1363 struct dwarf2_frame_state_reg *reg,
1364 frame_info_ptr this_frame)
1366 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
1368 switch (regnum)
1370 case AARCH64_PC_REGNUM:
1371 reg->how = DWARF2_FRAME_REG_FN;
1372 reg->loc.fn = aarch64_dwarf2_prev_register;
1373 return;
1375 case AARCH64_SP_REGNUM:
1376 reg->how = DWARF2_FRAME_REG_CFA;
1377 return;
1380 /* Init pauth registers. */
1381 if (tdep->has_pauth ())
1383 if (regnum == tdep->ra_sign_state_regnum)
1385 /* Initialize RA_STATE to zero. */
1386 reg->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;
1387 reg->loc.exp.start = &op_lit0;
1388 reg->loc.exp.len = 1;
1389 return;
1391 else if (regnum >= tdep->pauth_reg_base
1392 && regnum < tdep->pauth_reg_base + tdep->pauth_reg_count)
1394 reg->how = DWARF2_FRAME_REG_SAME_VALUE;
1395 return;
1400 /* Implement the execute_dwarf_cfa_vendor_op method. */
1402 static bool
1403 aarch64_execute_dwarf_cfa_vendor_op (struct gdbarch *gdbarch, gdb_byte op,
1404 struct dwarf2_frame_state *fs)
1406 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
1407 struct dwarf2_frame_state_reg *ra_state;
1409 if (op == DW_CFA_AARCH64_negate_ra_state)
1411 /* On systems without pauth, treat as a nop. */
1412 if (!tdep->has_pauth ())
1413 return true;
1415 /* Allocate RA_STATE column if it's not allocated yet. */
1416 fs->regs.alloc_regs (AARCH64_DWARF_RA_SIGN_STATE + 1);
1418 /* Toggle the status of RA_STATE between 0 and 1. */
1419 ra_state = &(fs->regs.reg[AARCH64_DWARF_RA_SIGN_STATE]);
1420 ra_state->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;
1422 if (ra_state->loc.exp.start == nullptr
1423 || ra_state->loc.exp.start == &op_lit0)
1424 ra_state->loc.exp.start = &op_lit1;
1425 else
1426 ra_state->loc.exp.start = &op_lit0;
1428 ra_state->loc.exp.len = 1;
1430 return true;
1433 return false;
1436 /* Used for matching BRK instructions for AArch64. */
1437 static constexpr uint32_t BRK_INSN_MASK = 0xffe0001f;
1438 static constexpr uint32_t BRK_INSN_BASE = 0xd4200000;
1440 /* Implementation of gdbarch_program_breakpoint_here_p for aarch64. */
1442 static bool
1443 aarch64_program_breakpoint_here_p (gdbarch *gdbarch, CORE_ADDR address)
1445 const uint32_t insn_len = 4;
1446 gdb_byte target_mem[4];
1448 /* Enable the automatic memory restoration from breakpoints while
1449 we read the memory. Otherwise we may find temporary breakpoints, ones
1450 inserted by GDB, and flag them as permanent breakpoints. */
1451 scoped_restore restore_memory
1452 = make_scoped_restore_show_memory_breakpoints (0);
1454 if (target_read_memory (address, target_mem, insn_len) == 0)
1456 uint32_t insn =
1457 (uint32_t) extract_unsigned_integer (target_mem, insn_len,
1458 gdbarch_byte_order_for_code (gdbarch));
1460 /* Check if INSN is a BRK instruction pattern. There are multiple choices
1461 of such instructions with different immediate values. Different OS'
1462 may use a different variation, but they have the same outcome. */
1463 return ((insn & BRK_INSN_MASK) == BRK_INSN_BASE);
1466 return false;
1469 /* When arguments must be pushed onto the stack, they go on in reverse
1470 order. The code below implements a FILO (stack) to do this. */
1472 struct stack_item_t
1474 /* Value to pass on stack. It can be NULL if this item is for stack
1475 padding. */
1476 const gdb_byte *data;
1478 /* Size in bytes of value to pass on stack. */
1479 int len;
1482 /* Implement the gdbarch type alignment method, overrides the generic
1483 alignment algorithm for anything that is aarch64 specific. */
1485 static ULONGEST
1486 aarch64_type_align (gdbarch *gdbarch, struct type *t)
1488 t = check_typedef (t);
1489 if (t->code () == TYPE_CODE_ARRAY && t->is_vector ())
1491 /* Use the natural alignment for vector types (the same for
1492 scalar type), but the maximum alignment is 128-bit. */
1493 if (t->length () > 16)
1494 return 16;
1495 else
1496 return t->length ();
1499 /* Allow the common code to calculate the alignment. */
1500 return 0;
1503 /* Worker function for aapcs_is_vfp_call_or_return_candidate.
1505 Return the number of register required, or -1 on failure.
1507 When encountering a base element, if FUNDAMENTAL_TYPE is not set then set it
1508 to the element, else fail if the type of this element does not match the
1509 existing value. */
1511 static int
1512 aapcs_is_vfp_call_or_return_candidate_1 (struct type *type,
1513 struct type **fundamental_type)
1515 if (type == nullptr)
1516 return -1;
1518 switch (type->code ())
1520 case TYPE_CODE_FLT:
1521 case TYPE_CODE_DECFLOAT:
1522 if (type->length () > 16)
1523 return -1;
1525 if (*fundamental_type == nullptr)
1526 *fundamental_type = type;
1527 else if (type->length () != (*fundamental_type)->length ()
1528 || type->code () != (*fundamental_type)->code ())
1529 return -1;
1531 return 1;
1533 case TYPE_CODE_COMPLEX:
1535 struct type *target_type = check_typedef (type->target_type ());
1536 if (target_type->length () > 16)
1537 return -1;
1539 if (*fundamental_type == nullptr)
1540 *fundamental_type = target_type;
1541 else if (target_type->length () != (*fundamental_type)->length ()
1542 || target_type->code () != (*fundamental_type)->code ())
1543 return -1;
1545 return 2;
1548 case TYPE_CODE_ARRAY:
1550 if (type->is_vector ())
1552 if (type->length () != 8 && type->length () != 16)
1553 return -1;
1555 if (*fundamental_type == nullptr)
1556 *fundamental_type = type;
1557 else if (type->length () != (*fundamental_type)->length ()
1558 || type->code () != (*fundamental_type)->code ())
1559 return -1;
1561 return 1;
1563 else
1565 struct type *target_type = type->target_type ();
1566 int count = aapcs_is_vfp_call_or_return_candidate_1
1567 (target_type, fundamental_type);
1569 if (count == -1)
1570 return count;
1572 count *= (type->length () / target_type->length ());
1573 return count;
1577 case TYPE_CODE_STRUCT:
1578 case TYPE_CODE_UNION:
1580 int count = 0;
1582 for (int i = 0; i < type->num_fields (); i++)
1584 /* Ignore any static fields. */
1585 if (type->field (i).is_static ())
1586 continue;
1588 struct type *member = check_typedef (type->field (i).type ());
1590 int sub_count = aapcs_is_vfp_call_or_return_candidate_1
1591 (member, fundamental_type);
1592 if (sub_count == -1)
1593 return -1;
1594 count += sub_count;
1597 /* Ensure there is no padding between the fields (allowing for empty
1598 zero length structs) */
1599 int ftype_length = (*fundamental_type == nullptr)
1600 ? 0 : (*fundamental_type)->length ();
1601 if (count * ftype_length != type->length ())
1602 return -1;
1604 return count;
1607 default:
1608 break;
1611 return -1;
1614 /* Return true if an argument, whose type is described by TYPE, can be passed or
1615 returned in simd/fp registers, providing enough parameter passing registers
1616 are available. This is as described in the AAPCS64.
1618 Upon successful return, *COUNT returns the number of needed registers,
1619 *FUNDAMENTAL_TYPE contains the type of those registers.
1621 Candidate as per the AAPCS64 5.4.2.C is either a:
1622 - float.
1623 - short-vector.
1624 - HFA (Homogeneous Floating-point Aggregate, 4.3.5.1). A Composite type where
1625 all the members are floats and has at most 4 members.
1626 - HVA (Homogeneous Short-vector Aggregate, 4.3.5.2). A Composite type where
1627 all the members are short vectors and has at most 4 members.
1628 - Complex (7.1.1)
1630 Note that HFAs and HVAs can include nested structures and arrays. */
1632 static bool
1633 aapcs_is_vfp_call_or_return_candidate (struct type *type, int *count,
1634 struct type **fundamental_type)
1636 if (type == nullptr)
1637 return false;
1639 *fundamental_type = nullptr;
1641 int ag_count = aapcs_is_vfp_call_or_return_candidate_1 (type,
1642 fundamental_type);
1644 if (ag_count > 0 && ag_count <= HA_MAX_NUM_FLDS)
1646 *count = ag_count;
1647 return true;
1649 else
1650 return false;
1653 /* AArch64 function call information structure. */
1654 struct aarch64_call_info
1656 /* the current argument number. */
1657 unsigned argnum = 0;
1659 /* The next general purpose register number, equivalent to NGRN as
1660 described in the AArch64 Procedure Call Standard. */
1661 unsigned ngrn = 0;
1663 /* The next SIMD and floating point register number, equivalent to
1664 NSRN as described in the AArch64 Procedure Call Standard. */
1665 unsigned nsrn = 0;
1667 /* The next stacked argument address, equivalent to NSAA as
1668 described in the AArch64 Procedure Call Standard. */
1669 unsigned nsaa = 0;
1671 /* Stack item vector. */
1672 std::vector<stack_item_t> si;
1675 /* Pass a value in a sequence of consecutive X registers. The caller
1676 is responsible for ensuring sufficient registers are available. */
1678 static void
1679 pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
1680 struct aarch64_call_info *info, struct type *type,
1681 struct value *arg)
1683 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1684 int len = type->length ();
1685 enum type_code typecode = type->code ();
1686 int regnum = AARCH64_X0_REGNUM + info->ngrn;
1687 const bfd_byte *buf = arg->contents ().data ();
1689 info->argnum++;
1691 while (len > 0)
1693 int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
1694 CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
1695 byte_order);
1698 /* Adjust sub-word struct/union args when big-endian. */
1699 if (byte_order == BFD_ENDIAN_BIG
1700 && partial_len < X_REGISTER_SIZE
1701 && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
1702 regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);
1704 aarch64_debug_printf ("arg %d in %s = 0x%s", info->argnum,
1705 gdbarch_register_name (gdbarch, regnum),
1706 phex (regval, X_REGISTER_SIZE));
1708 regcache_cooked_write_unsigned (regcache, regnum, regval);
1709 len -= partial_len;
1710 buf += partial_len;
1711 regnum++;
1715 /* Attempt to marshall a value in a V register. Return 1 if
1716 successful, or 0 if insufficient registers are available. This
1717 function, unlike the equivalent pass_in_x() function does not
1718 handle arguments spread across multiple registers. */
1720 static int
1721 pass_in_v (struct gdbarch *gdbarch,
1722 struct regcache *regcache,
1723 struct aarch64_call_info *info,
1724 int len, const bfd_byte *buf)
1726 if (info->nsrn < 8)
1728 int regnum = AARCH64_V0_REGNUM + info->nsrn;
1729 /* Enough space for a full vector register. */
1730 gdb_byte reg[register_size (gdbarch, regnum)];
1731 gdb_assert (len <= sizeof (reg));
1733 info->argnum++;
1734 info->nsrn++;
1736 memset (reg, 0, sizeof (reg));
1737 /* PCS C.1, the argument is allocated to the least significant
1738 bits of V register. */
1739 memcpy (reg, buf, len);
1740 regcache->cooked_write (regnum, reg);
1742 aarch64_debug_printf ("arg %d in %s", info->argnum,
1743 gdbarch_register_name (gdbarch, regnum));
1745 return 1;
1747 info->nsrn = 8;
1748 return 0;
1751 /* Marshall an argument onto the stack. */
1753 static void
1754 pass_on_stack (struct aarch64_call_info *info, struct type *type,
1755 struct value *arg)
1757 const bfd_byte *buf = arg->contents ().data ();
1758 int len = type->length ();
1759 int align;
1760 stack_item_t item;
1762 info->argnum++;
1764 align = type_align (type);
1766 /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
1767 Natural alignment of the argument's type. */
1768 align = align_up (align, 8);
1770 /* The AArch64 PCS requires at most doubleword alignment. */
1771 if (align > 16)
1772 align = 16;
1774 aarch64_debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
1775 info->nsaa);
1777 item.len = len;
1778 item.data = buf;
1779 info->si.push_back (item);
1781 info->nsaa += len;
1782 if (info->nsaa & (align - 1))
1784 /* Push stack alignment padding. */
1785 int pad = align - (info->nsaa & (align - 1));
1787 item.len = pad;
1788 item.data = NULL;
1790 info->si.push_back (item);
1791 info->nsaa += pad;
1795 /* Marshall an argument into a sequence of one or more consecutive X
1796 registers or, if insufficient X registers are available then onto
1797 the stack. */
1799 static void
1800 pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
1801 struct aarch64_call_info *info, struct type *type,
1802 struct value *arg)
1804 int len = type->length ();
1805 int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;
1807 /* PCS C.13 - Pass in registers if we have enough spare */
1808 if (info->ngrn + nregs <= 8)
1810 pass_in_x (gdbarch, regcache, info, type, arg);
1811 info->ngrn += nregs;
1813 else
1815 info->ngrn = 8;
1816 pass_on_stack (info, type, arg);
1820 /* Pass a value, which is of type arg_type, in a V register. Assumes value is a
1821 aapcs_is_vfp_call_or_return_candidate and there are enough spare V
1822 registers. A return value of false is an error state as the value will have
1823 been partially passed to the stack. */
1824 static bool
1825 pass_in_v_vfp_candidate (struct gdbarch *gdbarch, struct regcache *regcache,
1826 struct aarch64_call_info *info, struct type *arg_type,
1827 struct value *arg)
1829 switch (arg_type->code ())
1831 case TYPE_CODE_FLT:
1832 case TYPE_CODE_DECFLOAT:
1833 return pass_in_v (gdbarch, regcache, info, arg_type->length (),
1834 arg->contents ().data ());
1835 break;
1837 case TYPE_CODE_COMPLEX:
1839 const bfd_byte *buf = arg->contents ().data ();
1840 struct type *target_type = check_typedef (arg_type->target_type ());
1842 if (!pass_in_v (gdbarch, regcache, info, target_type->length (),
1843 buf))
1844 return false;
1846 return pass_in_v (gdbarch, regcache, info, target_type->length (),
1847 buf + target_type->length ());
1850 case TYPE_CODE_ARRAY:
1851 if (arg_type->is_vector ())
1852 return pass_in_v (gdbarch, regcache, info, arg_type->length (),
1853 arg->contents ().data ());
1854 [[fallthrough]];
1856 case TYPE_CODE_STRUCT:
1857 case TYPE_CODE_UNION:
1858 for (int i = 0; i < arg_type->num_fields (); i++)
1860 /* Don't include static fields. */
1861 if (arg_type->field (i).is_static ())
1862 continue;
1864 struct value *field = arg->primitive_field (0, i, arg_type);
1865 struct type *field_type = check_typedef (field->type ());
1867 if (!pass_in_v_vfp_candidate (gdbarch, regcache, info, field_type,
1868 field))
1869 return false;
1871 return true;
1873 default:
1874 return false;
1878 /* Implement the "push_dummy_call" gdbarch method. */
1880 static CORE_ADDR
1881 aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
1882 struct regcache *regcache, CORE_ADDR bp_addr,
1883 int nargs,
1884 struct value **args, CORE_ADDR sp,
1885 function_call_return_method return_method,
1886 CORE_ADDR struct_addr)
1888 int argnum;
1889 struct aarch64_call_info info;
1891 /* We need to know what the type of the called function is in order
1892 to determine the number of named/anonymous arguments for the
1893 actual argument placement, and the return type in order to handle
1894 return value correctly.
1896 The generic code above us views the decision of return in memory
1897 or return in registers as a two stage processes. The language
1898 handler is consulted first and may decide to return in memory (eg
1899 class with copy constructor returned by value), this will cause
1900 the generic code to allocate space AND insert an initial leading
1901 argument.
1903 If the language code does not decide to pass in memory then the
1904 target code is consulted.
1906 If the language code decides to pass in memory we want to move
1907 the pointer inserted as the initial argument from the argument
1908 list and into X8, the conventional AArch64 struct return pointer
1909 register. */
1911 /* Set the return address. For the AArch64, the return breakpoint
1912 is always at BP_ADDR. */
1913 regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);
1915 /* If we were given an initial argument for the return slot, lose it. */
1916 if (return_method == return_method_hidden_param)
1918 args++;
1919 nargs--;
1922 /* The struct_return pointer occupies X8. */
1923 if (return_method != return_method_normal)
1925 aarch64_debug_printf ("struct return in %s = 0x%s",
1926 gdbarch_register_name
1927 (gdbarch, AARCH64_STRUCT_RETURN_REGNUM),
1928 paddress (gdbarch, struct_addr));
1930 regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
1931 struct_addr);
1934 for (argnum = 0; argnum < nargs; argnum++)
1936 struct value *arg = args[argnum];
1937 struct type *arg_type, *fundamental_type;
1938 int len, elements;
1940 arg_type = check_typedef (arg->type ());
1941 len = arg_type->length ();
1943 /* If arg can be passed in v registers as per the AAPCS64, then do so if
1944 if there are enough spare registers. */
1945 if (aapcs_is_vfp_call_or_return_candidate (arg_type, &elements,
1946 &fundamental_type))
1948 if (info.nsrn + elements <= 8)
1950 /* We know that we have sufficient registers available therefore
1951 this will never need to fallback to the stack. */
1952 if (!pass_in_v_vfp_candidate (gdbarch, regcache, &info, arg_type,
1953 arg))
1954 gdb_assert_not_reached ("Failed to push args");
1956 else
1958 info.nsrn = 8;
1959 pass_on_stack (&info, arg_type, arg);
1961 continue;
1964 switch (arg_type->code ())
1966 case TYPE_CODE_INT:
1967 case TYPE_CODE_BOOL:
1968 case TYPE_CODE_CHAR:
1969 case TYPE_CODE_RANGE:
1970 case TYPE_CODE_ENUM:
1971 if (len < 4 && !is_fixed_point_type (arg_type))
1973 /* Promote to 32 bit integer. */
1974 if (arg_type->is_unsigned ())
1975 arg_type = builtin_type (gdbarch)->builtin_uint32;
1976 else
1977 arg_type = builtin_type (gdbarch)->builtin_int32;
1978 arg = value_cast (arg_type, arg);
1980 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1981 break;
1983 case TYPE_CODE_STRUCT:
1984 case TYPE_CODE_ARRAY:
1985 case TYPE_CODE_UNION:
1986 if (len > 16)
1988 /* PCS B.7 Aggregates larger than 16 bytes are passed by
1989 invisible reference. */
1991 /* Allocate aligned storage. */
1992 sp = align_down (sp - len, 16);
1994 /* Write the real data into the stack. */
1995 write_memory (sp, arg->contents ().data (), len);
1997 /* Construct the indirection. */
1998 arg_type = lookup_pointer_type (arg_type);
1999 arg = value_from_pointer (arg_type, sp);
2000 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
2002 else
2003 /* PCS C.15 / C.18 multiple values pass. */
2004 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
2005 break;
2007 default:
2008 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
2009 break;
2013 /* Make sure stack retains 16 byte alignment. */
2014 if (info.nsaa & 15)
2015 sp -= 16 - (info.nsaa & 15);
2017 while (!info.si.empty ())
2019 const stack_item_t &si = info.si.back ();
2021 sp -= si.len;
2022 if (si.data != NULL)
2023 write_memory (sp, si.data, si.len);
2024 info.si.pop_back ();
2027 /* Finally, update the SP register. */
2028 regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);
2030 return sp;
2033 /* Implement the "frame_align" gdbarch method. */
2035 static CORE_ADDR
2036 aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
2038 /* Align the stack to sixteen bytes. */
2039 return sp & ~(CORE_ADDR) 15;
2042 /* Return the type for an AdvSISD Q register. */
2044 static struct type *
2045 aarch64_vnq_type (struct gdbarch *gdbarch)
2047 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
2049 if (tdep->vnq_type == NULL)
2051 struct type *t;
2052 struct type *elem;
2054 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
2055 TYPE_CODE_UNION);
2057 elem = builtin_type (gdbarch)->builtin_uint128;
2058 append_composite_type_field (t, "u", elem);
2060 elem = builtin_type (gdbarch)->builtin_int128;
2061 append_composite_type_field (t, "s", elem);
2063 tdep->vnq_type = t;
2066 return tdep->vnq_type;
2069 /* Return the type for an AdvSISD D register. */
2071 static struct type *
2072 aarch64_vnd_type (struct gdbarch *gdbarch)
2074 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
2076 if (tdep->vnd_type == NULL)
2078 struct type *t;
2079 struct type *elem;
2081 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
2082 TYPE_CODE_UNION);
2084 elem = builtin_type (gdbarch)->builtin_double;
2085 append_composite_type_field (t, "f", elem);
2087 elem = builtin_type (gdbarch)->builtin_uint64;
2088 append_composite_type_field (t, "u", elem);
2090 elem = builtin_type (gdbarch)->builtin_int64;
2091 append_composite_type_field (t, "s", elem);
2093 tdep->vnd_type = t;
2096 return tdep->vnd_type;
2099 /* Return the type for an AdvSISD S register. */
2101 static struct type *
2102 aarch64_vns_type (struct gdbarch *gdbarch)
2104 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
2106 if (tdep->vns_type == NULL)
2108 struct type *t;
2109 struct type *elem;
2111 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
2112 TYPE_CODE_UNION);
2114 elem = builtin_type (gdbarch)->builtin_float;
2115 append_composite_type_field (t, "f", elem);
2117 elem = builtin_type (gdbarch)->builtin_uint32;
2118 append_composite_type_field (t, "u", elem);
2120 elem = builtin_type (gdbarch)->builtin_int32;
2121 append_composite_type_field (t, "s", elem);
2123 tdep->vns_type = t;
2126 return tdep->vns_type;
2129 /* Return the type for an AdvSISD H register. */
2131 static struct type *
2132 aarch64_vnh_type (struct gdbarch *gdbarch)
2134 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
2136 if (tdep->vnh_type == NULL)
2138 struct type *t;
2139 struct type *elem;
2141 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
2142 TYPE_CODE_UNION);
2144 elem = builtin_type (gdbarch)->builtin_bfloat16;
2145 append_composite_type_field (t, "bf", elem);
2147 elem = builtin_type (gdbarch)->builtin_half;
2148 append_composite_type_field (t, "f", elem);
2150 elem = builtin_type (gdbarch)->builtin_uint16;
2151 append_composite_type_field (t, "u", elem);
2153 elem = builtin_type (gdbarch)->builtin_int16;
2154 append_composite_type_field (t, "s", elem);
2156 tdep->vnh_type = t;
2159 return tdep->vnh_type;
2162 /* Return the type for an AdvSISD B register. */
2164 static struct type *
2165 aarch64_vnb_type (struct gdbarch *gdbarch)
2167 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
2169 if (tdep->vnb_type == NULL)
2171 struct type *t;
2172 struct type *elem;
2174 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
2175 TYPE_CODE_UNION);
2177 elem = builtin_type (gdbarch)->builtin_uint8;
2178 append_composite_type_field (t, "u", elem);
2180 elem = builtin_type (gdbarch)->builtin_int8;
2181 append_composite_type_field (t, "s", elem);
2183 tdep->vnb_type = t;
2186 return tdep->vnb_type;
2189 /* Return TRUE if REGNUM is a ZA tile slice pseudo-register number. Return
2190 FALSE otherwise. */
2192 static bool
2193 is_sme_tile_slice_pseudo_register (struct gdbarch *gdbarch, int regnum)
2195 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
2197 gdb_assert (tdep->has_sme ());
2198 gdb_assert (tdep->sme_svq > 0);
2199 gdb_assert (tdep->sme_pseudo_base <= regnum);
2200 gdb_assert (regnum < tdep->sme_pseudo_base + tdep->sme_pseudo_count);
2202 if (tdep->sme_tile_slice_pseudo_base <= regnum
2203 && regnum < tdep->sme_tile_slice_pseudo_base
2204 + tdep->sme_tile_slice_pseudo_count)
2205 return true;
2207 return false;
2210 /* Given REGNUM, a ZA pseudo-register number, return, in ENCODING, the
2211 decoded fields that make up its name. */
2213 static void
2214 aarch64_za_decode_pseudos (struct gdbarch *gdbarch, int regnum,
2215 struct za_pseudo_encoding &encoding)
2217 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
2219 gdb_assert (tdep->has_sme ());
2220 gdb_assert (tdep->sme_svq > 0);
2221 gdb_assert (tdep->sme_pseudo_base <= regnum);
2222 gdb_assert (regnum < tdep->sme_pseudo_base + tdep->sme_pseudo_count);
2224 if (is_sme_tile_slice_pseudo_register (gdbarch, regnum))
2226 /* Calculate the tile slice pseudo-register offset relative to the other
2227 tile slice pseudo-registers. */
2228 int offset = regnum - tdep->sme_tile_slice_pseudo_base;
2230 /* Fetch the qualifier. We can have 160 to 2560 possible tile slice
2231 pseudo-registers. Each qualifier (we have 5 of them: B, H, S, D
2232 and Q) covers 32 * svq pseudo-registers, so we divide the offset by
2233 that constant. */
2234 size_t qualifier = offset / (tdep->sme_svq * 32);
2235 encoding.qualifier_index = qualifier;
2237 /* Prepare to fetch the direction (d), tile number (t) and slice
2238 number (s). */
2239 int dts = offset % (tdep->sme_svq * 32);
2241 /* The direction is represented by the even/odd numbers. Even-numbered
2242 pseudo-registers are horizontal tile slices and odd-numbered
2243 pseudo-registers are vertical tile slices. */
2244 encoding.horizontal = !(dts & 1);
2246 /* Fetch the tile number. The tile number is closely related to the
2247 qualifier. B has 1 tile, H has 2 tiles, S has 4 tiles, D has 8 tiles
2248 and Q has 16 tiles. */
2249 encoding.tile_index = (dts >> 1) & ((1 << qualifier) - 1);
2251 /* Fetch the slice number. The slice number is closely related to the
2252 qualifier and the svl. */
2253 encoding.slice_index = dts >> (qualifier + 1);
2255 else
2257 /* Calculate the tile pseudo-register offset relative to the other
2258 tile pseudo-registers. */
2259 int offset = regnum - tdep->sme_tile_pseudo_base;
2261 encoding.qualifier_index = std::floor (std::log2 (offset + 1));
2262 /* Calculate the tile number. */
2263 encoding.tile_index = (offset + 1) - (1 << encoding.qualifier_index);
2264 /* Direction and slice index don't get used for tiles. Set them to
2265 0/false values. */
2266 encoding.slice_index = 0;
2267 encoding.horizontal = false;
2271 /* Return the type for a ZA tile slice pseudo-register based on ENCODING. */
2273 static struct type *
2274 aarch64_za_tile_slice_type (struct gdbarch *gdbarch,
2275 const struct za_pseudo_encoding &encoding)
2277 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
2279 gdb_assert (tdep->has_sme ());
2280 gdb_assert (tdep->sme_svq > 0);
2282 if (tdep->sme_tile_slice_type_q == nullptr)
2284 /* Q tile slice type. */
2285 tdep->sme_tile_slice_type_q
2286 = init_vector_type (builtin_type (gdbarch)->builtin_uint128,
2287 tdep->sme_svq);
2288 /* D tile slice type. */
2289 tdep->sme_tile_slice_type_d
2290 = init_vector_type (builtin_type (gdbarch)->builtin_uint64,
2291 tdep->sme_svq * 2);
2292 /* S tile slice type. */
2293 tdep->sme_tile_slice_type_s
2294 = init_vector_type (builtin_type (gdbarch)->builtin_uint32,
2295 tdep->sme_svq * 4);
2296 /* H tile slice type. */
2297 tdep->sme_tile_slice_type_h
2298 = init_vector_type (builtin_type (gdbarch)->builtin_uint16,
2299 tdep->sme_svq * 8);
2300 /* B tile slice type. */
2301 tdep->sme_tile_slice_type_b
2302 = init_vector_type (builtin_type (gdbarch)->builtin_uint8,
2303 tdep->sme_svq * 16);
2306 switch (encoding.qualifier_index)
2308 case 4:
2309 return tdep->sme_tile_slice_type_q;
2310 case 3:
2311 return tdep->sme_tile_slice_type_d;
2312 case 2:
2313 return tdep->sme_tile_slice_type_s;
2314 case 1:
2315 return tdep->sme_tile_slice_type_h;
2316 case 0:
2317 return tdep->sme_tile_slice_type_b;
2318 default:
2319 error (_("Invalid qualifier index %s for tile slice pseudo register."),
2320 pulongest (encoding.qualifier_index));
2323 gdb_assert_not_reached ("Unknown qualifier for ZA tile slice register");
2326 /* Return the type for a ZA tile pseudo-register based on ENCODING. */
2328 static struct type *
2329 aarch64_za_tile_type (struct gdbarch *gdbarch,
2330 const struct za_pseudo_encoding &encoding)
2332 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
2334 gdb_assert (tdep->has_sme ());
2335 gdb_assert (tdep->sme_svq > 0);
2337 if (tdep->sme_tile_type_q == nullptr)
2339 struct type *inner_vectors_type;
2341 /* Q tile type. */
2342 inner_vectors_type
2343 = init_vector_type (builtin_type (gdbarch)->builtin_uint128,
2344 tdep->sme_svq);
2345 tdep->sme_tile_type_q
2346 = init_vector_type (inner_vectors_type, tdep->sme_svq);
2348 /* D tile type. */
2349 inner_vectors_type
2350 = init_vector_type (builtin_type (gdbarch)->builtin_uint64,
2351 tdep->sme_svq * 2);
2352 tdep->sme_tile_type_d
2353 = init_vector_type (inner_vectors_type, tdep->sme_svq * 2);
2355 /* S tile type. */
2356 inner_vectors_type
2357 = init_vector_type (builtin_type (gdbarch)->builtin_uint32,
2358 tdep->sme_svq * 4);
2359 tdep->sme_tile_type_s
2360 = init_vector_type (inner_vectors_type, tdep->sme_svq * 4);
2362 /* H tile type. */
2363 inner_vectors_type
2364 = init_vector_type (builtin_type (gdbarch)->builtin_uint16,
2365 tdep->sme_svq * 8);
2366 tdep->sme_tile_type_h
2367 = init_vector_type (inner_vectors_type, tdep->sme_svq * 8);
2369 /* B tile type. */
2370 inner_vectors_type
2371 = init_vector_type (builtin_type (gdbarch)->builtin_uint8,
2372 tdep->sme_svq * 16);
2373 tdep->sme_tile_type_b
2374 = init_vector_type (inner_vectors_type, tdep->sme_svq * 16);
2377 switch (encoding.qualifier_index)
2379 case 4:
2380 return tdep->sme_tile_type_q;
2381 case 3:
2382 return tdep->sme_tile_type_d;
2383 case 2:
2384 return tdep->sme_tile_type_s;
2385 case 1:
2386 return tdep->sme_tile_type_h;
2387 case 0:
2388 return tdep->sme_tile_type_b;
2389 default:
2390 error (_("Invalid qualifier index %s for ZA tile pseudo register."),
2391 pulongest (encoding.qualifier_index));
2394 gdb_assert_not_reached ("unknown qualifier for tile pseudo-register");
2397 /* Return the type for an AdvSISD V register. */
2399 static struct type *
2400 aarch64_vnv_type (struct gdbarch *gdbarch)
2402 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
2404 if (tdep->vnv_type == NULL)
2406 /* The other AArch64 pseudo registers (Q,D,H,S,B) refer to a single value
2407 slice from the non-pseudo vector registers. However NEON V registers
2408 are always vector registers, and need constructing as such. */
2409 const struct builtin_type *bt = builtin_type (gdbarch);
2411 struct type *t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnv",
2412 TYPE_CODE_UNION);
2414 struct type *sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
2415 TYPE_CODE_UNION);
2416 append_composite_type_field (sub, "f",
2417 init_vector_type (bt->builtin_double, 2));
2418 append_composite_type_field (sub, "u",
2419 init_vector_type (bt->builtin_uint64, 2));
2420 append_composite_type_field (sub, "s",
2421 init_vector_type (bt->builtin_int64, 2));
2422 append_composite_type_field (t, "d", sub);
2424 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
2425 TYPE_CODE_UNION);
2426 append_composite_type_field (sub, "f",
2427 init_vector_type (bt->builtin_float, 4));
2428 append_composite_type_field (sub, "u",
2429 init_vector_type (bt->builtin_uint32, 4));
2430 append_composite_type_field (sub, "s",
2431 init_vector_type (bt->builtin_int32, 4));
2432 append_composite_type_field (t, "s", sub);
2434 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
2435 TYPE_CODE_UNION);
2436 append_composite_type_field (sub, "bf",
2437 init_vector_type (bt->builtin_bfloat16, 8));
2438 append_composite_type_field (sub, "f",
2439 init_vector_type (bt->builtin_half, 8));
2440 append_composite_type_field (sub, "u",
2441 init_vector_type (bt->builtin_uint16, 8));
2442 append_composite_type_field (sub, "s",
2443 init_vector_type (bt->builtin_int16, 8));
2444 append_composite_type_field (t, "h", sub);
2446 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
2447 TYPE_CODE_UNION);
2448 append_composite_type_field (sub, "u",
2449 init_vector_type (bt->builtin_uint8, 16));
2450 append_composite_type_field (sub, "s",
2451 init_vector_type (bt->builtin_int8, 16));
2452 append_composite_type_field (t, "b", sub);
2454 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
2455 TYPE_CODE_UNION);
2456 append_composite_type_field (sub, "u",
2457 init_vector_type (bt->builtin_uint128, 1));
2458 append_composite_type_field (sub, "s",
2459 init_vector_type (bt->builtin_int128, 1));
2460 append_composite_type_field (t, "q", sub);
2462 tdep->vnv_type = t;
2465 return tdep->vnv_type;
2468 /* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
2470 static int
2471 aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
2473 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
2475 if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
2476 return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;
2478 if (reg == AARCH64_DWARF_SP)
2479 return AARCH64_SP_REGNUM;
2481 if (reg == AARCH64_DWARF_PC)
2482 return AARCH64_PC_REGNUM;
2484 if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
2485 return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;
2487 if (reg == AARCH64_DWARF_SVE_VG)
2488 return AARCH64_SVE_VG_REGNUM;
2490 if (reg == AARCH64_DWARF_SVE_FFR)
2491 return AARCH64_SVE_FFR_REGNUM;
2493 if (reg >= AARCH64_DWARF_SVE_P0 && reg <= AARCH64_DWARF_SVE_P0 + 15)
2494 return AARCH64_SVE_P0_REGNUM + reg - AARCH64_DWARF_SVE_P0;
2496 if (reg >= AARCH64_DWARF_SVE_Z0 && reg <= AARCH64_DWARF_SVE_Z0 + 15)
2497 return AARCH64_SVE_Z0_REGNUM + reg - AARCH64_DWARF_SVE_Z0;
2499 if (tdep->has_pauth ())
2501 if (reg == AARCH64_DWARF_RA_SIGN_STATE)
2502 return tdep->ra_sign_state_regnum;
2505 return -1;
2508 /* Implement the "print_insn" gdbarch method. */
2510 static int
2511 aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
2513 info->symbols = NULL;
2514 return default_print_insn (memaddr, info);
/* AArch64 BRK software debug mode instruction, used as GDB's software
   breakpoint.  Note that AArch64 code is always little-endian, so the
   bytes below are the little-endian encoding of
   1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000 (BRK #0).  */
constexpr gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};

/* Standard breakpoint-manipulation helpers (kind/length lookup and
   software breakpoint bytes) built from the BRK encoding above.  */
typedef BP_MANIPULATION (aarch64_default_breakpoint) aarch64_breakpoint;
2524 /* Extract from an array REGS containing the (raw) register state a
2525 function return value of type TYPE, and copy that, in virtual
2526 format, into VALBUF. */
2528 static void
2529 aarch64_extract_return_value (struct type *type, struct regcache *regs,
2530 gdb_byte *valbuf)
2532 struct gdbarch *gdbarch = regs->arch ();
2533 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2534 int elements;
2535 struct type *fundamental_type;
2537 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2538 &fundamental_type))
2540 int len = fundamental_type->length ();
2542 for (int i = 0; i < elements; i++)
2544 int regno = AARCH64_V0_REGNUM + i;
2545 /* Enough space for a full vector register. */
2546 gdb_byte buf[register_size (gdbarch, regno)];
2547 gdb_assert (len <= sizeof (buf));
2549 aarch64_debug_printf
2550 ("read HFA or HVA return value element %d from %s",
2551 i + 1, gdbarch_register_name (gdbarch, regno));
2553 regs->cooked_read (regno, buf);
2555 memcpy (valbuf, buf, len);
2556 valbuf += len;
2559 else if (type->code () == TYPE_CODE_INT
2560 || type->code () == TYPE_CODE_CHAR
2561 || type->code () == TYPE_CODE_BOOL
2562 || type->code () == TYPE_CODE_PTR
2563 || TYPE_IS_REFERENCE (type)
2564 || type->code () == TYPE_CODE_ENUM)
2566 /* If the type is a plain integer, then the access is
2567 straight-forward. Otherwise we have to play around a bit
2568 more. */
2569 int len = type->length ();
2570 int regno = AARCH64_X0_REGNUM;
2571 ULONGEST tmp;
2573 while (len > 0)
2575 /* By using store_unsigned_integer we avoid having to do
2576 anything special for small big-endian values. */
2577 regcache_cooked_read_unsigned (regs, regno++, &tmp);
2578 store_unsigned_integer (valbuf,
2579 (len > X_REGISTER_SIZE
2580 ? X_REGISTER_SIZE : len), byte_order, tmp);
2581 len -= X_REGISTER_SIZE;
2582 valbuf += X_REGISTER_SIZE;
2585 else
2587 /* For a structure or union the behaviour is as if the value had
2588 been stored to word-aligned memory and then loaded into
2589 registers with 64-bit load instruction(s). */
2590 int len = type->length ();
2591 int regno = AARCH64_X0_REGNUM;
2592 bfd_byte buf[X_REGISTER_SIZE];
2594 while (len > 0)
2596 regs->cooked_read (regno++, buf);
2597 memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2598 len -= X_REGISTER_SIZE;
2599 valbuf += X_REGISTER_SIZE;
2605 /* Will a function return an aggregate type in memory or in a
2606 register? Return 0 if an aggregate type can be returned in a
2607 register, 1 if it must be returned in memory. */
2609 static int
2610 aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
2612 type = check_typedef (type);
2613 int elements;
2614 struct type *fundamental_type;
2616 if (TYPE_HAS_DYNAMIC_LENGTH (type))
2617 return 1;
2619 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2620 &fundamental_type))
2622 /* v0-v7 are used to return values and one register is allocated
2623 for one member. However, HFA or HVA has at most four members. */
2624 return 0;
2627 if (type->length () > 16
2628 || !language_pass_by_reference (type).trivially_copyable)
2630 /* PCS B.6 Aggregates larger than 16 bytes are passed by
2631 invisible reference. */
2633 return 1;
2636 return 0;
2639 /* Write into appropriate registers a function return value of type
2640 TYPE, given in virtual format. */
2642 static void
2643 aarch64_store_return_value (struct type *type, struct regcache *regs,
2644 const gdb_byte *valbuf)
2646 struct gdbarch *gdbarch = regs->arch ();
2647 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2648 int elements;
2649 struct type *fundamental_type;
2651 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2652 &fundamental_type))
2654 int len = fundamental_type->length ();
2656 for (int i = 0; i < elements; i++)
2658 int regno = AARCH64_V0_REGNUM + i;
2659 /* Enough space for a full vector register. */
2660 gdb_byte tmpbuf[register_size (gdbarch, regno)];
2661 gdb_assert (len <= sizeof (tmpbuf));
2663 aarch64_debug_printf
2664 ("write HFA or HVA return value element %d to %s",
2665 i + 1, gdbarch_register_name (gdbarch, regno));
2667 /* Depending on whether the target supports SVE or not, the V
2668 registers may report a size > 16 bytes. In that case, read the
2669 original contents of the register before overriding it with a new
2670 value that has a potential size <= 16 bytes. */
2671 regs->cooked_read (regno, tmpbuf);
2672 memcpy (tmpbuf, valbuf,
2673 len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
2674 regs->cooked_write (regno, tmpbuf);
2675 valbuf += len;
2678 else if (type->code () == TYPE_CODE_INT
2679 || type->code () == TYPE_CODE_CHAR
2680 || type->code () == TYPE_CODE_BOOL
2681 || type->code () == TYPE_CODE_PTR
2682 || TYPE_IS_REFERENCE (type)
2683 || type->code () == TYPE_CODE_ENUM)
2685 if (type->length () <= X_REGISTER_SIZE)
2687 /* Values of one word or less are zero/sign-extended and
2688 returned in r0. */
2689 bfd_byte tmpbuf[X_REGISTER_SIZE];
2690 LONGEST val = unpack_long (type, valbuf);
2692 store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
2693 regs->cooked_write (AARCH64_X0_REGNUM, tmpbuf);
2695 else
2697 /* Integral values greater than one word are stored in
2698 consecutive registers starting with r0. This will always
2699 be a multiple of the regiser size. */
2700 int len = type->length ();
2701 int regno = AARCH64_X0_REGNUM;
2703 while (len > 0)
2705 regs->cooked_write (regno++, valbuf);
2706 len -= X_REGISTER_SIZE;
2707 valbuf += X_REGISTER_SIZE;
2711 else
2713 /* For a structure or union the behaviour is as if the value had
2714 been stored to word-aligned memory and then loaded into
2715 registers with 64-bit load instruction(s). */
2716 int len = type->length ();
2717 int regno = AARCH64_X0_REGNUM;
2718 bfd_byte tmpbuf[X_REGISTER_SIZE];
2720 while (len > 0)
2722 memcpy (tmpbuf, valbuf,
2723 len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2724 regs->cooked_write (regno++, tmpbuf);
2725 len -= X_REGISTER_SIZE;
2726 valbuf += X_REGISTER_SIZE;
2731 /* Implement the "return_value" gdbarch method. */
2733 static enum return_value_convention
2734 aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
2735 struct type *valtype, struct regcache *regcache,
2736 struct value **read_value, const gdb_byte *writebuf)
2738 if (valtype->code () == TYPE_CODE_STRUCT
2739 || valtype->code () == TYPE_CODE_UNION
2740 || valtype->code () == TYPE_CODE_ARRAY)
2742 if (aarch64_return_in_memory (gdbarch, valtype))
2744 /* From the AAPCS64's Result Return section:
2746 "Otherwise, the caller shall reserve a block of memory of
2747 sufficient size and alignment to hold the result. The address
2748 of the memory block shall be passed as an additional argument to
2749 the function in x8. */
2751 aarch64_debug_printf ("return value in memory");
2753 if (read_value != nullptr)
2755 CORE_ADDR addr;
2757 regcache->cooked_read (AARCH64_STRUCT_RETURN_REGNUM, &addr);
2758 *read_value = value_at_non_lval (valtype, addr);
2761 return RETURN_VALUE_ABI_RETURNS_ADDRESS;
2765 if (writebuf)
2766 aarch64_store_return_value (valtype, regcache, writebuf);
2768 if (read_value)
2770 *read_value = value::allocate (valtype);
2771 aarch64_extract_return_value (valtype, regcache,
2772 (*read_value)->contents_raw ().data ());
2775 aarch64_debug_printf ("return value in registers");
2777 return RETURN_VALUE_REGISTER_CONVENTION;
2780 /* Implement the "get_longjmp_target" gdbarch method. */
2782 static int
2783 aarch64_get_longjmp_target (frame_info_ptr frame, CORE_ADDR *pc)
2785 CORE_ADDR jb_addr;
2786 gdb_byte buf[X_REGISTER_SIZE];
2787 struct gdbarch *gdbarch = get_frame_arch (frame);
2788 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
2789 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2791 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
2793 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
2794 X_REGISTER_SIZE))
2795 return 0;
2797 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
2798 return 1;
2801 /* Implement the "gen_return_address" gdbarch method. */
2803 static void
2804 aarch64_gen_return_address (struct gdbarch *gdbarch,
2805 struct agent_expr *ax, struct axs_value *value,
2806 CORE_ADDR scope)
2808 value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
2809 value->kind = axs_lvalue_register;
2810 value->u.reg = AARCH64_LR_REGNUM;
2814 /* Return TRUE if REGNUM is a W pseudo-register number. Return FALSE
2815 otherwise. */
2817 static bool
2818 is_w_pseudo_register (struct gdbarch *gdbarch, int regnum)
2820 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
2822 if (tdep->w_pseudo_base <= regnum
2823 && regnum < tdep->w_pseudo_base + tdep->w_pseudo_count)
2824 return true;
2826 return false;
2829 /* Return TRUE if REGNUM is a SME pseudo-register number. Return FALSE
2830 otherwise. */
2832 static bool
2833 is_sme_pseudo_register (struct gdbarch *gdbarch, int regnum)
2835 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
2837 if (tdep->has_sme () && tdep->sme_pseudo_base <= regnum
2838 && regnum < tdep->sme_pseudo_base + tdep->sme_pseudo_count)
2839 return true;
2841 return false;
2844 /* Convert ENCODING into a ZA tile slice name. */
2846 static const std::string
2847 aarch64_za_tile_slice_name (const struct za_pseudo_encoding &encoding)
2849 gdb_assert (encoding.qualifier_index >= 0);
2850 gdb_assert (encoding.qualifier_index <= 4);
2851 gdb_assert (encoding.tile_index >= 0);
2852 gdb_assert (encoding.tile_index <= 15);
2853 gdb_assert (encoding.slice_index >= 0);
2854 gdb_assert (encoding.slice_index <= 255);
2856 const char orientation = encoding.horizontal ? 'h' : 'v';
2858 const char qualifiers[6] = "bhsdq";
2859 const char qualifier = qualifiers [encoding.qualifier_index];
2860 return string_printf ("za%d%c%c%d", encoding.tile_index, orientation,
2861 qualifier, encoding.slice_index);
2864 /* Convert ENCODING into a ZA tile name. */
2866 static const std::string
2867 aarch64_za_tile_name (const struct za_pseudo_encoding &encoding)
2869 /* Tiles don't use the slice number and the direction fields. */
2870 gdb_assert (encoding.qualifier_index >= 0);
2871 gdb_assert (encoding.qualifier_index <= 4);
2872 gdb_assert (encoding.tile_index >= 0);
2873 gdb_assert (encoding.tile_index <= 15);
2875 const char qualifiers[6] = "bhsdq";
2876 const char qualifier = qualifiers [encoding.qualifier_index];
2877 return (string_printf ("za%d%c", encoding.tile_index, qualifier));
2880 /* Given a SME pseudo-register REGNUM, return its type. */
2882 static struct type *
2883 aarch64_sme_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
2885 struct za_pseudo_encoding encoding;
2887 /* Decode the SME pseudo-register number. */
2888 aarch64_za_decode_pseudos (gdbarch, regnum, encoding);
2890 if (is_sme_tile_slice_pseudo_register (gdbarch, regnum))
2891 return aarch64_za_tile_slice_type (gdbarch, encoding);
2892 else
2893 return aarch64_za_tile_type (gdbarch, encoding);
2896 /* Return the pseudo register name corresponding to register regnum. */
2898 static const char *
2899 aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
2901 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
2903 /* W pseudo-registers. Bottom halves of the X registers. */
2904 static const char *const w_name[] =
2906 "w0", "w1", "w2", "w3",
2907 "w4", "w5", "w6", "w7",
2908 "w8", "w9", "w10", "w11",
2909 "w12", "w13", "w14", "w15",
2910 "w16", "w17", "w18", "w19",
2911 "w20", "w21", "w22", "w23",
2912 "w24", "w25", "w26", "w27",
2913 "w28", "w29", "w30",
2916 static const char *const q_name[] =
2918 "q0", "q1", "q2", "q3",
2919 "q4", "q5", "q6", "q7",
2920 "q8", "q9", "q10", "q11",
2921 "q12", "q13", "q14", "q15",
2922 "q16", "q17", "q18", "q19",
2923 "q20", "q21", "q22", "q23",
2924 "q24", "q25", "q26", "q27",
2925 "q28", "q29", "q30", "q31",
2928 static const char *const d_name[] =
2930 "d0", "d1", "d2", "d3",
2931 "d4", "d5", "d6", "d7",
2932 "d8", "d9", "d10", "d11",
2933 "d12", "d13", "d14", "d15",
2934 "d16", "d17", "d18", "d19",
2935 "d20", "d21", "d22", "d23",
2936 "d24", "d25", "d26", "d27",
2937 "d28", "d29", "d30", "d31",
2940 static const char *const s_name[] =
2942 "s0", "s1", "s2", "s3",
2943 "s4", "s5", "s6", "s7",
2944 "s8", "s9", "s10", "s11",
2945 "s12", "s13", "s14", "s15",
2946 "s16", "s17", "s18", "s19",
2947 "s20", "s21", "s22", "s23",
2948 "s24", "s25", "s26", "s27",
2949 "s28", "s29", "s30", "s31",
2952 static const char *const h_name[] =
2954 "h0", "h1", "h2", "h3",
2955 "h4", "h5", "h6", "h7",
2956 "h8", "h9", "h10", "h11",
2957 "h12", "h13", "h14", "h15",
2958 "h16", "h17", "h18", "h19",
2959 "h20", "h21", "h22", "h23",
2960 "h24", "h25", "h26", "h27",
2961 "h28", "h29", "h30", "h31",
2964 static const char *const b_name[] =
2966 "b0", "b1", "b2", "b3",
2967 "b4", "b5", "b6", "b7",
2968 "b8", "b9", "b10", "b11",
2969 "b12", "b13", "b14", "b15",
2970 "b16", "b17", "b18", "b19",
2971 "b20", "b21", "b22", "b23",
2972 "b24", "b25", "b26", "b27",
2973 "b28", "b29", "b30", "b31",
2976 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
2978 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2979 return q_name[p_regnum - AARCH64_Q0_REGNUM];
2981 if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2982 return d_name[p_regnum - AARCH64_D0_REGNUM];
2984 if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2985 return s_name[p_regnum - AARCH64_S0_REGNUM];
2987 if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2988 return h_name[p_regnum - AARCH64_H0_REGNUM];
2990 if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2991 return b_name[p_regnum - AARCH64_B0_REGNUM];
2993 /* W pseudo-registers? */
2994 if (is_w_pseudo_register (gdbarch, regnum))
2995 return w_name[regnum - tdep->w_pseudo_base];
2997 if (tdep->has_sve ())
2999 static const char *const sve_v_name[] =
3001 "v0", "v1", "v2", "v3",
3002 "v4", "v5", "v6", "v7",
3003 "v8", "v9", "v10", "v11",
3004 "v12", "v13", "v14", "v15",
3005 "v16", "v17", "v18", "v19",
3006 "v20", "v21", "v22", "v23",
3007 "v24", "v25", "v26", "v27",
3008 "v28", "v29", "v30", "v31",
3011 if (p_regnum >= AARCH64_SVE_V0_REGNUM
3012 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
3013 return sve_v_name[p_regnum - AARCH64_SVE_V0_REGNUM];
3016 if (is_sme_pseudo_register (gdbarch, regnum))
3017 return tdep->sme_pseudo_names[regnum - tdep->sme_pseudo_base].c_str ();
3019 /* RA_STATE is used for unwinding only. Do not assign it a name - this
3020 prevents it from being read by methods such as
3021 mi_cmd_trace_frame_collected. */
3022 if (tdep->has_pauth () && regnum == tdep->ra_sign_state_regnum)
3023 return "";
3025 internal_error (_("aarch64_pseudo_register_name: bad register number %d"),
3026 p_regnum);
3029 /* Implement the "pseudo_register_type" tdesc_arch_data method. */
3031 static struct type *
3032 aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
3034 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
3036 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
3038 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
3039 return aarch64_vnq_type (gdbarch);
3041 if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
3042 return aarch64_vnd_type (gdbarch);
3044 if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
3045 return aarch64_vns_type (gdbarch);
3047 if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
3048 return aarch64_vnh_type (gdbarch);
3050 if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
3051 return aarch64_vnb_type (gdbarch);
3053 if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
3054 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
3055 return aarch64_vnv_type (gdbarch);
3057 /* W pseudo-registers are 32-bit. */
3058 if (is_w_pseudo_register (gdbarch, regnum))
3059 return builtin_type (gdbarch)->builtin_uint32;
3061 if (is_sme_pseudo_register (gdbarch, regnum))
3062 return aarch64_sme_pseudo_register_type (gdbarch, regnum);
3064 if (tdep->has_pauth () && regnum == tdep->ra_sign_state_regnum)
3065 return builtin_type (gdbarch)->builtin_uint64;
3067 internal_error (_("aarch64_pseudo_register_type: bad register number %d"),
3068 p_regnum);
3071 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
3073 static int
3074 aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
3075 const struct reggroup *group)
3077 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
3079 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
3081 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
3082 return group == all_reggroup || group == vector_reggroup;
3083 else if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
3084 return (group == all_reggroup || group == vector_reggroup
3085 || group == float_reggroup);
3086 else if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
3087 return (group == all_reggroup || group == vector_reggroup
3088 || group == float_reggroup);
3089 else if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
3090 return group == all_reggroup || group == vector_reggroup;
3091 else if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
3092 return group == all_reggroup || group == vector_reggroup;
3093 else if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
3094 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
3095 return group == all_reggroup || group == vector_reggroup;
3096 else if (is_sme_pseudo_register (gdbarch, regnum))
3097 return group == all_reggroup || group == vector_reggroup;
3098 /* RA_STATE is used for unwinding only. Do not assign it to any groups. */
3099 if (tdep->has_pauth () && regnum == tdep->ra_sign_state_regnum)
3100 return 0;
3102 return group == all_reggroup;
3105 /* Helper for aarch64_pseudo_read_value. */
3107 static value *
3108 aarch64_pseudo_read_value_1 (frame_info_ptr next_frame,
3109 const int pseudo_reg_num, int raw_regnum_offset)
3111 unsigned v_regnum = AARCH64_V0_REGNUM + raw_regnum_offset;
3113 return pseudo_from_raw_part (next_frame, pseudo_reg_num, v_regnum, 0);
3116 /* Helper function for reading/writing ZA pseudo-registers. Given REGNUM,
3117 a ZA pseudo-register number, return the information on positioning of the
3118 bytes that must be read from/written to. */
3120 static za_offsets
3121 aarch64_za_offsets_from_regnum (struct gdbarch *gdbarch, int regnum)
3123 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
3125 gdb_assert (tdep->has_sme ());
3126 gdb_assert (tdep->sme_svq > 0);
3127 gdb_assert (tdep->sme_pseudo_base <= regnum);
3128 gdb_assert (regnum < tdep->sme_pseudo_base + tdep->sme_pseudo_count);
3130 struct za_pseudo_encoding encoding;
3132 /* Decode the ZA pseudo-register number. */
3133 aarch64_za_decode_pseudos (gdbarch, regnum, encoding);
3135 /* Fetch the streaming vector length. */
3136 size_t svl = sve_vl_from_vq (tdep->sme_svq);
3137 za_offsets offsets;
3139 if (is_sme_tile_slice_pseudo_register (gdbarch, regnum))
3141 if (encoding.horizontal)
3143 /* Horizontal tile slices are contiguous ranges of svl bytes. */
3145 /* The starting offset depends on the tile index (to locate the tile
3146 in the ZA buffer), the slice index (to locate the slice within the
3147 tile) and the qualifier. */
3148 offsets.starting_offset
3149 = encoding.tile_index * svl + encoding.slice_index
3150 * (svl >> encoding.qualifier_index);
3151 /* Horizontal tile slice data is contiguous and thus doesn't have
3152 a stride. */
3153 offsets.stride_size = 0;
3154 /* Horizontal tile slice data is contiguous and thus only has 1
3155 chunk. */
3156 offsets.chunks = 1;
3157 /* The chunk size is always svl bytes. */
3158 offsets.chunk_size = svl;
3160 else
3162 /* Vertical tile slices are non-contiguous ranges of
3163 (1 << qualifier_index) bytes. */
3165 /* The starting offset depends on the tile number (to locate the
3166 tile in the ZA buffer), the slice index (to locate the element
3167 within the tile slice) and the qualifier. */
3168 offsets.starting_offset
3169 = encoding.tile_index * svl + encoding.slice_index
3170 * (1 << encoding.qualifier_index);
3171 /* The offset between vertical tile slices depends on the qualifier
3172 and svl. */
3173 offsets.stride_size = svl << encoding.qualifier_index;
3174 /* The number of chunks depends on svl and the qualifier size. */
3175 offsets.chunks = svl >> encoding.qualifier_index;
3176 /* The chunk size depends on the qualifier. */
3177 offsets.chunk_size = 1 << encoding.qualifier_index;
3180 else
3182 /* ZA tile pseudo-register. */
3184 /* Starting offset depends on the tile index and qualifier. */
3185 offsets.starting_offset = encoding.tile_index * svl;
3186 /* The offset between tile slices depends on the qualifier and svl. */
3187 offsets.stride_size = svl << encoding.qualifier_index;
3188 /* The number of chunks depends on the qualifier and svl. */
3189 offsets.chunks = svl >> encoding.qualifier_index;
3190 /* The chunk size is always svl bytes. */
3191 offsets.chunk_size = svl;
3194 return offsets;
3197 /* Given REGNUM, a SME pseudo-register number, return its value in RESULT. */
3199 static value *
3200 aarch64_sme_pseudo_register_read (gdbarch *gdbarch, frame_info_ptr next_frame,
3201 const int pseudo_reg_num)
3203 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
3205 gdb_assert (tdep->has_sme ());
3206 gdb_assert (tdep->sme_svq > 0);
3207 gdb_assert (tdep->sme_pseudo_base <= pseudo_reg_num);
3208 gdb_assert (pseudo_reg_num < tdep->sme_pseudo_base + tdep->sme_pseudo_count);
3210 /* Fetch the offsets that we need in order to read from the correct blocks
3211 of ZA. */
3212 za_offsets offsets
3213 = aarch64_za_offsets_from_regnum (gdbarch, pseudo_reg_num);
3215 /* Fetch the contents of ZA. */
3216 value *za_value = value_of_register (tdep->sme_za_regnum, next_frame);
3217 value *result = value::allocate_register (next_frame, pseudo_reg_num);
3219 /* Copy the requested data. */
3220 for (int chunks = 0; chunks < offsets.chunks; chunks++)
3222 int src_offset = offsets.starting_offset + chunks * offsets.stride_size;
3223 int dst_offset = chunks * offsets.chunk_size;
3224 za_value->contents_copy (result, dst_offset, src_offset,
3225 offsets.chunk_size);
3228 return result;
3231 /* Implement the "pseudo_register_read_value" gdbarch method. */
3233 static value *
3234 aarch64_pseudo_read_value (gdbarch *gdbarch, frame_info_ptr next_frame,
3235 const int pseudo_reg_num)
3237 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
3239 if (is_w_pseudo_register (gdbarch, pseudo_reg_num))
3241 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3242 /* Default offset for little endian. */
3243 int offset = 0;
3245 if (byte_order == BFD_ENDIAN_BIG)
3246 offset = 4;
3248 /* Find the correct X register to extract the data from. */
3249 int x_regnum
3250 = AARCH64_X0_REGNUM + (pseudo_reg_num - tdep->w_pseudo_base);
3252 /* Read the bottom 4 bytes of X. */
3253 return pseudo_from_raw_part (next_frame, pseudo_reg_num, x_regnum,
3254 offset);
3256 else if (is_sme_pseudo_register (gdbarch, pseudo_reg_num))
3257 return aarch64_sme_pseudo_register_read (gdbarch, next_frame,
3258 pseudo_reg_num);
3260 /* Offset in the "pseudo-register space". */
3261 int pseudo_offset = pseudo_reg_num - gdbarch_num_regs (gdbarch);
3263 if (pseudo_offset >= AARCH64_Q0_REGNUM
3264 && pseudo_offset < AARCH64_Q0_REGNUM + 32)
3265 return aarch64_pseudo_read_value_1 (next_frame, pseudo_reg_num,
3266 pseudo_offset - AARCH64_Q0_REGNUM);
3268 if (pseudo_offset >= AARCH64_D0_REGNUM
3269 && pseudo_offset < AARCH64_D0_REGNUM + 32)
3270 return aarch64_pseudo_read_value_1 (next_frame, pseudo_reg_num,
3271 pseudo_offset - AARCH64_D0_REGNUM);
3273 if (pseudo_offset >= AARCH64_S0_REGNUM
3274 && pseudo_offset < AARCH64_S0_REGNUM + 32)
3275 return aarch64_pseudo_read_value_1 (next_frame, pseudo_reg_num,
3276 pseudo_offset - AARCH64_S0_REGNUM);
3278 if (pseudo_offset >= AARCH64_H0_REGNUM
3279 && pseudo_offset < AARCH64_H0_REGNUM + 32)
3280 return aarch64_pseudo_read_value_1 (next_frame, pseudo_reg_num,
3281 pseudo_offset - AARCH64_H0_REGNUM);
3283 if (pseudo_offset >= AARCH64_B0_REGNUM
3284 && pseudo_offset < AARCH64_B0_REGNUM + 32)
3285 return aarch64_pseudo_read_value_1 (next_frame, pseudo_reg_num,
3286 pseudo_offset - AARCH64_B0_REGNUM);
3288 if (tdep->has_sve () && pseudo_offset >= AARCH64_SVE_V0_REGNUM
3289 && pseudo_offset < AARCH64_SVE_V0_REGNUM + 32)
3290 return aarch64_pseudo_read_value_1 (next_frame, pseudo_reg_num,
3291 pseudo_offset - AARCH64_SVE_V0_REGNUM);
3293 gdb_assert_not_reached ("regnum out of bound");
3296 /* Helper for aarch64_pseudo_write. */
3298 static void
3299 aarch64_pseudo_write_1 (gdbarch *gdbarch, frame_info_ptr next_frame,
3300 int regnum_offset,
3301 gdb::array_view<const gdb_byte> buf)
3303 unsigned raw_regnum = AARCH64_V0_REGNUM + regnum_offset;
3305 /* Enough space for a full vector register. */
3306 int raw_reg_size = register_size (gdbarch, raw_regnum);
3307 gdb_byte raw_buf[raw_reg_size];
3308 static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
3310 /* Ensure the register buffer is zero, we want gdb writes of the
3311 various 'scalar' pseudo registers to behavior like architectural
3312 writes, register width bytes are written the remainder are set to
3313 zero. */
3314 memset (raw_buf, 0, register_size (gdbarch, AARCH64_V0_REGNUM));
3316 gdb::array_view<gdb_byte> raw_view (raw_buf, raw_reg_size);
3317 copy (buf, raw_view.slice (0, buf.size ()));
3318 put_frame_register (next_frame, raw_regnum, raw_view);
3321 /* Given REGNUM, a SME pseudo-register number, store the bytes from DATA to the
3322 pseudo-register. */
3324 static void
3325 aarch64_sme_pseudo_register_write (gdbarch *gdbarch, frame_info_ptr next_frame,
3326 const int regnum,
3327 gdb::array_view<const gdb_byte> data)
3329 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
3331 gdb_assert (tdep->has_sme ());
3332 gdb_assert (tdep->sme_svq > 0);
3333 gdb_assert (tdep->sme_pseudo_base <= regnum);
3334 gdb_assert (regnum < tdep->sme_pseudo_base + tdep->sme_pseudo_count);
3336 /* Fetch the offsets that we need in order to write to the correct blocks
3337 of ZA. */
3338 za_offsets offsets = aarch64_za_offsets_from_regnum (gdbarch, regnum);
3340 /* Fetch the contents of ZA. */
3341 value *za_value = value_of_register (tdep->sme_za_regnum, next_frame);
3344 /* Create a view only on the portion of za we want to write. */
3345 gdb::array_view<gdb_byte> za_view
3346 = za_value->contents_writeable ().slice (offsets.starting_offset);
3348 /* Copy the requested data. */
3349 for (int chunks = 0; chunks < offsets.chunks; chunks++)
3351 gdb::array_view<const gdb_byte> src
3352 = data.slice (chunks * offsets.chunk_size, offsets.chunk_size);
3353 gdb::array_view<gdb_byte> dst
3354 = za_view.slice (chunks * offsets.stride_size, offsets.chunk_size);
3355 copy (src, dst);
3359 /* Write back to ZA. */
3360 put_frame_register (next_frame, tdep->sme_za_regnum,
3361 za_value->contents_raw ());
3364 /* Implement the "pseudo_register_write" gdbarch method. */
3366 static void
3367 aarch64_pseudo_write (gdbarch *gdbarch, frame_info_ptr next_frame,
3368 const int pseudo_reg_num,
3369 gdb::array_view<const gdb_byte> buf)
3371 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
3373 if (is_w_pseudo_register (gdbarch, pseudo_reg_num))
3375 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3376 /* Default offset for little endian. */
3377 int offset = 0;
3379 if (byte_order == BFD_ENDIAN_BIG)
3380 offset = 4;
3382 /* Find the correct X register to extract the data from. */
3383 int x_regnum = AARCH64_X0_REGNUM + (pseudo_reg_num - tdep->w_pseudo_base);
3385 /* First zero-out the contents of X. */
3386 gdb_byte bytes[8] {};
3387 gdb::array_view<gdb_byte> bytes_view (bytes);
3388 copy (buf, bytes_view.slice (offset, 4));
3390 /* Write to the bottom 4 bytes of X. */
3391 put_frame_register (next_frame, x_regnum, bytes_view);
3392 return;
3394 else if (is_sme_pseudo_register (gdbarch, pseudo_reg_num))
3396 aarch64_sme_pseudo_register_write (gdbarch, next_frame, pseudo_reg_num,
3397 buf);
3398 return;
3401 /* Offset in the "pseudo-register space". */
3402 int pseudo_offset = pseudo_reg_num - gdbarch_num_regs (gdbarch);
3404 if (pseudo_offset >= AARCH64_Q0_REGNUM
3405 && pseudo_offset < AARCH64_Q0_REGNUM + 32)
3406 return aarch64_pseudo_write_1 (gdbarch, next_frame,
3407 pseudo_offset - AARCH64_Q0_REGNUM, buf);
3409 if (pseudo_offset >= AARCH64_D0_REGNUM
3410 && pseudo_offset < AARCH64_D0_REGNUM + 32)
3411 return aarch64_pseudo_write_1 (gdbarch, next_frame,
3412 pseudo_offset - AARCH64_D0_REGNUM, buf);
3414 if (pseudo_offset >= AARCH64_S0_REGNUM
3415 && pseudo_offset < AARCH64_S0_REGNUM + 32)
3416 return aarch64_pseudo_write_1 (gdbarch, next_frame,
3417 pseudo_offset - AARCH64_S0_REGNUM, buf);
3419 if (pseudo_offset >= AARCH64_H0_REGNUM
3420 && pseudo_offset < AARCH64_H0_REGNUM + 32)
3421 return aarch64_pseudo_write_1 (gdbarch, next_frame,
3422 pseudo_offset - AARCH64_H0_REGNUM, buf);
3424 if (pseudo_offset >= AARCH64_B0_REGNUM
3425 && pseudo_offset < AARCH64_B0_REGNUM + 32)
3426 return aarch64_pseudo_write_1 (gdbarch, next_frame,
3427 pseudo_offset - AARCH64_B0_REGNUM, buf);
3429 if (tdep->has_sve () && pseudo_offset >= AARCH64_SVE_V0_REGNUM
3430 && pseudo_offset < AARCH64_SVE_V0_REGNUM + 32)
3431 return aarch64_pseudo_write_1 (gdbarch, next_frame,
3432 pseudo_offset - AARCH64_SVE_V0_REGNUM, buf);
3434 gdb_assert_not_reached ("regnum out of bound");
3437 /* Callback function for user_reg_add. */
3439 static struct value *
3440 value_of_aarch64_user_reg (frame_info_ptr frame, const void *baton)
3442 const int *reg_p = (const int *) baton;
3444 return value_of_register (*reg_p, get_next_frame_sentinel_okay (frame));
/* Implement the "software_single_step" gdbarch method, needed to
   single step through atomic sequences on AArch64.

   A load-exclusive / store-exclusive pair must run without a breakpoint
   trap in the middle, or the exclusive monitor is cleared and the
   sequence never completes.  Scan forward from PC for such a sequence
   and return breakpoint addresses just past it (and at the target of
   any conditional branch inside it).  An empty vector means "no atomic
   sequence here; single step normally".  */

static std::vector<CORE_ADDR>
aarch64_software_single_step (struct regcache *regcache)
{
  struct gdbarch *gdbarch = regcache->arch ();
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  const int insn_size = 4;
  const int atomic_sequence_length = 16; /* Instruction sequence length.  */
  CORE_ADDR pc = regcache_read_pc (regcache);
  CORE_ADDR breaks[2] = { CORE_ADDR_MAX, CORE_ADDR_MAX };
  CORE_ADDR loc = pc;
  CORE_ADDR closing_insn = 0;

  ULONGEST insn_from_memory;
  if (!safe_read_memory_unsigned_integer (loc, insn_size,
					  byte_order_for_code,
					  &insn_from_memory))
    {
      /* Assume we don't have a atomic sequence, as we couldn't read the
	 instruction in this location.  */
      return {};
    }

  uint32_t insn = insn_from_memory;
  int index;
  int insn_count;
  int bc_insn_count = 0; /* Conditional branch instruction count.  */
  int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed).  */
  aarch64_inst inst;

  if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
    return {};

  /* Look for a Load Exclusive instruction which begins the sequence.
     Bit 22 set distinguishes the load form from the store form.  */
  if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
    return {};

  for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
    {
      loc += insn_size;

      if (!safe_read_memory_unsigned_integer (loc, insn_size,
					      byte_order_for_code,
					      &insn_from_memory))
	{
	  /* Assume we don't have a atomic sequence, as we couldn't read the
	     instruction in this location.  */
	  return {};
	}

      insn = insn_from_memory;
      if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
	return {};
      /* Check if the instruction is a conditional branch.  */
      if (inst.opcode->iclass == condbranch)
	{
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);

	  /* More than one conditional branch makes the sequence too
	     complex to handle; fall back to normal stepping.  */
	  if (bc_insn_count >= 1)
	    return {};

	  /* It is, so we'll try to set a breakpoint at the destination.  */
	  breaks[1] = loc + inst.operands[0].imm.value;

	  bc_insn_count++;
	  last_breakpoint++;
	}

      /* Look for the Store Exclusive which closes the atomic sequence
	 (bit 22 clear means the store form).  */
      if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
	{
	  closing_insn = loc;
	  break;
	}
    }

  /* We didn't find a closing Store Exclusive instruction, fall back.  */
  if (!closing_insn)
    return {};

  /* Insert breakpoint after the end of the atomic sequence.  */
  breaks[0] = loc + insn_size;

  /* Check for duplicated breakpoints, and also check that the second
     breakpoint is not within the atomic sequence.  */
  if (last_breakpoint
      && (breaks[1] == breaks[0]
	  || (breaks[1] >= pc && breaks[1] <= closing_insn)))
    last_breakpoint = 0;

  std::vector<CORE_ADDR> next_pcs;

  /* Insert the breakpoint at the end of the sequence, and one at the
     destination of the conditional branch, if it exists.  */
  for (index = 0; index <= last_breakpoint; index++)
    next_pcs.push_back (breaks[index]);

  return next_pcs;
}
/* Per-instruction state carried from the copy phase to the fixup phase
   of displaced stepping.  */

struct aarch64_displaced_step_copy_insn_closure
  : public displaced_step_copy_insn_closure
{
  /* It is true when condition instruction, such as B.CON, TBZ, etc,
     is being displaced stepping.  */
  bool cond = false;

  /* PC adjustment offset after displaced stepping.  If 0, then we don't
     write the PC back, assuming the PC is already the right address.  */
  int32_t pc_adjust = 0;
};
/* Data when visiting instructions for displaced stepping.  */

struct aarch64_displaced_step_data
{
  /* Must come first: the visitor callbacks downcast from this base.  */
  struct aarch64_insn_data base;

  /* The address where the instruction will be executed at.  */
  CORE_ADDR new_addr;
  /* Buffer of instructions to be copied to NEW_ADDR to execute.  */
  uint32_t insn_buf[AARCH64_DISPLACED_MODIFIED_INSNS];
  /* Number of instructions in INSN_BUF.  */
  unsigned insn_count;
  /* Registers when doing displaced stepping.  */
  struct regcache *regs;

  /* Closure recording what the fixup phase must do afterwards.  */
  aarch64_displaced_step_copy_insn_closure *dsc;
};
3579 /* Implementation of aarch64_insn_visitor method "b". */
3581 static void
3582 aarch64_displaced_step_b (const int is_bl, const int32_t offset,
3583 struct aarch64_insn_data *data)
3585 struct aarch64_displaced_step_data *dsd
3586 = (struct aarch64_displaced_step_data *) data;
3587 int64_t new_offset = data->insn_addr - dsd->new_addr + offset;
3589 if (can_encode_int32 (new_offset, 28))
3591 /* Emit B rather than BL, because executing BL on a new address
3592 will get the wrong address into LR. In order to avoid this,
3593 we emit B, and update LR if the instruction is BL. */
3594 emit_b (dsd->insn_buf, 0, new_offset);
3595 dsd->insn_count++;
3597 else
3599 /* Write NOP. */
3600 emit_nop (dsd->insn_buf);
3601 dsd->insn_count++;
3602 dsd->dsc->pc_adjust = offset;
3605 if (is_bl)
3607 /* Update LR. */
3608 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
3609 data->insn_addr + 4);
3613 /* Implementation of aarch64_insn_visitor method "b_cond". */
3615 static void
3616 aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
3617 struct aarch64_insn_data *data)
3619 struct aarch64_displaced_step_data *dsd
3620 = (struct aarch64_displaced_step_data *) data;
3622 /* GDB has to fix up PC after displaced step this instruction
3623 differently according to the condition is true or false. Instead
3624 of checking COND against conditional flags, we can use
3625 the following instructions, and GDB can tell how to fix up PC
3626 according to the PC value.
3628 B.COND TAKEN ; If cond is true, then jump to TAKEN.
3629 INSN1 ;
3630 TAKEN:
3631 INSN2
3634 emit_bcond (dsd->insn_buf, cond, 8);
3635 dsd->dsc->cond = true;
3636 dsd->dsc->pc_adjust = offset;
3637 dsd->insn_count = 1;
3640 /* Dynamically allocate a new register. If we know the register
3641 statically, we should make it a global as above instead of using this
3642 helper function. */
3644 static struct aarch64_register
3645 aarch64_register (unsigned num, int is64)
3647 return (struct aarch64_register) { num, is64 };
3650 /* Implementation of aarch64_insn_visitor method "cb". */
3652 static void
3653 aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
3654 const unsigned rn, int is64,
3655 struct aarch64_insn_data *data)
3657 struct aarch64_displaced_step_data *dsd
3658 = (struct aarch64_displaced_step_data *) data;
3660 /* The offset is out of range for a compare and branch
3661 instruction. We can use the following instructions instead:
3663 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
3664 INSN1 ;
3665 TAKEN:
3666 INSN2
3668 emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
3669 dsd->insn_count = 1;
3670 dsd->dsc->cond = true;
3671 dsd->dsc->pc_adjust = offset;
3674 /* Implementation of aarch64_insn_visitor method "tb". */
3676 static void
3677 aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
3678 const unsigned rt, unsigned bit,
3679 struct aarch64_insn_data *data)
3681 struct aarch64_displaced_step_data *dsd
3682 = (struct aarch64_displaced_step_data *) data;
3684 /* The offset is out of range for a test bit and branch
3685 instruction We can use the following instructions instead:
3687 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
3688 INSN1 ;
3689 TAKEN:
3690 INSN2
3693 emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
3694 dsd->insn_count = 1;
3695 dsd->dsc->cond = true;
3696 dsd->dsc->pc_adjust = offset;
3699 /* Implementation of aarch64_insn_visitor method "adr". */
3701 static void
3702 aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
3703 const int is_adrp, struct aarch64_insn_data *data)
3705 struct aarch64_displaced_step_data *dsd
3706 = (struct aarch64_displaced_step_data *) data;
3707 /* We know exactly the address the ADR{P,} instruction will compute.
3708 We can just write it to the destination register. */
3709 CORE_ADDR address = data->insn_addr + offset;
3711 if (is_adrp)
3713 /* Clear the lower 12 bits of the offset to get the 4K page. */
3714 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
3715 address & ~0xfff);
3717 else
3718 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
3719 address);
3721 dsd->dsc->pc_adjust = 4;
3722 emit_nop (dsd->insn_buf);
3723 dsd->insn_count = 1;
3726 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
3728 static void
3729 aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
3730 const unsigned rt, const int is64,
3731 struct aarch64_insn_data *data)
3733 struct aarch64_displaced_step_data *dsd
3734 = (struct aarch64_displaced_step_data *) data;
3735 CORE_ADDR address = data->insn_addr + offset;
3736 struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };
3738 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
3739 address);
3741 if (is_sw)
3742 dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
3743 aarch64_register (rt, 1), zero);
3744 else
3745 dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
3746 aarch64_register (rt, 1), zero);
3748 dsd->dsc->pc_adjust = 4;
3751 /* Implementation of aarch64_insn_visitor method "others". */
3753 static void
3754 aarch64_displaced_step_others (const uint32_t insn,
3755 struct aarch64_insn_data *data)
3757 struct aarch64_displaced_step_data *dsd
3758 = (struct aarch64_displaced_step_data *) data;
3760 uint32_t masked_insn = (insn & CLEAR_Rn_MASK);
3761 if (masked_insn == BLR)
3763 /* Emit a BR to the same register and then update LR to the original
3764 address (similar to aarch64_displaced_step_b). */
3765 aarch64_emit_insn (dsd->insn_buf, insn & 0xffdfffff);
3766 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
3767 data->insn_addr + 4);
3769 else
3770 aarch64_emit_insn (dsd->insn_buf, insn);
3771 dsd->insn_count = 1;
3773 if (masked_insn == RET || masked_insn == BR || masked_insn == BLR)
3774 dsd->dsc->pc_adjust = 0;
3775 else
3776 dsd->dsc->pc_adjust = 4;
/* Visitor callbacks used by aarch64_relocate_instruction when copying an
   instruction for displaced stepping; one entry per instruction class
   handled by aarch64_insn_visitor.  */
static const struct aarch64_insn_visitor visitor =
{
  aarch64_displaced_step_b,
  aarch64_displaced_step_b_cond,
  aarch64_displaced_step_cb,
  aarch64_displaced_step_tb,
  aarch64_displaced_step_adr,
  aarch64_displaced_step_ldr_literal,
  aarch64_displaced_step_others,
};
3790 /* Implement the "displaced_step_copy_insn" gdbarch method. */
3792 displaced_step_copy_insn_closure_up
3793 aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
3794 CORE_ADDR from, CORE_ADDR to,
3795 struct regcache *regs)
3797 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
3798 struct aarch64_displaced_step_data dsd;
3799 aarch64_inst inst;
3800 ULONGEST insn_from_memory;
3802 if (!safe_read_memory_unsigned_integer (from, 4, byte_order_for_code,
3803 &insn_from_memory))
3804 return nullptr;
3806 uint32_t insn = insn_from_memory;
3808 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
3809 return NULL;
3811 /* Look for a Load Exclusive instruction which begins the sequence. */
3812 if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
3814 /* We can't displaced step atomic sequences. */
3815 return NULL;
3818 std::unique_ptr<aarch64_displaced_step_copy_insn_closure> dsc
3819 (new aarch64_displaced_step_copy_insn_closure);
3820 dsd.base.insn_addr = from;
3821 dsd.new_addr = to;
3822 dsd.regs = regs;
3823 dsd.dsc = dsc.get ();
3824 dsd.insn_count = 0;
3825 aarch64_relocate_instruction (insn, &visitor,
3826 (struct aarch64_insn_data *) &dsd);
3827 gdb_assert (dsd.insn_count <= AARCH64_DISPLACED_MODIFIED_INSNS);
3829 if (dsd.insn_count != 0)
3831 int i;
3833 /* Instruction can be relocated to scratch pad. Copy
3834 relocated instruction(s) there. */
3835 for (i = 0; i < dsd.insn_count; i++)
3837 displaced_debug_printf ("writing insn %.8x at %s",
3838 dsd.insn_buf[i],
3839 paddress (gdbarch, to + i * 4));
3841 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
3842 (ULONGEST) dsd.insn_buf[i]);
3845 else
3847 dsc = NULL;
3850 /* This is a work around for a problem with g++ 4.8. */
3851 return displaced_step_copy_insn_closure_up (dsc.release ());
/* Implement the "displaced_step_fixup" gdbarch method.

   Called after the instruction copied to the scratch pad at TO has (or
   has not) been executed; repair the PC so execution resumes at the
   right place relative to the original address FROM.  */

void
aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
			      struct displaced_step_copy_insn_closure *dsc_,
			      CORE_ADDR from, CORE_ADDR to,
			      struct regcache *regs, bool completed_p)
{
  CORE_ADDR pc = regcache_read_pc (regs);

  /* If the displaced instruction didn't complete successfully then all we
     need to do is restore the program counter.  */
  if (!completed_p)
    {
      pc = from + (pc - to);
      regcache_write_pc (regs, pc);
      return;
    }

  aarch64_displaced_step_copy_insn_closure *dsc
    = (aarch64_displaced_step_copy_insn_closure *) dsc_;

  displaced_debug_printf ("PC after stepping: %s (was %s).",
			  paddress (gdbarch, pc), paddress (gdbarch, to));

  if (dsc->cond)
    {
      /* A conditional branch was rewritten as a short branch over a gap
	 (see aarch64_displaced_step_b_cond and friends); the resulting
	 PC offset from the scratch pad tells us the condition outcome.  */
      displaced_debug_printf ("[Conditional] pc_adjust before: %d",
			      dsc->pc_adjust);

      if (pc - to == 8)
	{
	  /* Condition is true.  Keep PC_ADJUST (the branch offset).  */
	}
      else if (pc - to == 4)
	{
	  /* Condition is false.  Fall through to the next instruction.  */
	  dsc->pc_adjust = 4;
	}
      else
	gdb_assert_not_reached ("Unexpected PC value after displaced stepping");

      displaced_debug_printf ("[Conditional] pc_adjust after: %d",
			      dsc->pc_adjust);
    }

  displaced_debug_printf ("%s PC by %d",
			  dsc->pc_adjust ? "adjusting" : "not adjusting",
			  dsc->pc_adjust);

  if (dsc->pc_adjust != 0)
    {
      /* Make sure the previous instruction was executed (that is, the PC
	 has changed).  If the PC didn't change, then discard the adjustment
	 offset.  Otherwise we may skip an instruction before its execution
	 took place.  */
      if ((pc - to) == 0)
	{
	  displaced_debug_printf ("PC did not move. Discarding PC adjustment.");
	  dsc->pc_adjust = 0;
	}

      displaced_debug_printf ("fixup: set PC to %s:%d",
			      paddress (gdbarch, from), dsc->pc_adjust);

      regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
				      from + dsc->pc_adjust);
    }
}
/* Implement the "displaced_step_hw_singlestep" gdbarch method.

   AArch64 scratch-pad copies are always executed with a hardware single
   step, so unconditionally answer true.  */

bool
aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch)
{
  return true;
}
3932 /* Get the correct target description for the given VQ value.
3933 If VQ is zero then it is assumed SVE is not supported.
3934 (It is not possible to set VQ to zero on an SVE system).
3936 MTE_P indicates the presence of the Memory Tagging Extension feature.
3938 TLS_P indicates the presence of the Thread Local Storage feature. */
3940 const target_desc *
3941 aarch64_read_description (const aarch64_features &features)
3943 if (features.vq > AARCH64_MAX_SVE_VQ)
3944 error (_("VQ is %" PRIu64 ", maximum supported value is %d"), features.vq,
3945 AARCH64_MAX_SVE_VQ);
3947 struct target_desc *tdesc = tdesc_aarch64_map[features];
3949 if (tdesc == NULL)
3951 tdesc = aarch64_create_target_description (features);
3952 tdesc_aarch64_map[features] = tdesc;
3955 return tdesc;
3958 /* Return the VQ used when creating the target description TDESC. */
3960 static uint64_t
3961 aarch64_get_tdesc_vq (const struct target_desc *tdesc)
3963 const struct tdesc_feature *feature_sve;
3965 if (!tdesc_has_registers (tdesc))
3966 return 0;
3968 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
3970 if (feature_sve == nullptr)
3971 return 0;
3973 uint64_t vl = tdesc_register_bitsize (feature_sve,
3974 aarch64_sve_register_names[0]) / 8;
3975 return sve_vq_from_vl (vl);
3979 /* Return the svq (streaming vector quotient) used when creating the target
3980 description TDESC. */
3982 static uint64_t
3983 aarch64_get_tdesc_svq (const struct target_desc *tdesc)
3985 const struct tdesc_feature *feature_sme;
3987 if (!tdesc_has_registers (tdesc))
3988 return 0;
3990 feature_sme = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sme");
3992 if (feature_sme == nullptr)
3993 return 0;
3995 size_t svl_squared = tdesc_register_bitsize (feature_sme, "za");
3997 /* We have the total size of the ZA matrix, in bits. Figure out the svl
3998 value. */
3999 size_t svl = std::sqrt (svl_squared / 8);
4001 /* Now extract svq. */
4002 return sve_vq_from_vl (svl);
4005 /* Get the AArch64 features present in the given target description. */
4007 aarch64_features
4008 aarch64_features_from_target_desc (const struct target_desc *tdesc)
4010 aarch64_features features;
4012 if (tdesc == nullptr)
4013 return features;
4015 features.vq = aarch64_get_tdesc_vq (tdesc);
4017 /* We need to look for a couple pauth feature name variations. */
4018 features.pauth
4019 = (tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth") != nullptr);
4021 if (!features.pauth)
4022 features.pauth = (tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth_v2")
4023 != nullptr);
4025 features.mte
4026 = (tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.mte") != nullptr);
4028 const struct tdesc_feature *tls_feature
4029 = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.tls");
4031 if (tls_feature != nullptr)
4033 /* We have TLS registers. Find out how many. */
4034 if (tdesc_unnumbered_register (tls_feature, "tpidr2"))
4035 features.tls = 2;
4036 else
4037 features.tls = 1;
4040 features.svq = aarch64_get_tdesc_svq (tdesc);
4042 /* Check for the SME2 feature. */
4043 features.sme2 = (tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sme2")
4044 != nullptr);
4046 return features;
4049 /* Implement the "cannot_store_register" gdbarch method. */
4051 static int
4052 aarch64_cannot_store_register (struct gdbarch *gdbarch, int regnum)
4054 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
4056 if (!tdep->has_pauth ())
4057 return 0;
4059 /* Pointer authentication registers are read-only. */
4060 return (regnum >= tdep->pauth_reg_base
4061 && regnum < tdep->pauth_reg_base + tdep->pauth_reg_count);
4064 /* Implement the stack_frame_destroyed_p gdbarch method. */
4066 static int
4067 aarch64_stack_frame_destroyed_p (struct gdbarch *gdbarch, CORE_ADDR pc)
4069 CORE_ADDR func_start, func_end;
4070 if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
4071 return 0;
4073 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
4075 ULONGEST insn_from_memory;
4076 if (!safe_read_memory_unsigned_integer (pc, 4, byte_order_for_code,
4077 &insn_from_memory))
4078 return 0;
4080 uint32_t insn = insn_from_memory;
4082 aarch64_inst inst;
4083 if (aarch64_decode_insn (insn, &inst, 1, nullptr) != 0)
4084 return 0;
4086 return streq (inst.opcode->name, "ret");
/* AArch64 implementation of the remove_non_address_bits gdbarch hook.  Remove
   non address bits from a pointer value.  */

static CORE_ADDR
aarch64_remove_non_address_bits (struct gdbarch *gdbarch, CORE_ADDR pointer)
{
  /* By default, we assume TBI and discard the top 8 bits plus the VA range
     select bit (55).  Below we try to fetch information about pointer
     authentication masks in order to make non-address removal more
     precise.  */
  CORE_ADDR mask = AARCH64_TOP_BITS_MASK;

  /* Check if we have an inferior first.  If not, just use the default
     mask.

     We use the inferior_ptid here because the pointer authentication masks
     should be the same across threads of a process.  Since we may not have
     access to the current thread (gdb may have switched to no inferiors
     momentarily), we use the inferior ptid.  */
  if (inferior_ptid != null_ptid)
    {
      /* If we do have an inferior, attempt to fetch its thread's thread_info
	 struct.  */
      thread_info *thread = current_inferior ()->find_thread (inferior_ptid);

      /* If the thread is running, we will not be able to fetch the mask
	 registers.  */
      if (thread != nullptr && thread->state != THREAD_RUNNING)
	{
	  /* Otherwise, fetch the register cache and the masks.  */
	  struct regcache *regs
	    = get_thread_regcache (current_inferior ()->process_target (),
				   inferior_ptid);

	  /* Use the gdbarch from the register cache to check for pointer
	     authentication support, as it matches the features found in
	     that particular thread.  */
	  aarch64_gdbarch_tdep *tdep
	    = gdbarch_tdep<aarch64_gdbarch_tdep> (regs->arch ());

	  /* Is there pointer authentication support?  */
	  if (tdep->has_pauth ())
	    {
	      CORE_ADDR cmask, dmask;
	      int dmask_regnum
		= AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base);
	      int cmask_regnum
		= AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base);

	      /* If we have a kernel address and we have kernel-mode address
		 mask registers, use those instead.  */
	      if (tdep->pauth_reg_count > 2
		  && pointer & VA_RANGE_SELECT_BIT_MASK)
		{
		  dmask_regnum
		    = AARCH64_PAUTH_DMASK_HIGH_REGNUM (tdep->pauth_reg_base);
		  cmask_regnum
		    = AARCH64_PAUTH_CMASK_HIGH_REGNUM (tdep->pauth_reg_base);
		}

	      /* We have both a code mask and a data mask.  For now they are
		 the same, but this may change in the future.  If a mask
		 register can't be read, fall back to the default mask for
		 that register.  */
	      if (regs->cooked_read (dmask_regnum, &dmask) != REG_VALID)
		dmask = mask;

	      if (regs->cooked_read (cmask_regnum, &cmask) != REG_VALID)
		cmask = mask;

	      mask |= aarch64_mask_from_pac_registers (cmask, dmask);
	    }
	}
    }

  return aarch64_remove_top_bits (pointer, mask);
}
4165 /* Given NAMES, a vector of strings, initialize it with all the SME
4166 pseudo-register names for the current streaming vector length. */
4168 static void
4169 aarch64_initialize_sme_pseudo_names (struct gdbarch *gdbarch,
4170 std::vector<std::string> &names)
4172 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
4174 gdb_assert (tdep->has_sme ());
4175 gdb_assert (tdep->sme_tile_slice_pseudo_base > 0);
4176 gdb_assert (tdep->sme_tile_pseudo_base > 0);
4178 for (int i = 0; i < tdep->sme_tile_slice_pseudo_count; i++)
4180 int regnum = tdep->sme_tile_slice_pseudo_base + i;
4181 struct za_pseudo_encoding encoding;
4182 aarch64_za_decode_pseudos (gdbarch, regnum, encoding);
4183 names.push_back (aarch64_za_tile_slice_name (encoding));
4185 for (int i = 0; i < AARCH64_ZA_TILES_NUM; i++)
4187 int regnum = tdep->sme_tile_pseudo_base + i;
4188 struct za_pseudo_encoding encoding;
4189 aarch64_za_decode_pseudos (gdbarch, regnum, encoding);
4190 names.push_back (aarch64_za_tile_name (encoding));
/* Initialize the current architecture based on INFO.  If possible,
   re-use an architecture from ARCHES, which is a list of
   architectures already created during this debugging session.

   Called e.g. at program startup, when reading a core file, and when
   reading a binary file.  */

static struct gdbarch *
aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
{
  const struct tdesc_feature *feature_core, *feature_fpu, *feature_sve;
  const struct tdesc_feature *feature_pauth;
  bool valid_p = true;
  int i, num_regs = 0, num_pseudo_regs = 0;
  int first_pauth_regnum = -1, ra_sign_state_offset = -1;
  int first_mte_regnum = -1, first_tls_regnum = -1;
  /* vq is the SVE vector quotient; svq is the streaming (SME) one.  */
  uint64_t vq = aarch64_get_tdesc_vq (info.target_desc);
  uint64_t svq = aarch64_get_tdesc_svq (info.target_desc);

  if (vq > AARCH64_MAX_SVE_VQ)
    internal_error (_("VQ out of bounds: %s (max %d)"),
		    pulongest (vq), AARCH64_MAX_SVE_VQ);

  if (svq > AARCH64_MAX_SVE_VQ)
    internal_error (_("Streaming vector quotient (svq) out of bounds: %s"
		      " (max %d)"),
		    pulongest (svq), AARCH64_MAX_SVE_VQ);

  /* If there is already a candidate, use it.  */
  for (gdbarch_list *best_arch = gdbarch_list_lookup_by_info (arches, &info);
       best_arch != nullptr;
       best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
    {
      aarch64_gdbarch_tdep *tdep
	= gdbarch_tdep<aarch64_gdbarch_tdep> (best_arch->gdbarch);
      /* Architectures are only interchangeable if both vector lengths
	 match, since register sizes/counts depend on them.  */
      if (tdep && tdep->vq == vq && tdep->sme_svq == svq)
	return best_arch->gdbarch;
    }

  /* Ensure we always have a target descriptor, and that it is for the given VQ
     value.  */
  const struct target_desc *tdesc = info.target_desc;
  if (!tdesc_has_registers (tdesc) || vq != aarch64_get_tdesc_vq (tdesc)
      || svq != aarch64_get_tdesc_svq (tdesc))
    {
      aarch64_features features;
      features.vq = vq;
      features.svq = svq;
      tdesc = aarch64_read_description (features);
    }
  gdb_assert (tdesc);

  feature_core = tdesc_find_feature (tdesc,"org.gnu.gdb.aarch64.core");
  feature_fpu = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
  feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
  const struct tdesc_feature *feature_mte
    = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.mte");
  const struct tdesc_feature *feature_tls
    = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.tls");

  /* The core feature (X registers, SP, PC, CPSR) is mandatory.  */
  if (feature_core == nullptr)
    return nullptr;

  tdesc_arch_data_up tdesc_data = tdesc_data_alloc ();

  /* Validate the description provides the mandatory core R registers
     and allocate their numbers.  */
  for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
    valid_p &= tdesc_numbered_register (feature_core, tdesc_data.get (),
					AARCH64_X0_REGNUM + i,
					aarch64_r_register_names[i]);

  num_regs = AARCH64_X0_REGNUM + i;

  /* Add the V registers.  */
  if (feature_fpu != nullptr)
    {
      /* The fpu and sve features both describe the vector registers, so
	 they are mutually exclusive in a valid description.  */
      if (feature_sve != nullptr)
	error (_("Program contains both fpu and SVE features."));

      /* Validate the description provides the mandatory V registers
	 and allocate their numbers.  */
      for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
	valid_p &= tdesc_numbered_register (feature_fpu, tdesc_data.get (),
					    AARCH64_V0_REGNUM + i,
					    aarch64_v_register_names[i]);

      num_regs = AARCH64_V0_REGNUM + i;
    }

  /* Add the SVE registers.  */
  if (feature_sve != nullptr)
    {
      /* Validate the description provides the mandatory SVE registers
	 and allocate their numbers.  */
      for (i = 0; i < ARRAY_SIZE (aarch64_sve_register_names); i++)
	valid_p &= tdesc_numbered_register (feature_sve, tdesc_data.get (),
					    AARCH64_SVE_Z0_REGNUM + i,
					    aarch64_sve_register_names[i]);

      num_regs = AARCH64_SVE_Z0_REGNUM + i;
      num_pseudo_regs += 32;	/* add the Vn register pseudos.  */
    }

  /* With either vector feature present, the scalar views (Q/D/S/H/B of
     each of the 32 vector registers) are exposed as pseudo-registers.  */
  if (feature_fpu != nullptr || feature_sve != nullptr)
    {
      num_pseudo_regs += 32;	/* add the Qn scalar register pseudos */
      num_pseudo_regs += 32;	/* add the Dn scalar register pseudos */
      num_pseudo_regs += 32;	/* add the Sn scalar register pseudos */
      num_pseudo_regs += 32;	/* add the Hn scalar register pseudos */
      num_pseudo_regs += 32;	/* add the Bn scalar register pseudos */
    }

  int first_sme_regnum = -1;
  int first_sme2_regnum = -1;
  int first_sme_pseudo_regnum = -1;
  const struct tdesc_feature *feature_sme
    = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sme");
  if (feature_sme != nullptr)
    {
      /* Record the first SME register.  */
      first_sme_regnum = num_regs;

      valid_p &= tdesc_numbered_register (feature_sme, tdesc_data.get (),
					  num_regs++, "svg");

      valid_p &= tdesc_numbered_register (feature_sme, tdesc_data.get (),
					  num_regs++, "svcr");

      valid_p &= tdesc_numbered_register (feature_sme, tdesc_data.get (),
					  num_regs++, "za");

      /* Record the first SME pseudo register.  */
      first_sme_pseudo_regnum = num_pseudo_regs;

      /* Add the ZA tile slice pseudo registers.  The number of tile slice
	 pseudo-registers depend on the svl, and is always a multiple of 5.
	 (svq << 5) == 32 * svq slices per qualifier, times the 5 qualifiers
	 (b, h, s, d, q) — this matches sme_tile_slice_pseudo_count set
	 further below.  */
      num_pseudo_regs += (svq << 5) * 5;

      /* Add the ZA tile pseudo registers.  */
      num_pseudo_regs += AARCH64_ZA_TILES_NUM;

      /* Now check for the SME2 feature.  SME2 is only available if SME is
	 available.  */
      const struct tdesc_feature *feature_sme2
	= tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sme2");
      if (feature_sme2 != nullptr)
	{
	  /* Record the first SME2 register.  */
	  first_sme2_regnum = num_regs;

	  valid_p &= tdesc_numbered_register (feature_sme2, tdesc_data.get (),
					      num_regs++, "zt0");
	}
    }

  /* Add the TLS register.  */
  int tls_register_count = 0;
  if (feature_tls != nullptr)
    {
      first_tls_regnum = num_regs;

      /* Look for the TLS registers.  tpidr is required, but tpidr2 is
	 optional.  */
      valid_p
	= tdesc_numbered_register (feature_tls, tdesc_data.get (),
				   first_tls_regnum, "tpidr");

      if (valid_p)
	{
	  tls_register_count++;

	  bool has_tpidr2
	    = tdesc_numbered_register (feature_tls, tdesc_data.get (),
				       first_tls_regnum + tls_register_count,
				       "tpidr2");

	  /* Figure out how many TLS registers we have.  */
	  if (has_tpidr2)
	    tls_register_count++;

	  num_regs += tls_register_count;
	}
      else
	{
	  warning (_("Provided TLS register feature doesn't contain "
		     "required tpidr register."));
	  return nullptr;
	}
    }

  /* We have two versions of the pauth target description due to a past bug
     where GDB would crash when seeing the first version of the pauth target
     description.  */
  feature_pauth = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth");
  if (feature_pauth == nullptr)
    feature_pauth = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth_v2");

  /* Add the pauth registers.  */
  int pauth_masks = 0;
  if (feature_pauth != NULL)
    {
      first_pauth_regnum = num_regs;
      /* Offset of RA_STATE among the pseudo-registers; turned into an
	 absolute register number after tdesc_use_registers below.  */
      ra_sign_state_offset = num_pseudo_regs;

      /* Size of the expected register set with all 4 masks.  */
      int set_size = ARRAY_SIZE (aarch64_pauth_register_names);

      /* QEMU exposes a couple additional masks for the high half of the
	 address.  We should either have 2 registers or 4 registers.  */
      if (tdesc_unnumbered_register (feature_pauth,
				     "pauth_dmask_high") == 0)
	{
	  /* We did not find pauth_dmask_high, assume we only have
	     2 masks.  We are not dealing with QEMU/Emulators then.  */
	  set_size -= 2;
	}

      /* Validate the descriptor provides the mandatory PAUTH registers and
	 allocate their numbers.  */
      for (i = 0; i < set_size; i++)
	valid_p &= tdesc_numbered_register (feature_pauth, tdesc_data.get (),
					    first_pauth_regnum + i,
					    aarch64_pauth_register_names[i]);

      num_regs += i;
      num_pseudo_regs += 1;	/* Count RA_STATE pseudo register.  */
      pauth_masks = set_size;
    }

  /* Add the MTE registers.  */
  if (feature_mte != NULL)
    {
      first_mte_regnum = num_regs;
      /* Validate the descriptor provides the mandatory MTE registers and
	 allocate their numbers.  */
      for (i = 0; i < ARRAY_SIZE (aarch64_mte_register_names); i++)
	valid_p &= tdesc_numbered_register (feature_mte, tdesc_data.get (),
					    first_mte_regnum + i,
					    aarch64_mte_register_names[i]);

      num_regs += i;
    }

  /* W pseudo-registers */
  int first_w_regnum = num_pseudo_regs;
  num_pseudo_regs += 31;	/* The 31 Wn views (w0..w30).  */

  if (!valid_p)
    return nullptr;

  /* AArch64 code is always little-endian.  */
  info.byte_order_for_code = BFD_ENDIAN_LITTLE;

  gdbarch *gdbarch
    = gdbarch_alloc (&info, gdbarch_tdep_up (new aarch64_gdbarch_tdep));
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  /* This should be low enough for everything.  */
  tdep->lowest_pc = 0x20;
  tdep->jb_pc = -1;		/* Longjump support not enabled by default.  */
  tdep->jb_elt_size = 8;
  tdep->vq = vq;
  tdep->pauth_reg_base = first_pauth_regnum;
  tdep->pauth_reg_count = pauth_masks;
  tdep->ra_sign_state_regnum = -1;
  tdep->mte_reg_base = first_mte_regnum;
  tdep->tls_regnum_base = first_tls_regnum;
  tdep->tls_register_count = tls_register_count;

  /* Set the SME register set details.  The pseudo-registers will be adjusted
     later.  */
  tdep->sme_reg_base = first_sme_regnum;
  tdep->sme_svg_regnum = first_sme_regnum;
  tdep->sme_svcr_regnum = first_sme_regnum + 1;
  tdep->sme_za_regnum = first_sme_regnum + 2;
  tdep->sme_svq = svq;

  /* Set the SME2 register set details.  */
  tdep->sme2_zt0_regnum = first_sme2_regnum;

  set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
  set_gdbarch_frame_align (gdbarch, aarch64_frame_align);

  /* Advance PC across function entry code.  */
  set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);

  /* The stack grows downward.  */
  set_gdbarch_inner_than (gdbarch, core_addr_lessthan);

  /* Breakpoint manipulation.  */
  set_gdbarch_breakpoint_kind_from_pc (gdbarch,
				       aarch64_breakpoint::kind_from_pc);
  set_gdbarch_sw_breakpoint_from_kind (gdbarch,
				       aarch64_breakpoint::bp_from_kind);
  set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
  set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);

  /* Information about registers, etc.  */
  set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
  set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
  set_gdbarch_num_regs (gdbarch, num_regs);

  set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
  set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
  set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
  set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
  set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
  set_tdesc_pseudo_register_reggroup_p (gdbarch,
					aarch64_pseudo_register_reggroup_p);
  set_gdbarch_cannot_store_register (gdbarch, aarch64_cannot_store_register);

  /* ABI */
  set_gdbarch_short_bit (gdbarch, 16);
  set_gdbarch_int_bit (gdbarch, 32);
  set_gdbarch_float_bit (gdbarch, 32);
  set_gdbarch_double_bit (gdbarch, 64);
  set_gdbarch_long_double_bit (gdbarch, 128);
  set_gdbarch_long_bit (gdbarch, 64);
  set_gdbarch_long_long_bit (gdbarch, 64);
  set_gdbarch_ptr_bit (gdbarch, 64);
  set_gdbarch_char_signed (gdbarch, 0);
  set_gdbarch_wchar_signed (gdbarch, 0);
  set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
  set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
  set_gdbarch_long_double_format (gdbarch, floatformats_ieee_quad);
  set_gdbarch_type_align (gdbarch, aarch64_type_align);

  /* Detect whether PC is at a point where the stack has been destroyed.  */
  set_gdbarch_stack_frame_destroyed_p (gdbarch, aarch64_stack_frame_destroyed_p);

  /* Internal <-> external register number maps.  */
  set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);

  /* Returning results.  */
  set_gdbarch_return_value_as_value (gdbarch, aarch64_return_value);

  /* Disassembly.  */
  set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);

  /* Virtual tables.  */
  set_gdbarch_vbit_in_delta (gdbarch, 1);

  /* Hook in the ABI-specific overrides, if they have been registered.  */
  info.target_desc = tdesc;
  info.tdesc_data = tdesc_data.get ();
  gdbarch_init_osabi (info, gdbarch);

  dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
  /* Register DWARF CFA vendor handler.  */
  set_gdbarch_execute_dwarf_cfa_vendor_op (gdbarch,
					   aarch64_execute_dwarf_cfa_vendor_op);

  /* Permanent/Program breakpoint handling.  */
  set_gdbarch_program_breakpoint_here_p (gdbarch,
					 aarch64_program_breakpoint_here_p);

  /* Add some default predicates.  */
  frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
  dwarf2_append_unwinders (gdbarch);
  frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);

  frame_base_set_default (gdbarch, &aarch64_normal_base);

  /* Now we have tuned the configuration, set a few final things,
     based on what the OS ABI has told us.  */

  if (tdep->jb_pc >= 0)
    set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);

  set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);

  set_gdbarch_get_pc_address_flags (gdbarch, aarch64_get_pc_address_flags);

  tdesc_use_registers (gdbarch, tdesc, std::move (tdesc_data));

  /* Fetch the updated number of registers after we're done adding all
     entries from features we don't explicitly care about.  This is the case
     for bare metal debugging stubs that include a lot of system registers.  */
  num_regs = gdbarch_num_regs (gdbarch);

  /* With the number of real registers updated, setup the pseudo-registers and
     record their numbers.  */

  /* Setup W pseudo-register numbers.  */
  tdep->w_pseudo_base = first_w_regnum + num_regs;
  tdep->w_pseudo_count = 31;

  /* Pointer authentication pseudo-registers.  */
  if (tdep->has_pauth ())
    tdep->ra_sign_state_regnum = ra_sign_state_offset + num_regs;

  /* Architecture hook to remove bits of a pointer that are not part of the
     address, like memory tags (MTE) and pointer authentication signatures.  */
  set_gdbarch_remove_non_address_bits (gdbarch,
				       aarch64_remove_non_address_bits);

  /* SME pseudo-registers.  */
  if (tdep->has_sme ())
    {
      tdep->sme_pseudo_base = num_regs + first_sme_pseudo_regnum;
      tdep->sme_tile_slice_pseudo_base = tdep->sme_pseudo_base;
      tdep->sme_tile_slice_pseudo_count = (svq * 32) * 5;
      tdep->sme_tile_pseudo_base
	= tdep->sme_pseudo_base + tdep->sme_tile_slice_pseudo_count;
      tdep->sme_pseudo_count
	= tdep->sme_tile_slice_pseudo_count + AARCH64_ZA_TILES_NUM;

      /* The SME ZA pseudo-registers are a set of 160 to 2560 pseudo-registers
	 depending on the value of svl.

	 The tile pseudo-registers are organized around their qualifiers
	 (b, h, s, d and q).  Their numbers are distributed as follows:

	 b 0
	 h 1~2
	 s 3~6
	 d 7~14
	 q 15~30

	 The naming of the tile pseudo-registers follows the pattern za<t><q>,
	 where:

	 <t> is the tile number, with the following possible values based on
	 the qualifiers:

	 Qualifier - Allocated indexes

	 b - 0
	 h - 0~1
	 s - 0~3
	 d - 0~7
	 q - 0~15

	 <q> is the qualifier: b, h, s, d and q.

	 The tile slice pseudo-registers are organized around their
	 qualifiers as well (b, h, s, d and q), but also around their
	 direction (h - horizontal and v - vertical).

	 Even-numbered tile slice pseudo-registers are horizontally-oriented
	 and odd-numbered tile slice pseudo-registers are vertically-oriented.

	 Their numbers are distributed as follows:

	 Qualifier - Allocated indexes

	 b tile slices - 0~511
	 h tile slices - 512~1023
	 s tile slices - 1024~1535
	 d tile slices - 1536~2047
	 q tile slices - 2048~2559

	 The naming of the tile slice pseudo-registers follows the pattern
	 za<t><d><q><s>, where:

	 <t> is the tile number as described for the tile pseudo-registers.
	 <d> is the direction of the tile slice (h or v)
	 <q> is the qualifier of the tile slice (b, h, s, d or q)
	 <s> is the slice number, defined as follows:

	 Qualifier - Allocated indexes

	 b - 0~15
	 h - 0~7
	 s - 0~3
	 d - 0~1
	 q - 0

	 We have helper functions to translate to/from register index from/to
	 the set of fields that make the pseudo-register names.  */

      /* Build the array of pseudo-register names available for this
	 particular gdbarch configuration.  */
      aarch64_initialize_sme_pseudo_names (gdbarch, tdep->sme_pseudo_names);
    }

  /* Add standard register aliases.  */
  for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
    user_reg_add (gdbarch, aarch64_register_aliases[i].name,
		  value_of_aarch64_user_reg,
		  &aarch64_register_aliases[i].regnum);

  register_aarch64_ravenscar_ops (gdbarch);

  return gdbarch;
}
4681 static void
4682 aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
4684 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
4686 if (tdep == NULL)
4687 return;
4689 gdb_printf (file, _("aarch64_dump_tdep: Lowest pc = 0x%s\n"),
4690 paddress (gdbarch, tdep->lowest_pc));
4692 /* SME fields. */
4693 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_type_q = %s\n"),
4694 host_address_to_string (tdep->sme_tile_type_q));
4695 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_type_d = %s\n"),
4696 host_address_to_string (tdep->sme_tile_type_d));
4697 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_type_s = %s\n"),
4698 host_address_to_string (tdep->sme_tile_type_s));
4699 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_type_h = %s\n"),
4700 host_address_to_string (tdep->sme_tile_type_h));
4701 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_type_n = %s\n"),
4702 host_address_to_string (tdep->sme_tile_type_b));
4703 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_slice_type_q = %s\n"),
4704 host_address_to_string (tdep->sme_tile_slice_type_q));
4705 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_slice_type_d = %s\n"),
4706 host_address_to_string (tdep->sme_tile_slice_type_d));
4707 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_slice_type_s = %s\n"),
4708 host_address_to_string (tdep->sme_tile_slice_type_s));
4709 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_slice_type_h = %s\n"),
4710 host_address_to_string (tdep->sme_tile_slice_type_h));
4711 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_slice_type_b = %s\n"),
4712 host_address_to_string (tdep->sme_tile_slice_type_b));
4713 gdb_printf (file, _("aarch64_dump_tdep: sme_reg_base = %s\n"),
4714 pulongest (tdep->sme_reg_base));
4715 gdb_printf (file, _("aarch64_dump_tdep: sme_svg_regnum = %s\n"),
4716 pulongest (tdep->sme_svg_regnum));
4717 gdb_printf (file, _("aarch64_dump_tdep: sme_svcr_regnum = %s\n"),
4718 pulongest (tdep->sme_svcr_regnum));
4719 gdb_printf (file, _("aarch64_dump_tdep: sme_za_regnum = %s\n"),
4720 pulongest (tdep->sme_za_regnum));
4721 gdb_printf (file, _("aarch64_dump_tdep: sme_pseudo_base = %s\n"),
4722 pulongest (tdep->sme_pseudo_base));
4723 gdb_printf (file, _("aarch64_dump_tdep: sme_pseudo_count = %s\n"),
4724 pulongest (tdep->sme_pseudo_count));
4725 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_slice_pseudo_base = %s\n"),
4726 pulongest (tdep->sme_tile_slice_pseudo_base));
4727 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_slice_pseudo_count = %s\n"),
4728 pulongest (tdep->sme_tile_slice_pseudo_count));
4729 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_pseudo_base = %s\n"),
4730 pulongest (tdep->sme_tile_pseudo_base));
4731 gdb_printf (file, _("aarch64_dump_tdep: sme_svq = %s\n"),
4732 pulongest (tdep->sme_svq));
4735 #if GDB_SELF_TEST
4736 namespace selftests
4738 static void aarch64_process_record_test (void);
4740 #endif
/* Module initializer: hook the AArch64 architecture into GDB and register
   the "set/show debug aarch64" maintenance commands and unit tests.  */

void _initialize_aarch64_tdep ();
void
_initialize_aarch64_tdep ()
{
  /* Register the gdbarch init and dump callbacks for bfd_arch_aarch64;
     the dump callback backs "maintenance print architecture".  */
  gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
		    aarch64_dump_tdep);

  /* Debug this file's internals.  */
  add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
Set AArch64 debugging."), _("\
Show AArch64 debugging."), _("\
When on, AArch64 specific debugging is enabled."),
			   NULL,
			   show_aarch64_debug,
			   &setdebuglist, &showdebuglist);

#if GDB_SELF_TEST
  /* Self tests, run via "maintenance selftest".  */
  selftests::register_test ("aarch64-analyze-prologue",
			    selftests::aarch64_analyze_prologue_test);
  selftests::register_test ("aarch64-process-record",
			    selftests::aarch64_process_record_test);
#endif
}
4766 /* AArch64 process record-replay related structures, defines etc. */
/* Allocate REGS as an array of LENGTH uint32_t register numbers and copy
   RECORD_BUF into it.  Does nothing when LENGTH is zero.  LENGTH is
   evaluated exactly once (cached in reg_len) to avoid the classic
   multiple-evaluation macro hazard; the previous version re-evaluated
   LENGTH inside the memcpy size expression.  */
#define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
	do  \
	  { \
	    unsigned int reg_len = LENGTH; \
	    if (reg_len) \
	      { \
		REGS = XNEWVEC (uint32_t, reg_len); \
		memcpy(&REGS[0], &RECORD_BUF[0], sizeof(uint32_t)*reg_len); \
	      } \
	  } \
	while (0)
/* Allocate MEMS as an array of LENGTH aarch64_mem_r entries and copy
   RECORD_BUF into it.  Does nothing when LENGTH is zero.  LENGTH is
   evaluated exactly once (cached in mem_len) so argument expressions
   with side effects cannot run twice; the previous version re-evaluated
   LENGTH inside the memcpy size expression.  */
#define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
	do  \
	  { \
	    unsigned int mem_len = LENGTH; \
	    if (mem_len) \
	      { \
		MEMS =  XNEWVEC (struct aarch64_mem_r, mem_len);  \
		memcpy(MEMS, &RECORD_BUF[0], \
		       sizeof(struct aarch64_mem_r) * mem_len); \
	      } \
	  } \
	while (0)
/* AArch64 record/replay structures and enumerations.  */

/* One recorded memory access: the region [addr, addr + len) whose old
   contents must be saved before the instruction overwrites it.  */
struct aarch64_mem_r
{
  uint64_t len;    /* Record length.  */
  uint64_t addr;   /* Memory address.  */
};
/* Result codes returned by the per-instruction record handlers.  */
enum aarch64_record_result
{
  AARCH64_RECORD_SUCCESS,	/* Instruction decoded and recorded.  */
  AARCH64_RECORD_UNSUPPORTED,	/* Recognized, but recording unsupported.  */
  AARCH64_RECORD_UNKNOWN	/* Encoding could not be decoded.  */
};
/* Decode state for a single instruction being recorded; filled in by the
   record handlers with the registers and memory regions the instruction
   will modify.  */
struct aarch64_insn_decode_record
{
  struct gdbarch *gdbarch;
  struct regcache *regcache;
  CORE_ADDR this_addr;                 /* Address of insn to be recorded.  */
  uint32_t aarch64_insn;               /* Insn to be recorded.  */
  uint32_t mem_rec_count;              /* Count of memory records.  */
  uint32_t reg_rec_count;              /* Count of register records.  */
  uint32_t *aarch64_regs;              /* Registers to be recorded.  */
  struct aarch64_mem_r *aarch64_mems;  /* Memory locations to be recorded.  */
};
4820 /* Record handler for data processing - register instructions. */
4822 static unsigned int
4823 aarch64_record_data_proc_reg (aarch64_insn_decode_record *aarch64_insn_r)
4825 uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
4826 uint32_t record_buf[4];
4828 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
4829 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
4830 insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
4832 if (!bit (aarch64_insn_r->aarch64_insn, 28))
4834 uint8_t setflags;
4836 /* Logical (shifted register). */
4837 if (insn_bits24_27 == 0x0a)
4838 setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
4839 /* Add/subtract. */
4840 else if (insn_bits24_27 == 0x0b)
4841 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
4842 else
4843 return AARCH64_RECORD_UNKNOWN;
4845 record_buf[0] = reg_rd;
4846 aarch64_insn_r->reg_rec_count = 1;
4847 if (setflags)
4848 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
4850 else
4852 if (insn_bits24_27 == 0x0b)
4854 /* Data-processing (3 source). */
4855 record_buf[0] = reg_rd;
4856 aarch64_insn_r->reg_rec_count = 1;
4858 else if (insn_bits24_27 == 0x0a)
4860 if (insn_bits21_23 == 0x00)
4862 /* Add/subtract (with carry). */
4863 record_buf[0] = reg_rd;
4864 aarch64_insn_r->reg_rec_count = 1;
4865 if (bit (aarch64_insn_r->aarch64_insn, 29))
4867 record_buf[1] = AARCH64_CPSR_REGNUM;
4868 aarch64_insn_r->reg_rec_count = 2;
4871 else if (insn_bits21_23 == 0x02)
4873 /* Conditional compare (register) and conditional compare
4874 (immediate) instructions. */
4875 record_buf[0] = AARCH64_CPSR_REGNUM;
4876 aarch64_insn_r->reg_rec_count = 1;
4878 else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
4880 /* Conditional select. */
4881 /* Data-processing (2 source). */
4882 /* Data-processing (1 source). */
4883 record_buf[0] = reg_rd;
4884 aarch64_insn_r->reg_rec_count = 1;
4886 else
4887 return AARCH64_RECORD_UNKNOWN;
4891 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4892 record_buf);
4893 return AARCH64_RECORD_SUCCESS;
4896 /* Record handler for data processing - immediate instructions. */
4898 static unsigned int
4899 aarch64_record_data_proc_imm (aarch64_insn_decode_record *aarch64_insn_r)
4901 uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags;
4902 uint32_t record_buf[4];
4904 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
4905 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
4906 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
4908 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
4909 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
4910 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
4912 record_buf[0] = reg_rd;
4913 aarch64_insn_r->reg_rec_count = 1;
4915 else if (insn_bits24_27 == 0x01)
4917 /* Add/Subtract (immediate). */
4918 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
4919 record_buf[0] = reg_rd;
4920 aarch64_insn_r->reg_rec_count = 1;
4921 if (setflags)
4922 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
4924 else if (insn_bits24_27 == 0x02 && !insn_bit23)
4926 /* Logical (immediate). */
4927 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
4928 record_buf[0] = reg_rd;
4929 aarch64_insn_r->reg_rec_count = 1;
4930 if (setflags)
4931 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
4933 else
4934 return AARCH64_RECORD_UNKNOWN;
4936 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4937 record_buf);
4938 return AARCH64_RECORD_SUCCESS;
/* Record handler for branch, exception generation and system instructions.
   Branches modify the PC (and LR for branch-with-link forms); SVC defers
   to the OS-specific syscall record hook; system instructions may modify
   Rt or CPSR.  */

static unsigned int
aarch64_record_branch_except_sys (aarch64_insn_decode_record *aarch64_insn_r)
{

  aarch64_gdbarch_tdep *tdep
    = gdbarch_tdep<aarch64_gdbarch_tdep> (aarch64_insn_r->gdbarch);
  uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
  uint32_t record_buf[4];

  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
  insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);

  if (insn_bits28_31 == 0x0d)
    {
      /* Exception generation instructions. */
      if (insn_bits24_27 == 0x04)
	{
	  /* Only SVC (opc == 000, LL == 01) is recorded; it is handed to
	     the OS-specific syscall recorder.  */
	  if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
	      && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
	      && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
	    {
	      ULONGEST svc_number;

	      /* Register 8 (x8) holds the syscall number on aarch64
		 GNU/Linux.  */
	      regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
					  &svc_number);
	      return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
						   svc_number);
	    }
	  else
	    return AARCH64_RECORD_UNSUPPORTED;
	}
      /* System instructions. */
      else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
	{
	  uint32_t reg_rt, reg_crn;

	  reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
	  reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);

	  /* Record rt in case of sysl and mrs instructions.  */
	  if (bit (aarch64_insn_r->aarch64_insn, 21))
	    {
	      record_buf[0] = reg_rt;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	  /* Record cpsr for hint and msr(immediate) instructions.  */
	  else if (reg_crn == 0x02 || reg_crn == 0x04)
	    {
	      record_buf[0] = AARCH64_CPSR_REGNUM;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	  /* Otherwise nothing is recorded for this group (falls through
	     with reg_rec_count unchanged).  */
	}
      /* Unconditional branch (register).  */
      else if((insn_bits24_27 & 0x0e) == 0x06)
	{
	  record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
	  /* BLR also writes the link register.  */
	  if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
	    record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
	}
      else
	return AARCH64_RECORD_UNKNOWN;
    }
  /* Unconditional branch (immediate).  */
  else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
    {
      record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
      /* Bit 31 set means BL, which also writes the link register.  */
      if (bit (aarch64_insn_r->aarch64_insn, 31))
	record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
    }
  else
    /* Compare & branch (immediate), Test & branch (immediate) and
       Conditional branch (immediate).  */
    record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;

  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
/* Record handler for advanced SIMD load and store instructions.  Loads
   record the destination V registers; stores record the memory regions
   written, as (size, address) pairs fed to MEM_ALLOC.  Bit 22 is the
   load/store direction; bit 23 set means a writeback form, which also
   modifies the base register Rn.  */

static unsigned int
aarch64_record_asimd_load_store (aarch64_insn_decode_record *aarch64_insn_r)
{
  CORE_ADDR address;
  uint64_t addr_offset = 0;
  uint32_t record_buf[24];
  /* Pairs of (length, address); consumed two-at-a-time below.  */
  uint64_t record_buf_mem[24];
  uint32_t reg_rn, reg_rt;
  uint32_t reg_index = 0, mem_index = 0;
  uint8_t opcode_bits, size_bits;

  reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
  size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
  opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
  /* Base address for the transfer comes from Rn.  */
  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);

  if (record_debug)
    debug_printf ("Process record: Advanced SIMD load/store\n");

  /* Load/store single structure.  */
  if (bit (aarch64_insn_r->aarch64_insn, 24))
    {
      uint8_t sindex, scale, selem, esize, replicate = 0;
      scale = opcode_bits >> 2;
      /* selem = number of structure elements per transfer (1..4).  */
      selem = ((opcode_bits & 0x02) |
	       bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
      /* Validate the encoding and refine the element scale.  */
      switch (scale)
	{
	case 1:
	  if (size_bits & 0x01)
	    return AARCH64_RECORD_UNKNOWN;
	  break;
	case 2:
	  if ((size_bits >> 1) & 0x01)
	    return AARCH64_RECORD_UNKNOWN;
	  if (size_bits & 0x01)
	    {
	      if (!((opcode_bits >> 1) & 0x01))
		scale = 3;
	      else
		return AARCH64_RECORD_UNKNOWN;
	    }
	  break;
	case 3:
	  /* LD<n>R replicate forms (load only, even opcode).  */
	  if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
	    {
	      scale = size_bits;
	      replicate = 1;
	      break;
	    }
	  else
	    return AARCH64_RECORD_UNKNOWN;
	default:
	  break;
	}
      esize = 8 << scale;	/* Element size in bits.  */
      if (replicate)
	for (sindex = 0; sindex < selem; sindex++)
	  {
	    record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
	    reg_rt = (reg_rt + 1) % 32;
	  }
      else
	{
	  for (sindex = 0; sindex < selem; sindex++)
	    {
	      /* Bit 22 set: load (register modified); clear: store
		 (memory modified).  */
	      if (bit (aarch64_insn_r->aarch64_insn, 22))
		record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
	      else
		{
		  record_buf_mem[mem_index++] = esize / 8;
		  record_buf_mem[mem_index++] = address + addr_offset;
		}
	      addr_offset = addr_offset + (esize / 8);
	      reg_rt = (reg_rt + 1) % 32;
	    }
	}
    }
  /* Load/store multiple structure.  */
  else
    {
      uint8_t selem, esize, rpt, elements;
      uint8_t eindex, rindex;

      esize = 8 << size_bits;
      /* Q bit (30) selects 128-bit vs 64-bit vector elements.  */
      if (bit (aarch64_insn_r->aarch64_insn, 30))
	elements = 128 / esize;
      else
	elements = 64 / esize;

      /* rpt = register repeat count, selem = structure elements.  */
      switch (opcode_bits)
	{
	/*LD/ST4 (4 Registers).  */
	case 0:
	  rpt = 1;
	  selem = 4;
	  break;
	/*LD/ST1 (4 Registers).  */
	case 2:
	  rpt = 4;
	  selem = 1;
	  break;
	/*LD/ST3 (3 Registers).  */
	case 4:
	  rpt = 1;
	  selem = 3;
	  break;
	/*LD/ST1 (3 Registers).  */
	case 6:
	  rpt = 3;
	  selem = 1;
	  break;
	/*LD/ST1 (1 Register).  */
	case 7:
	  rpt = 1;
	  selem = 1;
	  break;
	/*LD/ST2 (2 Registers).  */
	case 8:
	  rpt = 1;
	  selem = 2;
	  break;
	/*LD/ST1 (2 Registers).  */
	case 10:
	  rpt = 2;
	  selem = 1;
	  break;
	default:
	  return AARCH64_RECORD_UNSUPPORTED;
	  break;
	}
      for (rindex = 0; rindex < rpt; rindex++)
	for (eindex = 0; eindex < elements; eindex++)
	  {
	    uint8_t reg_tt, sindex;
	    reg_tt = (reg_rt + rindex) % 32;
	    for (sindex = 0; sindex < selem; sindex++)
	      {
		if (bit (aarch64_insn_r->aarch64_insn, 22))
		  record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
		else
		  {
		    record_buf_mem[mem_index++] = esize / 8;
		    record_buf_mem[mem_index++] = address + addr_offset;
		  }
		addr_offset = addr_offset + (esize / 8);
		reg_tt = (reg_tt + 1) % 32;
	      }
	  }
    }

  /* Bit 23: post-indexed addressing; the base register is written back.  */
  if (bit (aarch64_insn_r->aarch64_insn, 23))
    record_buf[reg_index++] = reg_rn;

  aarch64_insn_r->reg_rec_count = reg_index;
  /* Each memory record occupies two slots (length, address).  */
  aarch64_insn_r->mem_rec_count = mem_index / 2;
  MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
	     record_buf_mem);
  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
/* Record handler for load and store instructions.

   Decodes the A64 load/store instruction class and records everything the
   instruction will clobber so it can be undone during replay: for loads,
   the destination register number(s) go into RECORD_BUF; for stores, the
   target address and access size (in bytes) go into RECORD_BUF_MEM as
   (len, addr) pairs.  Returns AARCH64_RECORD_SUCCESS on a decoded
   instruction, AARCH64_RECORD_UNKNOWN for encodings this handler cannot
   classify, and delegates Advanced SIMD load/store forms to
   aarch64_record_asimd_load_store.  */

static unsigned int
aarch64_record_load_store (aarch64_insn_decode_record *aarch64_insn_r)
{
  uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
  uint8_t insn_bit23, insn_bit21;
  uint8_t opc, size_bits, ld_flag, vector_flag;
  uint32_t reg_rn, reg_rt, reg_rt2;
  uint64_t datasize, offset;
  uint32_t record_buf[8];
  uint64_t record_buf_mem[8];
  CORE_ADDR address;

  /* Fields common to all load/store encodings.  */
  insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
  insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
  insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
  /* Bit 22 distinguishes load (1) from store (0); bit 26 selects the
     SIMD&FP register file instead of the general registers.  */
  ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
  vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
  reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
  reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
  size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);

  /* Load/store exclusive.  */
  if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
    {
      if (record_debug)
	debug_printf ("Process record: load/store exclusive\n");

      if (ld_flag)
	{
	  record_buf[0] = reg_rt;
	  aarch64_insn_r->reg_rec_count = 1;
	  if (insn_bit21)
	    {
	      /* Pair form also writes Rt2.  */
	      record_buf[1] = reg_rt2;
	      aarch64_insn_r->reg_rec_count = 2;
	    }
	}
      else
	{
	  /* Pair form stores two elements of (8 << size_bits) bits.  */
	  if (insn_bit21)
	    datasize = (8 << size_bits) * 2;
	  else
	    datasize = (8 << size_bits);
	  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
				      &address);
	  record_buf_mem[0] = datasize / 8;
	  record_buf_mem[1] = address;
	  aarch64_insn_r->mem_rec_count = 1;
	  if (!insn_bit23)
	    {
	      /* Save register rs (the exclusive-status result register,
		 bits 16-20, which the store-exclusive writes).  */
	      record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	}
    }
  /* Load register (literal) instructions decoding.  */
  else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
    {
      if (record_debug)
	debug_printf ("Process record: load register (literal)\n");
      if (vector_flag)
	record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
      else
	record_buf[0] = reg_rt;
      aarch64_insn_r->reg_rec_count = 1;
    }
  /* All types of load/store pair instructions decoding.  */
  else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
    {
      if (record_debug)
	debug_printf ("Process record: load/store pair\n");

      if (ld_flag)
	{
	  if (vector_flag)
	    {
	      record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
	      record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
	    }
	  else
	    {
	      record_buf[0] = reg_rt;
	      record_buf[1] = reg_rt2;
	    }
	  aarch64_insn_r->reg_rec_count = 2;
	}
      else
	{
	  uint16_t imm7_off;
	  imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
	  if (!vector_flag)
	    size_bits = size_bits >> 1;
	  datasize = 8 << (2 + size_bits);
	  /* imm7 is a signed, scaled offset: decode the two's complement
	     magnitude by hand, then scale by the element size.  */
	  offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
	  offset = offset << (2 + size_bits);
	  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
				      &address);
	  /* Post-index forms access [Rn] directly; otherwise the signed
	     offset applies to the access address.  */
	  if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
	    {
	      if (imm7_off & 0x40)
		address = address - offset;
	      else
		address = address + offset;
	    }

	  /* Two contiguous elements of datasize/8 bytes each.  */
	  record_buf_mem[0] = datasize / 8;
	  record_buf_mem[1] = address;
	  record_buf_mem[2] = datasize / 8;
	  record_buf_mem[3] = address + (datasize / 8);
	  aarch64_insn_r->mem_rec_count = 2;
	}
      /* Pre/post-index forms also update the base register Rn.  */
      if (bit (aarch64_insn_r->aarch64_insn, 23))
	record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
    }
  /* Load/store register (unsigned immediate) instructions.  */
  else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
    {
      opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
      if (!(opc >> 1))
	{
	  if (opc & 0x01)
	    ld_flag = 0x01;
	  else
	    ld_flag = 0x0;
	}
      else
	{
	  if (size_bits == 0x3 && vector_flag == 0x0 && opc == 0x2)
	    {
	      /* PRFM (immediate): a prefetch hint, nothing to record.  */
	      return AARCH64_RECORD_SUCCESS;
	    }
	  else if (size_bits == 0x2 && vector_flag == 0x0 && opc == 0x2)
	    {
	      /* LDRSW (immediate) */
	      ld_flag = 0x1;
	    }
	  else
	    {
	      if (opc & 0x01)
		ld_flag = 0x01;
	      else
		ld_flag = 0x0;
	    }
	}

      if (record_debug)
	{
	  debug_printf ("Process record: load/store (unsigned immediate):"
			" size %x V %d opc %x\n", size_bits, vector_flag,
			opc);
	}

      if (!ld_flag)
	{
	  /* imm12 is an unsigned offset scaled by the access size.  */
	  offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
	  datasize = 8 << size_bits;
	  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
				      &address);
	  offset = offset << size_bits;
	  address = address + offset;

	  record_buf_mem[0] = datasize >> 3;
	  record_buf_mem[1] = address;
	  aarch64_insn_r->mem_rec_count = 1;
	}
      else
	{
	  if (vector_flag)
	    record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
	  else
	    record_buf[0] = reg_rt;
	  aarch64_insn_r->reg_rec_count = 1;
	}
    }
  /* Load/store register (register offset) instructions.  */
  else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
	   && insn_bits10_11 == 0x02 && insn_bit21)
    {
      if (record_debug)
	debug_printf ("Process record: load/store (register offset)\n");
      opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
      if (!(opc >> 1))
	if (opc & 0x01)
	  ld_flag = 0x01;
	else
	  ld_flag = 0x0;
      else
	if (size_bits != 0x03)
	  ld_flag = 0x01;
	else
	  return AARCH64_RECORD_UNKNOWN;

      if (!ld_flag)
	{
	  ULONGEST reg_rm_val;

	  regcache_raw_read_unsigned (aarch64_insn_r->regcache,
		     bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
	  /* Bit 12 (S) requests scaling of Rm by the access size.  */
	  if (bit (aarch64_insn_r->aarch64_insn, 12))
	    offset = reg_rm_val << size_bits;
	  else
	    offset = reg_rm_val;
	  datasize = 8 << size_bits;
	  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
				      &address);
	  address = address + offset;
	  record_buf_mem[0] = datasize >> 3;
	  record_buf_mem[1] = address;
	  aarch64_insn_r->mem_rec_count = 1;
	}
      else
	{
	  if (vector_flag)
	    record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
	  else
	    record_buf[0] = reg_rt;
	  aarch64_insn_r->reg_rec_count = 1;
	}
    }
  /* Load/store register (immediate and unprivileged) instructions.  */
  else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
	   && !insn_bit21)
    {
      if (record_debug)
	{
	  debug_printf ("Process record: load/store "
			"(immediate and unprivileged)\n");
	}
      opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
      if (!(opc >> 1))
	if (opc & 0x01)
	  ld_flag = 0x01;
	else
	  ld_flag = 0x0;
      else
	if (size_bits != 0x03)
	  ld_flag = 0x01;
	else
	  return AARCH64_RECORD_UNKNOWN;

      if (!ld_flag)
	{
	  uint16_t imm9_off;
	  imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
	  /* imm9 is signed: decode the two's complement magnitude by
	     hand.  */
	  offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
	  datasize = 8 << size_bits;
	  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
				      &address);
	  /* Post-index (bits 10-11 = 01) accesses [Rn] directly; all other
	     forms apply the signed offset to the access address.  */
	  if (insn_bits10_11 != 0x01)
	    {
	      if (imm9_off & 0x0100)
		address = address - offset;
	      else
		address = address + offset;
	    }
	  record_buf_mem[0] = datasize >> 3;
	  record_buf_mem[1] = address;
	  aarch64_insn_r->mem_rec_count = 1;
	}
      else
	{
	  if (vector_flag)
	    record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
	  else
	    record_buf[0] = reg_rt;
	  aarch64_insn_r->reg_rec_count = 1;
	}
      /* Pre-index (01) and post-index (11) forms also update Rn.  */
      if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
	record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
    }
  /* Advanced SIMD load/store instructions.  */
  else
    return aarch64_record_asimd_load_store (aarch64_insn_r);

  MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
	     record_buf_mem);
  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
5477 /* Record handler for data processing SIMD and floating point instructions. */
5479 static unsigned int
5480 aarch64_record_data_proc_simd_fp (aarch64_insn_decode_record *aarch64_insn_r)
5482 uint8_t insn_bit21, opcode, rmode, reg_rd;
5483 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
5484 uint8_t insn_bits11_14;
5485 uint32_t record_buf[2];
5487 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
5488 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
5489 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
5490 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
5491 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
5492 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
5493 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
5494 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
5495 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
5497 if (record_debug)
5498 debug_printf ("Process record: data processing SIMD/FP: ");
5500 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
5502 /* Floating point - fixed point conversion instructions. */
5503 if (!insn_bit21)
5505 if (record_debug)
5506 debug_printf ("FP - fixed point conversion");
5508 if ((opcode >> 1) == 0x0 && rmode == 0x03)
5509 record_buf[0] = reg_rd;
5510 else
5511 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
5513 /* Floating point - conditional compare instructions. */
5514 else if (insn_bits10_11 == 0x01)
5516 if (record_debug)
5517 debug_printf ("FP - conditional compare");
5519 record_buf[0] = AARCH64_CPSR_REGNUM;
5521 /* Floating point - data processing (2-source) and
5522 conditional select instructions. */
5523 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
5525 if (record_debug)
5526 debug_printf ("FP - DP (2-source)");
5528 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
5530 else if (insn_bits10_11 == 0x00)
5532 /* Floating point - immediate instructions. */
5533 if ((insn_bits12_15 & 0x01) == 0x01
5534 || (insn_bits12_15 & 0x07) == 0x04)
5536 if (record_debug)
5537 debug_printf ("FP - immediate");
5538 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
5540 /* Floating point - compare instructions. */
5541 else if ((insn_bits12_15 & 0x03) == 0x02)
5543 if (record_debug)
5544 debug_printf ("FP - immediate");
5545 record_buf[0] = AARCH64_CPSR_REGNUM;
5547 /* Floating point - integer conversions instructions. */
5548 else if (insn_bits12_15 == 0x00)
5550 /* Convert float to integer instruction. */
5551 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
5553 if (record_debug)
5554 debug_printf ("float to int conversion");
5556 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
5558 /* Convert integer to float instruction. */
5559 else if ((opcode >> 1) == 0x01 && !rmode)
5561 if (record_debug)
5562 debug_printf ("int to float conversion");
5564 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
5566 /* Move float to integer instruction. */
5567 else if ((opcode >> 1) == 0x03)
5569 if (record_debug)
5570 debug_printf ("move float to int");
5572 if (!(opcode & 0x01))
5573 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
5574 else
5575 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
5577 else
5578 return AARCH64_RECORD_UNKNOWN;
5580 else
5581 return AARCH64_RECORD_UNKNOWN;
5583 else
5584 return AARCH64_RECORD_UNKNOWN;
5586 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
5588 if (record_debug)
5589 debug_printf ("SIMD copy");
5591 /* Advanced SIMD copy instructions. */
5592 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
5593 && !bit (aarch64_insn_r->aarch64_insn, 15)
5594 && bit (aarch64_insn_r->aarch64_insn, 10))
5596 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
5597 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
5598 else
5599 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
5601 else
5602 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
5604 /* All remaining floating point or advanced SIMD instructions. */
5605 else
5607 if (record_debug)
5608 debug_printf ("all remain");
5610 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
5613 if (record_debug)
5614 debug_printf ("\n");
5616 /* Record the V/X register. */
5617 aarch64_insn_r->reg_rec_count++;
5619 /* Some of these instructions may set bits in the FPSR, so record it
5620 too. */
5621 record_buf[1] = AARCH64_FPSR_REGNUM;
5622 aarch64_insn_r->reg_rec_count++;
5624 gdb_assert (aarch64_insn_r->reg_rec_count == 2);
5625 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
5626 record_buf);
5627 return AARCH64_RECORD_SUCCESS;
5630 /* Decodes insns type and invokes its record handler. */
5632 static unsigned int
5633 aarch64_record_decode_insn_handler (aarch64_insn_decode_record *aarch64_insn_r)
5635 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
5637 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
5638 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
5639 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
5640 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
5642 /* Data processing - immediate instructions. */
5643 if (!ins_bit26 && !ins_bit27 && ins_bit28)
5644 return aarch64_record_data_proc_imm (aarch64_insn_r);
5646 /* Branch, exception generation and system instructions. */
5647 if (ins_bit26 && !ins_bit27 && ins_bit28)
5648 return aarch64_record_branch_except_sys (aarch64_insn_r);
5650 /* Load and store instructions. */
5651 if (!ins_bit25 && ins_bit27)
5652 return aarch64_record_load_store (aarch64_insn_r);
5654 /* Data processing - register instructions. */
5655 if (ins_bit25 && !ins_bit26 && ins_bit27)
5656 return aarch64_record_data_proc_reg (aarch64_insn_r);
5658 /* Data processing - SIMD and floating point instructions. */
5659 if (ins_bit25 && ins_bit26 && ins_bit27)
5660 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
5662 return AARCH64_RECORD_UNSUPPORTED;
5665 /* Cleans up local record registers and memory allocations. */
5667 static void
5668 deallocate_reg_mem (aarch64_insn_decode_record *record)
5670 xfree (record->aarch64_regs);
5671 xfree (record->aarch64_mems);
#if GDB_SELF_TEST
namespace selftests {

/* Self test: a PRFM (prefetch hint) instruction must decode successfully
   while recording no register and no memory changes.  */

static void
aarch64_process_record_test (void)
{
  struct gdbarch_info info;
  info.bfd_arch_info = bfd_scan_arch ("aarch64");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);

  aarch64_insn_decode_record aarch64_record;
  memset (&aarch64_record, 0, sizeof (aarch64_insn_decode_record));
  aarch64_record.regcache = NULL;
  aarch64_record.this_addr = 0;
  aarch64_record.gdbarch = gdbarch;

  /* 20 00 80 f9	prfm	pldl1keep, [x1] */
  aarch64_record.aarch64_insn = 0xf9800020;
  uint32_t status = aarch64_record_decode_insn_handler (&aarch64_record);
  SELF_CHECK (status == AARCH64_RECORD_SUCCESS);
  SELF_CHECK (aarch64_record.reg_rec_count == 0);
  SELF_CHECK (aarch64_record.mem_rec_count == 0);

  deallocate_reg_mem (&aarch64_record);
}

} // namespace selftests
#endif /* GDB_SELF_TEST */
5708 /* Parse the current instruction and record the values of the registers and
5709 memory that will be changed in current instruction to record_arch_list
5710 return -1 if something is wrong. */
5713 aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
5714 CORE_ADDR insn_addr)
5716 uint32_t rec_no = 0;
5717 uint8_t insn_size = 4;
5718 uint32_t ret = 0;
5719 gdb_byte buf[insn_size];
5720 aarch64_insn_decode_record aarch64_record;
5722 memset (&buf[0], 0, insn_size);
5723 memset (&aarch64_record, 0, sizeof (aarch64_insn_decode_record));
5724 target_read_memory (insn_addr, &buf[0], insn_size);
5725 aarch64_record.aarch64_insn
5726 = (uint32_t) extract_unsigned_integer (&buf[0],
5727 insn_size,
5728 gdbarch_byte_order (gdbarch));
5729 aarch64_record.regcache = regcache;
5730 aarch64_record.this_addr = insn_addr;
5731 aarch64_record.gdbarch = gdbarch;
5733 ret = aarch64_record_decode_insn_handler (&aarch64_record);
5734 if (ret == AARCH64_RECORD_UNSUPPORTED)
5736 gdb_printf (gdb_stderr,
5737 _("Process record does not support instruction "
5738 "0x%0x at address %s.\n"),
5739 aarch64_record.aarch64_insn,
5740 paddress (gdbarch, insn_addr));
5741 ret = -1;
5744 if (0 == ret)
5746 /* Record registers. */
5747 record_full_arch_list_add_reg (aarch64_record.regcache,
5748 AARCH64_PC_REGNUM);
5749 /* Always record register CPSR. */
5750 record_full_arch_list_add_reg (aarch64_record.regcache,
5751 AARCH64_CPSR_REGNUM);
5752 if (aarch64_record.aarch64_regs)
5753 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
5754 if (record_full_arch_list_add_reg (aarch64_record.regcache,
5755 aarch64_record.aarch64_regs[rec_no]))
5756 ret = -1;
5758 /* Record memories. */
5759 if (aarch64_record.aarch64_mems)
5760 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
5761 if (record_full_arch_list_add_mem
5762 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
5763 aarch64_record.aarch64_mems[rec_no].len))
5764 ret = -1;
5766 if (record_full_arch_list_add_end ())
5767 ret = -1;
5770 deallocate_reg_mem (&aarch64_record);
5771 return ret;