/* GNU/Linux/AArch64 specific low level interface, for the remote server for
   GDB.

   Copyright (C) 2009-2024 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "linux-low.h"
#include "nat/aarch64-linux.h"
#include "nat/aarch64-linux-hw-point.h"
#include "arch/aarch64-insn.h"
#include "linux-aarch32-low.h"
#include "elf/common.h"
#include "ax.h"
#include "tracepoint.h"
#include "debug.h"

#include <signal.h>
#include <sys/user.h>
#include "nat/gdb_ptrace.h"
#include <asm/ptrace.h>
#include <inttypes.h>
#include <endian.h>
#include <sys/uio.h>

#include "gdb_proc_service.h"
#include "arch/aarch64.h"
#include "arch/aarch64-mte-linux.h"
#include "arch/aarch64-scalable-linux.h"
#include "linux-aarch32-tdesc.h"
#include "linux-aarch64-tdesc.h"
#include "nat/aarch64-mte-linux-ptrace.h"
#include "nat/aarch64-scalable-linux-ptrace.h"

/* Linux target op definitions for the AArch64 architecture.  */

class aarch64_target : public linux_process_target
{
public:

  const regs_info *get_regs_info () override;

  int breakpoint_kind_from_pc (CORE_ADDR *pcptr) override;

  int breakpoint_kind_from_current_state (CORE_ADDR *pcptr) override;

  const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;

  bool supports_z_point_type (char z_type) override;

  bool supports_tracepoints () override;

  bool supports_fast_tracepoints () override;

  int install_fast_tracepoint_jump_pad
    (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
     CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
     CORE_ADDR *trampoline, ULONGEST *trampoline_size,
     unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
     CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
     char *err) override;

  int get_min_fast_tracepoint_insn_len () override;

  struct emit_ops *emit_ops () override;

  bool supports_memory_tagging () override;

  bool fetch_memtags (CORE_ADDR address, size_t len,
		      gdb::byte_vector &tags, int type) override;

  bool store_memtags (CORE_ADDR address, size_t len,
		      const gdb::byte_vector &tags, int type) override;

protected:

  void low_arch_setup () override;

  bool low_cannot_fetch_register (int regno) override;

  bool low_cannot_store_register (int regno) override;

  bool low_supports_breakpoints () override;

  CORE_ADDR low_get_pc (regcache *regcache) override;

  void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;

  bool low_breakpoint_at (CORE_ADDR pc) override;

  int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  bool low_stopped_by_watchpoint () override;

  CORE_ADDR low_stopped_data_address () override;

  bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
			  int direction) override;

  arch_process_info *low_new_process () override;

  void low_delete_process (arch_process_info *info) override;

  void low_new_thread (lwp_info *) override;

  void low_delete_thread (arch_lwp_info *) override;

  void low_new_fork (process_info *parent, process_info *child) override;

  void low_prepare_to_resume (lwp_info *lwp) override;

  int low_get_thread_area (int lwpid, CORE_ADDR *addrp) override;

  bool low_supports_range_stepping () override;

  bool low_supports_catch_syscall () override;

  void low_get_syscall_trapinfo (regcache *regcache, int *sysno) override;
};

/* The singleton target ops object.  */

static aarch64_target the_aarch64_target;

bool
aarch64_target::low_cannot_fetch_register (int regno)
{
  gdb_assert_not_reached ("linux target op low_cannot_fetch_register "
			  "is not implemented by the target");
}

bool
aarch64_target::low_cannot_store_register (int regno)
{
  gdb_assert_not_reached ("linux target op low_cannot_store_register "
			  "is not implemented by the target");
}

void
aarch64_target::low_prepare_to_resume (lwp_info *lwp)
{
  aarch64_linux_prepare_to_resume (lwp);
}

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  /* Hardware breakpoint/watchpoint data.
     The reason for them to be per-process rather than per-thread is
     due to the lack of information in the gdbserver environment;
     gdbserver is not told whether a requested hardware
     breakpoint/watchpoint is thread specific or not, so it has to set
     each hw bp/wp for every thread in the current process.  The
     higher level bp/wp management in gdb will resume a thread if a hw
     bp/wp trap is not expected for it.  Since the hw bp/wp setting is
     the same for each thread, it is reasonable for the data to live
     here.  */
  struct aarch64_debug_reg_state debug_reg_state;
};

/* Return true if the size of register 0 is 8 bytes.  */

static int
is_64bit_tdesc (void)
{
  /* We may not have a current thread at this point, so go straight to
     the process's target description.  */
  return register_size (current_process ()->tdesc, 0) == 8;
}

static void
aarch64_fill_gregset (struct regcache *regcache, void *buf)
{
  struct user_pt_regs *regset = (struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    collect_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  collect_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  collect_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  collect_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}

static void
aarch64_store_gregset (struct regcache *regcache, const void *buf)
{
  const struct user_pt_regs *regset = (const struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    supply_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  supply_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  supply_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  supply_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}

static void
aarch64_fill_fpregset (struct regcache *regcache, void *buf)
{
  struct user_fpsimd_state *regset = (struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    collect_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  collect_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  collect_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}

static void
aarch64_store_fpregset (struct regcache *regcache, const void *buf)
{
  const struct user_fpsimd_state *regset
    = (const struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    supply_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  supply_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  supply_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}

/* Store the pauth registers to regcache.  */

static void
aarch64_store_pauthregset (struct regcache *regcache, const void *buf)
{
  uint64_t *pauth_regset = (uint64_t *) buf;
  int pauth_base = find_regno (regcache->tdesc, "pauth_dmask");

  if (pauth_base == 0)
    return;

  supply_register (regcache, AARCH64_PAUTH_DMASK_REGNUM (pauth_base),
		   &pauth_regset[0]);
  supply_register (regcache, AARCH64_PAUTH_CMASK_REGNUM (pauth_base),
		   &pauth_regset[1]);
}

/* Fill BUF with the MTE registers from the regcache.  */

static void
aarch64_fill_mteregset (struct regcache *regcache, void *buf)
{
  uint64_t *mte_regset = (uint64_t *) buf;
  int mte_base = find_regno (regcache->tdesc, "tag_ctl");

  collect_register (regcache, mte_base, mte_regset);
}

/* Store the MTE registers to regcache.  */

static void
aarch64_store_mteregset (struct regcache *regcache, const void *buf)
{
  uint64_t *mte_regset = (uint64_t *) buf;
  int mte_base = find_regno (regcache->tdesc, "tag_ctl");

  /* Tag Control register.  */
  supply_register (regcache, mte_base, mte_regset);
}

/* Fill BUF with TLS register from the regcache.  */

static void
aarch64_fill_tlsregset (struct regcache *regcache, void *buf)
{
  gdb_byte *tls_buf = (gdb_byte *) buf;
  int tls_regnum = find_regno (regcache->tdesc, "tpidr");

  collect_register (regcache, tls_regnum, tls_buf);

  /* Read TPIDR2, if it exists.  */
  std::optional<int> regnum = find_regno_no_throw (regcache->tdesc, "tpidr2");

  if (regnum.has_value ())
    collect_register (regcache, *regnum, tls_buf + sizeof (uint64_t));
}

/* Store TLS register to regcache.  */

static void
aarch64_store_tlsregset (struct regcache *regcache, const void *buf)
{
  gdb_byte *tls_buf = (gdb_byte *) buf;
  int tls_regnum = find_regno (regcache->tdesc, "tpidr");

  supply_register (regcache, tls_regnum, tls_buf);

  /* Write TPIDR2, if it exists.  */
  std::optional<int> regnum = find_regno_no_throw (regcache->tdesc, "tpidr2");

  if (regnum.has_value ())
    supply_register (regcache, *regnum, tls_buf + sizeof (uint64_t));
}

bool
aarch64_target::low_supports_breakpoints ()
{
  return true;
}

/* Implementation of linux target ops method "low_get_pc".  */

CORE_ADDR
aarch64_target::low_get_pc (regcache *regcache)
{
  if (register_size (regcache->tdesc, 0) == 8)
    return linux_get_pc_64bit (regcache);
  else
    return linux_get_pc_32bit (regcache);
}

/* Implementation of linux target ops method "low_set_pc".  */

void
aarch64_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
{
  if (register_size (regcache->tdesc, 0) == 8)
    linux_set_pc_64bit (regcache, pc);
  else
    linux_set_pc_32bit (regcache, pc);
}

#define aarch64_breakpoint_len 4

/* AArch64 BRK software debug mode instruction.
   This instruction needs to match gdb/aarch64-tdep.c
   (aarch64_default_breakpoint).  */
static const gdb_byte aarch64_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
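
/* Those four bytes are the little-endian encoding of the 32-bit
   instruction word 0xd4200000, which disassembles to BRK #0.  */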

/* Implementation of linux target ops method "low_breakpoint_at".  */

bool
aarch64_target::low_breakpoint_at (CORE_ADDR where)
{
  if (is_64bit_tdesc ())
    {
      gdb_byte insn[aarch64_breakpoint_len];

      read_memory (where, (unsigned char *) &insn, aarch64_breakpoint_len);
      if (memcmp (insn, aarch64_breakpoint, aarch64_breakpoint_len) == 0)
	return true;

      return false;
    }
  else
    return arm_breakpoint_at (where);
}

static void
aarch64_init_debug_reg_state (struct aarch64_debug_reg_state *state)
{
  int i;

  for (i = 0; i < AARCH64_HBP_MAX_NUM; ++i)
    {
      state->dr_addr_bp[i] = 0;
      state->dr_ctrl_bp[i] = 0;
      state->dr_ref_count_bp[i] = 0;
    }

  for (i = 0; i < AARCH64_HWP_MAX_NUM; ++i)
    {
      state->dr_addr_wp[i] = 0;
      state->dr_ctrl_wp[i] = 0;
      state->dr_ref_count_wp[i] = 0;
    }
}

/* Return the pointer to the debug register state structure in the
   current process' arch-specific data area.  */

struct aarch64_debug_reg_state *
aarch64_get_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}

/* Implementation of target ops method "supports_z_point_type".  */

bool
aarch64_target::supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_READ_WP:
    case Z_PACKET_ACCESS_WP:
      return true;
    default:
      return false;
    }
}

/* Implementation of linux target ops method "low_insert_point".

   It actually only records the info of the to-be-inserted bp/wp;
   the actual insertion will happen when threads are resumed.  */

int
aarch64_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
				  int len, raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "insert_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  if (targ_type != hw_execute)
    {
      if (aarch64_region_ok_for_watchpoint (addr, len))
	ret = aarch64_handle_watchpoint (targ_type, addr, len,
					 1 /* is_insert */,
					 current_lwp_ptid (), state);
      else
	ret = -1;
    }
  else
    {
      if (len == 3)
	{
	  /* A LEN of 3 means the breakpoint is set on a 32-bit thumb
	     instruction.  Set it to 2 to correctly encode the length
	     bit mask in the hardware/watchpoint control register.  */
	  len = 2;
	}
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
				       1 /* is_insert */, current_lwp_ptid (),
				       state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "insert_point", addr, len,
				  targ_type);

  return ret;
}

/* Implementation of linux target ops method "low_remove_point".

   It actually only records the info of the to-be-removed bp/wp,
   the actual removal will be done when threads are resumed.  */

int
aarch64_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
				  int len, raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "remove_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  /* Set up state pointers.  */
  if (targ_type != hw_execute)
    ret
      = aarch64_handle_watchpoint (targ_type, addr, len, 0 /* is_insert */,
				   current_lwp_ptid (), state);
  else
    {
      if (len == 3)
	{
	  /* A LEN of 3 means the breakpoint is set on a 32-bit thumb
	     instruction.  Set it to 2 to correctly encode the length
	     bit mask in the hardware/watchpoint control register.  */
	  len = 2;
	}
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
				       0 /* is_insert */, current_lwp_ptid (),
				       state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "remove_point", addr, len,
				  targ_type);

  return ret;
}

/* Return POINTER with its non-address bits (tags, PAC signatures)
   stripped.  */

static CORE_ADDR
aarch64_remove_non_address_bits (CORE_ADDR pointer)
{
  /* By default, we assume TBI and discard the top 8 bits plus the
     VA range select bit (55).  */
  CORE_ADDR mask = AARCH64_TOP_BITS_MASK;

  /* Check if PAC is available for this target.  */
  if (tdesc_contains_feature (current_process ()->tdesc,
			      "org.gnu.gdb.aarch64.pauth"))
    {
      /* Fetch the PAC masks.  These masks are per-process, so we can just
	 fetch data from whatever thread we have at the moment.

	 Also, we have both a code mask and a data mask.  For now they are
	 the same, but this may change in the future.  */

      struct regcache *regs = get_thread_regcache (current_thread, 1);
      CORE_ADDR dmask = regcache_raw_get_unsigned_by_name (regs, "pauth_dmask");
      CORE_ADDR cmask = regcache_raw_get_unsigned_by_name (regs, "pauth_cmask");
      mask |= aarch64_mask_from_pac_registers (cmask, dmask);
    }

  return aarch64_remove_top_bits (pointer, mask);
}
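
/* A minimal usage sketch, compiled out since it is illustrative only.  It
   assumes a plain TBI configuration (no pauth feature in the target
   description); the tag value and address below are made up.  */
#if 0
  CORE_ADDR tagged = ((CORE_ADDR) 0x2a << 56) | 0x0000aaaabbbbcccc;
  /* With the default AARCH64_TOP_BITS_MASK the top byte is discarded,
     giving back 0x0000aaaabbbbcccc.  */
  CORE_ADDR untagged = aarch64_remove_non_address_bits (tagged);
#endif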

/* Implementation of linux target ops method "low_stopped_data_address".  */

CORE_ADDR
aarch64_target::low_stopped_data_address ()
{
  siginfo_t siginfo;
  int pid, i;
  struct aarch64_debug_reg_state *state;

  pid = lwpid_of (current_thread);

  /* Get the siginfo.  */
  if (ptrace (PTRACE_GETSIGINFO, pid, NULL, &siginfo) != 0)
    return (CORE_ADDR) 0;

  /* Need to be a hardware breakpoint/watchpoint trap.  */
  if (siginfo.si_signo != SIGTRAP
      || (siginfo.si_code & 0xffff) != 0x0004 /* TRAP_HWBKPT */)
    return (CORE_ADDR) 0;

  /* Make sure to ignore the top byte, otherwise we may not recognize a
     hardware watchpoint hit.  The stopped data addresses coming from the
     kernel can potentially be tagged addresses.  */
  const CORE_ADDR addr_trap
    = aarch64_remove_non_address_bits ((CORE_ADDR) siginfo.si_addr);

  /* Check if the address matches any watched address.  */
  state = aarch64_get_debug_reg_state (pid_of (current_thread));
  for (i = aarch64_num_wp_regs - 1; i >= 0; --i)
    {
      const unsigned int offset
	= aarch64_watchpoint_offset (state->dr_ctrl_wp[i]);
      const unsigned int len = aarch64_watchpoint_length (state->dr_ctrl_wp[i]);
      const CORE_ADDR addr_watch = state->dr_addr_wp[i] + offset;
      const CORE_ADDR addr_watch_aligned = align_down (state->dr_addr_wp[i], 8);
      const CORE_ADDR addr_orig = state->dr_addr_orig_wp[i];

      if (state->dr_ref_count_wp[i]
	  && DR_CONTROL_ENABLED (state->dr_ctrl_wp[i])
	  && addr_trap >= addr_watch_aligned
	  && addr_trap < addr_watch + len)
	{
	  /* ADDR_TRAP reports the first address of the memory range
	     accessed by the CPU, regardless of what was the memory
	     range watched.  Thus, a large CPU access that straddles
	     the ADDR_WATCH..ADDR_WATCH+LEN range may result in an
	     ADDR_TRAP that is lower than the
	     ADDR_WATCH..ADDR_WATCH+LEN range.  E.g.:

	     addr: |   4   |   5   |   6   |   7   |   8   |
				   |---- range watched ----|
		   |----------- range accessed ------------|

	     In this case, ADDR_TRAP will be 4.

	     To match a watchpoint known to GDB core, we must never
	     report *ADDR_P outside of any ADDR_WATCH..ADDR_WATCH+LEN
	     range.  ADDR_WATCH <= ADDR_TRAP < ADDR_ORIG is a false
	     positive on kernels older than 4.10.  See PR.  */
	  return addr_orig;
	}
    }

  return (CORE_ADDR) 0;
}

/* Implementation of linux target ops method "low_stopped_by_watchpoint".  */

bool
aarch64_target::low_stopped_by_watchpoint ()
{
  return (low_stopped_data_address () != 0);
}

/* Fetch the thread-local storage pointer for libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
  return aarch64_ps_get_thread_area (ph, lwpid, idx, base,
				     is_64bit_tdesc ());
}

/* Implementation of linux target ops method "low_siginfo_fixup".  */

bool
aarch64_target::low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
				   int direction)
{
  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    {
      if (direction == 0)
	aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo *) inf,
					     native);
      else
	aarch64_siginfo_from_compat_siginfo (native,
					     (struct compat_siginfo *) inf);

      return true;
    }

  return false;
}

/* Implementation of linux target ops method "low_new_process".  */

arch_process_info *
aarch64_target::low_new_process ()
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  aarch64_init_debug_reg_state (&info->debug_reg_state);

  return info;
}

/* Implementation of linux target ops method "low_delete_process".  */

void
aarch64_target::low_delete_process (arch_process_info *info)
{
  xfree (info);
}

void
aarch64_target::low_new_thread (lwp_info *lwp)
{
  aarch64_linux_new_thread (lwp);
}

void
aarch64_target::low_delete_thread (arch_lwp_info *arch_lwp)
{
  aarch64_linux_delete_thread (arch_lwp);
}

/* Implementation of linux target ops method "low_new_fork".  */

void
aarch64_target::low_new_fork (process_info *parent,
			      process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernel before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     will inherit hardware debug registers from parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug registers mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug registers mirror will become zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}

/* Wrapper for aarch64_sve_regs_copy_to_reg_buf.  */

static void
aarch64_sve_regs_copy_to_regcache (struct regcache *regcache,
				   ATTRIBUTE_UNUSED const void *buf)
{
  /* BUF is unused here since we collect the data straight from a ptrace
     request in aarch64_sve_regs_copy_to_reg_buf, therefore bypassing
     gdbserver's own call to ptrace.  */

  int tid = lwpid_of (current_thread);

  /* Update the register cache.  aarch64_sve_regs_copy_to_reg_buf handles
     fetching the NT_ARM_SVE state from thread TID.  */
  aarch64_sve_regs_copy_to_reg_buf (tid, regcache);
}

/* Wrapper for aarch64_sve_regs_copy_from_reg_buf.  */

static void
aarch64_sve_regs_copy_from_regcache (struct regcache *regcache, void *buf)
{
  int tid = lwpid_of (current_thread);

  /* Update the thread SVE state.  aarch64_sve_regs_copy_from_reg_buf
     handles writing the SVE/FPSIMD state back to thread TID.  */
  aarch64_sve_regs_copy_from_reg_buf (tid, regcache);

  /* We need to return the expected data in BUF, so copy whatever the kernel
     already has to BUF.  */
  gdb::byte_vector sve_state = aarch64_fetch_sve_regset (tid);
  memcpy (buf, sve_state.data (), sve_state.size ());
}

/* Wrapper for aarch64_za_regs_copy_to_reg_buf, to help copying NT_ARM_ZA
   state from the thread (BUF) to the register cache.  */

static void
aarch64_za_regs_copy_to_regcache (struct regcache *regcache,
				  ATTRIBUTE_UNUSED const void *buf)
{
  /* BUF is unused here since we collect the data straight from a ptrace
     request, therefore bypassing gdbserver's own call to ptrace.  */
  int tid = lwpid_of (current_thread);

  int za_regnum = find_regno (regcache->tdesc, "za");
  int svg_regnum = find_regno (regcache->tdesc, "svg");
  int svcr_regnum = find_regno (regcache->tdesc, "svcr");

  /* Update the register cache.  aarch64_za_regs_copy_to_reg_buf handles
     fetching the NT_ARM_ZA state from thread TID.  */
  aarch64_za_regs_copy_to_reg_buf (tid, regcache, za_regnum, svg_regnum,
				   svcr_regnum);
}

/* Wrapper for aarch64_za_regs_copy_from_reg_buf, to help copying NT_ARM_ZA
   state from the register cache to the thread (BUF).  */

static void
aarch64_za_regs_copy_from_regcache (struct regcache *regcache, void *buf)
{
  int tid = lwpid_of (current_thread);

  int za_regnum = find_regno (regcache->tdesc, "za");
  int svg_regnum = find_regno (regcache->tdesc, "svg");
  int svcr_regnum = find_regno (regcache->tdesc, "svcr");

  /* Update the thread NT_ARM_ZA state.  aarch64_za_regs_copy_from_reg_buf
     handles writing the ZA state back to thread TID.  */
  aarch64_za_regs_copy_from_reg_buf (tid, regcache, za_regnum, svg_regnum,
				     svcr_regnum);

  /* We need to return the expected data in BUF, so copy whatever the kernel
     already has to BUF.  */

  /* Obtain a dump of ZA from ptrace.  */
  gdb::byte_vector za_state = aarch64_fetch_za_regset (tid);
  memcpy (buf, za_state.data (), za_state.size ());
}

/* Wrapper for aarch64_zt_regs_copy_to_reg_buf, to help copying NT_ARM_ZT
   state from the thread (BUF) to the register cache.  */

static void
aarch64_zt_regs_copy_to_regcache (struct regcache *regcache,
				  ATTRIBUTE_UNUSED const void *buf)
{
  /* BUF is unused here since we collect the data straight from a ptrace
     request, therefore bypassing gdbserver's own call to ptrace.  */
  int tid = lwpid_of (current_thread);

  int zt_regnum = find_regno (regcache->tdesc, "zt0");

  /* Update the register cache.  aarch64_zt_regs_copy_to_reg_buf handles
     fetching the NT_ARM_ZT state from thread TID.  */
  aarch64_zt_regs_copy_to_reg_buf (tid, regcache, zt_regnum);
}

/* Wrapper for aarch64_zt_regs_copy_from_reg_buf, to help copying NT_ARM_ZT
   state from the register cache to the thread (BUF).  */

static void
aarch64_zt_regs_copy_from_regcache (struct regcache *regcache, void *buf)
{
  int tid = lwpid_of (current_thread);

  int zt_regnum = find_regno (regcache->tdesc, "zt0");

  /* Update the thread NT_ARM_ZT state.  aarch64_zt_regs_copy_from_reg_buf
     handles writing the ZT state back to thread TID.  */
  aarch64_zt_regs_copy_from_reg_buf (tid, regcache, zt_regnum);

  /* We need to return the expected data in BUF, so copy whatever the kernel
     already has to BUF.  */

  /* Obtain a dump of NT_ARM_ZT from ptrace.  */
  gdb::byte_vector zt_state = aarch64_fetch_zt_regset (tid);
  memcpy (buf, zt_state.data (), zt_state.size ());
}

/* Array containing all the possible register sets for AArch64/Linux.  During
   architecture setup, these will be checked against the HWCAP/HWCAP2 bits for
   validity and enabled/disabled accordingly.

   Their sizes are set to 0 here, but they will be adjusted later depending
   on whether each register set is available or not.  */
static struct regset_info aarch64_regsets[] =
{
  /* GPR registers.  */
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    0, GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  /* Floating Point (FPU) registers.  */
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_FPREGSET,
    0, FP_REGS,
    aarch64_fill_fpregset, aarch64_store_fpregset },
  /* Scalable Vector Extension (SVE) registers.  */
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_SVE,
    0, EXTENDED_REGS,
    aarch64_sve_regs_copy_from_regcache, aarch64_sve_regs_copy_to_regcache },
  /* Scalable Matrix Extension (SME) ZA register.  */
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_ZA,
    0, EXTENDED_REGS,
    aarch64_za_regs_copy_from_regcache, aarch64_za_regs_copy_to_regcache },
  /* Scalable Matrix Extension 2 (SME2) ZT registers.  */
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_ZT,
    0, EXTENDED_REGS,
    aarch64_zt_regs_copy_from_regcache, aarch64_zt_regs_copy_to_regcache },
  /* PAC registers.  */
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    0, OPTIONAL_REGS,
    nullptr, aarch64_store_pauthregset },
  /* Tagged address control / MTE registers.  */
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_TAGGED_ADDR_CTRL,
    0, OPTIONAL_REGS,
    aarch64_fill_mteregset, aarch64_store_mteregset },
  /* TLS register.  */
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_TLS,
    0, OPTIONAL_REGS,
    aarch64_fill_tlsregset, aarch64_store_tlsregset },
  NULL_REGSET
};

static struct regsets_info aarch64_regsets_info =
  {
    aarch64_regsets, /* regsets */
    0, /* num_regsets */
    nullptr, /* disabled_regsets */
  };

static struct regs_info regs_info_aarch64 =
  {
    nullptr, /* regset_bitmap */
    nullptr, /* usrregs */
    &aarch64_regsets_info,
  };

/* Given FEATURES, adjust the available register sets by setting their
   sizes.  A size of 0 means the register set is disabled and won't be
   used.  */

static void
aarch64_adjust_register_sets (const struct aarch64_features &features)
{
  struct regset_info *regset;

  for (regset = aarch64_regsets; regset->size >= 0; regset++)
    {
      switch (regset->nt_type)
	{
	case NT_PRSTATUS:
	  /* General purpose registers are always present.  */
	  regset->size = sizeof (struct user_pt_regs);
	  break;
	case NT_FPREGSET:
	  /* This is unavailable when SVE is present.  */
	  if (features.vq == 0)
	    regset->size = sizeof (struct user_fpsimd_state);
	  break;
	case NT_ARM_SVE:
	  if (features.vq > 0)
	    regset->size = SVE_PT_SIZE (AARCH64_MAX_SVE_VQ, SVE_PT_REGS_SVE);
	  break;
	case NT_ARM_PAC_MASK:
	  if (features.pauth)
	    regset->size = AARCH64_PAUTH_REGS_SIZE;
	  break;
	case NT_ARM_TAGGED_ADDR_CTRL:
	  if (features.mte)
	    regset->size = AARCH64_LINUX_SIZEOF_MTE;
	  break;
	case NT_ARM_TLS:
	  if (features.tls > 0)
	    regset->size = AARCH64_TLS_REGISTER_SIZE * features.tls;
	  break;
	case NT_ARM_ZA:
	  if (features.svq > 0)
	    regset->size = ZA_PT_SIZE (features.svq);
	  break;
	case NT_ARM_ZT:
	  if (features.sme2)
	    regset->size = AARCH64_SME2_ZT0_SIZE;
	  break;
	default:
	  gdb_assert_not_reached ("Unknown register set found.");
	}
    }
}

/* Matches HWCAP_PACA in kernel header arch/arm64/include/uapi/asm/hwcap.h.  */
#define AARCH64_HWCAP_PACA (1 << 30)

/* Implementation of linux target ops method "low_arch_setup".  */

void
aarch64_target::low_arch_setup ()
{
  unsigned int machine;
  int is_elf64;
  int tid;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (is_elf64)
    {
      struct aarch64_features features;
      int pid = current_thread->id.pid ();

      features.vq = aarch64_sve_get_vq (tid);
      /* A-profile PAC is 64-bit only.  */
      features.pauth = linux_get_hwcap (pid, 8) & AARCH64_HWCAP_PACA;
      /* A-profile MTE is 64-bit only.  */
      features.mte = linux_get_hwcap2 (pid, 8) & HWCAP2_MTE;
      features.tls = aarch64_tls_register_count (tid);

      /* Scalable Matrix Extension feature and size check.  */
      if (linux_get_hwcap2 (pid, 8) & HWCAP2_SME)
	features.svq = aarch64_za_get_svq (tid);

      /* Scalable Matrix Extension 2 feature check.  */
      CORE_ADDR hwcap2 = linux_get_hwcap2 (pid, 8);
      if ((hwcap2 & HWCAP2_SME2) || (hwcap2 & HWCAP2_SME2P1))
	{
	  /* Make sure ptrace supports NT_ARM_ZT.  */
	  features.sme2 = supports_zt_registers (tid);
	}

      current_process ()->tdesc = aarch64_linux_read_description (features);

      /* Adjust the register sets we should use for this particular set of
	 features.  */
      aarch64_adjust_register_sets (features);
    }
  else
    current_process ()->tdesc = aarch32_linux_read_description ();

  aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread));
}

/* Implementation of linux target ops method "get_regs_info".  */

const regs_info *
aarch64_target::get_regs_info ()
{
  if (!is_64bit_tdesc ())
    return &regs_info_aarch32;

  /* AArch64 64-bit registers.  */
  return &regs_info_aarch64;
}

/* Implementation of target ops method "supports_tracepoints".  */

bool
aarch64_target::supports_tracepoints ()
{
  if (current_thread == NULL)
    return true;
  else
    {
      /* We don't support tracepoints on aarch32 now.  */
      return is_64bit_tdesc ();
    }
}

/* Implementation of linux target ops method "low_get_thread_area".  */

int
aarch64_target::low_get_thread_area (int lwpid, CORE_ADDR *addrp)
{
  struct iovec iovec;
  uint64_t reg;

  iovec.iov_base = &reg;
  iovec.iov_len = sizeof (reg);

  if (ptrace (PTRACE_GETREGSET, lwpid, NT_ARM_TLS, &iovec) != 0)
    return -1;

  *addrp = reg;

  return 0;
}

/* Implementation of linux target ops method "low_supports_catch_syscall".  */

bool
aarch64_target::low_supports_catch_syscall ()
{
  return true;
}

/* Implementation of linux target ops method "low_get_syscall_trapinfo".  */

void
aarch64_target::low_get_syscall_trapinfo (regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "x8", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "r7", sysno);
}

/* List of condition codes that we need.  */

enum aarch64_condition_codes
{
  EQ = 0x0,
  NE = 0x1,
  LO = 0x3,
  GE = 0xa,
  LT = 0xb,
  GT = 0xc,
  LE = 0xd,
};

enum aarch64_operand_type
{
  OPERAND_IMMEDIATE,
  OPERAND_REGISTER,
};

/* Representation of an operand.  At this time, it only supports register
   and immediate types.  */

struct aarch64_operand
{
  /* Type of the operand.  */
  enum aarch64_operand_type type;

  /* Value of the operand according to the type.  */
  union
    {
      uint32_t imm;
      struct aarch64_register reg;
    };
};

/* List of registers that we are currently using, we can add more here as
   we need to use them.  */

/* General purpose scratch registers (64 bit).  */
static const struct aarch64_register x0 = { 0, 1 };
static const struct aarch64_register x1 = { 1, 1 };
static const struct aarch64_register x2 = { 2, 1 };
static const struct aarch64_register x3 = { 3, 1 };
static const struct aarch64_register x4 = { 4, 1 };

/* General purpose scratch registers (32 bit).  */
static const struct aarch64_register w0 = { 0, 0 };
static const struct aarch64_register w2 = { 2, 0 };

/* Intra-procedure scratch registers.  */
static const struct aarch64_register ip0 = { 16, 1 };

/* Special purpose registers.  */
static const struct aarch64_register fp = { 29, 1 };
static const struct aarch64_register lr = { 30, 1 };
static const struct aarch64_register sp = { 31, 1 };
static const struct aarch64_register xzr = { 31, 1 };

/* Dynamically allocate a new register.  If we know the register
   statically, we should make it a global as above instead of using this
   function.  */

static struct aarch64_register
aarch64_register (unsigned num, int is64)
{
  return (struct aarch64_register) { num, is64 };
}

/* Helper function to create a register operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, register_operand (x1));  */

static struct aarch64_operand
register_operand (struct aarch64_register reg)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_REGISTER;
  operand.reg = reg;

  return operand;
}

/* Helper function to create an immediate operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, immediate_operand (12));  */

static struct aarch64_operand
immediate_operand (uint32_t imm)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_IMMEDIATE;
  operand.imm = imm;

  return operand;
}

/* Helper function to create an offset memory operand.

   For example:
   p += emit_ldr (p, x0, sp, offset_memory_operand (16));  */

static struct aarch64_memory_operand
offset_memory_operand (int32_t offset)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_OFFSET, offset };
}

/* Helper function to create a pre-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, preindex_memory_operand (16));  */

static struct aarch64_memory_operand
preindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_PREINDEX, index };
}

/* Helper function to create a post-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, postindex_memory_operand (16));  */

static struct aarch64_memory_operand
postindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_POSTINDEX, index };
}

/* System control registers.  These special registers can be written and
   read with the MRS and MSR instructions.

   - NZCV: Condition flags.  GDB refers to this register under the CPSR
     name.
   - FPSR: Floating-point status register.
   - FPCR: Floating-point control registers.
   - TPIDR_EL0: Software thread ID register.  */

enum aarch64_system_control_registers
{
  /*          op0           op1           crn          crm          op2  */
  NZCV =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
  FPSR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
  FPCR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
  TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
};
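
/* Sanity sketch, compiled out since it is illustrative only: the five
   fields above pack into the 15-bit system-register operand of MRS/MSR,
   e.g. NZCV works out to 0x5a10 and TPIDR_EL0 to 0x5e82.  */
#if 0
static_assert (NZCV == 0x5a10, "NZCV encoding");
static_assert (TPIDR_EL0 == 0x5e82, "TPIDR_EL0 encoding");
#endif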

/* Write a BLR instruction into *BUF.

     BLR rn

   RN is the register to branch to.  */

static int
emit_blr (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, BLR | ENCODE (rn.num, 5, 5));
}

/* Write a RET instruction into *BUF.

     RET xn

   RN is the register to branch to.  */

static int
emit_ret (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, RET | ENCODE (rn.num, 5, 5));
}

static int
emit_load_store_pair (uint32_t *buf, enum aarch64_opcodes opcode,
		      struct aarch64_register rt,
		      struct aarch64_register rt2,
		      struct aarch64_register rn,
		      struct aarch64_memory_operand operand)
{
  uint32_t opc;
  uint32_t pre_index;
  uint32_t write_back;

  if (rt.is64)
    opc = ENCODE (2, 2, 30);
  else
    opc = ENCODE (0, 2, 30);

  switch (operand.type)
    {
    case MEMORY_OPERAND_OFFSET:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (0, 1, 23);
	break;
      }
    case MEMORY_OPERAND_POSTINDEX:
      {
	pre_index = ENCODE (0, 1, 24);
	write_back = ENCODE (1, 1, 23);
	break;
      }
    case MEMORY_OPERAND_PREINDEX:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (1, 1, 23);
	break;
      }
    default:
      return 0;
    }

  return aarch64_emit_insn (buf, opcode | opc | pre_index | write_back
			    | ENCODE (operand.index >> 3, 7, 15)
			    | ENCODE (rt2.num, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}

/* Write a STP instruction into *BUF.

     STP rt, rt2, [rn, #offset]
     STP rt, rt2, [rn, #index]!
     STP rt, rt2, [rn], #index

   RT and RT2 are the registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_stp (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rt2, struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, STP, rt, rt2, rn, operand);
}

/* Write a LDP instruction into *BUF.

     LDP rt, rt2, [rn, #offset]
     LDP rt, rt2, [rn, #index]!
     LDP rt, rt2, [rn], #index

   RT and RT2 are the registers to load into.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_ldp (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rt2, struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, LDP, rt, rt2, rn, operand);
}
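
/* Usage sketch, compiled out since it is illustrative only: push the pair
   x0/x1 with pre-indexed addressing, i.e. "STP x0, x1, [sp, #-16]!", then
   pop it back with post-indexed addressing, i.e. "LDP x0, x1, [sp], #16".  */
#if 0
  uint32_t insns[2];
  emit_stp (insns, x0, x1, sp, preindex_memory_operand (-16));
  emit_ldp (insns + 1, x0, x1, sp, postindex_memory_operand (16));
#endif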

/* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.

     LDP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to load into.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_ldp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
		   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, LDP_SIMD_VFP | opc | pre_index
			    | ENCODE (offset >> 4, 7, 15)
			    | ENCODE (rt2, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.

     STP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_stp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
		   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, STP_SIMD_VFP | opc | pre_index
			    | ENCODE (offset >> 4, 7, 15)
			    | ENCODE (rt2, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a LDRH instruction into *BUF.

     LDRH wt, [xn, #offset]
     LDRH wt, [xn, #index]!
     LDRH wt, [xn], #index

   RT is the destination register.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   a 0 .. 8190 range (12 bits << 1).  */

static int
emit_ldrh (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn,
	   struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 1, LDR, rt, rn, operand);
}

/* Write a LDRB instruction into *BUF.

     LDRB wt, [xn, #offset]
     LDRB wt, [xn, #index]!
     LDRB wt, [xn], #index

   RT is the destination register.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   a 0 .. 4095 range (12 bits).  */

static int
emit_ldrb (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn,
	   struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 0, LDR, rt, rn, operand);
}

/* Write a STR instruction into *BUF.

     STR rt, [rn, #offset]
     STR rt, [rn, #index]!
     STR rt, [rn], #index

   RT is the register to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_str (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, rt.is64 ? 3 : 2, STR, rt, rn, operand);
}

/* Helper function emitting an exclusive load or store instruction.  */

static int
emit_load_store_exclusive (uint32_t *buf, uint32_t size,
			   enum aarch64_opcodes opcode,
			   struct aarch64_register rs,
			   struct aarch64_register rt,
			   struct aarch64_register rt2,
			   struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, opcode | ENCODE (size, 2, 30)
			    | ENCODE (rs.num, 5, 16) | ENCODE (rt2.num, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}

/* Write a LDAXR instruction into *BUF.

     LDAXR rt, [xn]

   RT is the destination register.
   RN is the base address register.  */

static int
emit_ldaxr (uint32_t *buf, struct aarch64_register rt,
	    struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, LDAXR, xzr, rt,
				    xzr, rn);
}

/* Write a STXR instruction into *BUF.

     STXR ws, rt, [xn]

   RS is the result register, it indicates if the store succeeded or not.
   RT is the destination register.
   RN is the base address register.  */

static int
emit_stxr (uint32_t *buf, struct aarch64_register rs,
	   struct aarch64_register rt, struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STXR, rs, rt,
				    xzr, rn);
}

/* Write a STLR instruction into *BUF.

     STLR rt, [xn]

   RT is the register to store.
   RN is the base address register.  */

static int
emit_stlr (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STLR, xzr, rt,
				    xzr, rn);
}

/* Helper function for data processing instructions with register sources.  */

static int
emit_data_processing_reg (uint32_t *buf, uint32_t opcode,
			  struct aarch64_register rd,
			  struct aarch64_register rn,
			  struct aarch64_register rm)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, opcode | size | ENCODE (rm.num, 5, 16)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rd.num, 5, 0));
}

/* Helper function for data processing instructions taking either a register
   or an immediate as a source operand.  */

static int
emit_data_processing (uint32_t *buf, enum aarch64_opcodes opcode,
		      struct aarch64_register rd,
		      struct aarch64_register rn,
		      struct aarch64_operand operand)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  /* The opcode is different for register and immediate source operands.  */
  uint32_t operand_opcode;

  if (operand.type == OPERAND_IMMEDIATE)
    {
      /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (8, 4, 25);

      return aarch64_emit_insn (buf, opcode | operand_opcode | size
				| ENCODE (operand.imm, 12, 10)
				| ENCODE (rn.num, 5, 5)
				| ENCODE (rd.num, 5, 0));
    }
  else
    {
      /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (5, 4, 25);

      return emit_data_processing_reg (buf, opcode | operand_opcode, rd,
				       rn, operand.reg);
    }
}

/* Write an ADD instruction into *BUF.

     ADD rd, rn, #imm
     ADD rd, rn, rm

   This function handles both an immediate and register add.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_add (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, ADD, rd, rn, operand);
}

/* Write a SUB instruction into *BUF.

     SUB rd, rn, #imm
     SUB rd, rn, rm

   This function handles both an immediate and register sub.

   RD is the destination register.
   RN is the input register.
   OPERAND is the immediate or register to subtract from RN.  */

static int
emit_sub (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUB, rd, rn, operand);
}

/* Write a MOV instruction into *BUF.

     MOV rd, #imm
     MOV rd, rm

   This function handles both a wide immediate move and a register move,
   with the condition that the source register is not xzr.  xzr and the
   stack pointer share the same encoding and this function only supports
   the sp.

   RD is the destination register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_mov (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_operand operand)
{
  if (operand.type == OPERAND_IMMEDIATE)
    {
      uint32_t size = ENCODE (rd.is64, 1, 31);
      /* Do not shift the immediate.  */
      uint32_t shift = ENCODE (0, 2, 21);

      return aarch64_emit_insn (buf, MOV | size | shift
				| ENCODE (operand.imm, 16, 5)
				| ENCODE (rd.num, 5, 0));
    }
  else
    return emit_add (buf, rd, operand.reg, immediate_operand (0));
}

/* Write a MOVK instruction into *BUF.

     MOVK rd, #imm, lsl #shift

   RD is the destination register.
   IMM is the immediate.
   SHIFT is the logical shift left to apply to IMM.  */

static int
emit_movk (uint32_t *buf, struct aarch64_register rd, uint32_t imm,
	   unsigned shift)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, MOVK | size | ENCODE (shift, 2, 21) |
			    ENCODE (imm, 16, 5) | ENCODE (rd.num, 5, 0));
}

/* Write instructions into *BUF in order to move ADDR into a register.
   ADDR can be a 64-bit value.

   This function will emit a series of MOV and MOVK instructions, such as:

     MOV  xd, #(addr)
     MOVK xd, #(addr >> 16), lsl #16
     MOVK xd, #(addr >> 32), lsl #32
     MOVK xd, #(addr >> 48), lsl #48  */

static int
emit_mov_addr (uint32_t *buf, struct aarch64_register rd, CORE_ADDR addr)
{
  uint32_t *p = buf;

  /* The MOV (wide immediate) instruction clears the top bits of the
     register.  */
  p += emit_mov (p, rd, immediate_operand (addr & 0xffff));

  if ((addr >> 16) != 0)
    p += emit_movk (p, rd, (addr >> 16) & 0xffff, 1);
  else
    return p - buf;

  if ((addr >> 32) != 0)
    p += emit_movk (p, rd, (addr >> 32) & 0xffff, 2);
  else
    return p - buf;

  if ((addr >> 48) != 0)
    p += emit_movk (p, rd, (addr >> 48) & 0xffff, 3);

  return p - buf;
}
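
/* Worked example, compiled out since it is illustrative only: loading the
   made-up address 0x0000ffff12345678 into x0 needs three instructions,
   because the top 16 bits are zero:

     MOV  x0, #0x5678
     MOVK x0, #0x1234, lsl #16
     MOVK x0, #0xffff, lsl #32  */
#if 0
  uint32_t insns[4];
  int count = emit_mov_addr (insns, x0, 0x0000ffff12345678ULL);
  /* count == 3 here.  */
#endif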

/* Write a SUBS instruction into *BUF.

     SUBS rd, rn, rm

   This instruction updates the condition flags.

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_subs (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUBS, rd, rn, operand);
}

/* Write a CMP instruction into *BUF.

     CMP rn, rm

   This instruction is an alias of SUBS xzr, rn, rm.

   RN and RM are the registers to compare.  */

static int
emit_cmp (uint32_t *buf, struct aarch64_register rn,
	  struct aarch64_operand operand)
{
  return emit_subs (buf, xzr, rn, operand);
}

/* Write an AND instruction into *BUF.

     AND rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_and (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, AND, rd, rn, rm);
}

/* Write an ORR instruction into *BUF.

     ORR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orr (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORR, rd, rn, rm);
}

/* Write an ORN instruction into *BUF.

     ORN rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orn (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORN, rd, rn, rm);
}

/* Write an EOR instruction into *BUF.

     EOR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_eor (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, EOR, rd, rn, rm);
}

/* Write an MVN instruction into *BUF.

     MVN rd, rm

   This is an alias for ORN rd, xzr, rm.

   RD is the destination register.
   RM is the source register.  */

static int
emit_mvn (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rm)
{
  return emit_orn (buf, rd, xzr, rm);
}

/* Write an LSLV instruction into *BUF.

     LSLV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lslv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSLV, rd, rn, rm);
}

/* Write an LSRV instruction into *BUF.

     LSRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lsrv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSRV, rd, rn, rm);
}

/* Write an ASRV instruction into *BUF.

     ASRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_asrv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ASRV, rd, rn, rm);
}

/* Write a MUL instruction into *BUF.

     MUL rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_mul (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, MUL, rd, rn, rm);
}

/* Write an MRS instruction into *BUF.  The register size is 64-bit.

     MRS xt, system_reg

   RT is the destination register.
   SYSTEM_REG is the special purpose register to read.  */

static int
emit_mrs (uint32_t *buf, struct aarch64_register rt,
	  enum aarch64_system_control_registers system_reg)
{
  return aarch64_emit_insn (buf, MRS | ENCODE (system_reg, 15, 5)
			    | ENCODE (rt.num, 5, 0));
}

/* Write an MSR instruction into *BUF.  The register size is 64-bit.

     MSR system_reg, xt

   SYSTEM_REG is the special purpose register to write.
   RT is the input register.  */

static int
emit_msr (uint32_t *buf, enum aarch64_system_control_registers system_reg,
	  struct aarch64_register rt)
{
  return aarch64_emit_insn (buf, MSR | ENCODE (system_reg, 15, 5)
			    | ENCODE (rt.num, 5, 0));
}

/* Write a SEVL instruction into *BUF.

   This is a hint instruction telling the hardware to trigger an event.  */

static int
emit_sevl (uint32_t *buf)
{
  return aarch64_emit_insn (buf, SEVL);
}

/* Write a WFE instruction into *BUF.

   This is a hint instruction telling the hardware to wait for an event.  */

static int
emit_wfe (uint32_t *buf)
{
  return aarch64_emit_insn (buf, WFE);
}

/* Write a SBFM instruction into *BUF.

     SBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, sign extending the result.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_sbfm (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, SBFM | size | n | ENCODE (immr, 6, 16)
			    | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write a SBFX instruction into *BUF.

     SBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination, sign
   extending the result.  This is an alias for:

     SBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_sbfx (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_sbfm (buf, rd, rn, lsb, lsb + width - 1);
}

/* Write a UBFM instruction into *BUF.

     UBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, extending the result with zeros.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_ubfm (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, UBFM | size | n | ENCODE (immr, 6, 16)
			    | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write a UBFX instruction into *BUF.

     UBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination,
   extending the result with zeros.  This is an alias for:

     UBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_ubfx (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_ubfm (buf, rd, rn, lsb, lsb + width - 1);
}
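
/* Worked example, compiled out since it is illustrative only: extracting
   bits [15:8] of x1 into x0 is UBFX x0, x1, #8, #8, which the alias above
   expands to UBFM x0, x1, #8, #15 (immr = lsb, imms = lsb + width - 1).  */
#if 0
  uint32_t insns[1];
  emit_ubfx (insns, x0, x1, 8, 8);
#endif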

/* Write a CSINC instruction into *BUF.

     CSINC rd, rn, rm, cond

   This instruction conditionally increments rn or rm and places the result
   in rd.  rn is chosen if the condition is true.

   RD is the destination register.
   RN and RM are the source registers.
   COND is the encoded condition.  */

static int
emit_csinc (uint32_t *buf, struct aarch64_register rd,
	    struct aarch64_register rn, struct aarch64_register rm,
	    unsigned cond)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, CSINC | size | ENCODE (rm.num, 5, 16)
			    | ENCODE (cond, 4, 12) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write a CSET instruction into *BUF.

     CSET rd, cond

   This instruction conditionally writes 1 or 0 in the destination register.
   1 is written if the condition is true.  This is an alias for:

     CSINC rd, xzr, xzr, !cond

   Note that the condition needs to be inverted.

   RD is the destination register.
   COND is the encoded condition.  */

static int
emit_cset (uint32_t *buf, struct aarch64_register rd, unsigned cond)
{
  /* The least significant bit of the condition needs toggling in order to
     invert it.  */
  return emit_csinc (buf, rd, xzr, xzr, cond ^ 0x1);
}
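
/* Example, compiled out since it is illustrative only: emit_cset (buf, x0,
   EQ) encodes CSINC x0, xzr, xzr, NE; toggling the lowest condition bit
   (EQ = 0x0 -> NE = 0x1) is what performs the inversion.  */
#if 0
  uint32_t insns[1];
  emit_cset (insns, x0, EQ);
#endif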

/* Write LEN instructions from BUF into the inferior memory at *TO.

   Note instructions are always little endian on AArch64, unlike data.  */

static void
append_insns (CORE_ADDR *to, size_t len, const uint32_t *buf)
{
  size_t byte_len = len * sizeof (uint32_t);
#if (__BYTE_ORDER == __BIG_ENDIAN)
  uint32_t *le_buf = (uint32_t *) xmalloc (byte_len);
  size_t i;

  for (i = 0; i < len; i++)
    le_buf[i] = htole32 (buf[i]);

  target_write_memory (*to, (const unsigned char *) le_buf, byte_len);

  xfree (le_buf);
#else
  target_write_memory (*to, (const unsigned char *) buf, byte_len);
#endif

  *to += byte_len;
}
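
/* Usage sketch, compiled out since it is illustrative only; the target
   address below is a placeholder.  Emit a short sequence into a local
   buffer, then copy it into the inferior; append_insns advances TO past
   the bytes written.  */
#if 0
  uint32_t insns[16];
  uint32_t *p = insns;
  CORE_ADDR to = 0 /* placeholder inferior code address */;

  p += emit_mov (p, x0, immediate_operand (1));
  p += emit_ret (p, lr);
  append_insns (&to, p - insns, insns);
#endif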

/* Sub-class of struct aarch64_insn_data, store information of
   instruction relocation for fast tracepoint.  Visitor can
   relocate an instruction from BASE.INSN_ADDR to NEW_ADDR and save
   the relocated instructions in buffer pointed by INSN_PTR.  */

struct aarch64_insn_relocation_data
{
  struct aarch64_insn_data base;

  /* The new address the instruction is relocated to.  */
  CORE_ADDR new_addr;

  /* Pointer to the buffer of relocated instruction(s).  */
  uint32_t *insn_ptr;
};

/* Implementation of aarch64_insn_visitor method "b".  */

static void
aarch64_ftrace_insn_reloc_b (const int is_bl, const int32_t offset,
			     struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 28))
    insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, is_bl, new_offset);
}
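
/* For illustration: if the original B sits at 0x401000 with a branch
   offset of +0x40 (target 0x401040) and is relocated to a jump pad at
   0x500000, then new_offset = 0x401000 - 0x500000 + 0x40 = -0xfefc0, and
   a branch by that offset from 0x500000 still lands on 0x401040.  The
   addresses are made up for the example.  */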

/* Implementation of aarch64_insn_visitor method "b_cond".  */

static void
aarch64_ftrace_insn_reloc_b_cond (const unsigned cond, const int32_t offset,
				  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond,
					  new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a conditional branch
	 instruction but not for an unconditional branch.  We can use
	 the following instructions instead:

	   B.COND TAKEN    ; If cond is true, then jump to TAKEN.
	   B NOT_TAKEN     ; Else jump over TAKEN and continue.
	 TAKEN:
	   B #(offset - 8)
	 NOT_TAKEN:
      */

      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
    }
}

/* Implementation of aarch64_insn_visitor method "cb".  */

static void
aarch64_ftrace_insn_reloc_cb (const int32_t offset, const int is_cbnz,
			      const unsigned rn, int is64,
			      struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
				       aarch64_register (rn, is64), new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a compare and branch
	 instruction but not for an unconditional branch.  We can use
	 the following instructions instead:

	   CBZ xn, TAKEN   ; xn == 0, then jump to TAKEN.
	   B NOT_TAKEN     ; Else jump over TAKEN and continue.
	 TAKEN:
	   B #(offset - 8)
	 NOT_TAKEN:
      */

      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
				       aarch64_register (rn, is64), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
    }
}
/* Implementation of aarch64_insn_visitor method "tb".  */

static void
aarch64_ftrace_insn_reloc_tb (const int32_t offset, int is_tbnz,
			      const unsigned rt, unsigned bit,
			      struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 16))
    {
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
				       aarch64_register (rt, 1), new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a test bit and branch
	 instruction but not for an unconditional branch.  We can use
	 the following instructions instead:

	 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
	 B NOT_TAKEN         ; Else jump over TAKEN and continue.
	 TAKEN:
	 B #(offset - 8)
	 NOT_TAKEN:

      */

      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
				       aarch64_register (rt, 1), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
				      new_offset - 8);
    }
}
/* Implementation of aarch64_insn_visitor method "adr".  */

static void
aarch64_ftrace_insn_reloc_adr (const int32_t offset, const unsigned rd,
			       const int is_adrp,
			       struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  /* We know exactly the address the ADR{P,} instruction will compute.
     We can just write it to the destination register.  */
  CORE_ADDR address = data->insn_addr + offset;

  if (is_adrp)
    {
      /* Clear the lower 12 bits of the offset to get the 4K page.  */
      insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
					     aarch64_register (rd, 1),
					     address & ~0xfff);
    }
  else
    insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
					   aarch64_register (rd, 1), address);
}
/* Implementation of aarch64_insn_visitor method "ldr_literal".  */

static void
aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset, const int is_sw,
				       const unsigned rt, const int is64,
				       struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  CORE_ADDR address = data->insn_addr + offset;

  insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
					 aarch64_register (rt, 1), address);

  /* We know exactly what address to load from, and what register we
     can use:

     MOV xd, #(oldloc + offset)
     MOVK xd, #((oldloc + offset) >> 16), lsl #16
     ...

     LDR xd, [xd] ; or LDRSW xd, [xd]

  */

  if (is_sw)
    insn_reloc->insn_ptr += emit_ldrsw (insn_reloc->insn_ptr,
					aarch64_register (rt, 1),
					aarch64_register (rt, 1),
					offset_memory_operand (0));
  else
    insn_reloc->insn_ptr += emit_ldr (insn_reloc->insn_ptr,
				      aarch64_register (rt, is64),
				      aarch64_register (rt, 1),
				      offset_memory_operand (0));
}
/* Implementation of aarch64_insn_visitor method "others".  */

static void
aarch64_ftrace_insn_reloc_others (const uint32_t insn,
				  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;

  /* The instruction is not PC relative.  Just re-emit it at the new
     location.  */
  insn_reloc->insn_ptr += aarch64_emit_insn (insn_reloc->insn_ptr, insn);
}
static const struct aarch64_insn_visitor visitor =
{
  aarch64_ftrace_insn_reloc_b,
  aarch64_ftrace_insn_reloc_b_cond,
  aarch64_ftrace_insn_reloc_cb,
  aarch64_ftrace_insn_reloc_tb,
  aarch64_ftrace_insn_reloc_adr,
  aarch64_ftrace_insn_reloc_ldr_literal,
  aarch64_ftrace_insn_reloc_others,
};
/* Implementation of target ops method "supports_fast_tracepoints".  */

bool
aarch64_target::supports_fast_tracepoints ()
{
  return true;
}
/* Implementation of target ops method
   "install_fast_tracepoint_jump_pad".  */

int
aarch64_target::install_fast_tracepoint_jump_pad
  (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
   CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
   CORE_ADDR *trampoline, ULONGEST *trampoline_size,
   unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
   CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
   char *err)
{
  uint32_t buf[256];
  uint32_t *p = buf;
  int64_t offset;
  uint32_t insn;
  int i;
  CORE_ADDR buildaddr = *jump_entry;
  struct aarch64_insn_relocation_data insn_data;
  /* We need to save the current state on the stack both to restore it
     later and to collect register values when the tracepoint is hit.

     The saved registers are pushed in a layout that needs to be in sync
     with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c).  Later on
     the supply_fast_tracepoint_registers function will fill in the
     register cache from a pointer to saved registers on the stack we build
     here.

     For simplicity, we set the size of each cell on the stack to 16 bytes.
     This way one cell can hold any register type, from system registers
     to the 128 bit SIMD&FP registers.  Furthermore, the stack pointer
     has to be 16 bytes aligned anyway.

     Note that the CPSR register does not exist on AArch64.  Instead we
     can access system bits describing the process state with the
     MRS/MSR instructions, namely the condition flags.  We save them as
     if they are part of a CPSR register because that's how GDB
     interprets these system bits.  At the moment, only the condition
     flags are saved in CPSR (NZCV).

     Stack layout, each cell is 16 bytes (descending):

     High *-------- SIMD&FP registers from 31 down to 0. --------*
	  | q31                                                  |
	  .                                                      .
	  .                                                      . 32 cells
	  .                                                      .
	  | q0                                                   |
	  *---- General purpose registers from 30 down to 0. ----*
	  | x30                                                  |
	  .                                                      .
	  .                                                      . 31 cells
	  .                                                      .
	  | x0                                                   |
	  *------------- Special purpose registers. -------------*
	  | SP                                                   |
	  | PC                                                   |
	  | CPSR (NZCV)                                          | 5 cells
	  | FPSR                                                 |
	  | FPCR                                                 | <- SP + 16
	  *------------- collecting_t object --------------------*
	  | TPIDR_EL0                  | struct tracepoint *     |
     Low  *------------------------------------------------------*

     After this stack is set up, we issue a call to the collector, passing
     it the saved registers at (SP + 16).  */
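  /* For reference (a note derived from the SUB/STP adjustments below,
     not part of the original sequence): the register area occupies
     (32 + 31 + 5) * 16 = 1088 bytes, plus 16 bytes for the collecting_t
     object, so the pad claims 1104 bytes of inferior stack in total.  */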
  /* Push SIMD&FP registers on the stack:

       SUB sp, sp, #(32 * 16)

       STP q30, q31, [sp, #(30 * 16)]
       ...
       STP q0, q1, [sp]

     */
  p += emit_sub (p, sp, sp, immediate_operand (32 * 16));
  for (i = 30; i >= 0; i -= 2)
    p += emit_stp_q_offset (p, i, i + 1, sp, i * 16);

  /* Push general purpose registers on the stack.  Note that we do not need
     to push x31 as it represents the xzr register and not the stack
     pointer in a STR instruction.

       SUB sp, sp, #(31 * 16)

       STR x30, [sp, #(30 * 16)]
       ...
       STR x0, [sp]

     */
  p += emit_sub (p, sp, sp, immediate_operand (31 * 16));
  for (i = 30; i >= 0; i -= 1)
    p += emit_str (p, aarch64_register (i, 1), sp,
		   offset_memory_operand (i * 16));

  /* Make space for 5 more cells.

       SUB sp, sp, #(5 * 16)

     */
  p += emit_sub (p, sp, sp, immediate_operand (5 * 16));
  /* Save SP:

       ADD x4, sp, #((32 + 31 + 5) * 16)
       STR x4, [sp, #(4 * 16)]

     */
  p += emit_add (p, x4, sp, immediate_operand ((32 + 31 + 5) * 16));
  p += emit_str (p, x4, sp, offset_memory_operand (4 * 16));

  /* Save PC (tracepoint address):

       MOV  x3, #(tpaddr)
       ...

       STR x3, [sp, #(3 * 16)]

     */

  p += emit_mov_addr (p, x3, tpaddr);
  p += emit_str (p, x3, sp, offset_memory_operand (3 * 16));

  /* Save CPSR (NZCV), FPSR and FPCR:

       MRS x2, nzcv
       MRS x1, fpsr
       MRS x0, fpcr

       STR x2, [sp, #(2 * 16)]
       STR x1, [sp, #(1 * 16)]
       STR x0, [sp, #(0 * 16)]

     */
  p += emit_mrs (p, x2, NZCV);
  p += emit_mrs (p, x1, FPSR);
  p += emit_mrs (p, x0, FPCR);
  p += emit_str (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_str (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));

  /* Push the collecting_t object.  It consists of the address of the
     tracepoint and an ID for the current thread.  We get the latter by
     reading the tpidr_el0 system register.  It corresponds to the
     NT_ARM_TLS register accessible with ptrace.

       MOV x0, #(tpoint)
       ...

       MRS x1, tpidr_el0

       STP x0, x1, [sp, #-16]!

     */

  p += emit_mov_addr (p, x0, tpoint);
  p += emit_mrs (p, x1, TPIDR_EL0);
  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));
  /* Spin-lock:

     The shared memory for the lock is at lockaddr.  It will hold zero
     if no-one is holding the lock, otherwise it contains the address of
     the collecting_t object on the stack of the thread which acquired it.

     At this stage, the stack pointer points to this thread's collecting_t
     object.

     We use the following registers:
     - x0: Address of the lock.
     - x1: Pointer to collecting_t object.
     - x2: Scratch register.

       MOV x0, #(lockaddr)
       ...
       MOV x1, sp

       ; Trigger an event local to this core.  So the following WFE
       ; instruction is ignored.
       SEVL
     again:
       ; Wait for an event.  The event is triggered by either the SEVL
       ; or STLR instructions (store release).
       WFE

       ; Atomically read at lockaddr.  This marks the memory location as
       ; exclusive.  This instruction also has memory constraints which
       ; make sure all previous data reads and writes are done before
       ; executing it.
       LDAXR x2, [x0]

       ; Try again if another thread holds the lock.
       CBNZ x2, again

       ; We can lock it!  Write the address of the collecting_t object.
       ; This instruction will fail if the memory location is not marked
       ; as exclusive anymore.  If it succeeds, it will remove the
       ; exclusive mark on the memory location.  This way, if another
       ; thread executes this instruction before us, we will fail and try
       ; all over again.
       STXR w2, x1, [x0]
       CBNZ w2, again

     */

  p += emit_mov_addr (p, x0, lockaddr);
  p += emit_mov (p, x1, register_operand (sp));
  p += emit_sevl (p);
  p += emit_wfe (p);
  p += emit_ldaxr (p, x2, x0);
  p += emit_cb (p, 1, w2, -2 * 4);
  p += emit_stxr (p, w2, x1, x0);
  p += emit_cb (p, 1, x2, -4 * 4);
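  /* Illustrative sketch only: in C11-atomics terms the sequence above
     behaves roughly like

       do
	 old = atomic_load_explicit (lockaddr, memory_order_acquire);
       while (old != 0
	      || !atomic_compare_exchange_weak (lockaddr, &old, this_obj));

     where this_obj is the address of the collecting_t object.  The
     SEVL/WFE pair merely keeps the core idle while it waits instead of
     spinning at full speed.  */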
  /* Call collector (struct tracepoint *, unsigned char *):

       MOV x0, #(tpoint)
       ...

       ; Saved registers start after the collecting_t object.
       ADD x1, sp, #16

       ; We use an intra-procedure-call scratch register.
       MOV ip0, #(collector)
       ...

       ; And call back to C!
       BLR ip0

     */

  p += emit_mov_addr (p, x0, tpoint);
  p += emit_add (p, x1, sp, immediate_operand (16));

  p += emit_mov_addr (p, ip0, collector);
  p += emit_blr (p, ip0);

  /* Release the lock.

       MOV x0, #(lockaddr)
       ...

       ; This instruction is a normal store with memory ordering
       ; constraints.  Thanks to this we do not have to put a data
       ; barrier instruction to make sure all data read and writes are done
       ; before this instruction is executed.  Furthermore, this instruction
       ; will trigger an event, letting other threads know they can grab
       ; the lock.
       STLR xzr, [x0]

     */
  p += emit_mov_addr (p, x0, lockaddr);
  p += emit_stlr (p, xzr, x0);

  /* Free collecting_t object:

       ADD sp, sp, #16

     */
  p += emit_add (p, sp, sp, immediate_operand (16));

  /* Restore CPSR (NZCV), FPSR and FPCR.  And free all special purpose
     registers from the stack.

       LDR x2, [sp, #(2 * 16)]
       LDR x1, [sp, #(1 * 16)]
       LDR x0, [sp, #(0 * 16)]

       MSR NZCV, x2
       MSR FPSR, x1
       MSR FPCR, x0

       ADD sp, sp, #(5 * 16)

     */
  p += emit_ldr (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_ldr (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_ldr (p, x0, sp, offset_memory_operand (0 * 16));
  p += emit_msr (p, NZCV, x2);
  p += emit_msr (p, FPSR, x1);
  p += emit_msr (p, FPCR, x0);
  p += emit_add (p, sp, sp, immediate_operand (5 * 16));

  /* Pop general purpose registers:

       LDR x0, [sp]
       ...
       LDR x30, [sp, #(30 * 16)]

       ADD sp, sp, #(31 * 16)

     */
  for (i = 0; i <= 30; i += 1)
    p += emit_ldr (p, aarch64_register (i, 1), sp,
		   offset_memory_operand (i * 16));
  p += emit_add (p, sp, sp, immediate_operand (31 * 16));

  /* Pop SIMD&FP registers:

       LDP q0, q1, [sp]
       ...
       LDP q30, q31, [sp, #(30 * 16)]

       ADD sp, sp, #(32 * 16)

     */
  for (i = 0; i <= 30; i += 2)
    p += emit_ldp_q_offset (p, i, i + 1, sp, i * 16);
  p += emit_add (p, sp, sp, immediate_operand (32 * 16));
  /* Write the code into the inferior memory.  */
  append_insns (&buildaddr, p - buf, buf);

  /* Now emit the relocated instruction.  */
  *adjusted_insn_addr = buildaddr;
  target_read_uint32 (tpaddr, &insn);

  insn_data.base.insn_addr = tpaddr;
  insn_data.new_addr = buildaddr;
  insn_data.insn_ptr = buf;

  aarch64_relocate_instruction (insn, &visitor,
				(struct aarch64_insn_data *) &insn_data);

  /* We may not have been able to relocate the instruction.  */
  if (insn_data.insn_ptr == buf)
    {
      sprintf (err,
	       "E.Could not relocate instruction from %s to %s.",
	       core_addr_to_string_nz (tpaddr),
	       core_addr_to_string_nz (buildaddr));
      return 1;
    }
  else
    append_insns (&buildaddr, insn_data.insn_ptr - buf, buf);
  *adjusted_insn_addr_end = buildaddr;

  /* Go back to the start of the buffer.  */
  p = buf;

  /* Emit a branch back from the jump pad.  */
  offset = (tpaddr + orig_size - buildaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
	       "E.Jump back from jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
	       offset);
      return 1;
    }

  p += emit_b (p, 0, offset);
  append_insns (&buildaddr, p - buf, buf);

  /* Give the caller a branch instruction into the jump pad.  */
  offset = (*jump_entry - tpaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
	       "E.Jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
	       offset);
      return 1;
    }

  emit_b ((uint32_t *) jjump_pad_insn, 0, offset);
  *jjump_pad_insn_size = 4;

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
/* Helper function writing LEN instructions from START into
   current_insn_ptr.  */

static void
emit_ops_insns (const uint32_t *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  threads_debug_printf ("Adding %d instructions at %s",
			len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}
/* Pop a register from the stack.  */

static int
emit_pop (uint32_t *buf, struct aarch64_register rt)
{
  return emit_ldr (buf, rt, sp, postindex_memory_operand (1 * 16));
}
/* Push a register on the stack.  */

static int
emit_push (uint32_t *buf, struct aarch64_register rt)
{
  return emit_str (buf, rt, sp, preindex_memory_operand (-1 * 16));
}
/* Implementation of emit_ops method "emit_prologue".  */

static void
aarch64_emit_prologue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* This function emits a prologue for the following function prototype:

     enum eval_result_type f (unsigned char *regs,
			      ULONGEST *value);

     The first argument is a buffer of raw registers.  The second
     argument is the result of
     evaluating the expression, which will be set to whatever is on top of
     the stack at the end.

     The stack set up by the prologue is as such:

     High *------------------------------------------------------*
	  | LR                                                   |
	  | FP                                                   | <- FP
	  | x1  (ULONGEST *value)                                |
	  | x0  (unsigned char *regs)                            |
     Low  *------------------------------------------------------*

     As we are implementing a stack machine, each opcode can expand the
     stack so we never know how far we are from the data saved by this
     prologue.  In order to be able to refer to value and regs later, we save
     the current stack pointer in the frame pointer.  This way, it is not
     clobbered when calling C functions.

     Finally, throughout every operation, we are using register x0 as the
     top of the stack, and x1 as a scratch register.  */

  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 16));
  p += emit_str (p, lr, sp, offset_memory_operand (3 * 8));
  p += emit_str (p, fp, sp, offset_memory_operand (2 * 8));

  p += emit_add (p, fp, sp, immediate_operand (2 * 8));

  emit_ops_insns (buf, p - buf);
}
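/* A note on the layout above: since FP points at the saved FP cell,
   the saved "value" pointer lives at FP - 8 and the "regs" pointer at
   FP - 16.  aarch64_emit_epilogue and aarch64_emit_reg below recover
   them with an emit_sub from FP.  */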
/* Implementation of emit_ops method "emit_epilogue".  */

static void
aarch64_emit_epilogue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Store the result of the expression (x0) in *value.  */
  p += emit_sub (p, x1, fp, immediate_operand (1 * 8));
  p += emit_ldr (p, x1, x1, offset_memory_operand (0));
  p += emit_str (p, x0, x1, offset_memory_operand (0));

  /* Restore the previous state.  */
  p += emit_add (p, sp, fp, immediate_operand (2 * 8));
  p += emit_ldp (p, fp, lr, fp, offset_memory_operand (0));

  /* Return expr_eval_no_error.  */
  p += emit_mov (p, x0, immediate_operand (expr_eval_no_error));
  p += emit_ret (p, lr);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_add".  */

static void
aarch64_emit_add (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_add (p, x0, x1, register_operand (x0));

  emit_ops_insns (buf, p - buf);
}
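/* All the two-operand opcodes below follow this same shape: the top of
   the stack is cached in x0, so a binary operation pops the second
   operand into x1, combines it with x0, and leaves the result in x0.
   For instance, the bytecode for "a + b" evaluates a into x0, flushes
   it onto the stack, evaluates b into x0, then runs the pop/add pair
   emitted here.  */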
/* Implementation of emit_ops method "emit_sub".  */

static void
aarch64_emit_sub (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_sub (p, x0, x1, register_operand (x0));

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_mul".  */

static void
aarch64_emit_mul (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_mul (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_lsh".  */

static void
aarch64_emit_lsh (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lslv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_rsh_signed".  */

static void
aarch64_emit_rsh_signed (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_asrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_rsh_unsigned".  */

static void
aarch64_emit_rsh_unsigned (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lsrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_ext".  */

static void
aarch64_emit_ext (int arg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_sbfx (p, x0, x0, 0, arg);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_log_not".  */

static void
aarch64_emit_log_not (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* If the top of the stack is 0, replace it with 1.  Else replace it with
     0.  */

  p += emit_cmp (p, x0, immediate_operand (0));
  p += emit_cset (p, x0, EQ);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_bit_and".  */

static void
aarch64_emit_bit_and (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_and (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_bit_or".  */

static void
aarch64_emit_bit_or (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_orr (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_bit_xor".  */

static void
aarch64_emit_bit_xor (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_eor (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_bit_not".  */

static void
aarch64_emit_bit_not (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mvn (p, x0, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_equal".  */

static void
aarch64_emit_equal (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x0, register_operand (x1));
  p += emit_cset (p, x0, EQ);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_less_signed".  */

static void
aarch64_emit_less_signed (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  p += emit_cset (p, x0, LT);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_less_unsigned".  */

static void
aarch64_emit_less_unsigned (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  p += emit_cset (p, x0, LO);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_ref".  */

static void
aarch64_emit_ref (int size)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  switch (size)
    {
    case 1:
      p += emit_ldrb (p, w0, x0, offset_memory_operand (0));
      break;
    case 2:
      p += emit_ldrh (p, w0, x0, offset_memory_operand (0));
      break;
    case 4:
      p += emit_ldr (p, w0, x0, offset_memory_operand (0));
      break;
    case 8:
      p += emit_ldr (p, x0, x0, offset_memory_operand (0));
      break;
    default:
      /* Unknown size, bail on compilation.  */
      emit_error = 1;
      break;
    }

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_if_goto".  */

static void
aarch64_emit_if_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The Z flag is set or cleared here.  */
  p += emit_cmp (p, x0, immediate_operand (0));
  /* This instruction must not change the Z flag.  */
  p += emit_pop (p, x0);
  /* Branch over the next instruction if x0 == 0.  */
  p += emit_bcond (p, EQ, 8);

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_goto".  */

static void
aarch64_emit_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = 0;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "write_goto_address".  */

static void
aarch64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  uint32_t insn;

  emit_b (&insn, 0, to - from);
  append_insns (&from, 1, &insn);
}
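/* Taken together with aarch64_emit_goto and aarch64_emit_if_goto above:
   those methods emit a NOP and report its offset back through *offset_p;
   once the branch target is known, the bytecode compiler calls
   write_goto_address, which overwrites that NOP in inferior memory with
   a real B instruction computed from TO - FROM.  */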
/* Implementation of emit_ops method "emit_const".  */

static void
aarch64_emit_const (LONGEST num)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov_addr (p, x0, num);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_call".  */

static void
aarch64_emit_call (CORE_ADDR fn)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov_addr (p, ip0, fn);
  p += emit_blr (p, ip0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_reg".  */

static void
aarch64_emit_reg (int reg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Set x0 to unsigned char *regs.  */
  p += emit_sub (p, x0, fp, immediate_operand (2 * 8));
  p += emit_ldr (p, x0, x0, offset_memory_operand (0));
  p += emit_mov (p, x1, immediate_operand (reg));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (get_raw_reg_func_addr ());
}
/* Implementation of emit_ops method "emit_pop".  */

static void
aarch64_emit_pop (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_stack_flush".  */

static void
aarch64_emit_stack_flush (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_push (p, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_zero_ext".  */

static void
aarch64_emit_zero_ext (int arg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_ubfx (p, x0, x0, 0, arg);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_swap".  */

static void
aarch64_emit_swap (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_ldr (p, x1, sp, offset_memory_operand (0 * 16));
  p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
  p += emit_mov (p, x0, register_operand (x1));

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_stack_adjust".  */

static void
aarch64_emit_stack_adjust (int n)
{
  /* This is not needed with our design.  */
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_add (p, sp, sp, immediate_operand (n * 16));

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_int_call_1".  */

static void
aarch64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov (p, x0, immediate_operand (arg1));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (fn);
}
/* Implementation of emit_ops method "emit_void_call_2".  */

static void
aarch64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Push x0 on the stack.  */
  aarch64_emit_stack_flush ();

  /* Setup arguments for the function call:

     x0: arg1
     x1: top of the stack

       MOV x1, x0
       MOV x0, #arg1  */

  p += emit_mov (p, x1, register_operand (x0));
  p += emit_mov (p, x0, immediate_operand (arg1));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (fn);

  /* Restore x0.  */
  aarch64_emit_pop ();
}
/* Implementation of emit_ops method "emit_eq_goto".  */

static void
aarch64_emit_eq_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 != x1.  */
  p += emit_bcond (p, NE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_ne_goto".  */

static void
aarch64_emit_ne_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 == x1.  */
  p += emit_bcond (p, EQ, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_lt_goto".  */

static void
aarch64_emit_lt_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 >= x1.  */
  p += emit_bcond (p, GE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_le_goto".  */

static void
aarch64_emit_le_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 > x1.  */
  p += emit_bcond (p, GT, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_gt_goto".  */

static void
aarch64_emit_gt_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 <= x1.  */
  p += emit_bcond (p, LE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_ge_got".  */

static void
aarch64_emit_ge_got (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 < x1.  */
  p += emit_bcond (p, LT, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
static struct emit_ops aarch64_emit_ops_impl =
{
  aarch64_emit_prologue,
  aarch64_emit_epilogue,
  aarch64_emit_add,
  aarch64_emit_sub,
  aarch64_emit_mul,
  aarch64_emit_lsh,
  aarch64_emit_rsh_signed,
  aarch64_emit_rsh_unsigned,
  aarch64_emit_ext,
  aarch64_emit_log_not,
  aarch64_emit_bit_and,
  aarch64_emit_bit_or,
  aarch64_emit_bit_xor,
  aarch64_emit_bit_not,
  aarch64_emit_equal,
  aarch64_emit_less_signed,
  aarch64_emit_less_unsigned,
  aarch64_emit_ref,
  aarch64_emit_if_goto,
  aarch64_emit_goto,
  aarch64_write_goto_address,
  aarch64_emit_const,
  aarch64_emit_call,
  aarch64_emit_reg,
  aarch64_emit_pop,
  aarch64_emit_stack_flush,
  aarch64_emit_zero_ext,
  aarch64_emit_swap,
  aarch64_emit_stack_adjust,
  aarch64_emit_int_call_1,
  aarch64_emit_void_call_2,
  aarch64_emit_eq_goto,
  aarch64_emit_ne_goto,
  aarch64_emit_lt_goto,
  aarch64_emit_le_goto,
  aarch64_emit_gt_goto,
  aarch64_emit_ge_got,
};
/* Implementation of target ops method "emit_ops".  */

emit_ops *
aarch64_target::emit_ops ()
{
  return &aarch64_emit_ops_impl;
}
/* Implementation of target ops method
   "get_min_fast_tracepoint_insn_len".  */

int
aarch64_target::get_min_fast_tracepoint_insn_len ()
{
  return 4;
}
/* Implementation of linux target ops method "low_supports_range_stepping".  */

bool
aarch64_target::low_supports_range_stepping ()
{
  return true;
}
/* Implementation of target ops method "sw_breakpoint_from_kind".  */

const gdb_byte *
aarch64_target::sw_breakpoint_from_kind (int kind, int *size)
{
  if (is_64bit_tdesc ())
    {
      *size = aarch64_breakpoint_len;
      return aarch64_breakpoint;
    }
  else
    return arm_sw_breakpoint_from_kind (kind, size);
}
/* Implementation of target ops method "breakpoint_kind_from_pc".  */

int
aarch64_target::breakpoint_kind_from_pc (CORE_ADDR *pcptr)
{
  if (is_64bit_tdesc ())
    return aarch64_breakpoint_len;
  else
    return arm_breakpoint_kind_from_pc (pcptr);
}
/* Implementation of the target ops method
   "breakpoint_kind_from_current_state".  */

int
aarch64_target::breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
{
  if (is_64bit_tdesc ())
    return aarch64_breakpoint_len;
  else
    return arm_breakpoint_kind_from_current_state (pcptr);
}
/* Returns true if memory tagging is supported.  */

bool
aarch64_target::supports_memory_tagging ()
{
  if (current_thread == NULL)
    {
      /* We don't have any processes running, so don't attempt to
	 use linux_get_hwcap2 as it will try to fetch the current
	 thread id.  Instead, just fetch the auxv from the self
	 PID.  */
#ifdef HAVE_GETAUXVAL
      return (getauxval (AT_HWCAP2) & HWCAP2_MTE) != 0;
#else
      return true;
#endif
    }

  return (linux_get_hwcap2 (current_thread->id.pid (), 8) & HWCAP2_MTE) != 0;
}
/* Implementation of the target ops method "fetch_memtags".  */

bool
aarch64_target::fetch_memtags (CORE_ADDR address, size_t len,
			       gdb::byte_vector &tags, int type)
{
  /* Allocation tags are per-process, so any tid is fine.  */
  int tid = lwpid_of (current_thread);

  /* Allocation tag?  */
  if (type == static_cast <int> (aarch64_memtag_type::mte_allocation))
    return aarch64_mte_fetch_memtags (tid, address, len, tags);

  return false;
}
/* Implementation of the target ops method "store_memtags".  */

bool
aarch64_target::store_memtags (CORE_ADDR address, size_t len,
			       const gdb::byte_vector &tags, int type)
{
  /* Allocation tags are per-process, so any tid is fine.  */
  int tid = lwpid_of (current_thread);

  /* Allocation tag?  */
  if (type == static_cast <int> (aarch64_memtag_type::mte_allocation))
    return aarch64_mte_store_memtags (tid, address, len, tags);

  return false;
}
/* The linux target ops object.  */

linux_process_target *the_linux_target = &the_aarch64_target;

void
initialize_low_arch (void)
{
  initialize_low_arch_aarch32 ();

  initialize_regsets_info (&aarch64_regsets_info);
}